Ip camera using ffmpeg drawing on screen - c++

I'm using ffmpeg 1.2 to take video from an IP camera. I draw it on the screen, so I wonder: is there some event mechanism to know when it is time to call av_read_frame?
If I don't read frames as frequently as the camera delivers them, I get a segmentation fault in some malloc calls inside ffmpeg routines (video_get_buffer).
I also get a segmentation fault just from drawing on the screen.
This is called from the Render function on a 0-millisecond timer:
void BasicGLPane::DrawNextFrame()
{
    int f = 1;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &FrameFinished, &packet);
            // Did we get a video frame?
            if (FrameFinished)
            {
                f++;
                this->fram->Clear();
                // if (pFrame->pict_type == AV_PICTURE_TYPE_I) wxMessageBox("I cadr");
                if (pFrame->pict_type != AV_PICTURE_TYPE_I)
                    printMVMatrix(f, pFrame, pCodecCtx);
                pFrameRGB->linesize[0] = pCodecCtx->width * 3; // in case of rgb4 one plane
                sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                //glGenTextures(1, &VideoTexture);
                if ((*current_Vtex) == VideoTexture)
                    current_Vtex = &VideoTexture2;
                else
                    current_Vtex = &VideoTexture;
                glBindTexture(GL_TEXTURE_2D, (*current_Vtex));
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                //glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                //glDeleteTextures(1, &VideoTexture);
                GLenum err;
                while ((err = glGetError()) != GL_NO_ERROR)
                {
                    cerr << "OpenGL error: " << err << endl;
                }
                // av_free(buffer);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        if (f > 1) break;
    }
    //av_free(pFrameRGB);
}
The picture I get on the screen is strange (green quads and red lines are motion vectors of those quads)
http://i.stack.imgur.com/9HJ9t.png

If you are reading straight from the video device, you should read frames as soon as the camera has them; either query the camera hardware for its supported FPS, or get this information from the codec or stream. If no information is available, you have to guess.
It is suspicious that you crash when you don't read frames in time; the worst that should happen in that case is a dropped frame.
Depending on the camera's colorspace, you may also need to convert the frames before showing them on screen. I don't see you doing that.
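As a sketch of the "get it from the stream" route, using the FFmpeg 1.x API the question already uses (avg_frame_rate can be unset for some cameras, hence the fallbacks):

AVStream *st = pFormatCtx->streams[videoStream];
double fps = 0.0;
if (st->avg_frame_rate.num && st->avg_frame_rate.den)
    fps = av_q2d(st->avg_frame_rate);    // container-reported average frame rate
else if (st->r_frame_rate.num && st->r_frame_rate.den)
    fps = av_q2d(st->r_frame_rate);      // lowest rate consistent with the timestamps
if (fps <= 0.0)
    fps = 25.0;                          // no information available: guess
int frame_interval_ms = (int)(1000.0 / fps); // pace the render timer with this
// instead of calling DrawNextFrame() every 0 milliseconds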

Related

Save an opengl texture in tiff file from an other thread

I'm trying to save a texture to a TIFF file from another thread, but the only result I get is a white picture. I think it comes from the GL context (because one GL context cannot be current in several threads at once). That's why I've tried to create two GL contexts that share resources. But I still don't get the texture: I can't access it from the second OpenGL context. I'm trying to do this because, in the end, the texture will be a video stream from a camera.
Here is my context creation:
static PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
    1,                             // Version Number
    PFD_DRAW_TO_WINDOW |           // Format Must Support Window
    PFD_SUPPORT_OPENGL |           // Format Must Support OpenGL
    PFD_DOUBLEBUFFER,              // Must Support Double Buffering
    PFD_TYPE_RGBA,                 // Request An RGBA Format
    8,                             // Select Our Color Depth
    0, 0, 0, 0, 0, 0,              // Color Bits Ignored
    0,                             // No Alpha Buffer
    0,                             // Shift Bit Ignored
    0,                             // No Accumulation Buffer
    0, 0, 0, 0,                    // Accumulation Bits Ignored
    16,                            // 16Bit Z-Buffer (Depth Buffer)
    0,                             // No Stencil Buffer
    0,                             // No Auxiliary Buffer
    PFD_MAIN_PLANE,                // Main Drawing Layer
    0,                             // Reserved
    0, 0, 0                        // Layer Masks Ignored
};
GLuint PixelFormat;
// choose the pixel format
PixelFormat = ChoosePixelFormat(dc, &pfd);
// set the pixel format descriptor
SetPixelFormat(dc, PixelFormat, &pfd);
gl = wglCreateContext(dc);
gl2 = wglCreateContext(dc);
wglShareLists(gl, gl2);
wglMakeCurrent(dc, gl);
GLenum g = glewInit();
wglewInit();
loadImage();
rec = new Recorder(dc, gl2);
rec->Start_StopRecord(text, true);
Here is the code that saves to the TIFF file:
Recorder::Recorder(HDC &hdc, HGLRC &_gl)
{
    isStarted = false;
    dc = hdc;
    gl = _gl;
}

Recorder::~Recorder()
{
    if (isStarted) {
        isStarted = false;
        recordThread.join();
        CloseTifFile();
        delete mp_fileTifIn;
    }
}

void Recorder::Start_StopRecord(GLuint Texture, bool launched)
{
    if (launched) {
        if (isStarted) {
            wglMakeCurrent(dc, gl);
            isStarted = false;
            recordThread.join();
            CloseTifFile();
            pixels.release();
        }
        else {
            isStarted = true;
            //wglMakeCurrent(NULL, NULL);
            //RecordShot(&Texture);
            recordThread = std::thread(&Recorder::RecordShot, this, &Texture);
        }
    }
}

void Recorder::RecordShot(GLuint* texture)
{
    wglMakeCurrent(dc, gl);
    OpenTifFile(*texture);
    pixels = std::unique_ptr<int>(new int[width*height]);
    //while (isStarted) {
    WriteTif8Bits(*texture);
    WriteDirectory();
    //Sleep(16);
    //}
    pixels.release();
}

void Recorder::OpenTifFile(GLuint &Texture)
{
    char* filename = "../test3.tiff";
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
    mp_fileTifIn = TIFFOpen(filename, "w");
}

void Recorder::CloseTifFile()
{
    TIFFClose(mp_fileTifIn);
}

/*
 * Opens a subdirectory in the TIFF file (allows multiple pictures in one TIFF file)
 */
void Recorder::WriteDirectory()
{
    TIFFWriteDirectory(mp_fileTifIn);
}

void Recorder::WriteTif8Bits(GLuint &Texture)
{
    // Set up the TIFF configuration
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGEWIDTH, width);
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGELENGTH, height);
    TIFFSetField(mp_fileTifIn, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(mp_fileTifIn, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(mp_fileTifIn, TIFFTAG_ROWSPERSTRIP, TIFFDefaultStripSize(mp_fileTifIn, width));
    TIFFSetField(mp_fileTifIn, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    TIFFSetField(mp_fileTifIn, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
    glBindTexture(GL_TEXTURE_2D, Texture);
    assert(glGetError() == GL_NO_ERROR);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
    assert(glGetError() == GL_NO_ERROR);
    // Image reversal
    Reverse(pixels.get(), height, width);
    // Write one picture
    /*for (int row = 0; row < height; row++) {
        TIFFWriteScanline(mp_fileTifIn, pixels.get(), row, 0);
        lineChange(pixels.get(), width);
    }*/
    TIFFWriteEncodedStrip(mp_fileTifIn, 0, pixels.get(), height * width * sizeof(int));
}

void Recorder::lineChange(int* pointer, int width)
{
    pointer -= width;
}

void Recorder::Reverse(int* pointer, int height, int width)
{
    pointer += (height - 1) * width;
}
And here is the loadImage function:
int loadImage()
{
    wglMakeCurrent(dc, gl);
    cv::Mat image;
    image = cv::imread(std::string("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg"), CV_LOAD_IMAGE_COLOR);
    if (!image.data)
        return -1;
    cvNamedWindow("try", cv::WINDOW_AUTOSIZE);
    cv::imshow("try", image);
    cv::flip(image, image, 0);
    glGenTextures(1, &text);
    GLenum g = glGetError();
    glBindTexture(GL_TEXTURE_2D, text);
    assert(glGetError() == GL_NO_ERROR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.cols, image.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, image.ptr());
    return 0;
}
Here is a test project where I load a picture with OpenCV and try to save it from another thread: https://mega.nz/#!SBMUnJRI!dLC_l9hmCkhIDDUaygHuq4Kw2SKIuxRE7m19md74p0k
To run the project you need OpenCV and GLEW; libtiff is already packaged inside.
If you think something is missing from this post, please comment before downvoting; I'm following this question.
I finally solved my problem by doing all the OpenGL work in one thread: I retrieve the image and display it in one thread, and I save it in another, which doesn't need an OpenGL context.
Another thing that was confusing me was a bad configuration of
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, this->rec[0].pixels.get());
My textures are actually GL_TEXTURE_RECTANGLE_NV, which is why I sometimes got only a white picture.
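A sketch of the corrected readback for that case, assuming the texture really was created with the GL_TEXTURE_RECTANGLE_NV target; the target has to match at bind, query, and read time:

glBindTexture(GL_TEXTURE_RECTANGLE_NV, texture);
// the size queries must use the same target, otherwise width/height come back 0
glGetTexLevelParameteriv(GL_TEXTURE_RECTANGLE_NV, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_RECTANGLE_NV, 0, GL_TEXTURE_HEIGHT, &height);
glGetTexImage(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());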

Is GDI+ with OpenGL possible?

I tried to make GDI+ and OpenGL work together on one offscreen HDC. The current code is something like this (no Internet access):
1. create a GDI+ bitmap A
2. create a GDI+ graphics from A as B
3. get B's HDC
4. choose a proper pixel format for the HDC
5. create a GDI+ graphics from the HDC as C
6. create an OpenGL context from the HDC as D
7. draw with C and D
8. release the HDC to B
9. draw A to other graphics
C is needed because I found that after I release the HDC to B, it is no longer possible for OpenGL to change the bitmap.
To check that things work, I added a bitmap E by reading the GL pixels before step (9).
There are still problems in the current program:
After releasing the HDC, bitmap A loses its alpha information.
Bitmap E works fine with correct alpha, but GL_BGRA cannot read out the alpha, so I have to read GL_RGBA data and do a per-pixel conversion into the GDI+ color format.
Should I just use E, or is there another approach?
Example of the ReleaseHDC alpha loss:
Bitmap bitmap(100,100);
Graphics graphics(&bitmap);
HDC hdc=graphics.GetHDC();
Graphics graphics2(hdc);
graphics2.Clear(Color(128,255,0,0));
graphics2.Flush();
graphics.ReleaseHDC(hdc);
//draw bitmap to other graphics
Note: I just figured out that GDI+ doesn't actually use the alpha channel in the HDC; only the RGB channels are shared, so I wonder how it works with a BGRA target bitmap.
I think the best way to use OpenGL and GDI+ together is to generate a texture in GDI+, then load that into OpenGL.
The gist of this is:
void MakeTexture(GLuint& texId)
{
    Bitmap offscreen(512, 512, PixelFormat32bppARGB);
    Graphics gr(&offscreen);
    gr.Clear(Color(128, 255, 0, 0));
    Gdiplus::SolidBrush brush(Color(255, 0, 0, 255));
    Pen pen(Color(128, 255, 0, 0), 16.f);
    Font font(L"Arial", 48.f);
    Rect r(25, 25, 100, 100);
    gr.DrawRectangle(&pen, r);
    gr.DrawString(TEXT("TEST STRING"), -1, &font, PointF(50, 50), &brush);
    vector<DWORD> argb;
    GetBitsLockBits(offscreen, argb, 1);
    genTexture(texId, offscreen.GetWidth(), offscreen.GetHeight(), argb);
}

void GetBitsLockBits(Bitmap& bmp, vector<DWORD>& argb, bool invert = 0)
{
    BitmapData bmpData;
    RectF rectf;
    Unit unit;
    bmp.GetBounds(&rectf, &unit);
    Rect rect(rectf.X, rectf.Y, rectf.Width, rectf.Height);
    printf("Got rect %d %d %d %d\n", rect.X, rect.Y, rect.Width, rect.Height);
    bmp.LockBits(&rect, ImageLockModeRead, PixelFormat32bppARGB, &bmpData);
    printf("BMP has w=%d h=%d stride=%d\n", bmpData.Width, bmpData.Height, bmpData.Stride);
    argb.resize(bmpData.Width * bmpData.Height);
    if (invert)
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + (bmpData.Height - 1 - i) * bmpData.Width, bmpData.Width * 4);
    else if (bmpData.Stride == bmpData.Width * 4)
        // single memcpy only works when the rows are not padded; a padded bmp
        // would be read with pad bytes in between, hence the stride check
        memcpy(&argb[0], bmpData.Scan0, bmpData.Width * bmpData.Height * 4);
    else
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + i * bmpData.Width, bmpData.Width * 4);
    bmp.UnlockBits(&bmpData);
}

void genTexture(GLuint& texId, int w, int h, const vector<DWORD>& argb)
{
    glGenTextures(1, &texId); CHECK_GL;
    glBindTexture(GL_TEXTURE_2D, texId); CHECK_GL;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4); CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); CHECK_GL;
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, &argb[0]); CHECK_GL;
}
Trying to have OpenGL and GDI+ cooperate by drawing into the same window directly will probably get you very flickery results.
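For completeness, a minimal usage sketch in the same fixed-function style as the code above (the quad coordinates are illustrative; blending is enabled so the GDI+ alpha actually shows):

GLuint texId = 0;
MakeTexture(texId); // GDI+ drawing, uploaded by genTexture()
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBindTexture(GL_TEXTURE_2D, texId);
glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2f(-1, -1);
    glTexCoord2f(1, 0); glVertex2f( 1, -1);
    glTexCoord2f(1, 1); glVertex2f( 1,  1);
    glTexCoord2f(0, 1); glVertex2f(-1,  1);
glEnd();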

Display ffmpeg frames on an OpenGL texture

I am using Dranger's tutorial01 (ffmpeg) to decode the video and get the frames. I want to use OpenGL to display the video.
http://dranger.com/ffmpeg/tutorial01.html
The main function is as follows:
int main(int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");
    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);
    GLenum err = glewInit();
    if (GLEW_OK != err) {
        fprintf(stderr, "GLEW error");
        return 1;
    }
    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); // Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);

    // ffmpeg stuff
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    uint8_t *buffer = NULL;
    AVDictionary *optionsDict = NULL;

    if (argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }
    // Register all formats and codecs
    av_register_all();
    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file
    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information
    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);
    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return -1; // Could not open codec
    // Allocate video frame
    pFrame = av_frame_alloc();
    // Allocate an AVFrame structure
    pFrameRGB = av_frame_alloc();
    if (pFrameRGB == NULL)
        return -1;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
        pCodecCtx->pix_fmt, 800, 600, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);
    // Read frames and save first five frames to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                /* sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                             pFrame->linesize, 0, pCodecCtx->height,
                             pFrameRGB->data, pFrameRGB->linesize); */
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0,
                    pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);
                //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
                //glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                glTexImage2D(GL_TEXTURE_2D,   // Always GL_TEXTURE_2D
                    0,                        // 0 for now
                    GL_RGB,                   // Format OpenGL uses for image
                    pCodecCtx->width, pCodecCtx->height, // Width and height
                    0,                        // The border of the image
                    GL_RGB,                   // GL_RGB, because pixels are stored in RGB format
                    GL_UNSIGNED_BYTE,         // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                    pFrameRGB->data[0]);      // The actual pixel data
                // additional opengl end
                // Save the frame to disk
                if (++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }
        glColor3f(1, 1, 1);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
            glTexCoord2f(0, 1); glVertex3f(0, 0, 0);
            glTexCoord2f(1, 1); glVertex3f(pCodecCtx->width, 0, 0);
            glTexCoord2f(1, 0); glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);
            glTexCoord2f(0, 0); glVertex3f(0, pCodecCtx->height, 0);
        glEnd();
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);
    // Free the YUV frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    avformat_close_input(&pFormatCtx);
    return 0;
}
Unfortunately I could not find my solution here:
ffmpeg video to opengl texture
The program compiles but does not show any video on the texture; just an OpenGL window is created.
One problem is your use of a single-buffered pixel format. Most modern operating systems use window composition, which relies on double-buffered pixel formats. Easy enough to change:
--- glutInitDisplayMode(GLUT_RGBA);
+++ glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
At the end of the render function, call glutSwapBuffers().
The other problem is that you never enter glutMainLoop, so no events (like drawing requests from the OS) get processed. Also, some parts of your code must go into the render function.
The frame decoding and texture upload must be placed either in an idle handler (which you didn't create) followed by a call to glutPostRedisplay(), or directly in the render function.
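For the idle-handler route, a minimal sketch using standard GLUT calls (the handler name is illustrative):

void idle(void)
{
    // decode the next frame and upload it to the texture here, then:
    glutPostRedisplay(); // queue a redraw; GLUT will invoke render()
}
// registered in main() before entering glutMainLoop():
glutIdleFunc(idle);
glutMainLoop();

Done directly in the render function instead, it looks like this: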
void render(void) {
    /* ... */
--- while (av_read_frame(pFormatCtx, &packet) >= 0) {
+++ if (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                /* sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                             pFrame->linesize, 0, pCodecCtx->height,
                             pFrameRGB->data, pFrameRGB->linesize); */
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0,
                    pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);
At this point you should use glTexSubImage2D instead of glTexImage2D, because it's way faster. However you must create the texture with glTexImage2D first; do this once before calling glutMainLoop().
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                    pCodecCtx->width, pCodecCtx->height,
                    GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                /*
                glTexImage2D(GL_TEXTURE_2D,   // Always GL_TEXTURE_2D
                    0,                        // 0 for now
                    GL_RGB,                   // Format OpenGL uses for image
                    pCodecCtx->width, pCodecCtx->height, // Width and height
                    0,                        // The border of the image
                    GL_RGB,                   // GL_RGB, because pixels are stored in RGB format
                    GL_UNSIGNED_BYTE,         // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                    pFrameRGB->data[0]);      // The actual pixel data
                */
                // additional opengl end
            }
            glColor3f(1, 1, 1);
            glBindTexture(GL_TEXTURE_2D, texture);
            glBegin(GL_QUADS);
                glTexCoord2f(0, 1); glVertex3f(0, 0, 0);
                glTexCoord2f(1, 1); glVertex3f(pCodecCtx->width, 0, 0);
                glTexCoord2f(1, 0); glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);
                glTexCoord2f(0, 0); glVertex3f(0, pCodecCtx->height, 0);
            glEnd();
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
    /* ... */
    glutSwapBuffers();
}
I was doing pretty much the same thing as you, and was running into an issue where what looked like an improperly decoded image appeared where my video should have been playing. I found this page trying to solve that problem, and referring to datenwolf's answer I was able to get the video playing by adding the glTexSubImage2D call. However, when I tried to take the glTexImage2D call out, except for a single initial call at creation, my previously working video was replaced with a white rectangle. So for some reason it only works for me when calling glTexImage2D followed by glTexSubImage2D for every frame.
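For reference, a minimal sketch of the allocate-once/update-per-frame pattern described above, using the question's variable names. One common cause of the all-white texture is an incomplete texture: the default minification filter expects mipmaps, which are never generated here, so a non-mipmapping filter has to be set explicitly:

// once, before glutMainLoop(): set non-mipmap filters and allocate storage
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height,
             0, GL_RGB, GL_UNSIGNED_BYTE, NULL); // NULL: allocate only, no data yet

// then, once per decoded frame (after sws_scale):
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pCodecCtx->width, pCodecCtx->height,
                GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);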

Loading and Converting a HBITMAP to an OpenGL Texture

I want to load a HBITMAP from a resource file and use it as an OpenGL texture. The code I use:
HBITMAP hBmp = (HBITMAP) LoadImage(hInstance, MAKEINTRESOURCE(id),
    IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION);
BITMAP BM;
GetObject(hBmp, sizeof(BM), &BM);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
GLvoid* bits = BM.bmBits;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, BM.bmWidth, BM.bmHeight, 0,
    GL_BGRA_EXT, GL_UNSIGNED_BYTE, bits);
But I always get an error from Visual Studio saying that I don't have access to the pointer bits. The error is at the last line of the code, at bits);.
I can actually pass a NULL pointer instead of bits without getting an error, and I can output the value of bits in a message box. Does anybody have an idea what is wrong with my code?
From the GetObject documentation:
If hgdiobj is a handle to a bitmap created by any other means, GetObject returns only the width, height, and color format information of the bitmap. You can obtain the bitmap's bit values by calling the GetDIBits or GetBitmapBits function.
In context, "other means" is anything other than CreateDIBSection. You're not using CreateDIBSection, you're using LoadImage. Which category the LR_CREATEDIBSECTION flag puts you into is unclear, but the workaround is clear: Use GetDIBits.
Working solution for GLUT on Windows XP
(tcc or lcc compiler)
GLuint LoadTexture(GLuint tex, const char * filename)
{
    HBITMAP hBitmap;
    BITMAP bm;
    HINSTANCE hInstance = GetModuleHandle(NULL);
    // standard bmp, 24 bit
    // supported resolutions: 64x64, 128x128, 256x256, 512x512
    // type "char" has a 1-byte size; other types take more bytes and will not work
    unsigned char * data;
    unsigned char R, G, B;
    // LoadImage() loads the bmp picture as an interlaced image
    hBitmap = (HBITMAP) LoadImage(NULL, filename, IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE | LR_CREATEDIBSECTION);
    GetObject(hBitmap, sizeof(BITMAP), &bm);
    // get the address of the start of the image data in memory
    data = bm.bmBits;
    // swap the R and B values for correct color display
    int index, i;
    for (i = 0; i < bm.bmWidth * bm.bmHeight; i++)
    {
        index = i * 3;
        B = data[index]; G = data[index + 1]; R = data[index + 2];
        data[index] = R; data[index + 1] = G; data[index + 2] = B;
    }
    // print image parameters
    printf("bmType %u\n", bm.bmType);
    printf("bmWidth %u\n", bm.bmWidth);
    printf("bmHeight %u\n", bm.bmHeight);
    printf("bmWidthBytes %u\n", bm.bmWidthBytes);
    printf("bmPlanes %u\n", bm.bmPlanes);
    printf("bmBitsPixel %u\n", bm.bmBitsPixel);
    printf("bmBits %p\n", bm.bmBits);
    printf("hInstance %p\n", hInstance);
    // create texture from the loaded bmp image
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, 4, bm.bmWidth, bm.bmHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, bm.bmBits);
    printf("--- texture %u created ---\n", tex);
    // texture filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    return tex; // the declared GLuint return value was missing in the original
}
void init(void)
{
    // enable texturing
    glEnable(GL_TEXTURE_2D);
    // load texture from bmp 24 bit image file
    // bmp exported from mspaint for windows xp
    LoadTexture(1, "image1.bmp");
    LoadTexture(2, "image2.bmp");
    LoadTexture(3, "image3.bmp");
    LoadTexture(4, "image4.bmp");
    LoadTexture(5, "image5.bmp");
    LoadTexture(6, "image6.bmp");
    . . . . . . . . . . . . . . . . . . . .
Compiling: no Cyrillic names; use quotes ("") for names with spaces.
tcc C:\tcc\src\box\box.c -o C:\tcc\src\box\box.exe -LC:\tcc\lib -luser32 -lgdi32 -lopengl32 -lglu32 -lglut32 -Wl,-subsystem=console
(without a console: -Wl,-subsystem=windows)

Display text using an SDL_Surface converted into a texture

As the title says, I'm trying to display text using an SDL_Surface converted into a texture. Here's my code:
if (SDL_Init(SDL_INIT_VIDEO) == 0) {
    // Create a surface
    switch (method) {
    case 2: // load a texture with SDL_image:
    {
        surface = IMG_Load("../../../../../data/box3.png");
    }
        break;
    case 3: // load a texture with SDL_ttf:
    {
        SDL_Color textColor = { 255, 255, 0, 1 };
        if (TTF_Init() == 0) {
            TTF_Font *font;
            font = TTF_OpenFont("../../../../../data/Bedizen.ttf", 20);
            if (font != NULL) {
                qDebug() << TTF_FontFaceFamilyName(font);
                surface = TTF_RenderText_Solid(font, ".....", textColor);
            }
            else
                qDebug() << "Error (Font) : " << TTF_GetError();
        }
        else
            qDebug() << "Error (Font) : " << TTF_GetError();
        break;
    }
    }
    if (surface != NULL) {
        GLint nbOfColors;
        GLenum texture_format = 0;
        qDebug("surface : %dx%d / %dbpp / %d", surface->w, surface->h,
               surface->format->BytesPerPixel, surface->pitch);
        MemoryDump(surface->pixels, surface->pitch, surface->h, surface->format->BytesPerPixel);
        // get the number of channels in the SDL surface
        nbOfColors = surface->format->BytesPerPixel;
        switch (nbOfColors) {
        case 1:
            texture_format = GL_ALPHA;
            break;
        case 3: // no alpha channel
            if (surface->format->Rmask == 0x000000ff)
                texture_format = GL_RGB;
            else
                texture_format = GL_BGR;
            break;
        case 4: // contains an alpha channel
            if (surface->format->Rmask == 0x000000ff)
                texture_format = GL_RGBA;
            else
                texture_format = GL_BGRA;
            break;
        default:
            qDebug() << "Warning: the image is not truecolor...";
            break;
        }
        glEnable(GL_TEXTURE_2D);
        // Have OpenGL generate a texture object handle for us
        glGenTextures(1, &texture);
        // Bind the texture object
        glBindTexture(GL_TEXTURE_2D, texture);
        // Set the texture's stretching properties
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        // Upload the texture image data using the information the SDL_Surface gives us
        glTexImage2D(GL_TEXTURE_2D, 0, nbOfColors, surface->w, surface->h, 0,
                     texture_format, GL_UNSIGNED_BYTE, surface->pixels);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    }
    else {
        qDebug() << "Error (SDL) : " << SDL_GetError();
    }
}
This works very well with IMG_Load() but not with TTF_RenderText_Solid(). I managed to figure it out by myself: at first I thought the surface from TTF_RenderText_Solid() came with the wrong pixel mapping, but thanks to a home-made memory dump I was able to confirm it was the right one.
OK, I was missing two lines in my rendering loop:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA);