Save an OpenGL texture to a TIFF file from another thread - C++

I'm trying to save a texture to a TIFF file from another thread, but the only result I get is a white picture. I think it comes from the GL context, because a single GL context cannot be current in several threads at once. That's why I've tried to create two GL contexts that share their display lists, but I still can't get the texture from the second OpenGL context. I'm doing this because, in the end, the texture will be a video stream from a camera.
Here is my context creation:
static PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
    1,                             // Version Number
    PFD_DRAW_TO_WINDOW |           // Format Must Support Window
    PFD_SUPPORT_OPENGL |           // Format Must Support OpenGL
    PFD_DOUBLEBUFFER,              // Must Support Double Buffering
    PFD_TYPE_RGBA,                 // Request An RGBA Format
    8,                             // Select Our Color Depth
    0, 0, 0, 0, 0, 0,              // Color Bits Ignored
    0,                             // No Alpha Buffer
    0,                             // Shift Bit Ignored
    0,                             // No Accumulation Buffer
    0, 0, 0, 0,                    // Accumulation Bits Ignored
    16,                            // 16Bit Z-Buffer (Depth Buffer)
    0,                             // No Stencil Buffer
    0,                             // No Auxiliary Buffer
    PFD_MAIN_PLANE,                // Main Drawing Layer
    0,                             // Reserved
    0, 0, 0                        // Layer Masks Ignored
};
GLuint PixelFormat;
// create the pixel format descriptor
PixelFormat = ChoosePixelFormat(dc, &pfd);
// set the pixel format descriptor
SetPixelFormat(dc, PixelFormat, &pfd);
gl = wglCreateContext(dc);
gl2 = wglCreateContext(dc);
wglShareLists(gl, gl2);
wglMakeCurrent(dc, gl);
GLenum g= glewInit();
wglewInit();
loadImage();
rec = new Recorder(dc,gl2);
rec->Start_StopRecord(text, true);
Here is the code that saves the texture to a TIFF file:
Recorder::Recorder(HDC &hdc, HGLRC &_gl)
{
    isStarted = false;
    dc = hdc;
    gl = _gl;
}

Recorder::~Recorder()
{
    if (isStarted) {
        isStarted = false;
        recordThread.join();
        CloseTifFile();
        delete mp_fileTifIn;
    }
}
void Recorder::Start_StopRecord(GLuint Texture, bool launched){
    if (launched) {
        if (isStarted) {
            wglMakeCurrent(dc, gl);
            isStarted = false;
            recordThread.join();
            CloseTifFile();
            pixels.release();
        }
        else {
            isStarted = true;
            //wglMakeCurrent(NULL, NULL);
            //RecordShot(&Texture);
            recordThread = std::thread(&Recorder::RecordShot, this, &Texture);
        }
    }
}

void Recorder::RecordShot(GLuint* texture){
    wglMakeCurrent(dc, gl);
    OpenTifFile(*texture);
    pixels = std::unique_ptr<int>(new int[width*height]);
    //while (isStarted) {
    WriteTif8Bits(*texture);
    WriteDirectory();
    //Sleep(16);
    //}
    pixels.release();
}
void Recorder::OpenTifFile(GLuint &Texture){
    char* filename = "../test3.tiff";
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
    mp_fileTifIn = TIFFOpen(filename, "w");
}

void Recorder::CloseTifFile(){
    TIFFClose(mp_fileTifIn);
}

/*
 * Open a new subdirectory in the TIFF file (allows multiple pictures in one TIFF file)
 */
void Recorder::WriteDirectory(){
    TIFFWriteDirectory(mp_fileTifIn);
}
void Recorder::WriteTif8Bits(GLuint &Texture){
    //Setup Tiff Configuration
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGEWIDTH, width);
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGELENGTH, height);
    TIFFSetField(mp_fileTifIn, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(mp_fileTifIn, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(mp_fileTifIn, TIFFTAG_ROWSPERSTRIP, TIFFDefaultStripSize(mp_fileTifIn, width));
    TIFFSetField(mp_fileTifIn, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    TIFFSetField(mp_fileTifIn, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
    glBindTexture(GL_TEXTURE_2D, Texture);
    assert(glGetError() == GL_NO_ERROR);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
    assert(glGetError() == GL_NO_ERROR);
    //Image Reversal
    Reverse(pixels.get(), height, width);
    //Write one picture
    /*for (int row = 0; row < height; row++) {
        TIFFWriteScanline(mp_fileTifIn, pixels.get(), row, 0);
        lineChange(pixels.get(), width);
    }*/
    TIFFWriteEncodedStrip(mp_fileTifIn, 0, pixels.get(), height*width * sizeof(int));
}

void Recorder::lineChange(int* pointer, int width) {
    pointer -= width;
}

void Recorder::Reverse(int* pointer, int height, int width) {
    pointer += (height - 1) * width;
}
And here is the loadImage function:
int loadImage() {
    wglMakeCurrent(dc, gl);
    cv::Mat image;
    image = cv::imread(std::string("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg"), CV_LOAD_IMAGE_COLOR);
    if (!image.data)
        return -1;
    cvNamedWindow("try", cv::WINDOW_AUTOSIZE);
    cv::imshow("try", image);
    cv::flip(image, image, 0);
    glGenTextures(1, &text);
    GLenum g = glGetError();
    glBindTexture(GL_TEXTURE_2D, text);
    assert(glGetError() == GL_NO_ERROR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.cols, image.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, image.ptr());
    return 0;
}
Here is a test project where I load a picture with OpenCV and try to save it from another thread: https://mega.nz/#!SBMUnJRI!dLC_l9hmCkhIDDUaygHuq4Kw2SKIuxRE7m19md74p0k
To run the project you need OpenCV and GLEW; libtiff is already packaged inside.
If you think something is missing from this post, please comment before downvoting; I'm following my question.

I finally solved my problem by doing all OpenGL work in one thread: I retrieve the image and display it in one thread, and I save it in another, which doesn't need an OpenGL context.
Another thing that was confusing me was a bad configuration of
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, this->rec[0].pixels.get());
My textures are actually GL_TEXTURE_RECTANGLE_NV, not GL_TEXTURE_2D; that's why I sometimes got only a white picture.
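For reference, here is a minimal sketch of that split (illustrative names, error handling omitted; needs <vector>, <thread>, <tiffio.h> and the GL/GLEW headers already used above): the thread that owns the GL context reads the texture back with the correct target, and the writer thread touches only libtiff, so it needs no GL context at all.

// GL thread: read the texture back (note the rectangle target).
std::vector<uint32_t> ReadTexturePixels(GLuint tex, int w, int h)
{
    std::vector<uint32_t> pixels(w * h);
    glBindTexture(GL_TEXTURE_RECTANGLE_NV, tex);
    glGetTexImage(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
    return pixels; // note: GL returns rows bottom-up; flip here (or write ORIENTATION_BOTLEFT)
}

// Writer thread: libtiff only, no GL calls.
void WriteTiff(const char* path, std::vector<uint32_t> pixels, int w, int h)
{
    TIFF* tif = TIFFOpen(path, "w");
    TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w);
    TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h);
    TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, h); // whole image in one strip
    TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
    TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
    TIFFWriteEncodedStrip(tif, 0, pixels.data(), w * h * 4);
    TIFFClose(tif);
}

// GL thread hands a copy of the pixels off and keeps rendering, e.g.:
// std::thread(WriteTiff, "../test3.tiff", ReadTexturePixels(text, w, h), w, h).detach();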

Related

OpenGL loading RGB texture from stb_image

I have a class for creating textures from a path, but when I try to load a texture with 3 channels
(RGB) it gives me a read access violation exception when running this line:
glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
I tried to change the parameters in the stbi_load function, but that didn't work.
When I load the image it reports the correct width, height and channel count, so I don't know what I am doing wrong.
The pointer to the data isn't nullptr either.
OpenGLTexture2D::OpenGLTexture2D(const std::string& path)
{
    RADIANT_PROFILE_FUNCTION();
    m_Path = path;
    stbi_set_flip_vertically_on_load(1);
    int width, height, channels;
    stbi_uc* data = stbi_load(path.c_str(), &width, &height, &channels, 0);
    RADIANT_CORE_ASSERT(data, "Failed To Load Image");
    m_Width = width;
    m_Height = height;
    if (channels == 4) {
        m_InternalFormat = GL_RGBA8;
        m_DataFormat = GL_RGBA;
    }
    else if (channels == 3) {
        m_InternalFormat = GL_RGB8;
        m_DataFormat = GL_RGB;
    }
    else {
        RADIANT_CORE_ERROR("Texture Format Not Supported, Channels: {0})", channels);
    }
    glCreateTextures(GL_TEXTURE_2D, 1, &m_RendererID);
    glTextureStorage2D(m_RendererID, 1, m_InternalFormat, m_Width, m_Height);
    glTextureParameteri(m_RendererID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTextureParameteri(m_RendererID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
    stbi_image_free(data);
}
When an RGB image is loaded into a texture object, GL_UNPACK_ALIGNMENT needs to be set to 1.
By default GL_UNPACK_ALIGNMENT is 4, so each row of the image is assumed to be aligned to 4 bytes. The pixels in the buffer are 3 bytes each and tightly packed, which causes a misalignment (and the out-of-bounds read):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
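More generally (a sketch, not part of the original class), the alignment can be derived from the row size in bytes, so 4-channel images keep the default:

// Pick the largest legal unpack alignment (8, 4, 2 or 1) that divides the
// row stride; stb_image data is tightly packed, so the stride is
// width * channels bytes.
static GLint UnpackAlignmentFor(int rowBytes)
{
    if (rowBytes % 8 == 0) return 8;
    if (rowBytes % 4 == 0) return 4;
    if (rowBytes % 2 == 0) return 2;
    return 1;
}

// Usage before the upload:
// glPixelStorei(GL_UNPACK_ALIGNMENT, UnpackAlignmentFor(m_Width * channels));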

Is GDI+ with OpenGL possible?

I tried to make GDI+ and OpenGL work together on one offscreen HDC. The current code is something like this (no Internet access):
1. create a gdiplus bitmap A
2. create a gdiplus graphics from A as B
3. get B's HDC
4. choose a proper pixel format for the HDC
5. create a gdiplus graphics from the HDC as C
6. create an opengl context from the HDC as D
7. draw with C and D
8. release the HDC to B
9. draw A to other graphics
C is needed because I found that after I release the HDC to B, it is not possible for OpenGL to change the bitmap.
To check if things work, I added a bitmap E created by reading the GL pixels before step (9).
There are still problems in the current program:
After releasing the HDC, bitmap A loses its alpha information.
Bitmap E works fine with correct alpha, but GL_BGRA cannot read out the alpha, so I have to read GL_RGBA data and do a per-pixel conversion into the GDI+ color format.
Should I just use E, or is there another approach?
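(For reference, a sketch of that per-pixel conversion, assuming a bottom-left GL origin; names are illustrative:)

// Read back RGBA from GL and repack into GDI+'s PixelFormat32bppARGB
// word layout (0xAARRGGBB per DWORD on little-endian Windows).
std::vector<DWORD> ReadGlAsArgb(int w, int h)
{
    std::vector<unsigned char> rgba(size_t(w) * h * 4);
    glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, rgba.data());
    std::vector<DWORD> argb(size_t(w) * h);
    for (size_t i = 0; i < argb.size(); ++i) {
        const DWORD r = rgba[4*i], g = rgba[4*i+1], b = rgba[4*i+2], a = rgba[4*i+3];
        argb[i] = (a << 24) | (r << 16) | (g << 8) | b;
    }
    return argb; // rows come out bottom-up; flip them when copying into the Bitmap
}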
Example of the ReleaseHDC alpha loss:
Bitmap bitmap(100,100);
Graphics graphics(&bitmap);
HDC hdc=graphics.GetHDC();
Graphics graphics2(hdc);
graphics2.Clear(Color(128,255,0,0));
graphics2.Flush();
graphics.ReleaseHDC(hdc);
//draw bitmap to other graphics
Note: I just figured out that GDI+ doesn't actually use the alpha channel in the HDC; only the RGB channels are shared, so I wonder how it works with a BGRA target bitmap.
I think the best way to use OpenGL and GDI+ together is to generate a texture in GDI+, then load that into OpenGL.
The gist of this is:
void MakeTexture(GLuint& texId)
{
    Bitmap offscreen(512, 512, PixelFormat32bppARGB);
    Graphics gr(&offscreen);
    gr.Clear(Color(128, 255, 0, 0));
    Gdiplus::SolidBrush brush(Color(255, 0, 0, 255));
    Pen pen(Color(128, 255, 0, 0), 16.f);
    Font font(L"Arial", 48.f);
    Rect r(25, 25, 100, 100);
    gr.DrawRectangle(&pen, r);
    gr.DrawString(TEXT("TEST STRING"), -1, &font, PointF(50, 50), &brush);
    vector<DWORD> argb;
    GetBitsLockBits(offscreen, argb, 1);
    genTexture(texId, offscreen.GetWidth(), offscreen.GetHeight(), argb);
}

void GetBitsLockBits(Bitmap& bmp, vector<DWORD>& argb, bool invert = 0)
{
    BitmapData bmpData;
    RectF rectf;
    Unit unit;
    bmp.GetBounds(&rectf, &unit);
    Rect rect(rectf.X, rectf.Y, rectf.Width, rectf.Height);
    printf("Got rect %d %d %d %d\n", rect.X, rect.Y, rect.Width, rect.Height);
    bmp.LockBits(&rect, ImageLockModeRead, PixelFormat32bppARGB, &bmpData);
    printf("BMP has w=%d h=%d stride=%d\n", bmpData.Width, bmpData.Height, bmpData.Stride);
    argb.resize(bmpData.Width * bmpData.Height);
    if (invert)
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + (bmpData.Height - 1 - i) * bmpData.Width, bmpData.Width * 4);
    else if (bmpData.Stride == bmpData.Width * 4)
        // If the bmp is padded then this won't read the image correctly
        // (it will read it with pad bits between)
        memcpy(&argb[0], bmpData.Scan0, bmpData.Width * bmpData.Height * 4);
    else
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + i * bmpData.Width, bmpData.Width * 4);
    bmp.UnlockBits(&bmpData);
}

void genTexture(GLuint& texId, int w, int h, const vector<DWORD>& argb)
{
    glGenTextures(1, &texId); CHECK_GL;
    glBindTexture(GL_TEXTURE_2D, texId); CHECK_GL;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4); CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); CHECK_GL;
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, &argb[0]); CHECK_GL;
}
Trying to have OpenGL and GDI+ cooperate by drawing into the same window directly will probably get you very flickery results.
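A minimal usage sketch (fixed-function GL, purely illustrative): build the texture once, then draw it on a quad each frame, with blending enabled so the GDI+ alpha shows through.

GLuint texId = 0;

// Call once after the GL context is current:
//   MakeTexture(texId);

void drawGdiplusTexture(void)
{
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // GDI+ ARGB is straight alpha
    glBindTexture(GL_TEXTURE_2D, texId);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2f(-1, -1);
    glTexCoord2f(1, 0); glVertex2f( 1, -1);
    glTexCoord2f(1, 1); glVertex2f( 1,  1);
    glTexCoord2f(0, 1); glVertex2f(-1,  1);
    glEnd();
}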

Display ffmpeg frames on an OpenGL texture

I am using Dranger's tutorial01 (ffmpeg) to decode the video and get the video frames. I want to use OpenGL to display the video.
http://dranger.com/ffmpeg/tutorial01.html
The main function is as follows:
int main (int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");
    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);
    GLenum err = glewInit();
    if(GLEW_OK != err){
        fprintf(stderr, "GLEW error");
        return 1;
    }
    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); //Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);
    //ffmpeg stuff
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    uint8_t *buffer = NULL;
    AVDictionary *optionsDict = NULL;
    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }
    // Register all formats and codecs
    av_register_all();
    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file
    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information
    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);
    // Find the first video stream
    videoStream = -1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if(videoStream == -1)
        return -1; // Didn't find a video stream
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return -1; // Could not open codec
    // Allocate video frame
    pFrame = av_frame_alloc();
    // Allocate an AVFrame structure
    pFrameRGB = av_frame_alloc();
    if(pFrameRGB == NULL)
        return -1;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                800, 600, PIX_FMT_RGB24,
                                                SWS_BICUBIC, NULL, NULL, NULL);
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    // Read frames and save first five frames to disk
    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                /* sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize,
                             0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); */
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);
                //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
                //glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                glTexImage2D(GL_TEXTURE_2D,       //Always GL_TEXTURE_2D
                             0,                   //0 for now
                             GL_RGB,              //Format OpenGL uses for image
                             pCodecCtx->width, pCodecCtx->height, //Width and height
                             0,                   //The border of the image
                             GL_RGB,              //GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,    //GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]); //The actual pixel data
                // additional opengl end
                // Save the frame to disk
                if(++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }
        glColor3f(1,1,1);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
        glTexCoord2f(0,1); glVertex3f(0,0,0);
        glTexCoord2f(1,1); glVertex3f(pCodecCtx->width,0,0);
        glTexCoord2f(1,0); glVertex3f(pCodecCtx->width, pCodecCtx->height,0);
        glTexCoord2f(0,0); glVertex3f(0,pCodecCtx->height,0);
        glEnd();
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);
    // Free the YUV frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    avformat_close_input(&pFormatCtx);
    return 0;
}
Unfortunately I could not find my solution here:
ffmpeg video to opengl texture
The program compiles but does not show any video on the texture; just an OpenGL window is created.
One problem is your use of a single buffered pixel format. Most modern operating systems use window composition which relies on double buffered pixel formats. Easy enough to change:
--- glutInitDisplayMode(GLUT_RGBA);
+++ glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
At the end of the render function, call glutSwapBuffers().
The other problem is that you never enter glutMainLoop, so no events (like drawing requests from the OS) get processed. Also, some parts of your code must go into the render function.
The frame decoding and texture upload must be placed either in an idle handler (you didn't create one) followed by a call to glutPostRedisplay(), or directly in the render function:
void render(void) {
    /* ... */
---    while(av_read_frame(pFormatCtx, &packet)>=0) {
+++    if(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                /* sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize,
                             0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); */
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);
At this point you should use glTexSubImage2D instead of glTexImage2D, because it's way faster. However, you must create the texture with glTexImage2D first; do this once before calling glutMainLoop() (a sketch of that one-time setup follows after the render function below).
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pCodecCtx->width, pCodecCtx->height,
                                GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                /*
                glTexImage2D(GL_TEXTURE_2D,       //Always GL_TEXTURE_2D
                             0,                   //0 for now
                             GL_RGB,              //Format OpenGL uses for image
                             pCodecCtx->width, pCodecCtx->height, //Width and height
                             0,                   //The border of the image
                             GL_RGB,              //GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,    //GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]); //The actual pixel data
                */
                // additional opengl end
            }
            glColor3f(1,1,1);
            glBindTexture(GL_TEXTURE_2D, texture);
            glBegin(GL_QUADS);
            glTexCoord2f(0,1); glVertex3f(0,0,0);
            glTexCoord2f(1,1); glVertex3f(pCodecCtx->width,0,0);
            glTexCoord2f(1,0); glVertex3f(pCodecCtx->width, pCodecCtx->height,0);
            glTexCoord2f(0,0); glVertex3f(0,pCodecCtx->height,0);
            glEnd();
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
        /* ... */
        glutSwapBuffers();
}
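To make the one-time setup concrete, here is a minimal sketch (illustrative only; it assumes texture and pCodecCtx have been made accessible to render, e.g. as globals):

// Idle handler: just request a repaint; render() decodes and uploads the next frame.
void onIdle(void) {
    glutPostRedisplay();
}

// Called once from main(), after the codec is opened and before glutMainLoop():
void setupVideoTexture(void) {
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmaps are uploaded,
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // so avoid the mipmapping default
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height,
                 0, GL_RGB, GL_UNSIGNED_BYTE, NULL); // allocate storage only; data comes per frame
    glutIdleFunc(onIdle);
}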
I was doing pretty much the same thing as you, and was running into an issue where what looked like an improperly decoded image appeared where my video should have been playing. I found this page while trying to solve that problem, and referring to datenwolf's answer I was able to get the video playing by adding the glTexSubImage2D call. However, when I tried to take the glTexImage2D call out, except for a single initial call at creation time, my previously working video was replaced with a white rectangle. So for some reason, it only works for me when calling glTexImage2D followed by glTexSubImage2D for every frame.
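(A hedged note, not confirmed by the commenter: a classic cause of an all-white quad in exactly this situation is an incomplete texture. With the default GL_TEXTURE_MIN_FILTER of GL_NEAREST_MIPMAP_LINEAR, a texture that only ever receives level 0 via glTexSubImage2D is mipmap-incomplete, fixed-function texturing is effectively disabled, and the quad is drawn in the current color, white here. Setting a non-mipmapping filter at creation time is worth checking:)

glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // only level 0 exists; don't require mipmaps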

How to draw text on an EGL texture using Skia SkBitmap?

I am looking for a way to draw text on an EGL texture using the Skia library in C/C++. I am planning to run a program that does this on the Android platform during boot, before SurfaceFlinger comes up.
Please don't point me to Android Java examples, as that is not what I am looking for. I am troubleshooting a UI issue at the frame buffer level, so I am looking for a way to do this in C/C++ using the Android native libs (Skia etc.).
I have a sample program that can render an image onto an EGL texture using SkBitmap, and I was able to display it on a monitor. Following the same example, I came up with this strategy, but it doesn't work:
0) Clear the screen with green color
1) Create SkBitmap of size 640x480.
2) Create EGLTexture backed by the pixel buffer returned by SkBitmap.lockPixels()
3) Draw text on SkBitmap using SkCanvas. Upload the bitmap into the above texture.
4) Then draw the texture on current surface
I used Android's boot animation program as my starting point.
When I run this program, all I see is green. I checked the EGL calls for errors; they all seem to succeed. Thanks for your help.
I posted this question on Google Groups; Brian from Google has some pointers here:
https://groups.google.com/d/topic/skia-discuss/aC5f6HB4gSU/discussion
Below is code that implements the above.
#define EXPECT_NO_GL_ERROR(stmt) \
    do { \
        stmt; \
        const EGLint error_code = eglGetError(); \
        if (EGL_SUCCESS != error_code) { \
            LOGD("GLTest: GL error code %d at %s:%d", error_code, __FILE__, __LINE__); \
            __android_log_assert("GLTest", "GLtest", "GlTest"); \
        } \
    } while(0)
struct Texture
{
    GLint w;
    GLint h;
    GLuint id;
};

bool GLTest::frametest()
{
    Texture texFrame;
    // Paint screen with green color
    glShadeModel(GL_FLAT);
    glDisable(GL_DITHER);
    glDisable(GL_SCISSOR_TEST);
    glClearColor(0, 1, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    eglSwapBuffers(mDisplay, mSurface);
    SkGraphics::Init();
    SkBitmap bitmap;
    bitmap.setConfig(SkBitmap::kARGB_8888_Config, 640, 480);
    bitmap.allocPixels();
    if (NO_ERROR != initTexture(&texFrame, bitmap))
    {
        LOGD("GLTest: Unable to create a texture that is backed by SkBitmap");
        return false;
    }
    SkCanvas canvas(bitmap);
    SkPaint textAttribs;
    textAttribs.setColor(0xFFFFFFFF);
    textAttribs.setTextSize(SkIntToScalar(24));
    const nsecs_t startTime = systemTime();
    int frame_count = 0;
    do
    {
        nsecs_t now = systemTime();
        double time = now - startTime;
        canvas.drawColor(0xFF0000FF);
        canvas.drawText("Hello world", strlen("Hello world"), 200, 400, textAttribs);
        initTexture(&texFrame, bitmap); // Upload the bitmap into the texture
        glEnable(GL_BLEND);
        EXPECT_NO_GL_ERROR(glBindTexture(GL_TEXTURE_2D, texFrame.id));
        EXPECT_NO_GL_ERROR(glDrawTexiOES(0, 0, 0, texFrame.w, texFrame.h));
        EGLBoolean res = eglSwapBuffers(mDisplay, mSurface);
        if (res == EGL_FALSE)
            break;
        frame_count++;
        if (0 == (frame_count % 150))
            LOGD("GLTest: Completed %d frames", frame_count);
        // 12fps: don't animate too fast to preserve CPU
        const nsecs_t sleepTime = 83333 - ns2us(systemTime() - now);
        if (sleepTime > 0)
            usleep(sleepTime);
    } while (!exitPending());
    return false;
}
status_t GLTest::initTexture(Texture* texture, SkBitmap &bitmap)
{
    bitmap.lockPixels();
    const int w = bitmap.width();
    const int h = bitmap.height();
    const void* p = bitmap.getPixels();
    GLint crop[4] = { 0, h, w, -h };
    texture->w = w;
    texture->h = h;
    EXPECT_NO_GL_ERROR(glGenTextures(1, &(texture->id)));
    EXPECT_NO_GL_ERROR(glBindTexture(GL_TEXTURE_2D, texture->id));
    switch (bitmap.getConfig())
    {
    case SkBitmap::kA8_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, w, h, 0, GL_ALPHA, GL_UNSIGNED_BYTE, p));
        break;
    case SkBitmap::kARGB_4444_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p));
        break;
    case SkBitmap::kARGB_8888_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, p));
        break;
    case SkBitmap::kRGB_565_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p));
        break;
    default:
        break;
    }
    EXPECT_NO_GL_ERROR(glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, crop));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT));
    return NO_ERROR;
}
I found out why my code didn't work: when creating textures, the width and height must be powers of 2. For example, if the width is 1920, then the texture should be created with a width of 2048 (the next power of 2).
I changed initTexture as below; now I am able to draw text into the SkBitmap, upload the bitmap to the texture, and draw the texture.
Below is the new initTexture that uploads a given bitmap to a texture.
bool initTexture(Texture* texture, const SkBitmap &bitmap)
{
    bool result = true;
    SkAutoLockPixels alp(bitmap);
    const int w = bitmap.width();
    const int h = bitmap.height();
    const void* p = bitmap.getPixels();
    int tw = 1 << (31 - __builtin_clz(w));
    int th = 1 << (31 - __builtin_clz(h));
    if (tw < w)
        tw <<= 1;
    if (th < h)
        th <<= 1;
    if (NULL == texture)
        return false;
    if (texture->id != 0)
    {
        glBindTexture(GL_TEXTURE_2D, texture->id);
        switch (bitmap.getConfig())
        {
        case SkBitmap::kA8_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_ALPHA, GL_UNSIGNED_BYTE, p);
            break;
        case SkBitmap::kARGB_4444_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p);
            break;
        case SkBitmap::kARGB_8888_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, p);
            break;
        case SkBitmap::kRGB_565_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p);
            break;
        default:
            break;
        }
        return true;
    }
    GLint crop[4] = { 0, h, w, -h };
    texture->w = w;
    texture->h = h;
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &(texture->id));
    glBindTexture(GL_TEXTURE_2D, texture->id);
    switch (bitmap.getConfig())
    {
    case SkBitmap::kA8_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, w, h, 0, GL_ALPHA, GL_UNSIGNED_BYTE, p);
        break;
    case SkBitmap::kARGB_4444_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p);
        break;
    case SkBitmap::kARGB_8888_Config:
        if (tw != w || th != h)
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tw, th, 0, GL_RGBA,
                         GL_UNSIGNED_BYTE, 0);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA,
                            GL_UNSIGNED_BYTE, p);
        }
        else
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, p);
        }
        break;
    case SkBitmap::kRGB_565_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p);
        break;
    default:
        break;
    }
    glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, crop);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    return result;
}
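For reference, the bit trick above rounds up to the next power of two; as a standalone sketch with the example from the text:

// Round v up to the next power of two (for v > 0).
// 1 << (31 - __builtin_clz(v)) isolates the highest set bit of v;
// if that is still smaller than v, one more doubling is needed.
static int NextPow2(int v)
{
    int p = 1 << (31 - __builtin_clz(v));
    return (p < v) ? (p << 1) : p;
}
// NextPow2(1920) == 2048, NextPow2(480) == 512, NextPow2(512) == 512

Note that the code keeps the original w and h in GL_TEXTURE_CROP_RECT_OES, so glDrawTexiOES never draws the uninitialized padding on the right and bottom of the power-of-two texture.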

OpenGL Texture binding issue from a BMP image

I'm trying to load a BMP image of size 1024x1024 and bind it as a texture in OpenGL.
For a BMP of size 256x256 the output is as desired, but for a BMP of size 1024x1024
it throws up the following error:
The program has unexpectedly finished.
C:\Users\xxxx\Desktop\lapping-build-desktop-Qt_4_8_1_for_Desktop_-_MinGW__Qt_SDK__Debug\debug\lapping.exe exited with code -1073741819
The code for loading the BMP:
Texture2d* loadBMP( char *fname){
    int i, j, w, h, bits;
    unsigned long l;
    GLubyte c[3];
    Texture2d* tex;
    FILE *fin;
    unsigned char head[54];
    fin = fopen( fname, "rb");
    if( fin == NULL){
        printf("Bitmap '%s' not found\n", fname);
        return NULL;
    }
    fread( head, 54, 1, fin);
    w = (head[21]<<24)|(head[20]<<16)|(head[19]<<8)|head[18]; // the width of the image
    h = (head[25]<<24)|(head[24]<<16)|(head[23]<<8)|head[22]; // the height of the image
    tex = new Texture2d[1];
    tex->w = w;
    tex->h = h;
    tex->buf = new GLubyte[h*w*4];
    tex->id = avTotalTextures++;
    for(i=h-1; i>=0; i--){
        l = i*w*4;
        for(j=0; j<w; j++){
            fread( c, 1, 3, fin);
            tex->buf[l++] = c[2];
            tex->buf[l++] = c[1];
            tex->buf[l++] = c[0];
            tex->buf[l++] = 255;
        }
    }
    fclose( fin);
    printf("Bitmap_load '%s' loaded\n", fname);
    return tex;
}
The code for binding the texture:
void Object3d::bindTexture( Texture2d *t)
{
    if( t == NULL)
        return;
    tex = t;
    glBindTexture(GL_TEXTURE_2D, tex->id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
    gluBuild2DMipmaps( GL_TEXTURE_2D, GL_RGBA, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
    delete [] tex->buf;
    tex->buf = NULL;
}
This works well for a BMP of size 256x256, but when I try to get it working for a BMP of 1024x1024 by changing the width and height parameters in
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
gluBuild2DMipmaps( GL_TEXTURE_2D, GL_RGBA, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
from 0,0 and 256,256 to 1024,1024 respectively, it throws up the previously mentioned error. I have referred to the OpenGL specification, but couldn't come up with a reason why this wouldn't work.