glBindTexture(GL_TEXTURE_2D_ARRAY, texture_id);
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
               1,             // No mipmaps
               GL_RGBA8,      // Internal format
               width, height, // width, height
               1);            // Number of layers
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                0,                // Mipmap number
                0, 0, 0,          // xoffset, yoffset, zoffset
                width, height, 1, // width, height, depth
                GL_RGBA8,         // format
                GL_UNSIGNED_BYTE, // type
                image);           // pointer to data
For testing I only create an array of length 1. I am currently using OpenGL 4.3, but I want to switch back to OpenGL 3.3, which means I cannot use glTexStorage3D. So I tried to switch to glTexImage3D:
glBindTexture(GL_TEXTURE_2D_ARRAY, texture_id);
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             1,                // level
             GL_RGBA8,         // Internal format
             width, height, 1, // width, height, depth
             0,                // border?
             GL_RGBA,          // format
             GL_UNSIGNED_BYTE, // type
             0);               // pointer to data
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                0,                // Mipmap number
                0, 0, 0,          // xoffset, yoffset, zoffset
                width, height, 1, // width, height, depth
                GL_RGBA8,         // format
                GL_UNSIGNED_BYTE, // type
                image);           // pointer to data
But it is not working, and I am not sure what I am doing wrong.
Edit: I should probably add that it is working with glTexStorage3D.
The main problem is with the second argument you are using for glTexImage3D():
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             1,                // level
             GL_RGBA8,         // Internal format
             width, height, 1, // width, height, depth
             0,                // border?
             GL_RGBA,          // format
             GL_UNSIGNED_BYTE, // type
             0);               // pointer to data
For glTexImage3D(), the argument is named level, and is the 0-based index of the level you're specifying data for, or just allocating when the last argument is NULL. This is different from the levels (note plural) argument of glTexStorage3D(), which is the count of levels to be allocated.
In fact, the second argument of glTexImage3D() directly corresponds to the second argument of glTexSubImage3D(), which you're already passing as 0.
So the correct call simply uses 0 for the second argument:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,                // level
             GL_RGBA8,         // Internal format
             width, height, 1, // width, height, depth
             0,                // border?
             GL_RGBA,          // format
             GL_UNSIGNED_BYTE, // type
             0);               // pointer to data
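As an aside, if you later want a full mip chain on GL 3.3, the levels count of glTexStorage3D() can be approximated by calling glTexImage3D() once per level with halved dimensions. A minimal sketch, assuming width, height, and a num_levels count of your choosing (note that for array textures the layer count does not shrink per level, only width and height do):

int w = width, h = height;
for (int level = 0; level < num_levels; ++level)
{
    glTexImage3D(GL_TEXTURE_2D_ARRAY, level, GL_RGBA8,
                 w, h, 1,          // depth is the layer count; constant across levels
                 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    w = (w > 1) ? w / 2 : 1;
    h = (h > 1) ? h / 2 : 1;
}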
In addition, I'm surprised that your glTexSubImage3D() calls work: GL_RGBA8 is not valid as the 9th argument. That argument is a format, not an internalFormat, so it takes an unsized format; the value here should be GL_RGBA:
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                0,                // Mipmap number
                0, 0, 0,          // xoffset, yoffset, zoffset
                width, height, 1, // width, height, depth
                GL_RGBA,          // format
                GL_UNSIGNED_BYTE, // type
                image);           // pointer to data
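One more pitfall when moving off glTexStorage3D(): with immutable storage the mip range is effectively clamped to the levels you allocated, but after a single glTexImage3D() only level 0 exists, while the default GL_TEXTURE_MIN_FILTER (GL_NEAREST_MIPMAP_LINEAR) still expects a complete mip chain, so the texture is incomplete and samples as black. This is often why a glTexStorage path "just works" while the glTexImage path doesn't. A minimal sketch of the usual fix (either approach is enough):

// Either restrict the mip range to the single level that exists...
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAX_LEVEL, 0);
// ...or pick a minification filter that never touches mipmaps:
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);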
I have a class for creating textures from a path, but when I try to load a texture with 3 channels (RGB) it gives me a read-access-violation exception on this line:
glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
I tried to change the parameters in the stbi_load function, but that didn't work.
When I load the image it reports the correct width, height, and channel count, so I don't know what I am doing wrong.
The pointer to the data isn't nullptr either.
OpenGLTexture2D::OpenGLTexture2D(const std::string& path)
{
    RADIANT_PROFILE_FUNCTION();
    m_Path = path;
    stbi_set_flip_vertically_on_load(1);
    int width, height, channels;
    stbi_uc* data = stbi_load(path.c_str(), &width, &height, &channels, 0);
    RADIANT_CORE_ASSERT(data, "Failed To Load Image");
    m_Width = width;
    m_Height = height;
    if (channels == 4) {
        m_InternalFormat = GL_RGBA8;
        m_DataFormat = GL_RGBA;
    }
    else if (channels == 3) {
        m_InternalFormat = GL_RGB8;
        m_DataFormat = GL_RGB;
    }
    else {
        RADIANT_CORE_ERROR("Texture Format Not Supported, Channels: {0}", channels);
    }
    glCreateTextures(GL_TEXTURE_2D, 1, &m_RendererID);
    glTextureStorage2D(m_RendererID, 1, m_InternalFormat, m_Width, m_Height);
    glTextureParameteri(m_RendererID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTextureParameteri(m_RendererID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
    stbi_image_free(data);
}
When an RGB image is loaded to a texture object, GL_UNPACK_ALIGNMENT needs to be set to 1.
By default GL_UNPACK_ALIGNMENT is 4, so each row of the image is assumed to be aligned to 4 bytes. The pixels in the buffer are 3 bytes each and tightly packed, which can break that assumption:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
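If other uploads in the codebase rely on the default alignment, a conservative variant saves and restores it around the call; a minimal sketch (the surrounding names are from the question's code):

GLint prevAlignment = 4;
glGetIntegerv(GL_UNPACK_ALIGNMENT, &prevAlignment);   // remember the current row alignment
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);                // stbi_load data is tightly packed
glTextureSubImage2D(m_RendererID, 0, 0, 0, m_Width, m_Height,
                    m_DataFormat, GL_UNSIGNED_BYTE, data);
glPixelStorei(GL_UNPACK_ALIGNMENT, prevAlignment);    // restore for later uploads

Strictly speaking, the override only matters when m_Width * 3 is not a multiple of 4; 4-channel rows are always 4-byte aligned.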
I'm trying to save a texture to a TIFF file from another thread, but the only result I get is a white picture. I think it comes from the GL context (since a single GL context can't be current in several threads at once), so I tried to create two GL contexts on the same device context and share their object lists, but I still can't get the texture from the second OpenGL context. I'm doing this because, in the end, the texture will be a video stream from a camera.
Here is my context creation:
static PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
    1,                             // Version Number
    PFD_DRAW_TO_WINDOW |           // Format Must Support Window
    PFD_SUPPORT_OPENGL |           // Format Must Support OpenGL
    PFD_DOUBLEBUFFER,              // Must Support Double Buffering
    PFD_TYPE_RGBA,                 // Request An RGBA Format
    8,                             // Select Our Color Depth
    0, 0, 0, 0, 0, 0,              // Color Bits Ignored
    0,                             // No Alpha Buffer
    0,                             // Shift Bit Ignored
    0,                             // No Accumulation Buffer
    0, 0, 0, 0,                    // Accumulation Bits Ignored
    16,                            // 16Bit Z-Buffer (Depth Buffer)
    0,                             // No Stencil Buffer
    0,                             // No Auxiliary Buffer
    PFD_MAIN_PLANE,                // Main Drawing Layer
    0,                             // Reserved
    0, 0, 0                        // Layer Masks Ignored
};

GLuint PixelFormat;
// choose the pixel format
PixelFormat = ChoosePixelFormat(dc, &pfd);
// set the pixel format
SetPixelFormat(dc, PixelFormat, &pfd);
gl = wglCreateContext(dc);
gl2 = wglCreateContext(dc);
wglShareLists(gl, gl2);
wglMakeCurrent(dc, gl);
GLenum g = glewInit();
wglewInit();
loadImage();
rec = new Recorder(dc, gl2);
rec->Start_StopRecord(text, true);
Here is the code that saves to the TIFF file:
Recorder::Recorder(HDC &hdc, HGLRC &_gl)
{
    isStarted = false;
    dc = hdc;
    gl = _gl;
}

Recorder::~Recorder()
{
    if (isStarted) {
        isStarted = false;
        recordThread.join();
        CloseTifFile();
        delete mp_fileTifIn;
    }
}

void Recorder::Start_StopRecord(GLuint Texture, bool launched)
{
    if (launched) {
        if (isStarted) {
            wglMakeCurrent(dc, gl);
            isStarted = false;
            recordThread.join();
            CloseTifFile();
            pixels.release();
        }
        else {
            isStarted = true;
            //wglMakeCurrent(NULL, NULL);
            //RecordShot(&Texture);
            recordThread = std::thread(&Recorder::RecordShot, this, &Texture);
        }
    }
}

void Recorder::RecordShot(GLuint* texture)
{
    wglMakeCurrent(dc, gl);
    OpenTifFile(*texture);
    pixels = std::unique_ptr<int>(new int[width * height]);
    //while (isStarted) {
    WriteTif8Bits(*texture);
    WriteDirectory();
    //Sleep(16);
    //}
    pixels.release();
}

void Recorder::OpenTifFile(GLuint &Texture)
{
    char* filename = "../test3.tiff";
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
    mp_fileTifIn = TIFFOpen(filename, "w");
}

void Recorder::CloseTifFile()
{
    TIFFClose(mp_fileTifIn);
}

/*
 * Write a sub-directory for the TIFF file (allows multiple pictures in one TIFF file)
 */
void Recorder::WriteDirectory()
{
    TIFFWriteDirectory(mp_fileTifIn);
}

void Recorder::WriteTif8Bits(GLuint &Texture)
{
    // Setup TIFF configuration
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGEWIDTH, width);
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGELENGTH, height);
    TIFFSetField(mp_fileTifIn, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(mp_fileTifIn, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(mp_fileTifIn, TIFFTAG_ROWSPERSTRIP, TIFFDefaultStripSize(mp_fileTifIn, width));
    TIFFSetField(mp_fileTifIn, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    TIFFSetField(mp_fileTifIn, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
    glBindTexture(GL_TEXTURE_2D, Texture);
    assert(glGetError() == GL_NO_ERROR);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
    assert(glGetError() == GL_NO_ERROR);
    // Image Reversal
    Reverse(pixels.get(), height, width);
    // Write one picture
    /*for (int row = 0; row < height; row++) {
        TIFFWriteScanline(mp_fileTifIn, pixels.get(), row, 0);
        lineChange(pixels.get(), width);
    }*/
    TIFFWriteEncodedStrip(mp_fileTifIn, 0, pixels.get(), height * width * sizeof(int));
}

void Recorder::lineChange(int* pointer, int width)
{
    pointer -= width;
}

void Recorder::Reverse(int* pointer, int height, int width)
{
    pointer += (height - 1) * width;
}
And here is the loadImage function:
int loadImage()
{
    wglMakeCurrent(dc, gl);
    cv::Mat image;
    image = cv::imread(std::string("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg"), CV_LOAD_IMAGE_COLOR);
    if (!image.data)
        return -1;
    cvNamedWindow("try", cv::WINDOW_AUTOSIZE);
    cv::imshow("try", image);
    cv::flip(image, image, 0);
    glGenTextures(1, &text);
    GLenum g = glGetError();
    glBindTexture(GL_TEXTURE_2D, text);
    assert(glGetError() == GL_NO_ERROR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.cols, image.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, image.ptr());
    return 0;
}
Here is a test project where I'm loading a picture with OpenCV and trying to save it from another thread: https://mega.nz/#!SBMUnJRI!dLC_l9hmCkhIDDUaygHuq4Kw2SKIuxRE7m19md74p0k
To run the project you need OpenCV and glew; libtiff is already packaged inside.
If you think something is missing from this post, please comment before downvoting; I'm still following this question.
I finally solved my problem by doing all OpenGL work in one thread (I retrieve the image and display it in one thread, and I save it in another, which doesn't need an OpenGL context).
Another thing that was confusing me was a bad call to
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, this->rec[0].pixels.get());
My textures are actually GL_TEXTURE_RECTANGLE_NV, which is why I sometimes got only a white picture.
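For anyone hitting the same wall, the shape of the fix is: the thread that owns the GL context does the readback, and a plain worker thread (which never needs a context) only consumes the pixel buffer. A minimal sketch of that handoff; the names frameMutex, frameCv, and SaveTiff are illustrative, not from the original project:

#include <condition_variable>
#include <mutex>
#include <vector>

std::mutex frameMutex;
std::condition_variable frameCv;
std::vector<unsigned char> frame;   // filled by the GL thread, consumed by the saver
bool frameReady = false;

// GL thread: the context is current here, so readback is legal.
void CaptureFrame(GLuint texture, int width, int height)
{
    std::vector<unsigned char> pixels(width * height * 4);
    glBindTexture(GL_TEXTURE_2D, texture);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
    {
        std::lock_guard<std::mutex> lock(frameMutex);
        frame = std::move(pixels);
        frameReady = true;
    }
    frameCv.notify_one();
}

// Worker thread: never touches GL, only libtiff (SaveTiff is a hypothetical helper
// wrapping the TIFFSetField/TIFFWriteEncodedStrip sequence above).
void SaverLoop()
{
    std::unique_lock<std::mutex> lock(frameMutex);
    frameCv.wait(lock, [] { return frameReady; });
    SaveTiff(frame);
    frameReady = false;
}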
I am trying to make GDI+ and OpenGL work together on one offscreen HDC. The current code is something like this (no Internet access):
(1) create a GDI+ bitmap A
(2) create a GDI+ graphics from A as B
(3) get B's HDC
(4) choose a proper pixel format for the HDC
(5) create a GDI+ graphics from the HDC as C
(6) create an OpenGL context from the HDC as D
(7) draw with C and D
(8) release the HDC to B
(9) draw A to other graphics
C is needed because I found that after I release the HDC to B, it is no longer possible for OpenGL to change the bitmap.
To check whether things work, I added a bitmap E by reading the GL pixels before (9).
There is still a problem in the current program:
after releasing the HDC, the bitmap A loses its alpha information.
The bitmap E works fine with correct alpha, but GL_BGRA cannot read out the alpha, so I have to read GL_RGBA data and do a per-pixel conversion into the GDI+ color format.
Should I just use E, or is there another approach?
Example of the ReleaseHDC alpha loss:
Bitmap bitmap(100,100);
Graphics graphics(&bitmap);
HDC hdc=graphics.GetHDC();
Graphics graphics2(hdc);
graphics2.Clear(Color(128,255,0,0));
graphics2.Flush();
graphics.ReleaseHDC(hdc);
//draw bitmap to other graphics
Note: I just figured out that GDI+ doesn't actually use the alpha channel in the HDC; only the RGB channels are shared. So I wonder how it works with a BGRA target bitmap.
I think the best way to use OpenGL and GDI+ together is to generate a texture in GDI+, then load that into OpenGL.
The gist of this is:
void MakeTexture(GLuint& texId)
{
    Bitmap offscreen(512, 512, PixelFormat32bppARGB);
    Graphics gr(&offscreen);
    gr.Clear(Color(128, 255, 0, 0));
    Gdiplus::SolidBrush brush(Color(255, 0, 0, 255));
    Pen pen(Color(128, 255, 0, 0), 16.f);
    Font font(L"Arial", 48.f);
    Rect r(25, 25, 100, 100);
    gr.DrawRectangle(&pen, r);
    gr.DrawString(TEXT("TEST STRING"), -1, &font, PointF(50, 50), &brush);
    vector<DWORD> argb;
    GetBitsLockBits(offscreen, argb, 1);
    genTexture(texId, offscreen.GetWidth(), offscreen.GetHeight(), argb);
}

void GetBitsLockBits(Bitmap& bmp, vector<DWORD>& argb, bool invert = 0)
{
    BitmapData bmpData;
    RectF rectf;
    Unit unit;
    bmp.GetBounds(&rectf, &unit);
    Rect rect(rectf.X, rectf.Y, rectf.Width, rectf.Height);
    printf("Got rect %d %d %d %d\n", rect.X, rect.Y, rect.Width, rect.Height);
    bmp.LockBits(&rect, ImageLockModeRead, PixelFormat32bppARGB, &bmpData);
    printf("BMP has w=%d h=%d stride=%d\n", bmpData.Width, bmpData.Height, bmpData.Stride);
    argb.resize(bmpData.Width * bmpData.Height);
    if (invert)
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + (bmpData.Height - 1 - i) * bmpData.Width, bmpData.Width * 4);
    else if (bmpData.Stride == bmpData.Width * 4)
        memcpy(&argb[0], bmpData.Scan0, bmpData.Width * bmpData.Height * 4); // If the bmp is padded then
        // this won't read the image correctly (it will read it with pad bits between)
    else
        for (int i = 0; i < bmpData.Height; i++)
            memcpy(&argb[i * bmpData.Width], (GLuint*)bmpData.Scan0 + i * bmpData.Width, bmpData.Width * 4);
    bmp.UnlockBits(&bmpData);
}

void genTexture(GLuint& texId, int w, int h, const vector<DWORD>& argb)
{
    glGenTextures(1, &texId);                                           CHECK_GL;
    glBindTexture(GL_TEXTURE_2D, texId);                                CHECK_GL;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);                              CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);   CHECK_GL;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);   CHECK_GL;
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, &argb[0]); CHECK_GL;
}
Trying to have OpenGL and GDI+ cooperate by drawing into the same window directly will probably get you very flickery results.
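To tie the pieces together, a minimal usage sketch in the same legacy fixed-function style as the code above, assuming a current GL context and a default orthographic mapping of the unit square:

GLuint texId = 0;
MakeTexture(texId);                  // generate the GDI+-drawn texture from above

glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texId);
glBegin(GL_QUADS);
    glTexCoord2f(0.f, 0.f); glVertex2f(-1.f, -1.f);
    glTexCoord2f(1.f, 0.f); glVertex2f( 1.f, -1.f);
    glTexCoord2f(1.f, 1.f); glVertex2f( 1.f,  1.f);
    glTexCoord2f(0.f, 1.f); glVertex2f(-1.f,  1.f);
glEnd();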
I am looking for a way to draw text on an EGLTexture using the Skia library in C/C++. I am planning to run a program that does this on the Android platform during boot, before SurfaceFlinger comes up.
Please don't point me to Android Java examples, as that is not what I am looking for. I am troubleshooting a UI issue at the framebuffer level and looking for a way to do this in C/C++ using Android native libs (Skia etc.).
I have a sample program that can render an image onto an EGLTexture using SkBitmap, and I was able to display it on a monitor. Following the same example, I came up with the strategy below, but it doesn't work:
0) Clear the screen with green color
1) Create SkBitmap of size 640x480.
2) Create EGLTexture backed by the pixel buffer returned by SkBitmap.lockPixels()
3) Draw text on SkBitmap using SkCanvas. Upload the bitmap into the above texture.
4) Then draw the texture on current surface
I used boot animation program (of android) as my starting point for this.
When I ran this program, all I saw was green. I checked the EGL calls for errors; they all seem to succeed. Thanks for your help.
I posted this question on Google Groups; Brian from Google has some pointers here:
https://groups.google.com/d/topic/skia-discuss/aC5f6HB4gSU/discussion
Below is code that implements the above.
#define EXPECT_NO_GL_ERROR(stmt)                                    \
    do {                                                            \
        stmt;                                                       \
        const EGLint error_code = eglGetError();                    \
        if (EGL_SUCCESS != error_code) {                            \
            LOGD("GLTest: GL error code %d at %s:%d", error_code, __FILE__, __LINE__); \
            __android_log_assert("GLTest", "GLtest", "GlTest");     \
        }                                                           \
    } while (0)
struct Texture
{
    GLint w;
    GLint h;
    GLuint id;
};
bool GLTest::frametest()
{
    Texture texFrame;
    // Paint screen with green color
    glShadeModel(GL_FLAT);
    glDisable(GL_DITHER);
    glDisable(GL_SCISSOR_TEST);
    glClearColor(0, 1, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    eglSwapBuffers(mDisplay, mSurface);
    SkGraphics::Init();
    SkBitmap bitmap;
    bitmap.setConfig(SkBitmap::kARGB_8888_Config, 640, 480);
    bitmap.allocPixels();
    if (NO_ERROR != initTexture(&texFrame, bitmap))
    {
        LOGD("GLTest: Unable to create a texture that is backed by SkBitmap");
        return false;
    }
    SkCanvas canvas(bitmap);
    SkPaint textAttribs;
    textAttribs.setColor(0xFFFFFFFF);
    textAttribs.setTextSize(SkIntToScalar(24));
    const nsecs_t startTime = systemTime();
    int frame_count = 0;
    do
    {
        nsecs_t now = systemTime();
        double time = now - startTime;
        canvas.drawColor(0xFF0000FF);
        canvas.drawText("Hello world", strlen("Hello world"), 200, 400,
                        textAttribs);
        initTexture(&texFrame, bitmap); // Upload bitmap into the texture
        glEnable(GL_BLEND);
        EXPECT_NO_GL_ERROR(glBindTexture(GL_TEXTURE_2D, texFrame.id));
        EXPECT_NO_GL_ERROR(glDrawTexiOES(0, 0, 0, texFrame.w, texFrame.h));
        EGLBoolean res = eglSwapBuffers(mDisplay, mSurface);
        if (res == EGL_FALSE)
            break;
        frame_count++;
        if (0 == (frame_count % 150))
            LOGD("GLTest: Completed %d frames", frame_count);
        // 12fps: don't animate too fast to preserve CPU
        const nsecs_t sleepTime = 83333 - ns2us(systemTime() - now);
        if (sleepTime > 0)
            usleep(sleepTime);
    } while (!exitPending());
    return false;
}
status_t GLTest::initTexture(Texture* texture, SkBitmap &bitmap)
{
    bitmap.lockPixels();
    const int w = bitmap.width();
    const int h = bitmap.height();
    const void* p = bitmap.getPixels();
    GLint crop[4] = { 0, h, w, -h };
    texture->w = w;
    texture->h = h;
    EXPECT_NO_GL_ERROR(glGenTextures(1, &(texture->id)));
    EXPECT_NO_GL_ERROR(glBindTexture(GL_TEXTURE_2D, texture->id));
    switch (bitmap.getConfig())
    {
    case SkBitmap::kA8_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, w, h, 0, GL_ALPHA, GL_UNSIGNED_BYTE, p));
        break;
    case SkBitmap::kARGB_4444_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p));
        break;
    case SkBitmap::kARGB_8888_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, p));
        break;
    case SkBitmap::kRGB_565_Config:
        EXPECT_NO_GL_ERROR(
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p));
        break;
    default:
        break;
    }
    EXPECT_NO_GL_ERROR(glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, crop));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT));
    EXPECT_NO_GL_ERROR(glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT));
    return NO_ERROR;
}
I found out why my code didn't work: when creating textures, the width and height must be a power of two. For example, if the width is 1920, the texture should be created with a width of 2048 (the next power of two).
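For reference, a portable branch-free helper that rounds a dimension up to the next power of two (my own sketch, equivalent in spirit to the __builtin_clz logic in the code below):

// Round v up to the nearest power of two (valid for 0 < v <= 2^30).
static int NextPow2(int v)
{
    v--;
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
    v |= v >> 8;  v |= v >> 16;
    return v + 1;
}
// NextPow2(1920) == 2048, NextPow2(1024) == 1024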
I changed initTexture to the version below; now I am able to draw text to the SkBitmap, upload the bitmap to the texture, and draw the texture. The new initTexture uploads a given bitmap to a texture:
bool initTexture(Texture* texture, const SkBitmap &bitmap)
{
    bool result = true;
    SkAutoLockPixels alp(bitmap);
    const int w = bitmap.width();
    const int h = bitmap.height();
    const void* p = bitmap.getPixels();
    int tw = 1 << (31 - __builtin_clz(w));
    int th = 1 << (31 - __builtin_clz(h));
    if (tw < w)
        tw <<= 1;
    if (th < h)
        th <<= 1;
    if (NULL == texture)
        return false;
    if (texture->id != 0)
    {
        glBindTexture(GL_TEXTURE_2D, texture->id);
        switch (bitmap.getConfig())
        {
        case SkBitmap::kA8_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_ALPHA, GL_UNSIGNED_BYTE, p);
            break;
        case SkBitmap::kARGB_4444_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p);
            break;
        case SkBitmap::kARGB_8888_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, p);
            break;
        case SkBitmap::kRGB_565_Config:
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p);
            break;
        default:
            break;
        }
        return true;
    }
    GLint crop[4] = { 0, h, w, -h };
    texture->w = w;
    texture->h = h;
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &(texture->id));
    glBindTexture(GL_TEXTURE_2D, texture->id);
    switch (bitmap.getConfig())
    {
    case SkBitmap::kA8_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, w, h, 0, GL_ALPHA, GL_UNSIGNED_BYTE, p);
        break;
    case SkBitmap::kARGB_4444_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, p);
        break;
    case SkBitmap::kARGB_8888_Config:
        if (tw != w || th != h)
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tw, th, 0, GL_RGBA,
                         GL_UNSIGNED_BYTE, 0);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA,
                            GL_UNSIGNED_BYTE, p);
        }
        else
        {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, p);
        }
        break;
    case SkBitmap::kRGB_565_Config:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, p);
        break;
    default:
        break;
    }
    glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, crop);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    return result;
}
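One consequence of padding to tw x th worth noting: only the top-left w x h region holds data. The crop rect set above accounts for this when drawing with glDrawTexiOES(), but if you sample the texture with a textured quad instead, you have to scale the coordinates yourself; a minimal sketch:

// Texture coordinates covering only the valid w x h sub-region of a tw x th texture.
const float sMax = (float)w / (float)tw;
const float tMax = (float)h / (float)th;
// Use (0,0)..(sMax,tMax) on the quad instead of (0,0)..(1,1).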
I'm trying to load a BMP image of size 1024x1024 and bind it as a texture in OpenGL. For a BMP of size 256x256 the output is as desired, but for a BMP of size 1024x1024 it fails with the following error:
The program has unexpectedly finished.
C:\Users\xxxx\Desktop\lapping-build-desktop-Qt_4_8_1_for_Desktop_-_MinGW__Qt_SDK__Debug\debug\lapping.exe exited with code -1073741819
The code for loading the BMP:
Texture2d* loadBMP(char *fname)
{
    int i, j, w, h, bits;
    unsigned long l;
    GLubyte c[3];
    Texture2d* tex;
    FILE *fin;
    unsigned char head[54];
    fin = fopen(fname, "rb");
    if (fin == NULL) {
        printf("Bitmap '%s' not found\n", fname);
        return NULL;
    }
    fread(head, 54, 1, fin);
    w = (head[21]<<24)|(head[20]<<16)|(head[19]<<8)|head[18]; // width of the image
    h = (head[25]<<24)|(head[24]<<16)|(head[23]<<8)|head[22]; // height of the image
    tex = new Texture2d[1];
    tex->w = w;
    tex->h = h;
    tex->buf = new GLubyte[h*w*4];
    tex->id = avTotalTextures++;
    for (i = h-1; i >= 0; i--) {
        l = i*w*4;
        for (j = 0; j < w; j++) {
            fread(c, 1, 3, fin);
            tex->buf[l++] = c[2];
            tex->buf[l++] = c[1];
            tex->buf[l++] = c[0];
            tex->buf[l++] = 255;
        }
    }
    fclose(fin);
    printf("Bitmap_load '%s' loaded\n", fname);
    return tex;
}
The code for binding the texture:
void Object3d::bindTexture(Texture2d *t)
{
    if (t == NULL)
        return;
    tex = t;
    glBindTexture(GL_TEXTURE_2D, tex->id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
    gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
    delete [] tex->buf;
    tex->buf = NULL;
}
This works well for a BMP of size 256x256, but when I try to get it working for a BMP of 1024x1024 by changing the width and height parameters in
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, tex->buf);
from 0,0 and 256,256 to 1024,1024 respectively, it throws the previously mentioned error. I have referred to the OpenGL specification but couldn't come up with a reason why this wouldn't work.