How to get a C method to accept a UIImage parameter?

I am trying to do some image processing on a UIImage using some EAGLView code from the GLImageProcessing sample from Apple. The sample code is configured to perform processing on a pre-installed image (Image.png). I am trying to modify the code so that it will accept a UIImage (or at least CGImage data) of my choice and process that instead. The problem is, the texture-loader function loadTexture() (below) seems to accept only C structures as parameters, and I have not been able to get it to accept a UIImage* or a CGImage as a parameter. Can someone give me a clue as to how to bridge the gap so that I can pass my UIImage into the C function?
------------ from Texture.h ---------------
#ifndef TEXTURE_H
#define TEXTURE_H
#include "Imaging.h"
void loadTexture(const char *name, Image *img, RendererInfo *renderer);
#endif /* TEXTURE_H */
----------------from Texture.m---------------------
#import <UIKit/UIKit.h>
#import "Texture.h"
static unsigned int nextPOT(unsigned int x)
{
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >>16);
return x + 1;
}
// This is not a fully generalized image loader. It is an example of how to use
// CGImage to directly access decompressed image data. Only the most commonly
// used image formats are supported. It will be necessary to expand this code
// to account for other uses, for example cubemaps or compressed textures.
//
// If the image format is supported, this loader will Gen an OpenGL 2D texture object
// and upload texels from it, padding to POT if needed. For image processing purposes,
// border pixels are also replicated here to ensure proper filtering during e.g. blur.
//
// The caller of this function is responsible for deleting the GL texture object.
void loadTexture(const char *name, Image *img, RendererInfo *renderer)
{
GLuint texID = 0, components, x, y;
GLuint imgWide, imgHigh; // Real image size
GLuint rowBytes, rowPixels; // Image size padded by CGImage
GLuint POTWide, POTHigh; // Image size padded to next power of two
CGBitmapInfo info; // CGImage component layout info
CGColorSpaceModel colormodel; // CGImage colormodel (RGB, CMYK, paletted, etc)
GLenum internal, format;
GLubyte *pixels, *temp = NULL;
CGImageRef CGImage = [UIImage imageNamed:[NSString stringWithUTF8String:name]].CGImage;
rt_assert(CGImage);
if (!CGImage)
return;
// Parse CGImage info
info = CGImageGetBitmapInfo(CGImage); // CGImage may return pixels in RGBA, BGRA, or ARGB order
colormodel = CGColorSpaceGetModel(CGImageGetColorSpace(CGImage));
size_t bpp = CGImageGetBitsPerPixel(CGImage);
if (bpp < 8 || bpp > 32 || (colormodel != kCGColorSpaceModelMonochrome && colormodel != kCGColorSpaceModelRGB))
{
// This loader does not support all possible CGImage types, such as paletted images
CGImageRelease(CGImage);
return;
}
components = bpp>>3;
rowBytes = CGImageGetBytesPerRow(CGImage); // CGImage may pad rows
rowPixels = rowBytes / components;
imgWide = CGImageGetWidth(CGImage);
imgHigh = CGImageGetHeight(CGImage);
img->wide = rowPixels;
img->high = imgHigh;
img->s = (float)imgWide / rowPixels;
img->t = 1.0;
// Choose OpenGL format
switch(bpp)
{
default:
rt_assert(0 && "Unknown CGImage bpp");
case 32:
{
internal = GL_RGBA;
switch(info & kCGBitmapAlphaInfoMask)
{
case kCGImageAlphaPremultipliedFirst:
case kCGImageAlphaFirst:
case kCGImageAlphaNoneSkipFirst:
format = GL_BGRA;
break;
default:
format = GL_RGBA;
}
break;
}
case 24:
internal = format = GL_RGB;
break;
case 16:
internal = format = GL_LUMINANCE_ALPHA;
break;
case 8:
internal = format = GL_LUMINANCE;
break;
}
// Get a pointer to the uncompressed image data.
//
// This allows access to the original (possibly unpremultiplied) data, but any manipulation
// (such as scaling) has to be done manually. Contrast this with drawing the image
// into a CGBitmapContext, which allows scaling, but always forces premultiplication.
CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider(CGImage));
rt_assert(data);
pixels = (GLubyte *)CFDataGetBytePtr(data);
rt_assert(pixels);
// If the CGImage component layout isn't compatible with OpenGL, fix it.
// On the device, CGImage will generally return BGRA or RGBA.
// On the simulator, CGImage may return ARGB, depending on the file format.
if (format == GL_BGRA)
{
uint32_t *p = (uint32_t *)pixels;
int i, num = img->wide * img->high;
if ((info & kCGBitmapByteOrderMask) != kCGBitmapByteOrder32Host)
{
// Convert from ARGB to BGRA
for (i = 0; i < num; i++)
p[i] = (p[i] << 24) | ((p[i] & 0xFF00) << 8) | ((p[i] >> 8) & 0xFF00) | (p[i] >> 24);
}
// All current iPhoneOS devices support BGRA via an extension.
if (!renderer->extension[IMG_texture_format_BGRA8888])
{
format = GL_RGBA;
// Convert from BGRA to RGBA
for (i = 0; i < num; i++)
#if __LITTLE_ENDIAN__
p[i] = ((p[i] >> 16) & 0xFF) | (p[i] & 0xFF00FF00) | ((p[i] & 0xFF) << 16);
#else
p[i] = ((p[i] & 0xFF00) << 16) | (p[i] & 0xFF00FF) | ((p[i] >> 16) & 0xFF00);
#endif
}
}
// Determine if we need to pad this image to a power of two.
// There are multiple ways to deal with NPOT images on renderers that only support POT:
// 1) scale down the image to POT size. Loses quality.
// 2) pad up the image to POT size. Wastes memory.
// 3) slice the image into multiple POT textures. Requires more rendering logic.
//
// We are only dealing with a single image here, and pick 2) for simplicity.
//
// If you prefer 1), you can use CoreGraphics to scale the image into a CGBitmapContext.
POTWide = nextPOT(img->wide);
POTHigh = nextPOT(img->high);
if (!renderer->extension[APPLE_texture_2D_limited_npot] && (img->wide != POTWide || img->high != POTHigh))
{
GLuint dstBytes = POTWide * components;
temp = (GLubyte *)malloc(dstBytes * POTHigh); // assign the outer 'temp' so it is freed below
for (y = 0; y < img->high; y++)
memcpy(&temp[y*dstBytes], &pixels[y*rowBytes], rowBytes);
img->s *= (float)img->wide/POTWide;
img->t *= (float)img->high/POTHigh;
img->wide = POTWide;
img->high = POTHigh;
pixels = temp;
rowBytes = dstBytes;
}
// For filters that sample texel neighborhoods (like blur), we must replicate
// the edge texels of the original input, to simulate CLAMP_TO_EDGE.
{
GLuint replicatew = MIN(MAX_FILTER_RADIUS, img->wide-imgWide);
GLuint replicateh = MIN(MAX_FILTER_RADIUS, img->high-imgHigh);
GLuint imgRow = imgWide * components;
for (y = 0; y < imgHigh; y++)
for (x = 0; x < replicatew; x++)
memcpy(&pixels[y*rowBytes+imgRow+x*components], &pixels[y*rowBytes+imgRow-components], components);
for (y = imgHigh; y < imgHigh+replicateh; y++)
memcpy(&pixels[y*rowBytes], &pixels[(imgHigh-1)*rowBytes], imgRow+replicatew*components);
}
if (img->wide <= renderer->maxTextureSize && img->high <= renderer->maxTextureSize)
{
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_2D, texID);
// Set filtering parameters appropriate for this application (image processing on screen-aligned quads.)
// Depending on your needs, you may prefer linear filtering, or mipmap generation.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, internal, img->wide, img->high, 0, format, GL_UNSIGNED_BYTE, pixels);
}
if (temp) free(temp);
CFRelease(data);
CGImageRelease(CGImage);
img->texID = texID;
}
Side note: the above code is the original, unmodified sample code from Apple and compiles without errors. However, when I try to modify the .h and .m to accept a UIImage* parameter (as below), the compiler generates the following error: "Error: expected declaration specifiers or '...' before UIImage"
----------Modified .h Code that generates the Compiler Error:-------------
void loadTexture(const char *name, Image *img, RendererInfo *renderer, UIImage* newImage)

You are probably importing this .h into a .c file somewhere. That tells the compiler to use C rather than Objective-C. UIKit.h (and its many children) are Objective-C and cannot be compiled by a C compiler.
You could rename all your .c files to .m, but what you probably really want is to use CGImageRef and import CGImage.h. CoreGraphics is C-based; UIKit is Objective-C. There is no problem with Texture.m itself being Objective-C, as long as Texture.h stays pure C. Alternatively (and I do this a lot with C++ code), you can make a Texture+C.h header that exposes just the C-safe functions you want: import Texture.h in Objective-C code and Texture+C.h in C code. Or name them the other way around if that is more convenient, with a Texture+ObjC.h. A sketch of the pure-C header approach follows.
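----------Sketch: a pure-C Texture.h exposing a CGImageRef entry point----------
(This is only a sketch of the split described above; the name loadTextureFromCGImage is illustrative and not part of Apple's sample.)
#ifndef TEXTURE_H
#define TEXTURE_H
#include <CoreGraphics/CoreGraphics.h> /* plain C API; defines CGImageRef */
#include "Imaging.h"
/* Original entry point, unchanged. */
void loadTexture(const char *name, Image *img, RendererInfo *renderer);
/* Hypothetical C-safe variant that takes already-loaded image data. */
void loadTextureFromCGImage(CGImageRef image, Image *img, RendererInfo *renderer);
#endif /* TEXTURE_H */
From Objective-C you would then call something like loadTextureFromCGImage(myUIImage.CGImage, &img, &renderer); inside Texture.m the body can stay essentially the same as loadTexture, minus the [UIImage imageNamed:...] line, and it should not call CGImageRelease on a CGImageRef the caller still owns (or it should CGImageRetain it first).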

It sounds like your file isn't importing the UIKit header.

Why are you passing a new image to loadTexture, instead of using loadTexture's own UIImage loading to open the new image you want?
loadTexture:
void loadTexture(const char *name, Image *img, RendererInfo *renderer)
{
GLuint texID = 0, components, x, y;
GLuint imgWide, imgHigh; // Real image size
GLuint rowBytes, rowPixels; // Image size padded by CGImage
GLuint POTWide, POTHigh; // Image size padded to next power of two
CGBitmapInfo info; // CGImage component layout info
CGColorSpaceModel colormodel; // CGImage colormodel (RGB, CMYK, paletted, etc)
GLenum internal, format;
GLubyte *pixels, *temp = NULL;
// Why not have the following line fetch your UIImage?
CGImageRef CGImage = [UIImage imageNamed:[NSString stringWithUTF8String:name]].CGImage;
rt_assert(CGImage);
if (!CGImage)
return;

Related

DirectX 11 and FreeType

Has anyone ever integrated FreeType with DirectX 11 for font rendering? The only article I seem to find is DirectX 11 Font Rendering. I can't seem to match the correct DXGI_FORMAT for rendering the grayscale bitmap that FreeType creates for a glyph.
There are three ways to handle grayscale textures in Direct3D 11:
Option (1): You can use an RGB format and replicate the channels. For example, you'd use DXGI_FORMAT_R8G8B8A8_UNORM and set R, G, and B to the single monochrome channel and A to fully opaque (0xFF). You can handle monochrome + alpha (2-channel) data the same way.
This conversion is supported when loading .DDS luminance formats (D3DFMT_L8, D3DFMT_L8A8) by the DirectXTex library and the texconv command-line tool with the -xlum switch.
This makes the texture up to 4 times larger in memory, but it integrates easily with standard shaders (see the sketch below).
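As a rough sketch of option (1), assuming the glyph coverage values sit in an 8-bit buffer with a known row pitch (all names below are illustrative):
#include <cstdint>
#include <vector>
// Expand a single-channel coverage bitmap into RGBA8 (R = G = B = value, A = opaque).
// 'pitch' is the source row stride in bytes.
std::vector<std::uint8_t> ExpandGrayToRGBA(const std::uint8_t* src, int width, int height, int pitch)
{
    std::vector<std::uint8_t> rgba(static_cast<size_t>(width) * height * 4);
    for (int y = 0; y < height; ++y)
    {
        const std::uint8_t* row = src + static_cast<size_t>(y) * pitch;
        std::uint8_t* out = &rgba[static_cast<size_t>(y) * width * 4];
        for (int x = 0; x < width; ++x)
        {
            out[x * 4 + 0] = row[x]; // R
            out[x * 4 + 1] = row[x]; // G
            out[x * 4 + 2] = row[x]; // B
            out[x * 4 + 3] = 0xFF;   // A: all opaque, as described above
        }
    }
    return rgba;
}
// The expanded buffer can then be uploaded as DXGI_FORMAT_R8G8B8A8_UNORM with SysMemPitch = width * 4.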
Option (2): You keep the monochrome texture as a single channel using DXGI_FORMAT_R8_UNORM as your format. You then render using a custom shader which replicates the red channel to RGB at runtime.
This is in fact what the tutorial blog post you linked to is doing:
///////// PIXEL SHADER
float4 main(float2 uv : TEXCOORD0) : SV_Target0
{
return float4(Decal.Sample(Bilinear, uv).rrr, 1.f);
}
For Monochrome + Alpha (2-channel) you'd use DXGI_FORMAT_R8G8_UNORM and then your custom shader would use .rrrg as the swizzle.
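For completeness, creating the single-channel texture for option (2) could look roughly like this (a sketch with illustrative parameter names; error handling and the shader resource view creation are omitted):
#include <d3d11.h>
// Sketch: upload an 8-bit glyph bitmap as a DXGI_FORMAT_R8_UNORM texture.
HRESULT CreateGlyphTexture(ID3D11Device* device,
                           const void* glyphPixels, UINT glyphWidth, UINT glyphHeight, UINT glyphPitch,
                           ID3D11Texture2D** outTexture)
{
    D3D11_TEXTURE2D_DESC desc = {};
    desc.Width = glyphWidth;
    desc.Height = glyphHeight;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_R8_UNORM;      // one byte per pixel
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_IMMUTABLE;
    desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

    D3D11_SUBRESOURCE_DATA initData = {};
    initData.pSysMem = glyphPixels;          // the glyph coverage values
    initData.SysMemPitch = glyphPitch;       // source row stride in bytes

    return device->CreateTexture2D(&desc, &initData, outTexture);
}
// A shader resource view over this texture is then sampled by the .rrr swizzle shader shown above.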
Option (3): You can compress the monochrome data to the DXGI_FORMAT_BC2 format using a custom encoder. This is implemented in the DirectX Tool Kit's MakeSpriteFont tool when using /TextureFormat:CompressedMono.
// CompressBlock (16 pixels (4x4 block) stored as 16 bytes)
long alphaBits = 0;
int rgbBits = 0;
int pixelCount = 0;
for (int y = 0; y < 4; y++)
{
for (int x = 0; x < 4; x++)
{
long alpha;
int rgb;
// This is the single monochrome channel
int value = bitmapData[blockX + x, blockY + y];
if (options.NoPremultiply)
{
// If we are not premultiplied, RGB is always white and we have 4 bit alpha.
alpha = value >> 4;
rgb = 0;
}
else
{
// For premultiplied encoding, quantize the source value to 2 bit precision.
if (value < 256 / 6)
{
alpha = 0;
rgb = 1;
}
else if (value < 256 / 2)
{
alpha = 5;
rgb = 3;
}
else if (value < 256 * 5 / 6)
{
alpha = 10;
rgb = 2;
}
else
{
alpha = 15;
rgb = 0;
}
}
// Add this pixel to the alpha and RGB bit masks.
alphaBits |= alpha << (pixelCount * 4);
rgbBits |= rgb << (pixelCount * 2);
pixelCount++;
}
}
// The resulting BC2 block is:
// uint64_t = alphaBits
// uint16_t = 0xFFFF
// uint16_t = 0x0
// uint32_t = rgbBits
The resulting texture is then rendered using a standard alpha-blending shader. Since it uses 1 byte per pixel, this is effectively the same size as if you were using DXGI_FORMAT_R8_UNORM.
This technique does not work for 2-channel data, but works great for alpha-blended monochrome images like font glyphs.

DirectX9 texture shows up distorted

For some reason, on machines that only conditionally support "non power of two" (NONPOW2) textures (D3DPTEXTURECAPS_NONPOW2CONDITIONAL), I'm getting a distorted image. (On machines that fully support NONPOW2 textures, everything works fine.)
This is a 1280x720 texture in D3DFMT_X8R8G8B8, filled from RGB24 source data.
I know there are four rules when dealing with conditionally supported textures:
1. The texture addressing mode for the texture stage is set to D3DTADDRESS_CLAMP.
mpDevice->SetSamplerState(0, D3DSAMP_ADDRESSU, D3DTADDRESS_CLAMP);
mpDevice->SetSamplerState(0, D3DSAMP_ADDRESSV, D3DTADDRESS_CLAMP);
mpDevice->SetSamplerState(0, D3DSAMP_ADDRESSW, D3DTADDRESS_CLAMP);
2. Texture wrapping for the texture stage is disabled (D3DRS_WRAPn set to 0).
mpDevice->SetRenderState(D3DRS_WRAP0, 0);
3. Mipmapping is not in use (use the magnification filter only).
mpDevice->SetSamplerState(0, D3DSAMP_MIPFILTER, D3DTEXF_NONE);
4. Texture formats must not be D3DFMT_DXT1 through D3DFMT_DXT5.
I'm using D3DFMT_X8R8G8B8...
Texture is created like this:
mpDevice->CreateTexture(width, height, 1, D3DUSAGE_DYNAMIC, D3DFMT_X8R8G8B8, D3DPOOL_DEFAULT, &pTexture, nullptr);
And updated like this:
D3DLOCKED_RECT lockedRect;
pTexture->LockRect(0, &lockedRect, nullptr, D3DLOCK_DISCARD);
// const IppiSize roiSize = { width, height };
// ippiCopy_8u_C3AC4R((Ipp8u*) pPixels, width * 3, (Ipp8u*) lockedRect.pBits, width * 4, roiSize); // RGB to RGBA
// ippiCopy_8u_AC4C3R((Ipp8u*) pPixels, width * 4, (Ipp8u*) lockedRect.pBits, width * 3, roiSize); // RGBA to RGB
// TEMP.
unsigned char* pDst = (unsigned char*) lockedRect.pBits;
const unsigned char* pSrc = pPixels;
for (int k = 0; k < width; k++)
{
for (int j = 0; j < height; j++)
{
pDst[0] = pSrc[0];
pDst[1] = pSrc[1];
pDst[2] = pSrc[2];
pDst[3] = 255;
pDst += 4;
pSrc += 3;
}
}
pTexture->UnlockRect(0);
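For comparison, a copy that honors the row pitch reported by LockRect would look like this (a sketch; whether lockedRect.Pitch actually differs from width * 4 here is only a guess):
// Sketch: the same update, but advancing the destination by lockedRect.Pitch per row.
D3DLOCKED_RECT lockedRect;
pTexture->LockRect(0, &lockedRect, nullptr, D3DLOCK_DISCARD);
const unsigned char* pSrc = pPixels;
for (int y = 0; y < height; y++)
{
unsigned char* pDstRow = (unsigned char*)lockedRect.pBits + y * lockedRect.Pitch; // Pitch is in bytes
for (int x = 0; x < width; x++)
{
pDstRow[x * 4 + 0] = pSrc[0];
pDstRow[x * 4 + 1] = pSrc[1];
pDstRow[x * 4 + 2] = pSrc[2];
pDstRow[x * 4 + 3] = 255;
pSrc += 3;
}
}
pTexture->UnlockRect(0);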
Has anyone had a similar problem, or have any idea what is going on?
Maybe there's something wrong with the RGB data itself?
Thanks.

What's the best way to read texture info from an image file and get to the pixels using SDL2?

You can create a texture in SDL2 using SDL_CreateTexture() and then get access to the pixels in that texture using SDL_LockTexture(). But to do so you need to have passed the SDL_TEXTUREACCESS_STREAMING flag to the SDL_CreateTexture() call.
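A minimal sketch of that streaming path, assuming an existing SDL_Renderer* and known dimensions:
SDL_Texture * tex = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888,
SDL_TEXTUREACCESS_STREAMING, width, height);
void * pixels = NULL;
int pitch = 0;
if (SDL_LockTexture(tex, NULL, &pixels, &pitch) == 0) {
// write up to 'pitch' bytes per row into 'pixels' here;
// note that SDL documents locked streaming pixels as write-only --
// they are not guaranteed to contain the texture's current contents
SDL_UnlockTexture(tex);
}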
There's a fairly standard helper library for loading images called SDL_image. I use it to read image files into textures (textures are graphics card resident images for the casual observer). I'm currently loading my textures using IMG_LoadTexture(). My problem is I can't see how to set the SDL_TEXTUREACCESS_STREAMING flag in this case. So I can't get pixel data for textures loaded with SDL_image?
The reason I want to get to the pixels is to extract nine-patch data from them. (I may well end up having 9 textures). So I only need this info once at the start and I only need to read the texture data, not write it. I'd also like to use preexisting image file reading libraries if at all possible.
So the question is: What's the best way to read texture info from an image file and get to the pixels using SDL2?
I decided it was better to do it with surfaces and then convert to textures. This code works; it only does the top left corner of the nine-patch to keep it simple. (It also ignores the complication of the nine-patch sizing info in the first row and column of the nine-patch image.)
typedef unsigned char byte_t;
NinePatch::NinePatch(const string fname, SDL_Renderer * renderer) {
// get the surface and the bits per pixel
SDL_Surface * surface = IMG_Load(fname.c_str());
int bytes_per_pixel = surface->format->BytesPerPixel;
// keep things simple by only looking at 4 byte/pixel nine-patches
if (bytes_per_pixel != 4) {
log_msg("Loading " + fname +
" expecting pixel data to be 4 but it has: " +
to_string(bytes_per_pixel));
exit(1);
}
// offsets into the surface that divide the surface into a nine-patch
unsigned int left = 0, right = 0, top = 0, bottom = 0;
// find the widths we need by looking at the top row of pixels
byte_t * ptr = (byte_t*)surface->pixels;
uint32_t pixel, last_pixel = 0;
for (int i = 0; i < surface->w; i++) {
// we know they're 4 byte pixels cause otherwise we don't get here.
pixel = *(uint32_t*)ptr;
// look for "edges" in the top row of pixel data
if (pixel > last_pixel) {
left = i;
}
else if (pixel < last_pixel) {
right = i;
}
last_pixel = pixel;
// get the next pixel across
ptr += bytes_per_pixel;
}
// find the heights we need by looking at the left column of pixels
ptr = (byte_t*)surface->pixels;
last_pixel = 0;
for (int i = 0; i < surface->h; i++) {
// we know they're 4 byte pixels cause otherwise we don't get here.
pixel = *(uint32_t*)ptr;
// look for "edges" in the left column of pixel data
if (pixel > last_pixel) {
top = i;
}
else if (pixel < last_pixel) {
bottom = i;
}
last_pixel = pixel;
// get the next pixel down
ptr += bytes_per_pixel * surface->w;
}
// SDL interprets each pixel as a 32-bit number, so our masks
// must depend on the endianness (byte order) of the machine
Uint32 rmask, gmask, bmask, amask;
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000ff;
#else
rmask = 0x000000ff;
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;
#endif
const uint32_t unused_flags = 0;
const int pixel_size = 32; // in bits
// scratch surface we use for breaking the nine-patch
// surface into little textures.
SDL_Surface * s;
SDL_Rect src_rect;
// create a surface to hold the top left corner
s = SDL_CreateRGBSurface(unused_flags, left, top,
pixel_size, rmask, gmask, bmask, amask);
// copy part of the nine-patch image surface into the new surface
src_rect.x = 0;
src_rect.y = 0;
src_rect.w = left;
src_rect.h = top;
SDL_BlitSurface(surface, &src_rect, s, NULL);
// convert the new corner surface into a texture
top_left_texture = SDL_CreateTextureFromSurface(renderer, s);
// free the scratch surface
SDL_FreeSurface(s);
// free the source surface now that we are done reading it
SDL_FreeSurface(surface);
}

DDS texture transparency rendered black Opengl

I am currently trying to render textured objects in OpenGL. Everything worked fine until I wanted to render a texture with transparency. Instead of showing the object as transparent, it just renders completely black.
The method for loading the texture file is this:
// structures for reading and information variables
char magic[4];
unsigned char header[124];
unsigned int width, height, linearSize, mipMapCount, fourCC;
unsigned char* dataBuffer;
unsigned int bufferSize;
fstream file(path, ios::in|ios::binary);
// read magic and header
if (!file.read((char*)magic, sizeof(magic))){
cerr<< "File " << path << " not found!"<<endl;
return false;
}
if (magic[0]!='D' || magic[1]!='D' || magic[2]!='S' || magic[3]!=' '){
cerr<< "File does not comply with dds file format!"<<endl;
return false;
}
if (!file.read((char*)header, sizeof(header))){
cerr<< "Not able to read file information!"<<endl;
return false;
}
// derive information from header
height = *(int*)&(header[8]);
width = *(int*)&(header[12]);
linearSize = *(int*)&(header[16]);
mipMapCount = *(int*)&(header[24]);
fourCC = *(int*)&(header[80]);
// determine dataBuffer size
bufferSize = mipMapCount > 1 ? linearSize * 2 : linearSize;
dataBuffer = new unsigned char [bufferSize*2];
// read data and close file
if (file.read((char*)dataBuffer, bufferSize/1.5))
cout<<"Loading texture "<<path<<" successful"<<endl;
else{
cerr<<"Data of file "<<path<<" corrupted"<<endl;
return false;
}
file.close();
// check pixel format
unsigned int format;
switch(fourCC){
case FOURCC_DXT1:
format = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
break;
case FOURCC_DXT3:
format = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
break;
case FOURCC_DXT5:
format = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
break;
default:
cerr << "Compression type not supported or corrupted!" << endl;
return false;
}
glGenTextures(1, &ID);
glBindTexture(GL_TEXTURE_2D, ID);
glPixelStorei(GL_UNPACK_ALIGNMENT,1);
unsigned int blockSize = (format == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) ? 8 : 16;
unsigned int offset = 0;
/* load the mipmaps */
for (unsigned int level = 0; level < mipMapCount && (width || height); ++level) {
unsigned int size = ((width+3)/4)*((height+3)/4)*blockSize;
glCompressedTexImage2D(GL_TEXTURE_2D, level, format, width, height,
0, size, dataBuffer + offset);
offset += size;
width /= 2;
height /= 2;
}
textureType = DDS_TEXTURE;
return true;
In the fragment shader I just set the gl_FragColor = texture2D( myTextureSampler, UVcoords )
I hope there is an easy explanation, such as some missing code.
In the OpenGL initialization I enabled GL_BLEND and set a blend function.
Does anyone have an idea of what I did wrong?
Make sure the blend function is the correct one for what you are trying to accomplish. For what you've described, that should be glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
Also, you probably shouldn't set the blend function in your OpenGL initialization; instead, wrap it around your draw calls like this:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// gl draw functions (glDrawArrays, glDrawElements, etc.)
glDisable(GL_BLEND);
Are you clearing the 2D texture binding before you swap buffers? i.e.:
glBindTexture(GL_TEXTURE_2D, 0);

OpenGL Issue Drawing a Large Image Texture causing Skewing

I'm trying to store a 1365x768 image on a 2048x1024 texture in OpenGL ES but the resulting image once drawn appears skewed. If I run the same 1365x768 image through gluScaleImage() and fit it onto the 2048x1024 texture it looks fine when drawn but this OpenGL call is slow and hurts performance.
I'm doing this on an Android device (Motorola Milestone) which has 256MB of memory. Not sure if the memory is a factor though since it works fine when scaled using gluScaleImage() (it's just slower.)
Mapping smaller textures (854x480 onto 1024x512, for example) works fine though. Does anyone know why this is and suggestions for what I can do about it?
Update
Some code snippets to help understand context...
// uiImage is loaded. The texture dimensions are determined from upsizing the image
// dimensions to a power of two size:
// uiImage->_width = 1365
// uiImage->_height = 768
// width = 2048
// height = 1024
// Once the image is loaded:
// INT retval = gluScaleImage(GL_RGBA, uiImage->_width, uiImage->_height, GL_UNSIGNED_BYTE, uiImage->_texels, width, height, GL_UNSIGNED_BYTE, data);
copyImage(GL_RGBA, uiImage->_width, uiImage->_height, GL_UNSIGNED_BYTE, uiImage->_texels, width, height, GL_UNSIGNED_BYTE, data);
if (pixelFormat == RGB565 || pixelFormat == RGBA4444)
{
unsigned char* tempData = NULL;
unsigned int* inPixel32;
unsigned short* outPixel16;
tempData = new unsigned char[height*width*2];
inPixel32 = (unsigned int*)data;
outPixel16 = (unsigned short*)tempData;
if(pixelFormat == RGB565)
{
// "RRRRRRRRGGGGGGGGBBBBBBBBAAAAAAAA" --> "RRRRRGGGGGGBBBBB"
for(unsigned int i = 0; i < numTexels; ++i, ++inPixel32)
{
*outPixel16++ = ((((*inPixel32 >> 0) & 0xFF) >> 3) << 11) |
((((*inPixel32 >> 8) & 0xFF) >> 2) << 5) |
((((*inPixel32 >> 16) & 0xFF) >> 3) << 0);
}
}
if(tempData != NULL)
{
delete [] data;
data = tempData;
}
}
// [snip..]
// Copy function (mostly)
static void copyImage(GLint widthin, GLint heightin, const unsigned int* datain, GLint widthout, GLint heightout, unsigned int* dataout)
{
unsigned int* p1 = const_cast<unsigned int*>(datain);
unsigned int* p2 = dataout;
int nui = widthin * sizeof(unsigned int);
for(int i = 0; i < heightin; i++)
{
memcpy(p2, p1, nui);
p1 += widthin;
p2 += widthout;
}
}
In the render code, without changing my texture coordinates, I see the correct image when using gluScaleImage(), and a smaller image (which needs some correction factors later) with the copyImage() path. This works when the image is small (854x480, for example, is fine with copyImage()), but when I use the 1365x768 image, the skewing appears.
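For reference, the usual GL-side alternative to a CPU copy like copyImage() is to allocate the POT texture empty and upload the NPOT image into its corner with glTexSubImage2D (a sketch, assuming tightly packed RGBA data):
GLuint texId = 0;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // be explicit about row alignment
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 2048, 1024, 0, // allocate the POT texture, no data yet
GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1365, 768, // copy the NPOT image into the corner
GL_RGBA, GL_UNSIGNED_BYTE, uiImage->_texels);
// Texture coordinates for the image then run from 0 to 1365/2048.0 in s and 0 to 768/1024.0 in t.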
I finally solved the issue. The first thing to know is the maximum texture size allowed by the device:
GLint texSize;
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &texSize);
When I ran this, the maximum texture size for the Motorola Milestone was 2048x2048, which was fine in my case.
After messing with the texture mapping to no end, I finally decided to try opening and re-saving the image, and voilà, it suddenly began working. I don't know what was wrong with the format the original image was stored in, but as advice to anyone else experiencing a similar problem: it might be worth looking at the image itself.