Extracting Pixels From SDL2 Surface Created With SDL_TTF - C++

I'm working on a program that creates an SDL_Surface with SDL_ttf, using this font: http://www.fontspace.com/work-ins-studio/variane-script. I set the background of the surface to be transparent, then extract the pixels from the surface and place them in part of an OpenGL texture.
It all works fine, except that the text ends up looking like this (it should read "testing").
My question: did I mess up the math somewhere and cause this myself, or is this just the behaviour of SDL_TTF? And if it is just the behaviour of SDL_TTF, how do I work around it to get pixel data I can use?
Here is the relevant code:
int main(int argc, char* args[]) {
    //other sdl and opengl overhead stuff here...
    TTF_Init();
    //shader setup here...
    TTF_Font *font;
    font = TTF_OpenFont("VarianeScript.ttf", 50);
    SDL_Surface* surface;
    SDL_Color color = { 255, 0, 0 };
    surface = TTF_RenderText_Solid(font, "testing", color);
    SDL_SetSurfaceAlphaMod(surface, 255);
    int surfaceWidth = surface->w;
    int surfaceHeight = surface->h;
    Uint8 red, green, blue, alpha;
    float* textImage = new float[(surfaceWidth * surfaceHeight) * 4];
    int countText = 0;
    SDL_LockSurface(surface);
    Uint8* p = (Uint8*)surface->pixels;
    for (int y = 0; y < surfaceHeight; ++y) {
        for (int x = 0; x < (surfaceWidth); ++x) {
            Uint8 pixel = p[(y * surface->w) + x];
            SDL_GetRGBA(pixel, surface->format, &red, &green, &blue, &alpha);
            textImage[countText] = ((float)red / 255.0f);
            ++countText;
            textImage[countText] = ((float)green / 255.0f);
            ++countText;
            textImage[countText] = ((float)blue / 255.0f);
            ++countText;
            textImage[countText] = ((float)alpha / 255.0f);
            ++countText;
        }
    }
    SDL_UnlockSurface(surface);
    SDL_FreeSurface(surface);
    GLuint texture;
    float* image;
    int width = 1000, height = 1000;
    int textX = width - (int)(width / 1.5);
    int textY = height - (int)(height / 1.5);
    setupTexture(texture, shader, width, height, image, textImage, textX, textY, surfaceWidth, surfaceHeight);
    //etc...
Also, here is setupTexture (the important part starts around where I declare the startpos variables):
void setupTexture(GLuint &texture, Shader &shader, int &width, int &height, float* &image, float* text, int textX, int textY, int textW, int textH) {
    glGenTextures(1, &texture);
    image = new float[(width * height) * 3];
    for (int a = 0; a < (width * height) * 3; ) {
        if (a < ((width * height) * 3) / 2) {
            image[a] = 0.5f;
            ++a;
            image[a] = 1.0f;
            ++a;
            image[a] = 0.3f;
            ++a;
        }
        else {
            image[a] = 0.0f;
            ++a;
            image[a] = 0.5f;
            ++a;
            image[a] = 0.7f;
            ++a;
        }
    }
    int startpos1, startpos2;
    for (int y = 0; y < textH; ++y) {
        for (int x = 0; x < textW; ++x) {
            startpos1 = (((y + textY) * width) * 3) + ((x + textX) * 3);
            startpos2 = ((y * textW) * 4) + (x * 4);
            if (text[startpos2 + 3] != 0.0) {
                image[startpos1] = text[startpos2];
                image[startpos1 + 1] = text[startpos2 + 1];
                image[startpos1 + 2] = text[startpos2 + 2];
            }
        }
    }
    glActiveTexture(GL_TEXTURE0);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, image);
    glUniform1i(glGetUniformLocation(shader.shaderProgram, "texSampler"), 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}

Your problem is in the way you extract pixels from the surface:
Uint8 pixel = p[(y * surface->w) + x];
You assume that each pixel takes one byte (which can be verified by inspecting surface->format->BytesPerPixel), and that each row is surface->w * 1 bytes long - but it isn't. Each row is surface->pitch bytes long, so your code should be
Uint8 pixel = p[y * surface->pitch + x];
(that still assumes each pixel is 1 byte, but that's beside the point).
It is also quite odd that you use floats to represent pixel data; it gains you nothing here aside from much slower loading.
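If you want the extraction to keep working should the surface format ever change, a minimal sketch of a pitch- and BytesPerPixel-aware version of the loop might look like this (untested; it reuses the surface, textImage and countText variables from the question and needs <cstring> for std::memcpy):
SDL_LockSurface(surface);
const Uint8* pixelData = static_cast<const Uint8*>(surface->pixels);
const int bpp = surface->format->BytesPerPixel;
for (int y = 0; y < surface->h; ++y) {
    for (int x = 0; x < surface->w; ++x) {
        // Step by pitch per row and by BytesPerPixel per pixel.
        const Uint8* p = pixelData + y * surface->pitch + x * bpp;
        Uint32 pixel = 0;
        std::memcpy(&pixel, p, bpp); // fine for 1/2/4-byte formats on little-endian machines
        Uint8 r, g, b, a;
        SDL_GetRGBA(pixel, surface->format, &r, &g, &b, &a);
        textImage[countText++] = r / 255.0f;
        textImage[countText++] = g / 255.0f;
        textImage[countText++] = b / 255.0f;
        textImage[countText++] = a / 255.0f;
    }
}
SDL_UnlockSurface(surface);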

Related

Do I pass the wrong data to glTexImage2D?

I'm trying to make an OpenGL texture by populating a pixel buffer with data from a baked font. Essentially, I'm taking each value from the font array and expanding it into a bitmap.
The problem is that when I display the full texture I get noise. However, an 8x8 texture built from a single glyph is displayed correctly.
The pixel buffer is 8-bit monochrome, so I pass GL_ALPHA as the pixel format.
I tried a 32 bpp GL_RGBA format as well, and it yields the same result.
DebugFont
LoadBakedFont(void)
{
    glEnable(GL_BLEND);
    glEnable(GL_TEXTURE_2D);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    unsigned char baked_font[128][8] = {}; //In my source code this is not empty :)
    unsigned char *pixels = (unsigned char*)malloc(sizeof(unsigned char) * 128 * 8 * 8);
    memset(pixels, 0, sizeof(unsigned char) * 128 * 8 * 8);
    int counter = 0;
    for(int i = 0; i < 128; ++i)
    {
        for(int j = 0; j < 8; ++j)
        {
            for(int k = 0; k < 8; ++k)
            {
                unsigned char val = (baked_font[i][j] >> k & 1);
                pixels[counter++] = val == 1 ? 0xff : 0x00;
            }
        }
    }
    //Renders the exclamation mark perfectly
    for(int y = 0; y < 8; ++y)
    {
        for(int x = 0; x < 8; ++x)
        {
            unsigned char *test = pixels + (0x21 * 64);
            if(test[y * 8 + x])
                printf("#");
            else
                printf(".");
        }
        printf("\n");
    }
    //POD struct
    DebugFont font;
    glGenTextures(1, &font.tex);
    glBindTexture(GL_TEXTURE_2D, font.tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 8 * 128, 8, 0, GL_ALPHA, GL_UNSIGNED_BYTE, pixels);
    glBindTexture(GL_TEXTURE_2D, 0);
    free(pixels);
    return font;
}
void
DrawTexture(DebugFont font)
{
    glBindTexture(GL_TEXTURE_2D, font.tex);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0, 0);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(8 * 128, 0);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(8 * 128, 8);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0, 8);
    glEnd();
    glBindTexture(GL_TEXTURE_2D, 0);
}
(Screenshots: the full texture renders as random noise; the single exclamation-mark texture renders correctly.)
The way you arrange the data makes sense for a tall 8x1024 image, where each 8x8 block is one character.
But you upload it as a 1024x8 image instead, which puts all the pixels in the wrong places.
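As a sketch (assuming the same baked_font and pixels buffers as in the question), you can either describe the data to GL as the 8-wide, (8 * 128)-tall image it really is, or rearrange the fill loop so it actually produces a (8 * 128) x 8 strip:
// Option A: keep the fill loop as-is and upload the layout it actually produces:
// an 8-pixel-wide, (8 * 128)-pixel-tall column of stacked glyphs.
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 8, 8 * 128, 0, GL_ALPHA, GL_UNSIGNED_BYTE, pixels);

// Option B: keep the (8 * 128) x 8 texture and write each glyph into its own 8x8 column.
int stride = 8 * 128;               // bytes per row of the atlas
for (int i = 0; i < 128; ++i)       // glyph index
    for (int j = 0; j < 8; ++j)     // row within the glyph
        for (int k = 0; k < 8; ++k) // column within the glyph
            pixels[j * stride + i * 8 + k] =
                (baked_font[i][j] >> k & 1) ? 0xff : 0x00;
With Option A you would also have to adjust the quad and texture coordinates in DrawTexture to match the new layout.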

Heap corruption from this method

I'm getting a heap corruption error whenever the following method is called:
bool TextureData::loadFromMemory(char* memData, int w, int h)
{
    GLuint texId;
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &texId);
    glTextureId = static_cast<int>(texId);
    glBindTexture(GL_TEXTURE_2D, texId); // Bind the texture
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    memWidth = width = w;
    memHeight = height = h;
    int newPad = 0;//(4 - (w*4)%4)==4 ? 0 : (4 - (w*4)%4);
    int oldPad = 0;//(4 - (w*3)%4)==4 ? 0 : (4 - (w*3)%4);
    size_t size = (width + newPad) * height * 4;
    char* data = 0;
    data = new char[size];
    memcpy(data, memData, size * sizeof(char));
    // Texture generation
    gluBuild2DMipmaps(GL_TEXTURE_2D, 4, memWidth, memHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
    delete [] data;
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    return true;
}
Where the char* data array given to this method is generated like this:
int imageSize = width_ * height_ * 4;
data_ = new char[imageSize];
SDL_LockSurface(TextureImage);
for (Uint y = 0; y < height_; ++y)
{
    for (Uint x = 0; x < width_; ++x)
    {
        size_t currPixel = (y * width_ + x) * 4;
        SDL_Color rgb;
        Uint32 data = getPixel(TextureImage, x, y);
        SDL_GetRGB(data, TextureImage->format, &rgb.r, &rgb.g, &rgb.b);
        data_[currPixel] = (char)rgb.r;
        data_[currPixel + 1] = (char)rgb.g;
        data_[currPixel + 2] = (char)rgb.b;
        data_[currPixel + 3] = (char)255;
    }
}
SDL_UnlockSurface(TextureImage);
SDL_FreeSurface(TextureImage);
I can't figure out why the loadFromMemory method is causing heap corruption; if anyone has any ideas, I'd appreciate a review.

Why do taller letters appear deformed when rendering text with FreeType in OpenGL?

I have managed to draw text with FreeType in OpenGL 4, but the taller letters (e.g. g, d, f, etc.) are somehow being drawn too tall. This is what it looks like. This is what it is supposed to look like. The tall letters are too tall, while the "normal height" letters are just fine.
struct FontChar {
    float tx;           // texcoord x position
    float tw;           // texcoord x width
    glm::ivec2 size;    // face->glyph->bitmap.width, face->glyph->bitmap.rows
    glm::ivec2 bearing; // face->glyph->bitmap_left, face->glyph->bitmap_top
    glm::ivec2 advance; // face->glyph->advance.x, face->glyph->advance.y
} fontChars[128]; // this is populated properly with FreeType

std::vector<float> vertices;
const float sx = 2.0f / 1920.0f;
const float sy = 2.0f / 1080.0f;
float x = 0.0f;
float y = 0.0f;
for (char c : text) {
    const float vx = x + fontChars[c].bearing.x * sx;
    const float vy = y + fontChars[c].bearing.y * sy;
    const float w = fontChars[c].size.x * sx;
    const float h = fontChars[c].size.y * sy;
    float tx = fontChars[c].tx;
    float tw = fontChars[c].tw;
    std::vector<float> quad = { // pos_x, pos_y, tex_x, tex_y
        vx, vy, tx, 0.0f,
        vx + w, vy, tx + tw, 0.0f,
        vx + w, vy - h, tx + tw, 1.0f,
        vx + w, vy - h, tx + tw, 1.0f,
        vx, vy - h, tx, 1.0f,
        vx, vy, tx, 0.0f
    };
    vertices.insert(vertices.begin(), quad.begin(), quad.end());
    x += float(fontChars[c].advance.x >> 6) * sx;
    y += float(fontChars[c].advance.y >> 6) * sy;
}
I then buffer the vertices into a vertex buffer, and then I draw it. The only code that could affect the height is const float h = fontChars[c].size.y * sy, but the size is taken straight from FreeType, and the sy works for the "normal height" letters. This leads me to believe that it could be due to the glyph textures being put into a texture atlas.
FT_Set_Pixel_Sizes(face, 0, size);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
std::array<FontChar, 128> characters{};
unsigned int w = 0;
unsigned int h = 0;
for (unsigned char c = 0; c < 128; c++) {
    if (FT_Load_Char(face, c, FT_LOAD_BITMAP_METRICS_ONLY)) {
        throw std::runtime_error("Failed to load glyph");
    }
    w += face->glyph->bitmap.width;
    h = std::max(face->glyph->bitmap.rows, h); // maybe this is the issue???
}
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
unsigned int x = 0;
for (unsigned char c = 0; c < 128; c++) {
    if (FT_Load_Char(face, c, FT_LOAD_RENDER)) {
        throw std::runtime_error("Failed to load glyph");
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, x, 0, face->glyph->bitmap.width, face->glyph->bitmap.rows, GL_RED, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
    FontChar character = {
        (float)x / (float)w,
        (float)face->glyph->bitmap.width / (float)w,
        glm::ivec2(face->glyph->bitmap.width, face->glyph->bitmap.rows),
        glm::ivec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
        glm::ivec2(face->glyph->advance.x, face->glyph->advance.y)
    };
    characters[c] = character;
    x += face->glyph->bitmap.width;
}
The only other place where I do anything that could influence this vertical stretching behavior is when I find the max height of the characters. I do this so I can find the proper dimensions of the texture atlas, which is just 1 character tall by n characters wide. I'm still not sure how this could cause the behavior though.
I have found the issue. My instincts were correct; it was related to the height of the texture atlas. I was not plugging the heights of the glyph bitmaps into the actual vertices; I was using the entire height of the texture instead. All I had to do was pass each character's height into the FontChar struct when populating the fontChars array, and then make the texture y coordinates of my vertices go from 0.0f to that height instead of from 0.0f to 1.0f.
This worked, except that now all of my text was too tall. Then I realized that I am using an orthographic matrix which extends the x coordinates from [-1, 1] to [-width/height, width/height], and since I was using separate scale factors (sx and sy), my scaling was incorrect. To fix it, I got rid of sy and replaced every sy with sx. I also added 2 pixels between each glyph in the atlas so I don't get any smearing between textures. Here is the final result.
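For anyone hitting the same thing, a rough sketch of the adjusted quad generation (with a hypothetical th field added to FontChar, holding each glyph's bitmap.rows divided by the atlas height, and sx used for both axes as described above) might look like:
const float vx = x + fontChars[c].bearing.x * sx;
const float vy = y + fontChars[c].bearing.y * sx; // sx used for both axes
const float w  = fontChars[c].size.x * sx;
const float h  = fontChars[c].size.y * sx;
const float tx = fontChars[c].tx;
const float tw = fontChars[c].tw;
const float th = fontChars[c].th; // hypothetical: bitmap.rows / (float)atlas_height
std::vector<float> quad = { // pos_x, pos_y, tex_x, tex_y
    vx,     vy,     tx,      0.0f,
    vx + w, vy,     tx + tw, 0.0f,
    vx + w, vy - h, tx + tw, th,
    vx + w, vy - h, tx + tw, th,
    vx,     vy - h, tx,      th,
    vx,     vy,     tx,      0.0f
};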

OpenGL fails when resizing buffer

I need to update an array of pixels to the screen every frame. It works initially; however, when I try to resize the window it glitches and eventually throws EXC_BAD_ACCESS 1. I already checked that the buffer is allocated to the correct size before every frame, but that does not seem to affect the result.
#include <stdio.h>
#include <stdlib.h>
#include <GLUT/GLUT.h>

unsigned char *buffer = NULL;
int width = 400, height = 400;
unsigned int screenTexture;

void Display()
{
    for (int y = 0; y < height; y += 4) {
        for (int x = 0; x < width; x++) {
            buffer[(x + y * width) * 3] = 255;
        }
    }
    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    // This function results in EXC_BAD_ACCESS 1, although the buffer is always correctly allocated
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, 0, 1);
    glMatrixMode(GL_MODELVIEW);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2i(0, 0);
    glTexCoord2f(1, 0); glVertex2i(width, 0);
    glTexCoord2f(1, 1); glVertex2i(width, height);
    glTexCoord2f(0, 1); glVertex2i(0, height);
    glEnd();
    glFlush();
    glutPostRedisplay();
}

void Resize(int w, int h)
{
    width = w;
    height = h;
    buffer = (unsigned char *)realloc(buffer, sizeof(unsigned char) * width * height * 3);
    if (!buffer) {
        printf("Error Reallocating buffer\n");
        exit(1);
    }
}

int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(width, height);
    glutCreateWindow("Rasterizer");
    glutDisplayFunc(Display);
    glutReshapeFunc(Resize);
    glGenTextures(1, &screenTexture);
    glBindTexture(GL_TEXTURE_2D, screenTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glDisable(GL_DEPTH_TEST);
    buffer = (unsigned char *)malloc(sizeof(unsigned char) * width * height * 3);
    glutMainLoop();
}
After resizing, the screen does not display properly either.
What is causing this problem? The code compiles and runs; you just have to link GLUT and OpenGL.
As #genpfault mentioned, OpenGL reads 4 bytes per pixel instead of the 3 you're assuming.
Instead of changing GL_UNPACK_ALIGNMENT, you can also change your code to the correct assumption of 4 bytes per pixel via a simple struct:
struct pixel {
    unsigned char r, g, b;
    unsigned char unused;
};
Then, instead of the magic constant 3, you can use the much clearer sizeof(struct pixel). This is easier to read, conveys the intent of the code, and doesn't generate any extra code (since the structure is "effectively" an array of 4 bytes).
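For example, a sketch of the fill loop using that struct (assuming the global buffer is redeclared as struct pixel * and that the upload then passes GL_RGBA as the pixel format, since each pixel is now 4 bytes):
struct pixel *buffer = (struct pixel *)malloc(sizeof(struct pixel) * width * height);
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        buffer[x + y * width].r = 255;
        buffer[x + y * width].g = 255;
        buffer[x + y * width].b = 255;
        buffer[x + y * width].unused = 0;
    }
}
// Each row is now a whole number of 4-byte pixels, so the default
// GL_UNPACK_ALIGNMENT of 4 is satisfied without touching glPixelStorei().
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, buffer);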
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
                                                          ^^^^^^
GL_UNPACK_ALIGNMENT defaults to 4, not 1. So OpenGL will read 4 bytes for every pixel, not the 3 that you're assuming.
Set GL_UNPACK_ALIGNMENT to 1 using glPixelStorei().
It sounds like you found something that works, but I don't think the problem was properly diagnosed. I believe the biggest issue is in the way you initialize your texture data here:
for (int y = 0; y < height; y += 4) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3] = 255;
    }
}
This only sets data in every 4th row, and then only for every 3rd byte within those rows. To initialize all the data to white, you need to increment the row number (y) by 1 instead of 4, and set all 3 components inside the loop:
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3    ] = 255;
        buffer[(x + y * width) * 3 + 1] = 255;
        buffer[(x + y * width) * 3 + 2] = 255;
    }
}
You also need to set GL_UNPACK_ALIGNMENT to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This controls the row alignment (not the pixel alignment, as suggested in a couple other answers). The default value for GL_UNPACK_ALIGNMENT is 4. But with 3 bytes per pixel in the GL_RGB format you are using, the size of a row is only a multiple of 4 bytes if the number of pixels is a multiple of 4. So for tightly packed rows with 3 bytes/pixel, the value needs to be set to 1. For example, at a window width of 401 pixels each row is 1203 bytes, but with the default alignment OpenGL assumes every row starts on a 4-byte boundary and reads 1204 bytes per row, eventually running past the end of your buffer.

Fbo textures get flipped/rotated

I am capturing a couple of images through FBOs, then reusing those images and adding something to them (again using FBOs and shaders). For some reason the images get rotated, and I have no idea where it happens.
Below some of the code the bug may be connected with. I can supply more code on request.
I save the images like this:
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
int bpp = 4; // Assuming a 32-bit display with a byte each for red, green, blue, and alpha.
ByteBuffer buffer = BufferUtils.createByteBuffer(SAVE_WIDTH * SAVE_HEIGHT * bpp);
glReadPixels(0, 0, SAVE_WIDTH, SAVE_HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, buffer);

File file = new File("picture" + k + ".png"); // The file to save to.
String format = "png"; // Example: "PNG" or "JPG"
BufferedImage image = new BufferedImage(SAVE_WIDTH, SAVE_HEIGHT, BufferedImage.TYPE_INT_ARGB);

for (int x = 0; x < SAVE_WIDTH; x++)
    for (int y = 0; y < SAVE_HEIGHT; y++)
    {
        int i = (x + (SAVE_WIDTH * y)) * bpp;
        int r = buffer.get(i) & 0xFF;
        int g = buffer.get(i + 1) & 0xFF;
        int b = buffer.get(i + 2) & 0xFF;
        int a = buffer.get(i + 3) & 0xFF;
        image.setRGB(x, SAVE_HEIGHT - (y + 1), (a << 24) | (r << 16) | (g << 8) | b);
    }

try {
    ImageIO.write(image, format, file);
} catch (IOException e) {
    e.printStackTrace();
}
And I load them like this:
ByteBuffer buf = null;
File file = new File(filename);
if (file.exists()) {
    try {
        BufferedImage image = ImageIO.read(file);
        buf = Util.getImageDataFromImage(image);
    } catch (IOException ex) {
        Logger.getLogger(SkyBox.class.getName()).log(Level.SEVERE, null, ex);
    }
} else {
    int length = SAVE_WIDTH * SAVE_HEIGHT * 4;
    buf = ByteBuffer.allocateDirect(length);
    for (int i = 0; i < length; i++)
        buf.put((byte)0xFF);
    buf.rewind();
}

// Create a new texture object in memory and bind it
glBindTexture(GL_TEXTURE_2D, pictureTextureId);
// All RGB bytes are aligned to each other and each component is 1 byte
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Upload the texture data and generate mip maps (for scaling)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, SAVE_WIDTH, SAVE_HEIGHT, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, buf);
// Setup what to do when the texture has to be scaled
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
getImageDataFromImage():
WritableRaster wr = bufferedImage.getRaster();
DataBuffer db = wr.getDataBuffer();
DataBufferByte dbb = (DataBufferByte) db;
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(dbb.getData().length);
byte[] bytes = dbb.getData();
for (int i = 0; i < bytes.length; i += 4) {
    byteBuffer.put(bytes[i + 3]);
    byteBuffer.put(bytes[i + 2]);
    byteBuffer.put(bytes[i + 1]);
    byteBuffer.put(bytes[i]);
}
byteBuffer.flip();
return byteBuffer;
Rotated, or flipped vertically? If they're flipped, that's because OpenGL and image file formats don't necessarily agree on the origin of the coordinate system. With OpenGL and the usual projection setups, the origin is in the lower left. Most image file formats and IO libraries assume the origin is in the upper left.
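Whichever side you decide to compensate on, the flip itself is just a row swap of the pixel buffer. A minimal sketch in C++ (the same idea applies to the Java ByteBuffer above; assumes tightly packed RGBA rows):
#include <algorithm>
#include <cstddef>
#include <vector>

// Flip an RGBA pixel buffer vertically in place: row 0 swaps with the last row, and so on.
void flipVertically(std::vector<unsigned char>& pixels, std::size_t width, std::size_t height)
{
    const std::size_t rowBytes = width * 4;
    for (std::size_t y = 0; y < height / 2; ++y) {
        unsigned char* top    = pixels.data() + y * rowBytes;
        unsigned char* bottom = pixels.data() + (height - 1 - y) * rowBytes;
        std::swap_ranges(top, top + rowBytes, bottom);
    }
}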