I have managed to draw text with FreeType in OpenGL 4, but the taller letters (e.g. g, d, f, etc.) are somehow being drawn too tall. Comparing what it looks like against what it is supposed to look like: the tall letters are too tall, while the "normal height" letters are just fine.
struct FontChar {
    float tx;            // texcoord x position
    float tw;            // texcoord x width
    glm::ivec2 size;     // face->glyph->bitmap.width, face->glyph->bitmap.rows
    glm::ivec2 bearing;  // face->glyph->bitmap_left, face->glyph->bitmap_top
    glm::ivec2 advance;  // face->glyph->advance.x, face->glyph->advance.y
} fontChars[128]; // this is populated properly with FreeType

std::vector<float> vertices;
const float sx = 2.0f / 1920.0f;
const float sy = 2.0f / 1080.0f;
float x = 0.0f;
float y = 0.0f;
for (char c : text) {
    const float vx = x + fontChars[c].bearing.x * sx;
    const float vy = y + fontChars[c].bearing.y * sy;
    const float w = fontChars[c].size.x * sx;
    const float h = fontChars[c].size.y * sy;
    float tx = fontChars[c].tx;
    float tw = fontChars[c].tw;
    std::vector<float> quad = { // pos_x, pos_y, tex_x, tex_y
        vx,     vy,     tx,      0.0f,
        vx + w, vy,     tx + tw, 0.0f,
        vx + w, vy - h, tx + tw, 1.0f,
        vx + w, vy - h, tx + tw, 1.0f,
        vx,     vy - h, tx,      1.0f,
        vx,     vy,     tx,      0.0f
    };
    vertices.insert(vertices.end(), quad.begin(), quad.end());
    x += float(fontChars[c].advance.x >> 6) * sx;
    y += float(fontChars[c].advance.y >> 6) * sy;
}
I then buffer the vertices into a vertex buffer and draw it. The only code that could affect the height is const float h = fontChars[c].size.y * sy, but the size is taken straight from FreeType, and sy works for the "normal height" letters. This leads me to believe that it could be due to the glyph textures being put into a texture atlas.
FT_Set_Pixel_Sizes(face, 0, size);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

std::array<FontChar, 128> characters{};
unsigned int w = 0;
unsigned int h = 0;
for (unsigned char c = 0; c < 128; c++) {
    if (FT_Load_Char(face, c, FT_LOAD_BITMAP_METRICS_ONLY)) {
        throw std::runtime_error("Failed to load glyph");
    }
    w += face->glyph->bitmap.width;
    h = std::max(face->glyph->bitmap.rows, h); // maybe this is the issue???
}

GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

unsigned int x = 0;
for (unsigned char c = 0; c < 128; c++) {
    if (FT_Load_Char(face, c, FT_LOAD_RENDER)) {
        throw std::runtime_error("Failed to load glyph");
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, x, 0, face->glyph->bitmap.width, face->glyph->bitmap.rows,
                    GL_RED, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
    FontChar character = {
        (float)x / (float)w,
        (float)face->glyph->bitmap.width / (float)w,
        glm::ivec2(face->glyph->bitmap.width, face->glyph->bitmap.rows),
        glm::ivec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
        glm::ivec2(face->glyph->advance.x, face->glyph->advance.y)
    };
    characters[c] = character;
    x += face->glyph->bitmap.width;
}
The only other place where I do anything that could influence this vertical stretching behavior is when I find the max height of the characters. I do this so I can find the proper dimensions of the texture atlas, which is just 1 character tall by n characters wide. I'm still not sure how this could cause the behavior though.
I have found the issue, and my instincts were correct: it was related to the height of the texture atlas. I was not using the heights of the individual glyph bitmaps in the texture coordinates; every quad sampled the entire height of the atlas. The fix was to store each glyph's height in the FontChar struct when populating the fontChars array, and make the vertical texture coordinate run from 0.0f to that height instead of from 0.0f to 1.0f. That worked, except that all of my text was then too tall. Then I realized that my orthographic matrix extends the x coordinates from [-1, 1] to [-width/height, width/height], and since I was using separate scale factors (sx and sy), my scaling was incorrect; the fix was to get rid of sy and use sx on both axes. I also added 2 pixels between the glyphs in the atlas so I don't get any smearing between adjacent textures. Here is the final result.
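For reference, a minimal sketch of that fix against the snippets above; the extra th field on FontChar is an addition, and h in the first part is the atlas height computed earlier:

// 1) Atlas creation: store the glyph's height as a fraction of the
//    atlas height h, and pad each column by 2 pixels.
FontChar character = {
    (float)x / (float)w,
    (float)face->glyph->bitmap.width / (float)w,
    (float)face->glyph->bitmap.rows / (float)h, // th: glyph height in texcoords
    glm::ivec2(face->glyph->bitmap.width, face->glyph->bitmap.rows),
    glm::ivec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
    glm::ivec2(face->glyph->advance.x, face->glyph->advance.y)
};
x += face->glyph->bitmap.width + 2; // 2 px padding against smearing
// (the first pass that sums up w needs the same +2 per glyph)

// 2) Vertex generation: sample [0, th] instead of [0, 1], and use
//    the same scale factor on both axes, since the orthographic
//    matrix already compensates for the aspect ratio.
const float h = fontChars[c].size.y * sx; // sx on both axes
const float th = fontChars[c].th;
std::vector<float> quad = { // pos_x, pos_y, tex_x, tex_y
    vx,     vy,     tx,      0.0f,
    vx + w, vy,     tx + tw, 0.0f,
    vx + w, vy - h, tx + tw, th,
    vx + w, vy - h, tx + tw, th,
    vx,     vy - h, tx,      th,
    vx,     vy,     tx,      0.0f
};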
I've read the docs: https://libgdx.badlogicgames.com/ci/nightlies/docs/api/com/badlogic/gdx/graphics/g2d/SpriteBatch.html#draw-com.badlogic.gdx.graphics.Texture-float-float-float-float-float-float-float-float-float-int-int-int-int-boolean-boolean-
I thought I understood the behavior of the x, y, originX, and originY parameters, but apparently not. I am playing around with a simple tutorial where a texture is drawn onto the screen with a SpriteBatch. The game uses an OrthographicCamera and a FitViewport. The following code draws the texture in the middle of the screen:
float halfW = width * 0.5f;
float halfH = height * 0.5f;
batch.draw(cavemanTexture,                    // Texture
           -halfW, -halfH,                    // x, y
           halfW, halfH,                      // originX, originY
           width, height,                     // width, height
           WORLD_TO_SCREEN, WORLD_TO_SCREEN,  // scaleX, scaleY
           0.0f,                              // rotation
           0, 0,                              // srcX, srcY
           width, height,                     // srcWidth, srcHeight
           false, false);                     // flipX, flipY
I guess I do not understand what is meant when the docs say "offset" in the sentence, "The rectangle is offset by originX, originY relative to the origin."
To me, if you wanted to put the texture in the center of the screen, you could simply do the following:
batch.draw(cavemanTexture,                    // Texture
           -halfW, -halfH,                    // x, y
           0, 0,                              // originX, originY
           width, height,                     // width, height
           WORLD_TO_SCREEN, WORLD_TO_SCREEN,  // scaleX, scaleY
           0.0f,                              // rotation
           0, 0,                              // srcX, srcY
           width, height,                     // srcWidth, srcHeight
           false, false);                     // flipX, flipY
Because that would move the bottom left corner down and to the left, enough to center the texture. But if I try that, then the image actually disappears off screen.
Here is the full original code:
package com.cookbook.samples;

import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.OrthographicCamera;
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.Texture.TextureFilter;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.utils.viewport.FitViewport;
import com.badlogic.gdx.utils.viewport.Viewport;

public class SpriteBatchSample extends GdxSample {
    private static final Color BACKGROUND_COLOR = new Color(0.39f, 0.58f, 0.92f, 1.0f);
    private static final float WORLD_TO_SCREEN = 1.0f / 100.0f;
    private static final float SCENE_WIDTH = 12.80f;
    private static final float SCENE_HEIGHT = 7.20f;

    private OrthographicCamera camera;
    private Viewport viewport;
    private SpriteBatch batch;
    private Texture cavemanTexture;
    private Color oldColor;

    @Override
    public void create() {
        camera = new OrthographicCamera();
        viewport = new FitViewport(SCENE_WIDTH, SCENE_HEIGHT, camera);
        batch = new SpriteBatch();
        oldColor = new Color();
        cavemanTexture = new Texture(Gdx.files.internal("data/caveman.png"));
        cavemanTexture.setFilter(TextureFilter.Nearest, TextureFilter.Nearest);
    }

    @Override
    public void dispose() {
        batch.dispose();
        cavemanTexture.dispose();
    }

    @Override
    public void render() {
        Gdx.gl.glClearColor(BACKGROUND_COLOR.r,
                            BACKGROUND_COLOR.g,
                            BACKGROUND_COLOR.b,
                            BACKGROUND_COLOR.a);
        Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);

        batch.setProjectionMatrix(camera.combined);
        batch.begin();

        int width = cavemanTexture.getWidth();
        int height = cavemanTexture.getHeight();
        float originX = width * 0.5f;
        float originY = height * 0.5f;

        // Render caveman centered on the screen
        batch.draw(cavemanTexture,                    // Texture
                   -originX, -originY,                // x, y
                   originX, originY,                  // originX, originY
                   width, height,                     // width, height
                   WORLD_TO_SCREEN, WORLD_TO_SCREEN,  // scaleX, scaleY
                   0.0f,                              // rotation
                   0, 0,                              // srcX, srcY
                   width, height,                     // srcWidth, srcHeight
                   false, false);                     // flipX, flipY

        // Render caveman on the top left corner at 2x size
        batch.draw(cavemanTexture,
                   -4.0f - originX, 1.5f - originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN * 2.0f, WORLD_TO_SCREEN * 2.0f,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render caveman on the bottom left corner at 0.5x size
        batch.draw(cavemanTexture,
                   -4.0f - originX, -1.5f - originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN * 0.5f, WORLD_TO_SCREEN * 0.5f,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render caveman on top right corner at 2x size and rotated 45 degrees
        batch.draw(cavemanTexture,
                   4.0f - originX, 1.5f - originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN * 2.0f, WORLD_TO_SCREEN * 2.0f,
                   45.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render caveman on bottom right corner at 1.5x size and flipped around X and Y
        batch.draw(cavemanTexture,
                   4.0f - originX, -1.5f - originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN * 1.5f, WORLD_TO_SCREEN * 1.5f,
                   0.0f,
                   0, 0,
                   width, height,
                   true, true);

        // Save batch color
        oldColor.set(batch.getColor());

        // Render blue caveman
        batch.setColor(Color.CYAN);
        batch.draw(cavemanTexture,
                   -2.0f - originX, -originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render red caveman
        batch.setColor(Color.RED);
        batch.draw(cavemanTexture,
                   -originX, -originY + 2.0f,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render green caveman
        batch.setColor(Color.GREEN);
        batch.draw(cavemanTexture,
                   2.0f - originX, -originY,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        // Render yellow caveman
        batch.setColor(Color.YELLOW);
        batch.draw(cavemanTexture,
                   -originX, -originY - 2.0f,
                   originX, originY,
                   width, height,
                   WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                   0.0f,
                   0, 0,
                   width, height,
                   false, false);

        batch.setColor(oldColor);
        batch.end();
    }

    @Override
    public void resize(int width, int height) {
        viewport.update(width, height, false);
    }
}
The name of your scale factor (WORLD_TO_SCREEN) makes me suspicious that you misunderstand how Viewports work. The whole point of using a Viewport at all is so you don't have to think about the screen size when you are placing stuff in your game world. The size you pass to the Viewport constructor is how much of the game world you want to be visible. When you draw something using a Viewport's camera, its size should not be related to screen dimensions, because the Viewport has already abstracted that away. You should be drawing it at the size it should be in terms of world units.
If the origin is left at (0, 0), the texture is drawn with its bottom left corner at (x, y). The origin is an offset from that corner, about which the scale and rotation are applied. If you used a scale of 1 and rotation of 0, the origin x and y would have no effect whatsoever. That is exactly what bites you here: scaling keeps the point (x + originX, y + originY) fixed. With origin (halfW, halfH), that fixed point is (0, 0), the centre of the screen, so the sprite shrinks in place around it. With origin (0, 0), the fixed point is (-halfW, -halfH), which for, say, a 100-pixel-wide texture is (-50, -50) in world units, far outside the 12.80 x 7.20 visible region, so the scaled-down sprite collapses toward that point and disappears off screen.
If you are not planning to draw sprites with rotation, I would avoid the complicated draw methods and just use SpriteBatch.draw(textureRegion, x, y, width, height). In a typical game, you will have a TextureAtlas of TextureRegions, so you'll never be passing a Texture object to SpriteBatch.
You will either want to think of your world in meter (or similar) units, or if you're doing retro pixel art you might want to use retro pixel (not screen pixel) units. So your caveman might be 1m tall, and you might want a 16m x 9m view of your game world. In that case, you would instantiate your viewport with FitViewport(16f, 9f), and when you drew your caveman, you'd draw it with something like
batch.draw(cavemanRegion, x, y, 1f / cavemanRegion.getRegionHeight() * cavemanRegion.getRegionWidth(), 1f);
where x and y are where the bottom left corner should be in the game world.
Here is an example where the object is scaled and rotated around (x, y):
public class Enemy {
    static final float scale = .4f;

    int type_ship;
    int x = 0;
    int y = 0;
    int angle = 0;
    Rectangle rect;
    TextureRegion region;

    public Enemy(int type_ship, int x, int y) {
        this.type_ship = type_ship;
        this.x = x;
        this.y = y;
        rect = SpriteMaps.get_sprite_rect(SpriteMaps.ENEMY_RED);
        region = new TextureRegion(Textures.getShipsTexture(),
                (int)rect.x, (int)rect.y, (int)rect.width, (int)rect.height);
    }

    public void update() {
    }

    public void draw(SpriteBatch batch) {
        float width = rect.width;
        float height = rect.height;
        // Drawing from (x - w/2, y - h/2) with origin (w/2, h/2) makes
        // (x, y) the pivot for both the scale and the rotation.
        float draw_x = this.x - width / 2;
        float draw_y = this.y - height / 2;
        batch.draw(region, draw_x, draw_y, width / 2, height / 2,
                rect.width, rect.height, scale, scale, angle);
    }
}
I want to convert ClipCoord to ScreenCoord, but I don't know the right way to get the Canvas width and height.
(Canvas = the drawable area in the image plane)
glm::vec4 clipCoords = glm::vec4(0.1f, 0.3f, 1.0f, 1.0f); // random point (w == 1, so already NDC)
float canvasWidth = 2;
float canvasHeight = 2;
GLfloat ndcX = (clipCoords.x + canvasWidth / 2.0f) / canvasWidth;   // [-1, 1] -> [0, 1]
GLfloat ndcY = (clipCoords.y + canvasHeight / 2.0f) / canvasHeight;
GLint pixelX = ndcX * SCREEN_WIDTH;
GLint pixelY = (1 - ndcY) * SCREEN_HEIGHT; // flip y for a top-left pixel origin
In OpenGL, the canvas is the nearPlane in the perspective projection.
I found an old thread with the same question, so I have the answer now: you need a function that takes a point (first run through the camera matrix multiplication) and maps it to the screen by value mapping.
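To make that concrete, here is a minimal sketch of such a function; the perspective divide is included for the general case, and SCREEN_WIDTH and SCREEN_HEIGHT are the same constants as in the question:

#include <glm/glm.hpp>

// Map a clip-space point to window (pixel) coordinates.
// Clip -> NDC is the perspective divide; NDC ([-1, 1] on both axes)
// -> pixels is a linear remap, with y flipped because pixel
// coordinates usually grow downwards.
glm::ivec2 clipToScreen(const glm::vec4& clip, int screenWidth, int screenHeight) {
    const float ndcX = clip.x / clip.w; // the divide drops out when w == 1
    const float ndcY = clip.y / clip.w;
    const int pixelX = (int)((ndcX * 0.5f + 0.5f) * screenWidth);
    const int pixelY = (int)((1.0f - (ndcY * 0.5f + 0.5f)) * screenHeight);
    return glm::ivec2(pixelX, pixelY);
}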
It depends on the library you are using for creating the window with the OpenGL context. E.g. if you use GLFW (a great library for window creation, btw), you get the framebuffer size with glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height), like so:
int width;
int height;
glfwGetFramebufferSize(window, &width, &height);
But this is only the framebuffer size. If you want the viewport, i.e. how many pixels OpenGL will write into the framebuffer, glGetIntegerv(GLenum pname, GLint* data) is what you are looking for:
GLint viewport[4];
glGetIntegerv(GL_VIEWPORT, viewport);
int x = viewport[0];
int y = viewport[1];
int width = viewport[2];
int height = viewport[3];
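Those four values then slot straight into the remap from NDC to pixels; a small sketch, assuming ndcX and ndcY are already in [-1, 1]:

// Map NDC to window coordinates using the actual viewport rectangle
// rather than an assumed full-window size. Note that OpenGL window
// coordinates have their origin at the bottom left.
int pixelX = x + (int)((ndcX * 0.5f + 0.5f) * width);
int pixelY = y + (int)((ndcY * 0.5f + 0.5f) * height);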
I'm trying to get the coordinates of my Box2D bodies to line up with the coordinates of the objects on screen (these being displayed with textures). I'm already aware that Box2D works in meters, so I have to find a conversion factor that works for my program (in this case 50 pixels is 1 meter).
So I would first set the position and size of the box with something like this:
b2BodyDef def;
def.position.Set(0.02f * x, 0.02f * y);
b2PolygonShape shape;
shape.SetAsBox(0.02f * w / 2, 0.02f * h / 2);
And since I'm using OpenGL, I just make a texture and render it as you normally would, but this time I'm trying to match the texture's x and y to the x and y of its collision box in Box2D.
When I run the program, whenever I try to move my player texture off of the floor texture, it falls off before it reaches the end. That told me for sure that my Box2D bodies and my OpenGL textures were not sharing the same positions, but whatever I do to try and fix that, nothing ever changes (e.g. I tried changing the conversion factor to 1m = 20px instead of 50px, I tried not converting the coordinates at all, which gave some really weird results that also didn't work, and a bunch of other stuff). The worst part is that Box2D did the SAME THING when I was using SDL 2, and I switched to OpenGL thinking this wouldn't happen again.
images from the demo:
https://drive.google.com/file/d/0B4anrJs1BRh1aVpOYXI3T3JlWWM/view?usp=drivesdk
https://drive.google.com/file/d/0B4anrJs1BRh1dVlTb0czMTE3RWc/view?usp=drivesdk
Code for the tile class (currently the only class in my project that uses Box2D):
void tile::load(bool square, std::string path)
{
    if (square)
    {
        glGenTextures(1, &texID);
        SDL_Surface *sur = IMG_Load(path.c_str());
        w = sur->w;
        h = sur->h;
        glBindTexture(GL_TEXTURE_2D, texID);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sur->w, sur->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, sur->pixels);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glBindTexture(GL_TEXTURE_2D, 0);
        SDL_FreeSurface(sur); // the surface is no longer needed once uploaded
    }
}
void tile::addRect(int x, int y, int wt, int ht, bool dyn)
{
    b2BodyDef def;
    // Convert the x and y into meters.
    def.position.Set(vars::P2M * x, vars::P2M * y);
    if (dyn)
    {
        def.type = b2_dynamicBody;
    }
    b = vars::world->CreateBody(&def);

    b2PolygonShape shape;
    // Convert the width and height entered into meters.
    shape.SetAsBox(vars::P2M * wt / 2, vars::P2M * ht / 2);

    b2FixtureDef fix;
    fix.shape = &shape;
    fix.density = 1.0;
    b->CreateFixture(&fix);

    // Remember the size this body was created with.
    w = wt;
    h = ht;
}
void tile::Render()
{
    glLoadIdentity();
    b2Vec2 pos = b->GetPosition();
    float x = pos.x;
    float y = pos.y;
    // Convert the x and y of the body to pixels.
    glTranslatef(x * vars::M2P, y * vars::M2P, 0);
    glBindTexture(GL_TEXTURE_2D, texID);
    glBegin(GL_QUADS);
        glTexCoord2f(0.f, 0.f); glVertex2f(0.f, 0.f);
        glTexCoord2f(1.f, 0.f); glVertex2f(w, 0.f);
        glTexCoord2f(1.f, 1.f); glVertex2f(w, h);
        glTexCoord2f(0.f, 1.f); glVertex2f(0.f, h);
    glEnd();
}
void tile::Update()
{
    b2Vec2 vel = b->GetLinearVelocity();
    d_key = key[SDL_SCANCODE_D];
    a_key = key[SDL_SCANCODE_A];
    if (a_key)
    {
        // Move the box left.
        vel.x = -5;
    }
    if (d_key)
    {
        // Move the box right.
        vel.x = 5;
    }
    b->SetLinearVelocity(vel);
}
If anyone has a solution for this, please post it, because I'm out of options and I don't know what to do anymore. Thanks.
Oh, and by the way: vars::M2P = 50 and vars::P2M = 1 / vars::M2P (which in turn is 0.02).
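One thing worth checking in code like this: a Box2D body's position is the centre of the box (SetAsBox centres the shape on the body origin), while the quad in Render() is drawn with its bottom-left corner at the translated position. That half-width/half-height mismatch produces exactly the kind of offset described. A sketch of the corner-based translation, with names following the code above:

void tile::Render()
{
    glLoadIdentity();
    b2Vec2 pos = b->GetPosition();
    // The body position is the box centre in meters; convert to pixels
    // and shift by half the size so the quad's corner lands correctly.
    glTranslatef(pos.x * vars::M2P - w / 2.0f, pos.y * vars::M2P - h / 2.0f, 0);
    glBindTexture(GL_TEXTURE_2D, texID);
    glBegin(GL_QUADS);
        glTexCoord2f(0.f, 0.f); glVertex2f(0.f, 0.f);
        glTexCoord2f(1.f, 0.f); glVertex2f(w, 0.f);
        glTexCoord2f(1.f, 1.f); glVertex2f(w, h);
        glTexCoord2f(0.f, 1.f); glVertex2f(0.f, h);
    glEnd();
}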
I'm trying to draw to a renderbuffer (512x512) that's larger than the screen size (i.e., 320x480).
After doing a glReadPixels, the image looks correct, except where its dimensions exceed the screen size: in this example, past 320 horizontally and 480 vertically. What causes this anomaly? Is there something I'm missing?
When the window size is >= the size of the renderbuffer, this code works absolutely fine.
Example image that was rendered to the buffer & glReadPixel'd:
http://img593.imageshack.us/img593/3220/rendertobroke.png
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
bglViewport(0, 0, width, height);
Matrix::matrix_t identity, colorMatrix;
Matrix::LoadIdentity(&identity);
Matrix::LoadIdentity(&colorMatrix);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Vector::vector_t oldPos, oldScale;
Vector::Copy(&oldPos, &pos);
Vector::Mul(&pos, 0.0f);
Vector::Copy(&oldScale, &scale);
Vector::Load(&scale, 1, 1, 1);
int oldHAlign = halignment;
int oldVAlign = valignment;
halignment = Font::HALIGN_LEFT;
valignment = Font::VALIGN_BOTTOM;
float oldXRatio = vid.xratio;
float oldYRatio = vid.yratio;
vid.xratio = 1;
vid.yratio = 1;
Drawing::Set2D(this->size.x, this->size.y); // glOrtho and setup projection/modelview matrices
Draw(&identity, &colorMatrix);
Vector::Copy(&pos, &oldPos);
Vector::Copy(&scale, &oldScale);
halignment = oldHAlign;
valignment = oldVAlign;
vid.xratio = oldXRatio;
vid.yratio = oldYRatio;
byte *buffer = (byte*)Z_Malloc(width * height * 3, ZT_STATIC);
bglPixelStorei(BGL_PACK_ALIGNMENT, 1);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
byte *final = RGBtoLuminance(buffer, width, height);
SaveTGA("canvas.tga", final, width, height, 1);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);
bglViewport(0, 0, vid.width, vid.height);
Here's the answer. Change this line:
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
to this:
bglBindFramebuffer(BGL_FRAMEBUFFER, canvasFrameBuffer);
BGL_RENDERBUFFER is not a valid framebuffer target, so the original bind never took effect: everything was rendered into the default (window) framebuffer, clipped to the 320x480 window, and glReadPixels then read back that clipped result.
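A cheap way to catch this class of bug early is to check for GL errors and framebuffer completeness right after setup. A sketch using the raw GL names, on the assumption that the bgl*/BGL_* symbols above are thin wrappers over the standard entry points:

glBindFramebuffer(GL_FRAMEBUFFER, canvasFrameBuffer);

// A wrong target makes the bind fail with GL_INVALID_ENUM and leaves
// the default (window) framebuffer bound, so check for errors...
if (glGetError() != GL_NO_ERROR) {
    // the bind itself was rejected
}
// ...and verify the FBO is actually usable before rendering into it.
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    // attachments are missing, unsupported, or inconsistent
}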