I'm trying to render text using SDL. Obviously SDL does not support rendering text by itself, so I went with this approach:
1. load the font file
2. raster glyphs in the font to bitmaps
3. pack all the bitmaps into a large texture, forming a spritesheet of glyphs
4. render text as a sequence of glyph sprites: copy rectangles from the texture to the target
The first two steps are handled with the FreeType library. It can generate bitmaps for many kinds of fonts and provides a lot of extra info about the glyphs. FreeType-generated bitmaps are (by default) alpha channel only: for every glyph I basically get a 2D array of A values in the range 0-255. For simplicity, the MCVE below needs only SDL; I have already embedded a FreeType-generated bitmap in the source code.
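For reference, a minimal sketch of those first two steps with FreeType (placeholder font path, error handling omitted):

#include <ft2build.h>
#include FT_FREETYPE_H

int main()
{
    // step 1: load the font file (placeholder path)
    FT_Library library;
    FT_Init_FreeType(&library);
    FT_Face face;
    FT_New_Face(library, "some_font.ttf", 0, &face);
    FT_Set_Pixel_Sizes(face, 0, 17); // 17 px tall, like the bitmap below

    // step 2: raster one glyph; FT_LOAD_RENDER fills face->glyph->bitmap
    FT_Load_Char(face, 'A', FT_LOAD_RENDER);
    const FT_Bitmap& bmp = face->glyph->bitmap;
    // bmp.buffer holds bmp.rows rows of bmp.width alpha values,
    // each row bmp.pitch bytes apart
    (void)bmp;

    FT_Done_Face(face);
    FT_Done_FreeType(library);
    return 0;
}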
Now, the question is: how should I manage the texture that consists of such bitmaps?
What blending mode should I use?
What modulation should I use?
What should the texture be filled with? FreeType provides the alpha channel only, while SDL generally wants a texture of RGBA pixels. What values should I use for RGB?
How do I draw text in a specific color? I don't want to make a separate texture for each color.
The FreeType documentation says: "For optimal rendering on a screen the bitmap should be used as an alpha channel in linear blending with gamma correction." The SDL blend mode documentation doesn't list anything named "linear blending", so I composed a custom one, but I'm not sure I got it right.
I'm also not sure whether I got some of the SDL calls right, as some of them are poorly documented (I already know that locking with empty rectangles crashes on Direct3D), especially how to copy data using SDL_LockTexture.
#include <string>
#include <stdexcept>
#include <SDL.h>
constexpr unsigned char pixels[] = {
0, 0, 0, 0, 0, 0, 0, 30, 33, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 169, 255, 155, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 83, 255, 255, 229, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 189, 233, 255, 255, 60, 0, 0, 0, 0, 0,
0, 0, 0, 0, 33, 254, 83, 250, 255, 148, 0, 0, 0, 0, 0,
0, 0, 0, 0, 129, 227, 2, 181, 255, 232, 3, 0, 0, 0, 0,
0, 0, 0, 2, 224, 138, 0, 94, 255, 255, 66, 0, 0, 0, 0,
0, 0, 0, 68, 255, 48, 0, 15, 248, 255, 153, 0, 0, 0, 0,
0, 0, 0, 166, 213, 0, 0, 0, 175, 255, 235, 4, 0, 0, 0,
0, 0, 16, 247, 122, 0, 0, 0, 88, 255, 255, 71, 0, 0, 0,
0, 0, 105, 255, 192, 171, 171, 171, 182, 255, 255, 159, 0, 0, 0,
0, 0, 203, 215, 123, 123, 123, 123, 123, 196, 255, 239, 6, 0, 0,
0, 44, 255, 108, 0, 0, 0, 0, 0, 75, 255, 255, 77, 0, 0,
0, 142, 252, 22, 0, 0, 0, 0, 0, 5, 238, 255, 164, 0, 0,
5, 234, 184, 0, 0, 0, 0, 0, 0, 0, 156, 255, 242, 8, 0,
81, 255, 95, 0, 0, 0, 0, 0, 0, 0, 68, 255, 255, 86, 0,
179, 249, 14, 0, 0, 0, 0, 0, 0, 0, 3, 245, 255, 195, 0
};
[[noreturn]] inline void throw_error(const char* desc, const char* sdl_err)
{
throw std::runtime_error(std::string(desc) + sdl_err);
}
void update_pixels(
SDL_Texture& texture,
const SDL_Rect& texture_rect,
const unsigned char* src_alpha,
int src_size_x,
int src_size_y)
{
void* pixels;
int pitch;
if (SDL_LockTexture(&texture, &texture_rect, &pixels, &pitch))
throw_error("could not lock texture: ", SDL_GetError());
auto pixel_buffer = reinterpret_cast<unsigned char*>(pixels);
for (int y = 0; y < src_size_y; ++y) {
for (int x = 0; x < src_size_x; ++x) {
// this assumes SDL_PIXELFORMAT_RGBA8888
unsigned char* const rgba = pixel_buffer + x * 4;
unsigned char& r = rgba[0];
unsigned char& g = rgba[1];
unsigned char& b = rgba[2];
unsigned char& a = rgba[3];
r = 0xff;
g = 0xff;
b = 0xff;
a = src_alpha[x];
}
src_alpha += src_size_y;
pixel_buffer += pitch;
}
SDL_UnlockTexture(&texture);
}
int main(int /* argc */, char* /* argv */[])
{
if (SDL_Init(SDL_INIT_VIDEO) < 0)
throw_error("could not init SDL: ", SDL_GetError());
SDL_Window* window = SDL_CreateWindow("Hello World",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
1024, 768,
SDL_WINDOW_RESIZABLE);
if (!window)
throw_error("could not create window: ", SDL_GetError());
SDL_Renderer* renderer = SDL_CreateRenderer(window, -1, 0);
if (!renderer)
throw_error("could not create renderer: ", SDL_GetError());
SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STREAMING, 512, 512);
if (!texture)
throw_error("could not create texture: ", SDL_GetError());
SDL_SetTextureColorMod(texture, 255, 0, 0);
SDL_Rect src_rect;
src_rect.x = 0;
src_rect.y = 0;
src_rect.w = 15;
src_rect.h = 17;
update_pixels(*texture, src_rect, pixels, src_rect.w, src_rect.h);
/*
* FreeType documentation: For optimal rendering on a screen the bitmap should be used as
* an alpha channel in linear blending with gamma correction.
*
* The blending used is therefore:
* dstRGB = (srcRGB * srcA) + (dstRGB * (1 - srcA))
* dstA = (srcA * 0) + (dstA * 1) = dstA
*/
SDL_BlendMode blend_mode = SDL_ComposeCustomBlendMode(
SDL_BLENDFACTOR_SRC_ALPHA, SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, SDL_BLENDOPERATION_ADD,
SDL_BLENDFACTOR_ZERO, SDL_BLENDFACTOR_ONE, SDL_BLENDOPERATION_ADD);
if (SDL_SetTextureBlendMode(texture, blend_mode))
throw_error("could not set texture blending: ", SDL_GetError());
while (true) {
SDL_SetRenderDrawColor(renderer, 255, 255, 0, 255);
SDL_RenderClear(renderer);
SDL_Rect dst_rect;
dst_rect.x = 100;
dst_rect.y = 100;
dst_rect.w = src_rect.w;
dst_rect.h = src_rect.h;
SDL_RenderCopy(renderer, texture, &src_rect, &dst_rect);
SDL_RenderPresent(renderer);
SDL_Delay(16);
SDL_Event event;
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_KEYUP:
switch (event.key.keysym.sym) {
case SDLK_ESCAPE:
return 0;
}
break;
case SDL_QUIT:
return 0;
}
}
}
return 0;
}
Expected result: red letter "A" on yellow background.
Actual result: malformed red lines inside black square on yellow background.
I suspect that the lines are broken because there is a bug in the pointer arithmetic inside update_pixels, but I have no idea what's causing the black square.
First of all, part of this is already done in the SDL_ttf library. You could use it to rasterise glyphs to surfaces or to generate a whole multi-character text surface.
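For illustration, a minimal SDL2_ttf sketch (placeholder font path; assumes TTF_Init() has already succeeded):

#include <SDL.h>
#include <SDL_ttf.h>

// Sketch: render a whole string to a texture with SDL2_ttf.
SDL_Texture* make_text_texture(SDL_Renderer* renderer, const char* text)
{
    TTF_Font* font = TTF_OpenFont("some_font.ttf", 16); // placeholder path
    if (!font)
        return nullptr;
    SDL_Color color = {255, 0, 0, 255}; // red
    // blended rendering gives an antialiased 32-bit surface with alpha
    SDL_Surface* surface = TTF_RenderUTF8_Blended(font, text, color);
    TTF_CloseFont(font);
    if (!surface)
        return nullptr;
    SDL_Texture* texture = SDL_CreateTextureFromSurface(renderer, surface);
    SDL_FreeSurface(surface);
    return texture;
}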
Your src_alpha += src_size_y; is incorrect: you copy row by row, but advance by the column length (the image height), not the row length. It should be src_size_x. As it stands, the offset is wrong on every row, so only the first row of your copied image is correct.
Your colour packing when writing to the texture is backwards. See https://wiki.libsdl.org/SDL_PixelFormatEnum#order - the packed component order (high bit -> low bit) is SDL_PACKEDORDER_RGBA, meaning R is packed into the highest bits while A is in the lowest. So, when accessing it through an unsigned char* on a little-endian machine, the first byte is A and the fourth is R:
unsigned char& r = rgba[3];
unsigned char& g = rgba[2];
unsigned char& b = rgba[1];
unsigned char& a = rgba[0];
You don't need custom blending; use SDL_BLENDMODE_BLEND, which is the 'standard' "src-alpha, one-minus-src-alpha" formula everyone uses (note that the destination alpha is never used as a blend factor; when blending, only the source alpha matters).
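Putting the stride fix, the byte order and SDL_BLENDMODE_BLEND together, the relevant parts of the MCVE would look something like this (a sketch; the byte indices assume a little-endian machine):

// inside update_pixels:
for (int y = 0; y < src_size_y; ++y) {
    for (int x = 0; x < src_size_x; ++x) {
        unsigned char* const rgba = pixel_buffer + x * 4;
        rgba[0] = src_alpha[x]; // A
        rgba[1] = 0xff;         // B
        rgba[2] = 0xff;         // G
        rgba[3] = 0xff;         // R
    }
    src_alpha += src_size_x;  // advance by the row length, not the height
    pixel_buffer += pitch;    // pitch is the texture's row stride in bytes
}

// in main, instead of composing a custom blend mode:
if (SDL_SetTextureBlendMode(texture, SDL_BLENDMODE_BLEND))
    throw_error("could not set texture blending: ", SDL_GetError());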
Finally, one more approach: take your glyph's luminance value (alpha, whatever it is called; the point is that it has only one channel) and put it into every channel. That way you can do additive blending without using alpha at all, and you don't even need an RGBA texture. The glyph colour can still be multiplied in with the colour mod. SDL_ttf implements exactly that.
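A minimal sketch of that variant, reusing the MCVE's throw_error (with the same value in every channel, the byte order no longer matters):

void upload_for_additive(SDL_Texture& texture, const SDL_Rect& rect,
                         const unsigned char* src_alpha, int w, int h)
{
    void* pixels;
    int pitch;
    if (SDL_LockTexture(&texture, &rect, &pixels, &pitch))
        throw_error("could not lock texture: ", SDL_GetError());
    auto pixel_buffer = reinterpret_cast<unsigned char*>(pixels);
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            // write the glyph value into all four channels
            pixel_buffer[x * 4 + 0] = src_alpha[x];
            pixel_buffer[x * 4 + 1] = src_alpha[x];
            pixel_buffer[x * 4 + 2] = src_alpha[x];
            pixel_buffer[x * 4 + 3] = src_alpha[x];
        }
        src_alpha += w;
        pixel_buffer += pitch;
    }
    SDL_UnlockTexture(&texture);
    SDL_SetTextureBlendMode(&texture, SDL_BLENDMODE_ADD);
    SDL_SetTextureColorMod(&texture, 255, 0, 0); // tint the glyphs red
}

Keep in mind that additive blending adds to whatever is underneath, so it reads best on dark backgrounds.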
My hard drive got corrupted and I had to set everything up again.
The problem is that previously working code is now throwing errors.
QList<QColor> colors = {
QColor(0, 255, 255, 255),
QColor(0, 200, 255, 255),
QColor(0, 170, 255, 255),
QColor(0, 150, 255, 255),
QColor(0, 130, 255, 255),
};
Error:
D:\dev\est_tsd\tests\testgis.cpp:19: error: C2440: 'initializing': cannot convert from 'initializer-list' to 'QList'
No constructor could take the source type, or constructor overload resolution was ambiguous
I read that Qt now supports initializer lists with QList
(name of the kit: Desktop Qt 5.3 MSVC2013 OpenGL 64bit). What am I missing?
Help would be much appreciated.
You are using copy-list-initialization semantics instead of direct-list-initialization. You should check that you have this in your .pro file:
CONFIG += c++11
and then use:
QList<QColor> colors{
QColor(0, 255, 255, 255),
QColor(0, 200, 255, 255),
QColor(0, 170, 255, 255),
QColor(0, 150, 255, 255),
QColor(0, 130, 255, 255)
};
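As a minimal self-contained check (assuming Qt 5.x and a C++11-capable compiler, with the CONFIG line above in the .pro file):

#include <QList>
#include <QColor>

int main()
{
    // direct list initialization, as suggested above
    QList<QColor> colors{
        QColor(0, 255, 255, 255),
        QColor(0, 130, 255, 255)
    };
    return colors.size() == 2 ? 0 : 1;
}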
Try removing the last comma.
It becomes:
QList<QColor> colors = {
QColor(0, 255, 255, 255),
QColor(0, 200, 255, 255),
QColor(0, 170, 255, 255),
QColor(0, 150, 255, 255),
QColor(0, 130, 255, 255)};
I have a project in which I must detect 3 specific colors in many leaf pictures: green, yellow and brown.
I'm using the following image as an example:
The objective of detecting the different colors is to determine whether the tree is sick or not, so it's really important to be able to tell correctly what is green, yellow and brown, even in small numbers of pixels.
I wrote the following code:
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
//Load the image
Mat img_bgr = imread("c:\\testeimagem\\theeye\\greening32.jpg", 1);
if (img_bgr.empty()){
cout << "No image was loaded..." << endl;
return -1;
}
//Display the image
namedWindow("Original Image", WINDOW_NORMAL);
imshow("Original Image", img_bgr);
waitKey(0);
destroyAllWindows();
//Conversion to HSV
Mat img_hsv;
cvtColor(img_bgr, img_hsv, CV_BGR2HSV_FULL);
//Extracting colors - HSV
Mat cores_divididas, green, yellow, brown;
//Yellow
inRange(img_hsv, Scalar(28, 240, 240), Scalar(33, 255, 255), yellow);
imwrite("c:\\testeimagem\\theeye\\yellow.jpg", yellow);
//Green
inRange(img_hsv, Scalar(38, 100, 100), Scalar(70, 190, 190), green);
imwrite("c:\\testeimagem\\theeye\\green.jpg", green);
//Brown
inRange(img_hsv, Scalar(10, 90, 90), Scalar(20, 175, 175), brown);
imwrite("c:\\testeimagem\\theeye\\brown.jpg", brown);
namedWindow("Yellow", WINDOW_NORMAL);
imshow("Yellow", yellow);
namedWindow("Green", WINDOW_NORMAL);
imshow("Green", green);
namedWindow("Brown", WINDOW_NORMAL);
imshow("Brown", brown);
waitKey(0);
destroyAllWindows();
return 0;
}
If you guys compile this code, you will notice that the green color is not properly detected and the other colors aren't detected at all.
As a guide for reference values, I used this trackbar.
Try out these ranges:
//Yellow
inRange(img_hsv, Scalar(28, 0, 0), Scalar(33, 255, 255), yellow);
imwrite("yellow.jpg", yellow);
//Green
inRange(img_hsv, Scalar(38, 0, 0), Scalar(70, 255, 255), green);
imwrite("green.jpg", green);
//Brown
inRange(img_hsv, Scalar(10, 0, 0), Scalar(20, 255, 255), brown);
imwrite("brown.jpg", brown);
On your leaf image it seems there is no brown pigment at all. I tested these ranges with this brownish leaf, and they look OK.
The reason I tried these ranges is that the true color information is (correct me if I'm wrong) carried in the hue component.
Note: go with CV_BGR2HSV, as already mentioned.
When you convert the original image's color space, try using CV_BGR2HSV instead of CV_BGR2HSV_FULL. The link you referenced provides reference values based on CV_BGR2HSV, in which hue has a different range (0-179, versus 0-255 with the _FULL variant), so that's probably at least one factor causing your issues.
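If you ever do want to keep the _FULL conversion instead, bounds picked against the 0-179 reference can be rescaled, since plain CV_BGR2HSV stores hue as H/2 while CV_BGR2HSV_FULL stores H*255/360. A hypothetical helper:

// Hypothetical helper: rescale a hue bound chosen for CV_BGR2HSV (0-179)
// to the CV_BGR2HSV_FULL range (0-255). Saturation and value bounds need
// no change; both conversions map them to 0-255.
inline int hue_to_full_range(int hue179)
{
    return hue179 * 255 / 180;
}

// e.g. inRange(img_hsv_full, Scalar(hue_to_full_range(38), 0, 0),
//              Scalar(hue_to_full_range(70), 255, 255), green);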