I think that in previous versions FreeImage_ConvertTo24Bits(FIBITMAP*) worked on any type of FIBITMAP*, but in 3.18 it returns nullptr if the FIBITMAP* is a floating-point texture.
Is there anything I missed? Has anyone else noticed this change? Should I do an intermediate conversion first?
Thank you in advance for any tips.
I didn't manage to make it work, so I coded my own version. I'll put it here for future readers:
#include <cmath>        // std::fmin, std::fmax, std::pow
#include "FreeImage.h"

inline BYTE LinToSRGB(float C)
{
    // Clamp to [0, 1], then apply the sRGB transfer function
    C = std::fmin(std::fmax(C, 0.0f), 1.0f);
    if (C > 0.0031308f) {
        C = 1.055f * std::pow(C, 1.0f / 2.4f) - 0.055f;
    } else {
        C = 12.92f * C;
    }
    return static_cast<BYTE>(C * 255.0f);
}

FIBITMAP* ConvertTo24Bits(FIBITMAP* origImage)
{
    // Let FreeImage handle the types it still converts itself
    FIBITMAP* modImage = FreeImage_ConvertTo24Bits(origImage);
    if (modImage != nullptr) {
        return modImage;
    }

    const FREE_IMAGE_TYPE src_type = FreeImage_GetImageType(origImage);
    const unsigned width  = FreeImage_GetWidth(origImage);
    const unsigned height = FreeImage_GetHeight(origImage);

    modImage = FreeImage_Allocate(width, height, 24);
    if (modImage == nullptr) {
        return nullptr;
    }

    const unsigned src_pitch = FreeImage_GetPitch(origImage);
    const unsigned dst_pitch = FreeImage_GetPitch(modImage);
    const BYTE *src_bits = FreeImage_GetBits(origImage);
    BYTE *dst_bits = FreeImage_GetBits(modImage);

    if (src_type == FIT_RGBAF)
    {
        for (unsigned y = 0; y < height; y++) {
            const FIRGBAF *src_pixel = (const FIRGBAF*) src_bits;
            BYTE *dst_pixel = dst_bits;
            for (unsigned x = 0; x < width; x++) {
                // convert and skip the alpha channel (BGR order on little-endian)
                *dst_pixel++ = LinToSRGB(src_pixel[x].blue);
                *dst_pixel++ = LinToSRGB(src_pixel[x].green);
                *dst_pixel++ = LinToSRGB(src_pixel[x].red);
            }
            src_bits += src_pitch;
            dst_bits += dst_pitch; // advance by the destination pitch, not by width*3
        }
    }
    else if (src_type == FIT_RGBF)
    {
        for (unsigned y = 0; y < height; y++) {
            const FIRGBF *src_pixel = (const FIRGBF*) src_bits;
            BYTE *dst_pixel = dst_bits;
            for (unsigned x = 0; x < width; x++) {
                // convert (FIT_RGBF has no alpha channel)
                *dst_pixel++ = LinToSRGB(src_pixel[x].blue);
                *dst_pixel++ = LinToSRGB(src_pixel[x].green);
                *dst_pixel++ = LinToSRGB(src_pixel[x].red);
            }
            src_bits += src_pitch;
            dst_bits += dst_pitch;
        }
    }
    return modImage;
}
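For completeness, here is a minimal usage sketch of the helper above. The EXR input file, the PNG output name, and the surrounding init/deinit calls are just assumptions for illustration, not part of the original post:

// Hypothetical usage: load an HDR image, convert it with the helper, save as PNG.
FreeImage_Initialise();
FIBITMAP* hdr = FreeImage_Load(FIF_EXR, "input.exr");   // assumed input file
if (hdr) {
    FIBITMAP* bmp24 = ConvertTo24Bits(hdr);             // helper defined above
    if (bmp24) {
        FreeImage_Save(FIF_PNG, bmp24, "output.png");
        FreeImage_Unload(bmp24);
    }
    FreeImage_Unload(hdr);
}
FreeImage_DeInitialise();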
Related
I am trying to adjust the brightness and contrast of an RGB image, but the output is not what I expect.
The function is called back from the createTrackbar() function with values from 0 to 100.
Please check the image below. I would appreciate some help. Thanks.
void brightness_callback(int brightness, void *userdata)
{
    int height = image_input.rows, width = image_input.cols;
    image_output = Mat::zeros(image_input.size(), image_input.type());
    int widthStep = image_input.step;
    int nChannels = 3;
    uchar *pDataInput = (uchar *)image_input.data;
    uchar *pDataOutput = (uchar *)image_output.data;
    for (int x = 0; x < height; x++, pDataInput += widthStep, pDataOutput += widthStep) {
        uchar *pRowInput = pDataInput;
        uchar *pRowOutput = pDataOutput;
        for (int y = 0; y < width; y++, pRowInput += nChannels, pRowOutput += nChannels) {
            uchar B = pRowInput[0];
            uchar G = pRowInput[1];
            uchar R = pRowInput[2];
            pRowOutput[0] = truncate((uchar)(B + brightness));
            pRowOutput[1] = truncate((uchar)(G + brightness));
            pRowOutput[2] = truncate((uchar)(R + brightness));
        }
    }
    imshow(window_original, image_output);
}

uchar truncate(uchar value) {
    if (value < 0) return 0;
    else if (value > 255) return 255;
    return value;
}
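For future readers, a likely culprit is that (uchar)(B + brightness) wraps around before truncate() ever sees the value, and a uchar can never be less than 0 or greater than 255, so truncate() does nothing. A minimal sketch of one possible fix, keeping the rest of brightness_callback as posted, is to do the arithmetic in int (or use cv::saturate_cast):

// Sketch: clamp in int before narrowing back to uchar
uchar truncate(int value) {
    if (value < 0) return 0;
    if (value > 255) return 255;
    return (uchar)value;
}
// ... and in the loop:
pRowOutput[0] = truncate(B + brightness);   // or cv::saturate_cast<uchar>(B + brightness)
pRowOutput[1] = truncate(G + brightness);
pRowOutput[2] = truncate(R + brightness);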
I'm trying to use the GPU Delegate in TensorFlow Lite on iOS. My model's inputs and outputs are OpenCV BGR images ([258, 540, 3]). How can I set the inputs and outputs in the C++ TensorFlow Lite interpreter? I tried to use this code:
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
NSData* slicedData = [self inputDataFromCvMat:slicedImage];
uint8_t* in = (uint8_t*) slicedData.bytes;
ProcessInputWithFloatModel(in, out, WIDTH, HEIGHT, CHANNELS);

void ProcessInputWithFloatModel(uint8_t* input, float* buffer, int image_width, int image_height, int image_channels) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            const int in_x = (y * image_width) / wanted_input_width;
            const int in_y = (x * image_height) / wanted_input_height;
            uint8_t* input_pixel =
                input + (in_y * image_width * image_channels) + (in_x * image_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                out_pixel[c] = (input_pixel[c] - input_mean) / input_std;
            }
        }
    }
}

- (NSData *)inputDataFromCvMat:(Mat)image {
    NSMutableData *inputData = [[NSMutableData alloc] initWithCapacity:0];
    for (int row = 0; row < HEIGHT + 10; row++) {
        for (int col = 0; col < WIDTH + 10; col++) {
            Vec3b intensity = image.at<Vec3b>(row, col);
            int blue = intensity.val[0];
            int green = intensity.val[1];
            int red = intensity.val[2];
            // we need to put pixel values in BGR (model was trained with opencv)
            [inputData appendBytes:&blue length:sizeof(blue)];
            [inputData appendBytes:&green length:sizeof(green)];
            [inputData appendBytes:&red length:sizeof(red)];
        }
    }
    return inputData;
}
but I don't know what is wrong.
After some research, I managed to get it working:
const int wanted_input_width = 258;
const int wanted_input_height = 540;
const int wanted_input_channels = 3;

Mat image = ...
// write to input
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
uint8_t* in = image.ptr<uint8_t>(0);
ProcessInputWithFloatModel(in, out);

// run interpreter
if (interpreter->Invoke() != kTfLiteOk) {
    LOG(FATAL) << "Failed to invoke!";
}

// get output (outputs()[0] is a tensor index, so look it up with typed_tensor)
int output_idx = interpreter->outputs()[0];
float* output = interpreter->typed_tensor<float>(output_idx);
Mat outputMat = ProcessOutputWithFloatModel(output);

/// Preprocess the input image and feed the TFLite interpreter buffer for a float model.
void ProcessInputWithFloatModel(uint8_t* input, float* buffer) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            uint8_t* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                out_pixel[c] = input_pixel[c] / 255.0f;
            }
        }
    }
}

Mat ProcessOutputWithFloatModel(float* input) {
    cv::Mat image = cv::Mat::zeros(wanted_input_height, wanted_input_width, CV_8UC3);
    for (int y = 0; y < wanted_input_height; ++y) {
        for (int x = 0; x < wanted_input_width; ++x) {
            float* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            cv::Vec3b & color = image.at<cv::Vec3b>(cv::Point(x, y));
            color[0] = (uchar) floor(input_pixel[0] * 255.0f);
            color[1] = (uchar) floor(input_pixel[1] * 255.0f);
            color[2] = (uchar) floor(input_pixel[2] * 255.0f);
        }
    }
    return image;
}
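Since the question was specifically about the GPU delegate, the wiring on iOS would look roughly like the sketch below. This is an assumption based on the TFLite Metal delegate C API (metal_delegate.h), not part of the original answer; the delegate is attached before tensors are allocated:

#include "tensorflow/lite/delegates/gpu/metal_delegate.h"

// Sketch: attach the Metal GPU delegate, then allocate tensors before feeding input.
TfLiteDelegate* delegate = TFLGpuDelegateCreate(nullptr);   // nullptr = default options (assumed)
if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
    LOG(FATAL) << "Failed to apply GPU delegate!";
}
if (interpreter->AllocateTensors() != kTfLiteOk) {
    LOG(FATAL) << "Failed to allocate tensors!";
}
// ... run Invoke() as above, then clean up once the interpreter is destroyed:
TFLGpuDelegateDelete(delegate);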
I am using a lookup table to convert raw pixel data between color spaces and coding variants. This is the definition of my LUT:
typedef struct
{
    unsigned char data[3];
} rgb;

rgb LUTYUVTORGB[256][256][256];
It is initialized like this:
// loop through all possible values
for (int in_1 = 0; in_1 < 256; in_1++) {
    for (int in_2 = 0; in_2 < 256; in_2++) {
        for (int in_3 = 0; in_3 < 256; in_3++) {
            int out_1, out_2, out_3;
            // LUT YUV -> RGB
            // convert to rgb (http://softpixel.com/~cwright/programming/colorspace/yuv/)
            out_1 = (int)(in_1 + 1.4075 * (in_3 - 128));
            out_2 = (int)(in_1 - 0.3455 * (in_2 - 128) - (0.7169 * (in_3 - 128)));
            out_3 = (int)(in_1 + 1.7790 * (in_2 - 128));
            // clamp values
            if (out_1 < 0) { out_1 = 0; } else if (out_1 > 255) { out_1 = 255; }
            if (out_2 < 0) { out_2 = 0; } else if (out_2 > 255) { out_2 = 255; }
            if (out_3 < 0) { out_3 = 0; } else if (out_3 > 255) { out_3 = 255; }
            // set values in LUT
            LUTYUVTORGB[in_1][in_2][in_3].data[0] = (unsigned char)out_1;
            LUTYUVTORGB[in_1][in_2][in_3].data[1] = (unsigned char)out_2;
            LUTYUVTORGB[in_1][in_2][in_3].data[2] = (unsigned char)out_3;
        }
    }
}
The LUT is then applied to copy the raw pixel data to a QImage():
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        xpos = (y*w + x); // don't calculate 3 times
        buff[x * 3 + 0] = psImage->comps[0].data[xpos];
        buff[x * 3 + 1] = psImage->comps[1].data[xpos];
        buff[x * 3 + 2] = psImage->comps[2].data[xpos];
    }
    memcpy(image.scanLine(y), buff, bytes_per_line);
}
The values of the LUT are static and have to be initialized every time the program starts. Is there any way to initialize it via the preprocessor? Or would it be recommendable to save it to a file?
EDIT: The conversion is used in a time critical video application where every frame has to be processed individually.
Thanks very much in advance!
I have created a one-dimensional array for this table; it is convenient to save and load such an array. I think using this array at runtime won't decrease performance, but I didn't test for performance differences.
#include <stdio.h>
#include <stdlib.h>

#define LUTSIZE 0x1000000

typedef struct
{
    unsigned char data[3];
} rgb;

rgb *LUT;

inline int LUT_index(int in_1, int in_2, int in_3) {
    return in_1 * 0x10000 + in_2 * 0x100 + in_3 * 0x1;
}

inline rgb LUT_value(int in_1, int in_2, int in_3) {
    return LUT[LUT_index(in_1, in_2, in_3)];
}

void save(rgb *LUT, char* fileName) {
    FILE* file = fopen(fileName, "wb");
    int index;
    for (int in_1 = 0; in_1 < 256; in_1++) {
        for (int in_2 = 0; in_2 < 256; in_2++) {
            for (int in_3 = 0; in_3 < 256; in_3++) {
                int out_1, out_2, out_3;
                // LUT YUV -> RGB
                // convert to rgb (http://softpixel.com/~cwright/programming/colorspace/yuv/)
                out_1 = (int)(in_1 + 1.4075 * (in_3 - 128));
                out_2 = (int)(in_1 - 0.3455 * (in_2 - 128) - (0.7169 * (in_3 - 128)));
                out_3 = (int)(in_1 + 1.7790 * (in_2 - 128));
                // clamp values
                if (out_1 < 0) { out_1 = 0; } else if (out_1 > 255) { out_1 = 255; }
                if (out_2 < 0) { out_2 = 0; } else if (out_2 > 255) { out_2 = 255; }
                if (out_3 < 0) { out_3 = 0; } else if (out_3 > 255) { out_3 = 255; }
                index = LUT_index(in_1, in_2, in_3);
                // set values in LUT
                LUT[index].data[0] = (unsigned char)out_1;
                LUT[index].data[1] = (unsigned char)out_2;
                LUT[index].data[2] = (unsigned char)out_3;
            }
        }
    }
    fwrite((void*)LUT, sizeof(rgb), LUTSIZE, file);
    fclose(file);
}

void read(rgb *LUT, char* fileName) {
    FILE* file = fopen(fileName, "rb");
    fread((void*)LUT, sizeof(rgb), LUTSIZE, file);
    fclose(file);
}

int main(int argc, char *argv[])
{
    LUT = (rgb*)malloc(LUTSIZE * sizeof(rgb));
    save(LUT, "LUT_data");
    rgb testValue = LUT_value(5, 3, 7);
    printf("%d %d %d\n", testValue.data[0], testValue.data[1], testValue.data[2]);
    read(LUT, "LUT_data");
    testValue = LUT_value(5, 3, 7);
    printf("%d %d %d\n", testValue.data[0], testValue.data[1], testValue.data[2]);
    free(LUT);
    return 0;
}
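To connect this back to the copy loop in the question, applying the flattened LUT per pixel could look like the sketch below. It assumes psImage->comps[i].data holds 8-bit Y, U, V samples and that buff, image, and bytes_per_line are as in the question:

// Sketch: look up the converted pixel once per Y/U/V triple, then copy to the scanline.
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        int xpos = y * w + x;
        rgb px = LUT_value(psImage->comps[0].data[xpos],   // Y
                           psImage->comps[1].data[xpos],   // U/Cb
                           psImage->comps[2].data[xpos]);  // V/Cr
        buff[x * 3 + 0] = px.data[0];
        buff[x * 3 + 1] = px.data[1];
        buff[x * 3 + 2] = px.data[2];
    }
    memcpy(image.scanLine(y), buff, bytes_per_line);
}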
I am new to Kinect programming, and I am implementing a depth threshold for when the distance is greater than 400 mm:
for (UINT y = 0; y < pImg->rows; ++y)
{
    // Get row pointers for Mats
    const USHORT* pDepthRow = depth->ptr<USHORT>(y);
    for (UINT x = 0; x < pImg->cols; ++x)
    {
        USHORT raw_depth = pDepthRow[x];
        SHORT realDepth = NuiDepthPixelToDepth(raw_depth);
        // If depth value is valid, convert and copy it
        if (raw_depth != 65535)
        {
            if (realDepth > 400) // greater than 400mm
            {
                pImg->at<Vec4b>(y,x)[0] = 255;
                pImg->at<Vec4b>(y,x)[1] = 255;
                pImg->at<Vec4b>(y,x)[2] = 255;
                pImg->at<Vec4b>(y,x)[3] = 255;
            }
            else
            {
                pImg->at<Vec4b>(y,x)[0] = 0;
                pImg->at<Vec4b>(y,x)[1] = 0;
                pImg->at<Vec4b>(y,x)[2] = 0;
                pImg->at<Vec4b>(y,x)[3] = 0;
            }
        }
    }
}
It seems to give the correct result but reduces the frame rate massively.
I wanted to get rid of the loop by using cv::inRange, but that function only supports 8UC1, while the raw depth is 16U.
So what else can I use to segment the depth according to the real distance?
Try to improve performance by storing a reference to the pixel.
Change this:
if (realDepth > 400) // greater than 400mm
{
    pImg->at<Vec4b>(y,x)[0] = 255;
    pImg->at<Vec4b>(y,x)[1] = 255;
    pImg->at<Vec4b>(y,x)[2] = 255;
    pImg->at<Vec4b>(y,x)[3] = 255;
}
else
{
    pImg->at<Vec4b>(y,x)[0] = 0;
    pImg->at<Vec4b>(y,x)[1] = 0;
    pImg->at<Vec4b>(y,x)[2] = 0;
    pImg->at<Vec4b>(y,x)[3] = 0;
}
To this:
(I don't know what T is because I don't know what pImg is.
T should be the return type of the at method; I assume it is Vec4b.)
T& pixel = pImg->at<Vec4b>(y, x); // probably Vec4b& pixel = ...
if (realDepth > 400) // greater than 400mm
{
    pixel[0] = 255;
    pixel[1] = 255;
    pixel[2] = 255;
    pixel[3] = 255;
}
else
{
    pixel[0] = 0;
    pixel[1] = 0;
    pixel[2] = 0;
    pixel[3] = 0;
}
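Going one step further, fetching a row pointer once per scanline avoids the per-pixel at<>() lookup entirely. A rough sketch, assuming pImg points to a cv::Mat of type CV_8UC4:

// Sketch: one ptr<>() call per row instead of one at<>() call per pixel
for (UINT y = 0; y < pImg->rows; ++y)
{
    const USHORT* pDepthRow = depth->ptr<USHORT>(y);
    Vec4b* pOutRow = pImg->ptr<Vec4b>(y);
    for (UINT x = 0; x < pImg->cols; ++x)
    {
        USHORT raw_depth = pDepthRow[x];
        if (raw_depth == 65535) continue;              // skip invalid pixels
        SHORT realDepth = NuiDepthPixelToDepth(raw_depth);
        uchar v = (realDepth > 400) ? 255 : 0;         // threshold at 400 mm
        pOutRow[x] = Vec4b(v, v, v, v);
    }
}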
The TMX map is loading correctly, but it seems to be positioning my tiles incorrectly.
I'm using the TMX Parser from here: https://code.google.com/p/tmx-parser/
It loads the TMX fine, with no errors, but it is only positioning the tiles according to their location in the spritesheet.
Here is the code sample:
void Game::DrawMap()
{
    SDL_Rect rect_CurTile;
    SDL_Rect pos;
    int DrawX;
    int DrawY;
    for (int i = 0; i < map->GetNumLayers(); ++i)
    {
        // Get a layer.
        currLayer = map->GetLayer(i);
        for (int x = 0; x < currLayer->GetWidth(); ++x)
        {
            for (int y = 0; y < currLayer->GetHeight(); ++y)
            {
                int CurTile = currLayer->GetTileId(x, y);
                int Num_Of_Cols = 8;
                int tileset_col = (CurTile % Num_Of_Cols);
                tileset_col++;
                int tileset_row = (CurTile / Num_Of_Cols);
                rect_CurTile.x = (1 + (32 + 1) * tileset_col);
                rect_CurTile.y = (1 + (32 + 1) * tileset_row);
                rect_CurTile.w = 32;
                rect_CurTile.h = 32;
                DrawX = (x * 32);
                DrawY = (y * 32);
                pos.x = DrawX;
                pos.y = DrawY;
                pos.w = 32;
                pos.h = 32;
                apply_surfaceClip(DrawX, DrawY, surfaceTileset, destSurface, &rect_CurTile);
                sprTexture = SDL_CreateTextureFromSurface(mRenderer, destSurface);
                SDL_RenderCopy(mRenderer, sprTexture, &rect_CurTile, &pos);
            }
        }
    }
}

void apply_surfaceClip(int x, int y, SDL_Surface* source, SDL_Surface* destination, SDL_Rect* clip = NULL)
{
    //Holds offsets
    SDL_Rect offset;
    //Get offsets
    offset.x = x;
    offset.y = y;
    //Blit
    SDL_BlitSurface(source, clip, destination, &offset);
}
I fixed the issue. The problem was that, when using two layers, it was also drawing tiles with ID zero (empty cells). Here is the finished sample:
for (int i = 0; i < map->GetNumLayers(); ++i)
{
    // Get a layer.
    currLayer = map->GetLayer(i);
    for (int x = 0; x < currLayer->GetWidth(); ++x)
    {
        for (int y = 0; y < currLayer->GetHeight(); ++y)
        {
            int CurTile = currLayer->GetTileId(x, y);
            if (CurTile == 0)
            {
                continue;
            }
            int Num_Of_Cols = 8;
            int tileset_col = (CurTile % Num_Of_Cols);
            int tileset_row = (CurTile / Num_Of_Cols);
            std::cout << CurTile << std::endl;
            rect_CurTile.x = (1 + (32 + 1) * tileset_col);
            rect_CurTile.y = (1 + (32 + 1) * tileset_row);
            rect_CurTile.w = 32;
            rect_CurTile.h = 32;
            DrawX = (x * 32);
            DrawY = (y * 32);
            pos.x = DrawX;
            pos.y = DrawY;
            pos.w = 32;
            pos.h = 32;
            apply_surfaceClip(DrawX, DrawY, surfaceTileset, destSurface, &rect_CurTile);
            sprTexture = SDL_CreateTextureFromSurface(mRenderer, destSurface);
            SDL_RenderCopy(mRenderer, sprTexture, &rect_CurTile, &pos);
        }
    }
}
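As a side note on the design: SDL_CreateTextureFromSurface is called once per tile per frame in the sample above, which is expensive and leaks textures unless each one is destroyed. One could instead create the tileset texture once at load time and render clips from it directly. A rough sketch, assuming surfaceTileset holds the whole spritesheet:

// Sketch: build the tileset texture once, then blit clips from it per tile.
SDL_Texture* tilesetTexture = SDL_CreateTextureFromSurface(mRenderer, surfaceTileset);
// ... inside the per-tile loop, replace the create/copy pair with:
SDL_RenderCopy(mRenderer, tilesetTexture, &rect_CurTile, &pos);
// ... and at shutdown:
SDL_DestroyTexture(tilesetTexture);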