Converting RGB to Luv - C++

I'm trying to convert an RGB image to Luv and I have a problem: the L component is good, but when I display the u and v components, both are black (all pixels have value 0).
for (int i = 0; i < height; i++)
    for (int j = 0; j < width; j++)
    {
        Vec3b v3 = src.at<Vec3b>(i, j);
        float b = ((float)v3[0]) / 255;
        float g = ((float)v3[1]) / 255;
        float r = ((float)v3[2]) / 255;
        float x = r * 0.412453 + g * 0.357580 + b * 0.180423;
        float y = r * 0.212671 + g * 0.715160 + b * 0.072169;
        float z = r * 0.019334 + g * 0.119193 + b * 0.950227;
        //L
        if (y > 0.008856) {
            l_mat.at<uchar>(i, j) = 255 / 100 * (116 * pow(y, 1.0 / 3.0));
            dst.at<Vec3b>(i, j)[0] = 255 / 100 * (116 * pow(y, 1.0 / 3.0));
            // printf("%d / " , l_mat.at<uchar>(i, j));
        }
        else {
            l_mat.at<uchar>(i, j) = 255 / 100 * (903.3 * y);
            dst.at<Vec3b>(i, j)[0] = 255 / 100 * (903.3 * y);
        }
        float u = 4 * x / (x + 15 * y + 3 * z);
        float v = 9 * y / (x + 15 * y + 3 * z);
        //printf("u: %.2f , v:%.2f || ", u, v);
        //U
        u_mat.at<uchar>(i, j) = 255 / 354 * (13 * l_mat.at<uchar>(i, j)*(u - 0.19793943) + 134);
        //printf("%d / ", u_mat.at<uchar>(i, j));
        dst.at<Vec3b>(i, j) = 255 / 354 * (13 * l_mat.at<uchar>(i, j)*(u - 0.19793943) + 134);
        //v
        v_mat.at<uchar>(i, j) = 255 / 262 * (13 * l_mat.at<uchar>(i, j)*(v - 0.46831096) + 140);
        dst.at<Vec3b>(i, j) = 255 / 262 * (13 * l_mat.at<uchar>(i, j)*(v - 0.46831096) + 140);
    }
I have to do the conversion pixel by pixel; I can't use cvtColor.
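For what it's worth, a likely reason u and v come out black: in C++ the scale factors are computed with integer division, so 255 / 354 and 255 / 262 both evaluate to 0 and every u/v value is multiplied by zero (255 / 100 happens to give 2, which is why L still shows something). A minimal sketch of the per-pixel assignments with floating-point factors, reusing the y, u, v already computed above; it also adds the -16 term of the standard CIE L* formula, which the code above omits, and the offsets and scales match OpenCV's documented 8-bit Luv ranges:
        // sketch of a fix: keep L*, u*, v* in floats and only quantize at the end
        float L_star = (y > 0.008856f) ? 116.0f * powf(y, 1.0f / 3.0f) - 16.0f
                                       : 903.3f * y;                  // L* in [0, 100]
        float u_star = 13.0f * L_star * (u - 0.19793943f);            // u* roughly in [-134, 220]
        float v_star = 13.0f * L_star * (v - 0.46831096f);            // v* roughly in [-140, 122]
        l_mat.at<uchar>(i, j) = saturate_cast<uchar>(255.0f / 100.0f * L_star);
        u_mat.at<uchar>(i, j) = saturate_cast<uchar>(255.0f / 354.0f * (u_star + 134.0f));
        v_mat.at<uchar>(i, j) = saturate_cast<uchar>(255.0f / 262.0f * (v_star + 140.0f));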

Related

How do I add a wave to a curved surface composed of triangle primitives in C++?

I want to preface this post: This is perhaps more of a math question than a coding question.
I am developing a plant (lettuce) model which involves somewhat complex geometry. At this stage I have a surface curved in 2 dimensions, but now I want to add waviness to this curved surface and am having a hard time envisioning how to do so. The surface is made of triangle primitives; the primitives take xyz vectors to encode the locations of the vertices. I am using an API called HELIOS to develop this procedural model of lettuce. I essentially created the surface with for loops and the sine function. Disclaimer: I do not have a strong background in geometry, computer graphics, or C++.
Here is the relevant code:
#include "Context.h"
#include "Visualizer.h"
using namespace helios;
using namespace std;
vector<uint> addLeaf(float leaf_length, float leaf_width, float leaf_bend_x, float leaf_bend_y, float rotation_z, float rotation_x, float displacement, float radius, Context* context ) {
    std::vector<uint> UUIDs;
    // float leaf_length = 10;
    float Nz = 10; // params.s1_leaf_subdivisions ; number of times to split the dimension
    float dz = leaf_length / Nz; // length of each subdivision
    // float leaf_width = 10;
    float Ny = 10; // params.s1_leaf_subdivisions ; number of times to split the dimension
    float dy = leaf_width / Ny; // length of each subdivision
    // leaf wave
    // float A_3 = leaf_length * float(0.5); // Half waves on the leaf 10
    // float A_2 = leaf_length * float(0.1); // amplitude 0.25
    float A_3 = 1; // Half waves on the leaf 10
    float A_2 = 1; // amplitude 0.25
    float leaf_amplitude = leaf_length / float(10);
    // the 2 * dx extends the sine wave a bit beyond 1/2 wavelength so base of leaves come together
    for (int i = 0; i < Nz + (2 * dz); i++) {
        for (float j = 0; j < Ny; j++) {
            float z = i * dz; //for each subdivision in z define Z coord
            float y = j * dy; //for each subdivision in y define y coord
            float x = 0; // we will also need an x coord
            float sz = dz; // the next step in z will be equal to a subdivision in z
            float sy = dy; // the next step in y will be equal to a subdivision in y
            float z_i = z * M_PI / (Nz * dz); // the second coord for z is z_i needed to define a triangle primitive
            float sz_i = (z + sz) * M_PI / (Nz * dz); //
            // this would be y_1 in sorghum model
            float y_i = (y * M_PI / (Ny * dy)) / (A_3); // the second coord for y is y_i needed to define a triangle primitive
            float sy_i = ((y + sy) * M_PI / (Ny * dy)) / (A_3);
            //waviness of leaf
            float leaf_wave_1;
            float leaf_wave_2;
            float leaf_wave_3;
            float leaf_wave_4;
            if (j == 0) {
                leaf_wave_1 = A_2 * sin(z_i);
                leaf_wave_2 = A_2 * sin(sz_i);
            } else {
                leaf_wave_1 = A_2 * sin(z_i);
                leaf_wave_2 = A_2 * sin(sz_i);
            }
            // Now define x based on z,y and add leaf bend in x and y
            x = leaf_bend_x * sin(z_i);
            x = ((x*radius + displacement + (leaf_bend_y * sin(y_i))) / 2) + leaf_wave_1;
            vec3 v0(x*radius + displacement, y, z);
            x = leaf_bend_x * sin(sz_i);
            x = ((x*radius + displacement + (leaf_bend_y * sin(y_i))) / 2) + leaf_wave_2;
            vec3 v1(x*radius + displacement, y, z + sz);
            if (j == Nz - 1) {
                leaf_wave_3 = sin(sz_i) * A_2;
                leaf_wave_4 = sin(z_i) * A_2;
            } else {
                leaf_wave_3 = sin(sz_i) * A_2;
                leaf_wave_4 = sin(z_i) * A_2;
            }
            x = leaf_bend_x * sin(sz_i);
            x = ((x*radius + displacement + (leaf_bend_y * sin(sy_i))) / 2) + leaf_wave_3;
            vec3 v2(x*radius + displacement, y + sy, z + sz);
            x = leaf_bend_x * sin(z_i);
            x = ((x*radius + displacement + (leaf_bend_y * sin(sy_i))) / 2) + leaf_wave_4;
            vec3 v3(x*radius + displacement, y + sy, z);
            // set of two triangles which form a rectangle or square as subunits of leaf
            UUIDs.push_back(context->addTriangle(v0, v1, v2, RGB::cyan));
            UUIDs.push_back(context->addTriangle(v0, v2, v3, RGB::magenta));
        }
    }
    return UUIDs;
}
// call to functions and build lettuce geometries
int main( void ){
    Context context;
    float leaf_length = 10;
    float leaf_width = 10;
    float radius = 1; // additional control of leaf curvature
    // add leaves one by one; 'i' here is # of leaves external to whorl
    for (int i = 0; i < 6; i++) {
        if (i == 0)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, 0, i/5, radius, &context);
        // if (i == 1)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, -M_PI/ 20, i/5, radius, &context);
        // if (i == 2)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, -M_PI/ 10, i/5, radius, &context);
        // if (i == 3)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, -M_PI/ 9, i/5, radius, &context);
        // if (i == 4)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, -M_PI/ 7, i/5, radius, &context);
        // if (i == 5)addLeaf(leaf_length, leaf_width, 0.5*leaf_length, 0.5*leaf_width, 4 * M_PI / 9*i, -M_PI/ 5, i/5, radius, &context);
    }
    Visualizer visualizer(800);
    visualizer.buildContextGeometry(&context);
    visualizer.setLightingModel(Visualizer::LIGHTING_PHONG);
    visualizer.plotInteractive();
}
I tried to use a sine function and an additional for loop to create a series of values to add to the X coordinate of the triangles but did not obtain the result I was looking for.
This is how you can create a wave geometry.
You can keep updating the m_fTime value to animate the wave.
// m_iWaveFlowOut -> value to be either 0 or 1
// m_fFrequency   -> number of waves
// m_fAmplitude   -> amplitude of the wave
void Generate()
{
    const int n8 = m_iNSegments * 8;   // size of VBO gfx data
    const int sz0 = m_iMSegments * n8; // size of VBO gfx data
    const int sz1 = (m_iMSegments - 1) * (m_iNSegments - 1) * 6; // size of indices
    verticesRect.clear();
    indicesRect.clear();
    int a, i, j, k, b;
    float x, y, z, dx, dy, l;
    glm::vec3 u, v, nor;
    dx = 2.0 * ( m_fWidth / float(m_iNSegments - 1));
    dy = 2.0 * ( m_fHeight / float(m_iMSegments - 1));
    for (a = 0, y = -m_fHeight, j = 0; j < m_iMSegments; j++, y += dy)
        for (x = -m_fWidth, i = 0; i < m_iNSegments; i++, x += dx)
        {
            float dist = glm::length(glm::vec2(x + m_fxOffset, y + m_fyOffset));
            float attenuation, kc, kq;
            kc = 1.0; kq = 0.0;
            attenuation = 1.0f;
            if (m_bUseAttenuation) {
                attenuation = 1.0 / (kc + (this->m_fKl * dist) + (kq * pow(dist, 2)));
                if (attenuation > 1.0) attenuation = 1.0;
            }
            switch (m_WAVETYPE)
            {
            case Sum_Wave2::WAVE2_TYPE::COS:
                z = (-m_fAmplitude * attenuation) * cos(((x + m_fxOffset) / m_fFrequency) + m_fTime * m_iWaveFlowOut);
                break;
            case Sum_Wave2::WAVE2_TYPE::SIN:
                z = (-m_fAmplitude * attenuation) * sin(((y + m_fyOffset) / m_fFrequency) + m_fTime * m_iWaveFlowOut);
                break;
            case Sum_Wave2::WAVE2_TYPE::RING:
                z = (-m_fAmplitude * attenuation) * sin((glm::length(glm::vec2(x + m_fxOffset, y + m_fyOffset)) + m_fTime * m_iWaveFlowOut) / m_fFrequency);
                break;
            default:
                z = 0.0;
                break;
            }
            verticesRect.push_back(x); a++;
            verticesRect.push_back(y); a++;
            verticesRect.push_back(z); a++;
            // Normal (will be recomputed later)
            verticesRect.push_back(0.0); a++;
            verticesRect.push_back(0.0); a++;
            verticesRect.push_back(1.0); a++;
            // TexCoord
            verticesRect.push_back((x + m_fWidth) / (m_fWidth + m_fWidth)); a++;
            verticesRect.push_back((y + m_fHeight) / (m_fHeight + m_fHeight)); a++;
        }
    // triangulation indices
    for (a = 0, j = 1; j < m_iMSegments; j++)
        for (i = 1; i < m_iNSegments; i++)
        {
            b = ((m_iNSegments * j) + i) * 8;
            // First triangle per quad
            indicesRect.push_back(b - 8); a++;
            indicesRect.push_back(b - 8 - n8); a++;
            indicesRect.push_back(b); a++;
            // Second triangle per quad
            indicesRect.push_back(b - 8 - n8); a++;
            indicesRect.push_back(b - n8); a++;
            indicesRect.push_back(b); a++;
            // recompute inner normals
            for (k = 0; k < 3; k++) {
                u[k] = verticesRect[indicesRect[a - 6] + k] - verticesRect[indicesRect[a - 4] + k];
                v[k] = verticesRect[indicesRect[a - 5] + k] - verticesRect[indicesRect[a - 4] + k];
            }
            glm::vec3 cross1 = crossProduct(u, v);
            cross1 = glm::normalize(cross1);
            for (k = 0; k < 3; k++) {
                u[k] = verticesRect[indicesRect[a - 3] + k] - verticesRect[indicesRect[a - 1] + k];
                v[k] = verticesRect[indicesRect[a - 2] + k] - verticesRect[indicesRect[a - 1] + k];
            }
            glm::vec3 cross2 = crossProduct(u, v);
            cross2 = glm::normalize(cross2);
            for (k = 0; k < 3; k++) {
                verticesRect[indicesRect[a - 1] + 3 + k] = 0.5 * (cross1[k] + cross2[k]);
            }
        }
    for (i = 0; i < sz1; i++) {
        indicesRect[i] = indicesRect[i] /= 8;
    }
}
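Applied back to the addLeaf() loop from the question, the same idea works: add an extra sine term whose phase varies across the leaf width as well as along its length, at a higher frequency than the overall bend. This is only a sketch under my reading of the original loop; A_2 serves as the ripple amplitude and the factor 4 (how many ripples fit across the width) is an arbitrary illustrative choice, not a HELIOS parameter:
            // sketch: ripple that varies across the width (y) and along the length (z)
            float ripple_v0 = A_2 * sin(4.0f * y_i) * sin(z_i);
            // ...v1, v2, v3 use the same expression evaluated at their own
            // (y_i, sz_i), (sy_i, sz_i) and (sy_i, z_i) pairs
            x = leaf_bend_x * sin(z_i);
            x = ((x * radius + displacement + (leaf_bend_y * sin(y_i))) / 2) + ripple_v0;
            vec3 v0(x * radius + displacement, y, z);
Because each of the four vertices gets the offset evaluated at its own (y, z) pair, the shared edges of neighbouring quads still agree and the rippled surface stays continuous.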

Why does this downscaling algorithm produce artifacts?

I have a scaling algorithm which appears to scale the image to the right size, but it produces artefacts (slight image corruption) in the right half of the image. As I am inexperienced with pointers, I suspect I may have blundered with my pointer arithmetic!
To run the project on OS X:
1. Download it from https://www.dropbox.com/s/myme1z1mkxjwyjf/artifact.zip?dl=0
2. Open the xcodeproj file found in proj.ios.
All the relevant code is in HelloWorldScene.cpp. In the function test(), you can comment out / uncomment the method we wish to test:
void HelloWorld::test(){
    testCopy();   //In this case the image appears as expected. (a simple copy)
    // testScale(); //In this case there are strange artifacts on the right tip of the arrow.
}
testCopy() is my attempt at just copying the contents of the buffer without doing anything bad like memory corruption, leaks, etc. The image appears on the screen looking OK!
void HelloWorld::testCopy(){
    std::string infile = _imageName;
    Image* img = new Image();
    img->initWithImageFile(infile);
    auto odata = img->getData();
    Image* copy = new Image();
    int components = 4;
    auto finalDataLen = img->getDataLen();
    auto finalData = static_cast<unsigned char*>(malloc(finalDataLen));
    for (int i = 0; i<img->getWidth(); i++) {
        for (int j = 0; j<img->getHeight(); j++) {
            unsigned char *pixel = odata + (i + j * img->getWidth()) * components;
            unsigned char *fpixel = finalData + (i + j * img->getWidth()) * components;
            fpixel[0] = pixel[0];
            fpixel[1] = pixel[1];
            fpixel[2] = pixel[2];
            fpixel[3] = pixel[3];
        }
    }
    copy->initWithRawData(finalData, finalDataLen, img->getWidth(), img->getHeight(), 8);
    Texture2D* tk = new Texture2D();
    tk->initWithImage(copy);
    Sprite* foo = Sprite::createWithTexture(tk);
    foo->setPosition(Director::getInstance()->getVisibleSize().width/2, Director::getInstance()->getVisibleSize().height/2);
    foo->setScale(0.8);
    this->addChild(foo);
    delete img;
    delete copy;
    return;
}
Now comment out testCopy() and uncomment testScale(). In this case the image appears, but with some corruption on the right side of the image!
void HelloWorld::testScale(){
    std::string infile = _imageName;
    Image* img = new Image();
    img->initWithImageFile(infile);
    Image* scl = new Image();
    scaleImage(img, scl, 0.8);
    Texture2D* tk = new Texture2D(); //Texture is needed as long as the sprite exists, so we aren't deleting it.
    tk->initWithImage(scl);
    Sprite* foo = Sprite::createWithTexture(tk);
    foo->setPosition(Director::getInstance()->getVisibleSize().width/2, Director::getInstance()->getVisibleSize().height/2);
    this->addChild(foo);
    delete img;
    delete scl;
    return;
}
void HelloWorld::scaleImage(Image* original, Image* scaledImage, const float& scale){
    int width = scale*original->getWidth();
    int height = scale*original->getHeight();
    int x=4;
    unsigned char* data = original->getData();
    auto dataLen = width * height * x * sizeof(unsigned char);
    auto data2 = static_cast<unsigned char*>(malloc(dataLen));
    //sprshrink seems to be the problem method.
    sprshrink(data2, width, height, data, original->getWidth(), original->getHeight());
    scaledImage->initWithRawData(data2, dataLen, width, height, 8);
}
//Why does this method produce artifacts?
void HelloWorld::sprshrink(unsigned char *dest, int dwidth, int dheight, unsigned char *src, int swidth, int sheight){
    int x, y;
    int i, ii;
    float red, green, blue, alpha;
    float xfrag, yfrag, xfrag2, yfrag2;
    float xt, yt, dx, dy;
    int xi, yi;
    dx = ((float)swidth)/dwidth;
    dy = ((float)sheight)/dheight;
    for(yt = 0, y = 0; y < dheight; y++, yt += dy)
    {
        yfrag = (float) ceil(yt) - yt;
        if(yfrag == 0)
            yfrag = 1;
        yfrag2 = yt+dy - (float) floor(yt + dy);
        if(yfrag2 == 0 && dy != 1.0f)
            yfrag2 = 1;
        for(xt = 0, x = 0; x < dwidth; x++, xt += dx)
        {
            xi = (int) xt;
            yi = (int) yt;
            xfrag = (float) ceil(xt) - xt;
            if(xfrag == 0)
                xfrag = 1;
            xfrag2 = xt+dx - (float) floor(xt+dx);
            if(xfrag2 == 0 && dx != 1.0f)
                xfrag2 = 1;
            red = xfrag * yfrag * src[(yi*swidth+xi)*4];
            green = xfrag * yfrag * src[(yi*swidth+xi)*4+1];
            blue = xfrag * yfrag * src[(yi*swidth+xi)*4+2];
            alpha = xfrag * yfrag * src[(yi*swidth+xi)*4+3];
            for(i=0; xi + i + 1 < xt+dx-1; i++)
            {
                red += yfrag * src[(yi*swidth+xi+i+1)*4];
                green += yfrag * src[(yi*swidth+xi+i+1)*4+1];
                blue += yfrag * src[(yi*swidth+xi+i+1)*4+2];
                alpha += yfrag * src[(yi*swidth+xi+i+1)*4+3];
            }
            red += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4];
            green += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+1];
            blue += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+2];
            alpha += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+3];
            for(i=0; yi+i+1 < yt+dy-1 && yi+i+1 < sheight; i++)
            {
                red += xfrag * src[((yi+i+1)*swidth+xi)*4];
                green += xfrag * src[((yi+i+1)*swidth+xi)*4+1];
                blue += xfrag * src[((yi+i+1)*swidth+xi)*4+2];
                alpha += xfrag * src[((yi+i+1)*swidth+xi)*4+3];
                for (ii = 0; xi + ii + 1 < xt + dx - 1 && xi + ii + 1 < swidth; ii++)
                {
                    red += src[((yi+i+1)*swidth+xi+ii+1)*4];
                    green += src[((yi+i+1)*swidth+xi+ii+1)*4+1];
                    blue += src[((yi+i+1)*swidth+xi+ii+1)*4+2];
                    alpha += src[((yi+i+1)*swidth+xi+ii+1)*4+3];
                }
                red += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4];
                green += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+1];
                blue += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+2];
                alpha += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+3];
            }
            if (yi + i + 1 < sheight)
            {
                red += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4];
                green += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 1];
                blue += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 2];
                alpha += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 3];
                for (ii = 0; xi + ii + 1 < xt + dx - 1 && xi + ii + 1 < swidth; ii++)
                {
                    red += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4];
                    green += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 1];
                    blue += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 2];
                    alpha += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 3];
                }
            }
            if (yi + i + 1 < sheight && x + xi + 1 < swidth)
            {
                red += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4];
                green += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 1];
                blue += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 2];
                alpha += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 3];
            }
            red /= dx * dy;
            green /= dx * dy;
            blue /= dx * dy;
            alpha /= dx * dy;
            red = clamp(red, 0, 255);
            green = clamp(green, 0, 255);
            blue = clamp(blue, 0, 255);
            alpha = clamp(alpha, 0, 255);
            dest[(y*dwidth+x)*4] = (unsigned char) red;
            dest[(y*dwidth+x)*4+1] = (unsigned char) green;
            dest[(y*dwidth+x)*4+2] = (unsigned char) blue;
            dest[(y*dwidth+x)*4+3] = (unsigned char) alpha;
        }
    }
}
I suspect my downscaling algorithm (sprshrink) works (because it is someone else's! :D), and that I am blundering with my usage of pointers in testScale(). What do you think? Am I allocating and using my pointers properly? What am I doing wrong?
Images: a clean result when running testCopy(), and artefacts on the right side when running testScale() instead of testCopy() (comment out testCopy).
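One thing worth checking, purely from comparing this copy of sprshrink() with the reference version quoted under the next question below: two of the right-edge guards differ. In the copy above, the xfrag2 column added at the end of each pass through the inner y loop is not bounds-checked at all, and the final corner term is guarded with x + xi + 1 < swidth instead of xi + ii + 1 < swidth. The first can read one pixel past the end of a source row at the right edge, and the second makes the guard depend on the destination x, so the corner contribution is dropped or mis-read over the right-hand part of the image — both consistent with corruption confined to the right side. The reference version guards both terms like this (a sketch, same variable names):
            // guard used in the reference version for the xfrag2 column and the corner term
            if (yi + i + 1 < sheight && xi + ii + 1 < swidth)
            {
                red   += xfrag2 * src[((yi + i + 1) * swidth + xi + ii + 1) * 4];
                green += xfrag2 * src[((yi + i + 1) * swidth + xi + ii + 1) * 4 + 1];
                blue  += xfrag2 * src[((yi + i + 1) * swidth + xi + ii + 1) * 4 + 2];
                alpha += xfrag2 * src[((yi + i + 1) * swidth + xi + ii + 1) * 4 + 3];
            }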

Algorithm to downscale / interpolate an image using buffers?

I'm trying to downscale an image from a large size (as large as 960x960) to possibly as small as 32x32. I have the following code I use to get the raw pixels:
Image* img = new Image();
img->initWithImageFile(fileNameWithPath);
int x = 3;
if(img->hasAlpha()){
    x = 4;
}
unsigned char *data = new unsigned char[img->getDataLen()*x];
data = img->getData();
// [0][0] => Left-Top Pixel !
// But cocos2d Location Y-axis is Bottom(0) to Top(max)
//This is for changing pixels in the original image
//Skip this loop if there are no changes to be made (converting to grayscale etc).
for(int i=0; i<img->getWidth(); i++)
{
    for(int j=0; j<img->getHeight(); j++)
    {
        unsigned char *pixel = data + (i + j * img->getWidth()) * x;
        // You can see/change pixels' RGBA value(0-255) here !
        unsigned char r = *pixel;
        unsigned char g = *(pixel + 1);
        unsigned char b = *(pixel + 2);
        unsigned char a = *(pixel + 3);
        //pixel[2] = 255; //Example: Setting the blue component to 255
    }
}
I can create an output image by doing:
int width = scale*img->getWidth();
int height = scale*img->getHeight();
Image* scaledImage = new Image();
auto dataLen = width * height * x * sizeof(unsigned char);
auto data2 = static_cast<unsigned char*>(malloc(dataLen));
scaledImage->initWithRawData(data2, dataLen, width, height, 8);
And I can set the individual pixels of the output image by doing:
unsigned char *pixel2 = data2 + (i + j * width) * x;
The question is how to average / interpolate the pixels from the original image efficiently (using minimal CPU and memory, with a preference for spending more CPU rather than more memory if necessary).
Challenges:
The downscaled image and the original image may not be perfect multiples.
The downscaled image can be as small as 0.1 of the original image size.
Most images will be downscaled to 0.4 to 0.1 of the original size, so these are the most important ranges.
EDIT: If you think some other interpolation algorithm would be better suited (instead of bilinear), then I am open to it. The challenge is writing an efficient algorithm to average / interpolate the individual pixels.
How do I interpolate the individual pixels of the original image?
Memory isn't an issue: you need an input buffer with the original image and an output buffer with the rescaled image, and you don't need any more than that.
Bilinear interpolation isn't very suitable for downsampling by a large factor, as it doesn't really differ from nearest-neighbour sampling. You only interpolate four neighbouring pixels, so you are vulnerable to aliasing effects, particularly on computer-generated images.
This function uses the averaging method.
/*
  resize an image using the averaging method.
  Note that dwidth and dheight must be smaller than or equal to swidth, sheight.
*/
void sprshrink(unsigned char *dest, int dwidth, int dheight, unsigned char *src, int swidth, int sheight)
{
    int x, y;
    int i, ii;
    float red, green, blue, alpha;
    float xfrag, yfrag, xfrag2, yfrag2;
    float xt, yt, dx, dy;
    int xi, yi;
    dx = ((float)swidth)/dwidth;
    dy = ((float)sheight)/dheight;
    for(yt = 0, y = 0; y < dheight; y++, yt += dy)
    {
        yfrag = ceil(yt) - yt;
        if(yfrag == 0)
            yfrag = 1;
        yfrag2 = yt+dy - (float) floor(yt + dy);
        if(yfrag2 == 0 && dy != 1.0f)
            yfrag2 = 1;
        for(xt = 0, x = 0; x < dwidth; x++, xt += dx)
        {
            xi = (int) xt;
            yi = (int) yt;
            xfrag = (float) ceil(xt) - xt;
            if(xfrag == 0)
                xfrag = 1;
            xfrag2 = xt+dx - (float) floor(xt+dx);
            if(xfrag2 == 0 && dx != 1.0f)
                xfrag2 = 1;
            red = xfrag * yfrag * src[(yi*swidth+xi)*4];
            green = xfrag * yfrag * src[(yi*swidth+xi)*4+1];
            blue = xfrag * yfrag * src[(yi*swidth+xi)*4+2];
            alpha = xfrag * yfrag * src[(yi*swidth+xi)*4+3];
            for(i=0; xi + i + 1 < xt+dx-1; i++)
            {
                red += yfrag * src[(yi*swidth+xi+i+1)*4];
                green += yfrag * src[(yi*swidth+xi+i+1)*4+1];
                blue += yfrag * src[(yi*swidth+xi+i+1)*4+2];
                alpha += yfrag * src[(yi*swidth+xi+i+1)*4+3];
            }
            red += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4];
            green += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+1];
            blue += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+2];
            alpha += xfrag2 * yfrag * src[(yi*swidth+xi+i+1)*4+3];
            for(i=0; yi+i+1 < yt+dy-1 && yi+i+1 < sheight; i++)
            {
                red += xfrag * src[((yi+i+1)*swidth+xi)*4];
                green += xfrag * src[((yi+i+1)*swidth+xi)*4+1];
                blue += xfrag * src[((yi+i+1)*swidth+xi)*4+2];
                alpha += xfrag * src[((yi+i+1)*swidth+xi)*4+3];
                for (ii = 0; xi + ii + 1 < xt + dx - 1 && xi + ii + 1 < swidth; ii++)
                {
                    red += src[((yi+i+1)*swidth+xi+ii+1)*4];
                    green += src[((yi+i+1)*swidth+xi+ii+1)*4+1];
                    blue += src[((yi+i+1)*swidth+xi+ii+1)*4+2];
                    alpha += src[((yi+i+1)*swidth+xi+ii+1)*4+3];
                }
                if (yi + i + 1 < sheight && xi + ii + 1 < swidth)
                {
                    red += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4];
                    green += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+1];
                    blue += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+2];
                    alpha += xfrag2 * src[((yi+i+1)*swidth+xi+ii+1)*4+3];
                }
            }
            if (yi + i + 1 < sheight)
            {
                red += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4];
                green += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 1];
                blue += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 2];
                alpha += xfrag * yfrag2 * src[((yi + i + 1)*swidth + xi) * 4 + 3];
                for (ii = 0; xi + ii + 1 < xt + dx - 1 && xi + ii + 1 < swidth; ii++)
                {
                    red += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4];
                    green += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 1];
                    blue += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 2];
                    alpha += yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 3];
                }
            }
            if (yi + i + 1 < sheight && xi + ii + 1 < swidth)
            {
                red += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4];
                green += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 1];
                blue += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 2];
                alpha += xfrag2 * yfrag2 * src[((yi + i + 1)*swidth + xi + ii + 1) * 4 + 3];
            }
            red /= dx * dy;
            green /= dx * dy;
            blue /= dx * dy;
            alpha /= dx * dy;
            red = clamp(red, 0, 255);
            green = clamp(green, 0, 255);
            blue = clamp(blue, 0, 255);
            alpha = clamp(alpha, 0, 255);
            dest[(y*dwidth+x)*4] = (unsigned char) red;
            dest[(y*dwidth+x)*4+1] = (unsigned char) green;
            dest[(y*dwidth+x)*4+2] = (unsigned char) blue;
            dest[(y*dwidth+x)*4+3] = (unsigned char) alpha;
        }
    }
}
It is maintained in the Baby X resource compiler on GitHub: https://github.com/MalcolmMcLean/babyxrc
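A minimal usage sketch (my own, not from the repository), assuming an RGBA (4 bytes per pixel) source buffer such as the cocos2d-x Image data from the question above; the only extra allocation is the destination buffer:
#include <vector>
// shrink an RGBA buffer `src` of swidth x sheight pixels to 40% of its size
int dwidth  = (int)(0.4f * swidth);
int dheight = (int)(0.4f * sheight);
std::vector<unsigned char> dest(dwidth * dheight * 4);   // one output buffer, nothing more
sprshrink(dest.data(), dwidth, dheight, src, swidth, sheight);
// dest now holds the averaged pixels in row-major order and can be handed to
// e.g. Image::initWithRawData(dest.data(), dest.size(), dwidth, dheight, 8);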

How to optimize YUV to RGB color conversion code

I have written a function to convert an image in YUV420P to RGB, but it takes 30 milliseconds to convert an image (size: 1280 x 720) to RGB, whereas when I use an ffmpeg function (as this) to convert the YUV image into RGB it takes only 2 milliseconds for the same image. What is the problem with my code? How can I optimize the code that I have written?
My code is given below:
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
for (int i = 0; i < origImage->height; i++)
{
    for (int j = 0; j < origImage->width; j++)
    {
        float Y = data[i*step + j];
        float U = data[ (int)(size + (i/2)*(step/2) + j/2) ];
        float V = data[ (int)(size*1.25 + (i/2)*(step/2) + j/2)];
        float R = Y + 1.402 * (V - 128);
        float G = Y - 0.344 * (U - 128) - 0.714 * (V - 128);
        float B = Y + 1.772 * (U - 128);
        if (R < 0){ R = 0; } if (G < 0){ G = 0; } if (B < 0){ B = 0; }
        if (R > 255){ R = 255; } if (G > 255) { G = 255; } if (B > 255) { B = 255; }
        cvSet2D(img1, i, j, cvScalar(B,G,R));
    }
}
Here, try this (it should reduce the time to about 25 milliseconds):
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
int stepDb2 = step / 2;
float sizeMb1d25 = size * 1.25;
int origImagePTheight = origImage->height;
int origImagePTwidth = origImage->width;
for (int i = 0; i < origImagePTheight; i++)
{
    float idb2 = i / 2;
    int iStep = i * step;
    for (int j = 0; j < origImagePTwidth; j++)
    {
        float variable = idb2 * stepDb2 + j / 2;
        float Y = data[iStep + j];
        float U = -128 + data[ (int)(size + variable) ];
        float V = -128 + data[ (int)(sizeMb1d25 + variable)];
        float R = Y + 1.402 * V;
        float G = Y - 0.344 * U - 0.714 * V;
        float B = Y + 1.772 * U;
        R = R * !(R<0);
        G = G * !(G<0);
        B = B * !(B<0);
        R = R*(!(R>255)) + 255 * (R>255);
        G = G*(!(G>255)) + 255 * (G>255);
        B = B*(!(B>255)) + 255 * (B>255);
        cvSet2D(img1, i, j, cvScalar(B,G,R));
    }
}
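Beyond hoisting the loop invariants, most of the remaining time is likely spent in cvSet2D(), which is a generic per-element accessor. A sketch (my own, not from the question) of the same inner loop writing straight into img1's buffer instead; img1 is IPL_DEPTH_8U with 3 channels, so row i starts at imageData + i * img1->widthStep and pixels are stored B, G, R:
uchar *out = (uchar *)img1->imageData;
for (int i = 0; i < origImagePTheight; i++)
{
    uchar *row = out + i * img1->widthStep;   // start of output row i (BGR, 3 bytes/pixel)
    int   iStep = i * step;
    int   chromaRow = (i / 2) * stepDb2;      // same chroma offset as `variable` above
    for (int j = 0; j < origImagePTwidth; j++)
    {
        int   chroma = chromaRow + j / 2;
        float Y = data[iStep + j];
        float U = data[size + chroma] - 128.0f;
        float V = data[(int)sizeMb1d25 + chroma] - 128.0f;
        float R = Y + 1.402f * V;
        float G = Y - 0.344f * U - 0.714f * V;
        float B = Y + 1.772f * U;
        // clamp and store directly, avoiding the per-pixel cvSet2D call
        row[3 * j]     = (uchar)(B < 0 ? 0 : B > 255 ? 255 : B);   // blue
        row[3 * j + 1] = (uchar)(G < 0 ? 0 : G > 255 ? 255 : G);   // green
        row[3 * j + 2] = (uchar)(R < 0 ? 0 : R > 255 ? 255 : R);   // red
    }
}
Getting down to the ~2 ms ffmpeg achieves generally also means replacing the float math with fixed-point integers and SIMD, which is roughly what libswscale does internally.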

Gradient algorithm produces little white dots

I'm working on an algorithm to generate point-to-point linear gradients. I have a rough proof-of-concept implementation done:
GLuint OGLENGINEFUNCTIONS::CreateGradient( std::vector<ARGBCOLORF> &input, POINTFLOAT start, POINTFLOAT end, int width, int height, bool radial )
{
    std::vector<POINT> pol;
    std::vector<GLubyte> pdata(width * height * 4);
    std::vector<POINTFLOAT> linearpts;
    std::vector<float> lookup;
    float distance = GetDistance(start, end);
    RoundNumber(distance);
    POINTFLOAT temp;
    float incr = 1 / (distance + 1);
    for(int l = 0; l < 100; l++)
    {
        POINTFLOAT outA;
        POINTFLOAT OutB;
        float dirlen;
        float perplen;
        POINTFLOAT dir;
        POINTFLOAT ndir;
        POINTFLOAT perp;
        POINTFLOAT nperp;
        POINTFLOAT perpoffset;
        POINTFLOAT diroffset;
        dir.x = end.x - start.x;
        dir.y = end.y - start.y;
        dirlen = sqrt((dir.x * dir.x) + (dir.y * dir.y));
        ndir.x = static_cast<float>(dir.x * 1.0 / dirlen);
        ndir.y = static_cast<float>(dir.y * 1.0 / dirlen);
        perp.x = dir.y;
        perp.y = -dir.x;
        perplen = sqrt((perp.x * perp.x) + (perp.y * perp.y));
        nperp.x = static_cast<float>(perp.x * 1.0 / perplen);
        nperp.y = static_cast<float>(perp.y * 1.0 / perplen);
        perpoffset.x = static_cast<float>(nperp.x * l * 0.5);
        perpoffset.y = static_cast<float>(nperp.y * l * 0.5);
        diroffset.x = static_cast<float>(ndir.x * 0 * 0.5);
        diroffset.y = static_cast<float>(ndir.y * 0 * 0.5);
        outA.x = end.x + perpoffset.x + diroffset.x;
        outA.y = end.y + perpoffset.y + diroffset.y;
        OutB.x = start.x + perpoffset.x - diroffset.x;
        OutB.y = start.y + perpoffset.y - diroffset.y;
        for (float i = 0; i < 1; i += incr)
        {
            temp = GetLinearBezier(i, outA, OutB);
            RoundNumber(temp.x);
            RoundNumber(temp.y);
            linearpts.push_back(temp);
            lookup.push_back(i);
        }
        for (unsigned int j = 0; j < linearpts.size(); j++) {
            if(linearpts[j].x < width && linearpts[j].x >= 0 &&
               linearpts[j].y < height && linearpts[j].y >= 0)
            {
                pdata[linearpts[j].x * 4 * width + linearpts[j].y * 4 + 0] = (GLubyte) j;
                pdata[linearpts[j].x * 4 * width + linearpts[j].y * 4 + 1] = (GLubyte) j;
                pdata[linearpts[j].x * 4 * width + linearpts[j].y * 4 + 2] = (GLubyte) j;
                pdata[linearpts[j].x * 4 * width + linearpts[j].y * 4 + 3] = (GLubyte) 255;
            }
        }
        lookup.clear();
        linearpts.clear();
    }
    return CreateTexture(pdata, width, height);
}
It works as I would expect most of the time, but at certain angles it produces little white dots. I can't figure out what causes this.
This is what it looks like at most angles (good) http://img9.imageshack.us/img9/5922/goodgradient.png
But once in a while it looks like this (bad): http://img155.imageshack.us/img155/760/badgradient.png
What could be causing the white dots?
Is there maybe also a better way to generate my gradients if no solution is possible for this?
Thanks
I think you have a bug indexing into the pdata byte vector. Your x domain is [0, width), but when you multiply out the indices you're doing x * 4 * width. It should probably be x * 4 + y * 4 * width or x * 4 * height + y * 4, depending on whether your data is arranged row-major or column-major.
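For example, assuming the buffer is meant to be row-major (which is what a texture upload path such as CreateTexture would normally expect), the writes inside the bounds check would become something like this sketch, using the loop's existing names:
        int px = (int)linearpts[j].x;
        int py = (int)linearpts[j].y;
        size_t idx = ((size_t)py * width + px) * 4;   // row-major: 4*width bytes per row, 4 per pixel
        pdata[idx + 0] = (GLubyte) j;
        pdata[idx + 1] = (GLubyte) j;
        pdata[idx + 2] = (GLubyte) j;
        pdata[idx + 3] = (GLubyte) 255;
With the original x * 4 * width + y * 4 form the x and y strides are swapped, so points that are adjacent on the line can land on non-adjacent byte offsets and some texels never get written, which could explain the isolated stray dots appearing only at certain angles.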