I'm making a project where I need to change the lightness and contrast of an image — lightness, not brightness.
My code at the start was:
for (int y = 0; y < dst.rows; y++) {
    for (int x = 0; x < dst.cols; x++) {
        // raw BGR access through the data pointer
        int b = dst.data[dst.channels() * (dst.cols * y + x) + 0];
        int g = dst.data[dst.channels() * (dst.cols * y + x) + 1];
        int r = dst.data[dst.channels() * (dst.cols * y + x) + 2];
        // ... other per-pixel processing
and it works really fast. But when I try to do the HSV-to-HSL conversion to set the L value I need, it gets really slow. My HSV-to-HSL lines of code are:
cvtColor(dst, dst, CV_BGR2HSV);
Vec3b pixel = dst.at<cv::Vec3b>(y, x); // read pixel (y, x)
double H = pixel.val[0];
double S = pixel.val[1];
double V = pixel.val[2];
h = H;
l = (2 - S) * V;
s = S * V;
s /= (l <= 1) ? (l) : 2 - (l);
l /= 2;
/* here I'll do the calculations to set the l I want */
H = h;
l *= 2;
s *= (l <= 1) ? l : 2 - l;
V = (l + s) / 2;
S = (2 * s) / (l + s);
pixel.val[0] = H;
pixel.val[1] = S;
pixel.val[2] = V;
cvtColor(dst, dst, CV_HSV2BGR);
I ran it and it was slow, so I removed lines one by one to see which was the culprit, and I figured out it was cvtColor(dst, dst, CV_BGR2HSV);. So, is there a way to make this faster than using cvtColor, or is its cost something that can be managed?
I think (I haven't tested it, but it seems) that you need to convert the entire image to HSV once, process all the pixels, and then convert the entire image back once. In other words, call cvtColor once per image instead of once per pixel. That should give you a significant boost in speed.
You would do this:
cvtColor(dst, dst, CV_BGR2HSV);
for (int y = 0; y < dst.rows; y++) {
    for (int x = 0; x < dst.cols; x++) {
        Vec3b pixel = dst.at<cv::Vec3b>(y, x); // read current pixel
        double H = pixel.val[0];
        double S = pixel.val[1];
        double V = pixel.val[2];
        h = H;
        l = (2 - S) * V;
        s = S * V;
        s /= (l <= 1) ? (l) : 2 - (l);
        l /= 2;
        /* calculations to set the desired l go here */
        H = h;
        l *= 2;
        s *= (l <= 1) ? l : 2 - l;
        V = (l + s) / 2;
        S = (2 * s) / (l + s);
        pixel.val[0] = H;
        pixel.val[1] = S;
        pixel.val[2] = V;
        dst.at<cv::Vec3b>(y, x) = pixel; // write the modified pixel back
    }
}
cvtColor(dst, dst, CV_HSV2BGR);
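One caveat worth adding: for 8-bit images OpenCV stores H in [0, 180) and S and V in [0, 255], while HSL formulas like the above assume values in [0, 1]. Here is a sketch of the loop body with that normalization made explicit (include <algorithm> for std::min; the 1.1 lightness gain is just a placeholder for your own calculation):
Vec3b pixel = dst.at<cv::Vec3b>(y, x);
double H = pixel.val[0];             // 0..180 in OpenCV's 8-bit HSV
double S = pixel.val[1] / 255.0;     // normalize to 0..1
double V = pixel.val[2] / 255.0;
double l = V * (2 - S) / 2;                                        // HSV -> HSL
double s = (l <= 0 || l >= 1) ? 0 : (V - l) / std::min(l, 1 - l);
l = std::min(1.0, l * 1.1);                                        // placeholder lightness change
V = l + s * std::min(l, 1 - l);                                    // HSL -> HSV
S = (V <= 0) ? 0 : 2 * (1 - l / V);
pixel.val[0] = cv::saturate_cast<uchar>(H);
pixel.val[1] = cv::saturate_cast<uchar>(S * 255);
pixel.val[2] = cv::saturate_cast<uchar>(V * 255);
dst.at<cv::Vec3b>(y, x) = pixel;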
I was learning filters in OpenCV, but I'm a little confused about the Laplacian filter: my result is very different from the one produced by OpenCV's own Laplacian.
First, I apply a Gaussian filter to the image:
const int lenMask = 3; // kernel size (3x3)
Mat filtroGauss(Mat src){
    Mat gauss = src.clone();
    // copy of the source with a 1-pixel zero border
    Mat temp = Mat::zeros(src.rows + 2, src.cols + 2, DataType<uchar>::type);
    int y, x;
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++) temp.at<uchar>(y + 1, x + 1) = src.at<uchar>(y, x);
    }
    // 3x3 Gaussian kernel: 1 2 1 / 2 4 2 / 1 2 1
    int mask[lenMask*lenMask];
    mask[0] = mask[2] = mask[6] = mask[8] = 1;
    mask[1] = mask[3] = mask[5] = mask[7] = 2;
    mask[4] = 4;
    int denominatore = 0;
    for (int i = 0; i < lenMask*lenMask; i++) denominatore += mask[i];
    int value[lenMask*lenMask];
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++){
            // src(y, x) sits at temp(y+1, x+1), so its 3x3 window is temp(y..y+2, x..x+2)
            value[0] = temp.at<uchar>(y, x)*mask[0];
            value[1] = temp.at<uchar>(y, x + 1)*mask[1];
            value[2] = temp.at<uchar>(y, x + 2)*mask[2];
            value[3] = temp.at<uchar>(y + 1, x)*mask[3];
            value[4] = temp.at<uchar>(y + 1, x + 1)*mask[4];
            value[5] = temp.at<uchar>(y + 1, x + 2)*mask[5];
            value[6] = temp.at<uchar>(y + 2, x)*mask[6];
            value[7] = temp.at<uchar>(y + 2, x + 1)*mask[7];
            value[8] = temp.at<uchar>(y + 2, x + 2)*mask[8];
            int avg = 0;
            for (int i = 0; i < lenMask*lenMask; i++) avg += value[i];
            avg = avg / denominatore;
            gauss.at<uchar>(y, x) = avg;
        }
    }
    return gauss;
}
Then I apply the Laplacian:
L(y,x) = f(y-1,x) + f(y+1,x) + f(y,x-1) + f(y,x+1) - 4*f(y,x)
Mat filtroLaplace(Mat src){
    Mat output = src.clone();
    Mat temp = src.clone();
    int y, x;
    for (y = 1; y < src.rows - 1; y++){
        for (x = 1; x < src.cols - 1; x++){
            output.at<uchar>(y, x) = temp.at<uchar>(y - 1, x) + temp.at<uchar>(y + 1, x)
                                   + temp.at<uchar>(y, x - 1) + temp.at<uchar>(y, x + 1)
                                   - 4 * temp.at<uchar>(y, x);
        }
    }
    return output;
}
And here is the final result from my code:
OpenCV result:
Let's rewrite the function a little, so it's easier to discuss:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = sum;
        }
    }
    return output;
}
The source of your problem is sum. Let's examine its range within this algorithm by taking the two extremes:
Black pixel, surrounded by 4 white. That means 255 + 255 + 255 + 255 - 4 * 0 = 1020.
White pixel, surrounded by 4 black. That means 0 + 0 + 0 + 0 - 4 * 255 = -1020.
When you perform output.at<uchar>(y, x) = sum; there's an implicit cast of the int back to unsigned char -- the high-order bits simply get chopped off and the value wraps around (1020 becomes 252, and -1020 becomes 4).
The correct approach to handling this situation (which OpenCV takes) is to saturate before the actual cast. Essentially:
if (sum < 0) {
    sum = 0;
}
else if (sum > 255) {
    sum = 255;
}
OpenCV provides the function cv::saturate_cast<T> to do just this.
There's an additional problem: you're not handling the edge rows and columns of the input image -- you just leave them at their original values. Since you're not asking about that, I'll leave solving it as an exercise to the reader.
Code:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = cv::saturate_cast<uchar>(sum);
        }
    }
    return output;
}
Sample input:
Output of corrected filtroLaplace:
Output of cv::Laplacian:
I have this sample of code that I'm trying to understand:
__global__ void
d_boxfilter_rgba_x(unsigned int *od, int w, int h, int r)
{
    float scale = 1.0f / (float)((r << 1) + 1);
    unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
    if (y < h)
    {
        float4 t = make_float4(0.0f);
        for (int x = -r; x <= r; x++)
        {
            t += tex2D(rgbaTex, x, y);
        }
        od[y * w] = rgbaFloatToInt(t * scale);
        for (int x = 1; x < w; x++)
        {
            t += tex2D(rgbaTex, x + r, y);
            t -= tex2D(rgbaTex, x - r - 1, y);
            od[y * w + x] = rgbaFloatToInt(t * scale);
        }
    }
}
__global__ void
d_boxfilter_rgba_y(unsigned int *id, unsigned int *od, int w, int h, int r)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    id = &id[x];
    od = &od[x];
    float scale = 1.0f / (float)((r << 1) + 1);
    float4 t;
    // left side
    t = rgbaIntToFloat(id[0]) * r;
    for (int y = 0; y < (r + 1); y++)
    {
        t += rgbaIntToFloat(id[y*w]);
    }
    od[0] = rgbaFloatToInt(t * scale);
    for (int y = 1; y < (r + 1); y++)
    {
        t += rgbaIntToFloat(id[(y + r) * w]);
        t -= rgbaIntToFloat(id[0]);
        od[y * w] = rgbaFloatToInt(t * scale);
    }
    // main loop
    for (int y = (r + 1); y < (h - r); y++)
    {
        t += rgbaIntToFloat(id[(y + r) * w]);
        t -= rgbaIntToFloat(id[((y - r) * w) - w]);
        od[y * w] = rgbaFloatToInt(t * scale);
    }
    // right side
    for (int y = h - r; y < h; y++)
    {
        t += rgbaIntToFloat(id[(h - 1) * w]);
        t -= rgbaIntToFloat(id[((y - r) * w) - w]);
        od[y * w] = rgbaFloatToInt(t * scale);
    }
}
This should be a box filter written in CUDA. From what I have read, it should compute an average over a given radius. But d_boxfilter_rgba_y does something like this:
od[0] = rgbaFloatToInt(t * scale);
I don't understand why this scale is used, and why all these loops are needed when there should be just one: compute the sum from -r to +r and divide it by the number of pixels. Can somebody help me?
To calculate the average of a box with radius 1 (3 values), you do:
(box[0] + box[1] + box[2]) / 3 // which is equal to
(box[0] + box[1] + box[2]) * 1/3 // which matches the scale factor
The calculation of scale is:
1.0f / (float)((r << 1) + 1); // equal to
1 / ((r * 2) + 1) // equal to
1 / (2r + 1) // 2r because you go to the left and right and +1 for the middle
The two for loops are used because the "sliding window" optimisation is applied. First the initial box is calculated:
for (int x = -r; x <= r; x++)
{
    t += tex2D(rgbaTex, x, y);
}
Then, for each step to the right, the value just right of the box is added and the leftmost value of the box is removed. That way you can update the sum of the box with just 2 operations instead of the 2*r + 1 operations a naive recomputation would take:
for (int x = 1; x < w; x++)
{
    t += tex2D(rgbaTex, x + r, y);
    t -= tex2D(rgbaTex, x - r - 1, y);
    od[y * w + x] = rgbaFloatToInt(t * scale);
}
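For comparison, here is a minimal single-threaded C++ sketch of the same running-sum idea for one row (the clamped boundary handling and the names are my own, not taken from the sample):
#include <vector>
#include <algorithm>

// 1D sliding-window box average over one row, radius r, clamped at the edges.
std::vector<float> boxRow(const std::vector<float>& row, int r)
{
    const int w = (int)row.size();
    const float scale = 1.0f / (2 * r + 1);
    auto clamped = [&](int x) { return row[std::max(0, std::min(w - 1, x))]; };
    std::vector<float> out(w);
    float t = 0.0f;
    for (int x = -r; x <= r; x++) t += clamped(x);    // build the first window once
    out[0] = t * scale;
    for (int x = 1; x < w; x++)                        // slide: one add, one subtract
    {
        t += clamped(x + r);
        t -= clamped(x - r - 1);
        out[x] = t * scale;
    }
    return out;
}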
This is how I managed to use a Sobel kernel on a grayscale image. However, I don't actually get how to modify it for a color image:
void Soble()
{
    Mat img;
    int w = 3;
    int k = w / 2;
    char fname[MAX_PATH];
    openFileDlg(fname);
    img = imread(fname, CV_LOAD_IMAGE_GRAYSCALE);
    gaussianFiltering(img);
    Mat destinationImg = img.clone();
    float sobelY[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
    float sobelX[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
    for (int i = k; i < img.rows - k; i++)
    {
        for (int j = k; j < img.cols - k; j++)
        {
            float Gx = 0, Gy = 0;
            for (int l = 0; l < w; l++)
            {
                for (int p = 0; p < w; p++)
                {
                    Gx += img.at<uchar>(i + l - k, j + p - k)*sobelX[l][p];
                    Gy += img.at<uchar>(i + l - k, j + p - k)*sobelY[l][p];
                }
            }
            destinationImg.at<uchar>(i, j) = sqrt(Gx*Gx + Gy * Gy) / (4 * sqrt(2));
        }
    }
    imshow("Intermediar", destinationImg);
    imshow("Initial", img);
    waitKey(0);
}
I thought of processing each RGB channel separately, but it does not work and even gives some errors:
float GxR = 0, GyR = 0;
float GxG = 0, GyG = 0;
float GxB = 0, GyB = 0;
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        GxR += img.at<Vec3b>[0](i + l - k, j + p - k)*sobelX[l][p];
        GxG += img.at<Vec3b>[1](i + l - k, j + p - k)*sobelX[l][p];
        GxB += img.at<Vec3b>[2](i + l - k, j + p - k)*sobelX[l][p];
        GyR += img.at<Vec3b>[0](i + l - k, j + p - k)*sobelY[l][p];
        GyG += img.at<Vec3b>[1](i + l - k, j + p - k)*sobelY[l][p];
        GyB += img.at<Vec3b>[2](i + l - k, j + p - k)*sobelY[l][p];
    }
}
destinationImg.at<Vec3b>[0](i, j) = sqrt(GxR*GxR + GyR * GyR) / (4 * sqrt(2));
destinationImg.at<Vec3b>[1](i, j) = sqrt(GxG*GxG + GyB * GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>[2](i, j) = sqrt(GxG*GxG + GyG * GyG) / (4 * sqrt(2));
Can you please explain how this code must be rewritten?
You're accessing the image data the wrong way.
destinationImg.at<Vec3b>[0](i, j)
destinationImg is a Mat of type Vec3b. That means it's a 2D array of three-dimensional vectors.
Your [ ] operator is in the wrong place. The subscript error message tells you that you're using that operator on something that is neither a pointer nor an array, which is not possible. You get the other error message because that operator sits where the (i, j) is expected.
First you have to get one of these vectors, then you can get its elements.
destinationImg.at<Vec3b>(i,j) will give you the vector at i,j.
destinationImg.at<Vec3b>(i,j)[0] will give you the first element of that vector.
Example from the OpenCV documentation:
Vec3b intensity = img.at<Vec3b>(y, x);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
http://docs.opencv.org/2.4.13.2/doc/user_guide/ug_mat.html
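Applied to the Sobel code above, the inner loops could then look like this (a sketch, assuming img was loaded with CV_LOAD_IMAGE_COLOR so that img and destinationImg both hold Vec3b pixels):
float Gx[3] = { 0, 0, 0 }, Gy[3] = { 0, 0, 0 };
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        Vec3b v = img.at<Vec3b>(i + l - k, j + p - k); // fetch the whole vector first
        for (int c = 0; c < 3; c++)                    // c indexes B, G, R
        {
            Gx[c] += v[c] * sobelX[l][p];
            Gy[c] += v[c] * sobelY[l][p];
        }
    }
}
for (int c = 0; c < 3; c++)
    destinationImg.at<Vec3b>(i, j)[c] =
        saturate_cast<uchar>(sqrt(Gx[c]*Gx[c] + Gy[c]*Gy[c]) / (4 * sqrt(2)));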
I have written a function to convert an image from YUV420P to RGB, but it takes 30 milliseconds to convert one image (1280 x 720), whereas converting the same image with ffmpeg takes only 2 milliseconds. What is the problem with my code? How can I optimize it?
My code is given below:
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
for (int i = 0; i < origImage->height; i++)
{
    for (int j = 0; j < origImage->width; j++)
    {
        float Y = data[i*step + j];
        float U = data[(int)(size + (i/2)*(step/2) + j/2)];
        float V = data[(int)(size*1.25 + (i/2)*(step/2) + j/2)];
        float R = Y + 1.402 * (V - 128);
        float G = Y - 0.344 * (U - 128) - 0.714 * (V - 128);
        float B = Y + 1.772 * (U - 128);
        if (R < 0){ R = 0; } if (G < 0){ G = 0; } if (B < 0){ B = 0; }
        if (R > 255){ R = 255; } if (G > 255){ G = 255; } if (B > 255){ B = 255; }
        cvSet2D(img1, i, j, cvScalar(B, G, R));
    }
}
Here, try this (it should reduce it to around 25 milliseconds):
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
// hoist loop-invariant values out of the loops
int stepDb2 = step / 2;
float sizeMb1d25 = size * 1.25;
int origImagePTheight = origImage->height;
int origImagePTwidth = origImage->width;
for (int i = 0; i < origImagePTheight; i++)
{
    float idb2 = i / 2;   // integer division on purpose: floor(i / 2)
    int iStep = i * step;
    for (int j = 0; j < origImagePTwidth; j++)
    {
        float variable = idb2 * stepDb2 + j / 2;
        float Y = data[iStep + j];
        float U = -128 + data[(int)(size + variable)];
        float V = -128 + data[(int)(sizeMb1d25 + variable)];
        float R = Y + 1.402 * V;
        float G = Y - 0.344 * U - 0.714 * V;
        float B = Y + 1.772 * U;
        // branchless clamping to [0, 255]
        R = R * !(R < 0);
        G = G * !(G < 0);
        B = B * !(B < 0);
        R = R * (!(R > 255)) + 255 * (R > 255);
        G = G * (!(G > 255)) + 255 * (G > 255);
        B = B * (!(B > 255)) + 255 * (B > 255);
        cvSet2D(img1, i, j, cvScalar(B, G, R));
    }
}
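For what it's worth, most of the remaining time is likely spent in cvSet2D, which is a generic accessor with per-call overhead. Here is a sketch of the same loop writing straight into the destination buffer instead (assuming the same YUV420P plane layout as above; size*1.25 is rewritten as size + size/4):
for (int i = 0; i < origImage->height; i++)
{
    uchar* dstRow = (uchar*)(img1->imageData + i * img1->widthStep);
    const uchar* yRow = data + i * step;
    const uchar* uRow = data + size + (i / 2) * (step / 2);
    const uchar* vRow = data + size + size / 4 + (i / 2) * (step / 2);
    for (int j = 0; j < origImage->width; j++)
    {
        float Y = yRow[j];
        float U = uRow[j / 2] - 128.0f;
        float V = vRow[j / 2] - 128.0f;
        int R = (int)(Y + 1.402f * V);
        int G = (int)(Y - 0.344f * U - 0.714f * V);
        int B = (int)(Y + 1.772f * U);
        // clamp and store interleaved BGR directly
        dstRow[3 * j + 0] = (uchar)(B < 0 ? 0 : (B > 255 ? 255 : B));
        dstRow[3 * j + 1] = (uchar)(G < 0 ? 0 : (G > 255 ? 255 : G));
        dstRow[3 * j + 2] = (uchar)(R < 0 ? 0 : (R > 255 ? 255 : R));
    }
}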
I'm trying to write an OpenCV program that counts fish eggs for someone else. It currently takes their uploaded image, normalizes, blurs, thresholds, dilates, distance transforms, thresholds again, and then finds contours (like in a typical watershed tutorial).
The problem I'm having is that the lighting conditions can vary quite a bit, so even with my adaptive threshold values, the accuracy of the algorithm also varies wildly. If there's a gradient brightness across the image it seems to do especially poorly. Sometimes the objects are very bright against the background and other times they're almost the same luminosity. Are there any particularly effective ways to find objects in varying light conditions?
Sample images:
Because anything larger than 100 pixels isn't relevant to your image, I would construct a Fourier band-pass filter to remove those structures.
Here is an implementation I use, based on the one in ImageJ. In this implementation the input image is mirror-padded to reduce edge artifacts.
static void GenerateBandFilter(thrust::host_vector<float>& filter, const BandPassSettings& band, const FrameSize& frame)
{
    //From https://imagej.nih.gov/ij/plugins/fft-filter.html
    if (band.do_band_pass == false)
    {
        return;
    }
    if (frame.width != frame.height)
    {
        throw std::runtime_error("Frame height and width should be the same");
    }
    auto maxN = static_cast<int>(std::max(frame.width, frame.height));//todo make sure they are the same
    auto filterLargeC = 2.0f*band.max_dx / maxN;
    auto filterSmallC = 2.0f*band.min_dx / maxN;
    auto scaleLargeC = filterLargeC*filterLargeC;
    auto scaleSmallC = filterSmallC*filterSmallC;
    auto filterLargeR = 2.0f*band.max_dy / maxN;
    auto filterSmallR = 2.0f*band.min_dy / maxN;
    auto scaleLargeR = filterLargeR*filterLargeR;
    auto scaleSmallR = filterSmallR*filterSmallR;
    // loop over rows
    for (auto j = 1; j < maxN / 2; j++)
    {
        auto row = j * maxN;
        auto backrow = (maxN - j)*maxN;
        auto rowFactLarge = exp(-(j*j) * scaleLargeR);
        auto rowFactSmall = exp(-(j*j) * scaleSmallR);
        // loop over columns
        for (auto col = 1; col < maxN / 2; col++)
        {
            auto backcol = maxN - col;
            auto colFactLarge = exp(-(col*col) * scaleLargeC);
            auto colFactSmall = exp(-(col*col) * scaleSmallC);
            auto factor = (((1 - rowFactLarge*colFactLarge) * rowFactSmall*colFactSmall));
            filter[col + row] *= factor;
            filter[col + backrow] *= factor;
            filter[backcol + row] *= factor;
            filter[backcol + backrow] *= factor;
        }
    }
    auto fixy = [&](float t){return isinf(t) ? 0 : t; };
    auto rowmid = maxN * (maxN / 2);
    auto rowFactLarge = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleLargeR));
    auto rowFactSmall = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleSmallR));
    filter[maxN / 2] *= ((1 - rowFactLarge) * rowFactSmall);
    filter[rowmid] *= ((1 - rowFactLarge) * rowFactSmall);
    filter[maxN / 2 + rowmid] *= ((1 - rowFactLarge*rowFactLarge) * rowFactSmall*rowFactSmall);
    rowFactLarge = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleLargeR));
    rowFactSmall = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleSmallR));
    for (auto col = 1; col < maxN / 2; col++){
        auto backcol = maxN - col;
        auto colFactLarge = exp(-(col*col) * scaleLargeC);
        auto colFactSmall = exp(-(col*col) * scaleSmallC);
        filter[col] *= ((1 - colFactLarge) * colFactSmall);
        filter[backcol] *= ((1 - colFactLarge) * colFactSmall);
        filter[col + rowmid] *= ((1 - colFactLarge*rowFactLarge) * colFactSmall*rowFactSmall);
        filter[backcol + rowmid] *= ((1 - colFactLarge*rowFactLarge) * colFactSmall*rowFactSmall);
    }
    // loop along column 0 and expanded_width/2
    auto colFactLarge = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleLargeC));
    auto colFactSmall = fixy(exp(-(maxN / 2)*(maxN / 2) * scaleSmallC));
    for (auto j = 1; j < maxN / 2; j++) {
        auto row = j * maxN;
        auto backrow = (maxN - j)*maxN;
        rowFactLarge = exp(-(j*j) * scaleLargeC);
        rowFactSmall = exp(-(j*j) * scaleSmallC);
        filter[row] *= ((1 - rowFactLarge) * rowFactSmall);
        filter[backrow] *= ((1 - rowFactLarge) * rowFactSmall);
        filter[row + maxN / 2] *= ((1 - rowFactLarge*colFactLarge) * rowFactSmall*colFactSmall);
        filter[backrow + maxN / 2] *= ((1 - rowFactLarge*colFactLarge) * rowFactSmall*colFactSmall);
    }
    filter[0] = (band.remove_dc) ? 0 : filter[0];
}
You can poke around my code that uses it here: https://github.com/kandel3/DPM_PhaseRetrieval
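The filter itself is just a per-frequency gain mask, so the apply step is independent of the thrust/CUDA plumbing. As a rough illustration (my own sketch, not code from that repo), here is how such a mask could be applied with OpenCV's DFT, assuming mask32f is a square CV_32F image of the same size as the input, laid out like cv::dft's non-shifted spectrum (DC at index 0, exactly as the filter above expects):
#include <opencv2/opencv.hpp>
#include <vector>

// Multiply the image spectrum by a real-valued frequency mask, then invert.
cv::Mat applyBandPass(const cv::Mat& gray8u, const cv::Mat& mask32f)
{
    cv::Mat f, spectrum;
    gray8u.convertTo(f, CV_32F);
    cv::dft(f, spectrum, cv::DFT_COMPLEX_OUTPUT);   // DC at (0, 0), not centered
    std::vector<cv::Mat> planes;
    cv::split(spectrum, planes);                    // planes[0] real, planes[1] imaginary
    planes[0] = planes[0].mul(mask32f);
    planes[1] = planes[1].mul(mask32f);
    cv::merge(planes, spectrum);
    cv::Mat out;
    cv::dft(spectrum, out, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
    out.convertTo(out, CV_8U);                      // saturates back to 0..255
    return out;
}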
Calculate the alpha and beta values of an image:
double alpha = 1.0; // contrast gain (assumed known here; see below)
int beta = 0;       // brightness offset (assumed known here; see below)
image = cv::imread("F:\\Dilated.jpg"); // note the escaped backslash
for (int y = 0; y < image.rows; y++)
{
    for (int x = 0; x < image.cols; x++)
    {
        for (int c = 0; c < 3; c++)
        {
            image.at<Vec3b>(y, x)[c] =
                saturate_cast<uchar>(alpha * (image.at<Vec3b>(y, x)[c]) + beta);
        }
    }
}
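The loop above applies alpha and beta but never computes them. One common way to calculate them is a linear contrast stretch: choose alpha and beta so the darkest pixel maps to 0 and the brightest to 255. A minimal sketch under that assumption (the auto-stretch policy is mine, not implied by the snippet):
#include <opencv2/opencv.hpp>

// Derive alpha (gain) and beta (offset) so that [minVal, maxVal] stretches to [0, 255].
void calcAlphaBeta(const cv::Mat& image, double& alpha, double& beta)
{
    cv::Mat gray;
    cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); // stretch based on luminance
    double minVal, maxVal;
    cv::minMaxLoc(gray, &minVal, &maxVal);
    alpha = (maxVal > minVal) ? 255.0 / (maxVal - minVal) : 1.0;
    beta = -minVal * alpha;
}
With those values in hand, image.convertTo(result, -1, alpha, beta) applies the same per-pixel transform as the loop above in a single call.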