I have the following problem. I want to use this equation in OpenCV, where X = blue channel, Y = green channel, and Z = red channel:
x = X / (X + Y + Z);
y = Y / (X + Y + Z);
z = Z / (X + Y + Z);
But when I run my code I get a window with 3 images in 1. First I extract the luminance, because I want the chrominance of the red channel. Can anybody help me?
Here is my code:
Mat source = cv::imread("Image.bmp");
imshow("Original", source);
Mat src(source.rows, source.cols, CV_32FC3);
normalize(source, src, 0, 1, CV_MINMAX, CV_32FC1);
Mat Lum = Mat::zeros(src.rows, src.cols, CV_32FC1);
Mat Crom = Mat::zeros(src.rows, src.cols, CV_32FC3);
for (size_t i = 0; i < src.rows; i++)
{
    for (size_t j = 0; j < src.cols; j++)
    {
        Vec3f pixel = src.at<Vec3f>(i, j);
        float B = pixel[0];
        float G = pixel[1];
        float R = pixel[2];
        Lum.at<float>(i, j) = (B + G + R) / 3;
    }
}
imshow("Lum", Lum);
/// Code for the chrominance
for (size_t i = 0; i < Lum.rows; i++)
{
    for (size_t j = 0; j < Lum.cols; j++)
    {
        Vec3f pixel = src.at<Vec3f>(i, j);
        float B = pixel[0];
        float G = pixel[1];
        float R = pixel[2];
        Crom.at<Vec3f>(i, j)[0] = Lum.at<Vec3f>(i, j)[0] / (Lum.at<Vec3f>(i, j)[0] + Lum.at<Vec3f>(i, j)[1] + Lum.at<Vec3f>(i, j)[2]);
        Crom.at<Vec3f>(i, j)[1] = Lum.at<Vec3f>(i, j)[1] / (Lum.at<Vec3f>(i, j)[0] + Lum.at<Vec3f>(i, j)[1] + Lum.at<Vec3f>(i, j)[2]);
        Crom.at<Vec3f>(i, j)[2] = Lum.at<Vec3f>(i, j)[2] / (Lum.at<Vec3f>(i, j)[0] + Lum.at<Vec3f>(i, j)[1] + Lum.at<Vec3f>(i, j)[2]);
    }
}
imshow("Cromancia", Crom);
This is how I managed to use a Sobel kernel on a grayscale image. However, I don't actually get how to modify it for a color image.
void Soble()
{
    Mat img;
    int w = 3;
    int k = w / 2;
    char fname[MAX_PATH];
    openFileDlg(fname);
    img = imread(fname, CV_LOAD_IMAGE_GRAYSCALE);
    gaussianFiltering(img);
    Mat destinationImg = img.clone();
    float sobelY[3][3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
    float sobelX[3][3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
    for (int i = k; i < img.rows - k; i++)
    {
        for (int j = k; j < img.cols - k; j++)
        {
            float Gx = 0, Gy = 0;
            for (int l = 0; l < w; l++)
            {
                for (int p = 0; p < w; p++)
                {
                    Gx += img.at<uchar>(i + l - k, j + p - k)*sobelX[l][p];
                    Gy += img.at<uchar>(i + l - k, j + p - k)*sobelY[l][p];
                }
            }
            destinationImg.at<uchar>(i, j) = sqrt(Gx*Gx + Gy*Gy) / (4 * sqrt(2));
        }
    }
    imshow("Intermediar", destinationImg);
    imshow("Initial", img);
    waitKey(0);
}
I thought of using each RGB channel, but it does not work and even gives some errors.
float GxR = 0, GyR = 0;
float GxG = 0, GyG = 0;
float GxB = 0, GyB = 0;
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        GxR += img.at<Vec3b>[0](i + l - k, j + p - k)*sobelX[l][p];
        GxG += img.at<Vec3b>[1](i + l - k, j + p - k)*sobelX[l][p];
        GxB += img.at<Vec3b>[2](i + l - k, j + p - k)*sobelX[l][p];
        GyR += img.at<Vec3b>[0](i + l - k, j + p - k)*sobelY[l][p];
        GyG += img.at<Vec3b>[1](i + l - k, j + p - k)*sobelY[l][p];
        GyB += img.at<Vec3b>[2](i + l - k, j + p - k)*sobelY[l][p];
    }
}
destinationImg.at<Vec3b>[0](i, j) = sqrt(GxR*GxR + GyR * GyR) / (4 * sqrt(2));
destinationImg.at<Vec3b>[1](i, j) = sqrt(GxG*GxG + GyB * GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>[2](i, j) = sqrt(GxG*GxG + GyG * GyG) / (4 * sqrt(2));
Can you please explain how this code must be rewritten?
You access the image data the wrong way.
destinationImg.at<Vec3b>[0](i, j)
destinationImg is a Mat of type Vec3b, i.e. a 2D array of three-dimensional vectors. Your [] operator is in the wrong place. The subscript error message tells you that you are applying that operator to something that is neither a pointer nor an array, which is not possible, and you get the other error message because the operator sits where the (i, j) is expected.
First you have to get one of these vectors; then you can access its elements:
destinationImg.at<Vec3b>(i, j) gives you the vector at (i, j).
destinationImg.at<Vec3b>(i, j)[0] gives you the first element of that vector.
Example from the OpenCV documentation:
Vec3b intensity = img.at<Vec3b>(y, x);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
http://docs.opencv.org/2.4.13.2/doc/user_guide/ug_mat.html
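Applied to the loops in the question, a corrected sketch (index the pixel first, then the channel; OpenCV stores channels in B, G, R order) could look like this:

for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        Vec3b px = img.at<Vec3b>(i + l - k, j + p - k);
        GxB += px[0] * sobelX[l][p];
        GxG += px[1] * sobelX[l][p];
        GxR += px[2] * sobelX[l][p];
        GyB += px[0] * sobelY[l][p];
        GyG += px[1] * sobelY[l][p];
        GyR += px[2] * sobelY[l][p];
    }
}
// img must be loaded in color and destinationImg must be CV_8UC3 for this to work.
destinationImg.at<Vec3b>(i, j)[0] = sqrt(GxB*GxB + GyB*GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>(i, j)[1] = sqrt(GxG*GxG + GyG*GyG) / (4 * sqrt(2));
destinationImg.at<Vec3b>(i, j)[2] = sqrt(GxR*GxR + GyR*GyR) / (4 * sqrt(2));

For reference, OpenCV's built-in cv::Sobel also accepts multi-channel images and filters each channel independently.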
How can I copy the red channel values of a Mat image to the blue channel using OpenCV? Thanks in advance.
cv::Mat Image = cv::imread("image.jpg");
uint8_t* orig_ptr = (uint8_t*)Image.data;
for (int y = 0; y < Image.rows; y++)
{
    for (int x = 0; x < Image.cols; x++)
    {
        int R = orig_ptr[x * 3 + y*Image.step + 2]; // read red (channel 2)
        orig_ptr[x * 3 + y*Image.step + 1] = R;     // write it into green (channel 1)
        orig_ptr[x * 3 + y*Image.step] = R;         // write it into blue (channel 0)
    }
}
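A shorter alternative, as a sketch using split/merge (note that the loop above also writes red into the green channel; the version below changes only blue):

#include <opencv2/opencv.hpp>
#include <vector>

cv::Mat Image = cv::imread("image.jpg");
std::vector<cv::Mat> channels;
cv::split(Image, channels);      // channels[0] = blue, [1] = green, [2] = red
channels[2].copyTo(channels[0]); // blue := red
cv::merge(channels, Image);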
I have written a function to convert an image from YUV420P to RGB, but it takes 30 milliseconds to convert one image (size: 1280 x 720), while the ffmpeg conversion function takes only 2 milliseconds for the same image. What is the problem with my code? How can I optimize it?
My code is given below:
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
for (int i = 0; i < origImage->height; i++)
{
    for (int j = 0; j < origImage->width; j++)
    {
        float Y = data[i*step + j];
        float U = data[(int)(size + (i/2)*(step/2) + j/2)];
        float V = data[(int)(size*1.25 + (i/2)*(step/2) + j/2)];
        float R = Y + 1.402 * (V - 128);
        float G = Y - 0.344 * (U - 128) - 0.714 * (V - 128);
        float B = Y + 1.772 * (U - 128);
        if (R < 0) { R = 0; }
        if (G < 0) { G = 0; }
        if (B < 0) { B = 0; }
        if (R > 255) { R = 255; }
        if (G > 255) { G = 255; }
        if (B > 255) { B = 255; }
        cvSet2D(img1, i, j, cvScalar(B, G, R));
    }
}
Here, try this (it should reduce the time to about 25 milliseconds):
int step = origImage->widthStep;
uchar *data = (uchar *)origImage->imageData;
int size = origImage->width * origImage->height;
IplImage* img1 = cvCreateImage(cvGetSize(origImage), IPL_DEPTH_8U, 3);
// Hoist loop-invariant subexpressions out of the loops.
int stepDb2 = step / 2;
float sizeMb1d25 = size * 1.25;
int origImagePTheight = origImage->height;
int origImagePTwidth = origImage->width;
for (int i = 0; i < origImagePTheight; i++)
{
    float idb2 = i / 2;
    int iStep = i * step;
    for (int j = 0; j < origImagePTwidth; j++)
    {
        float variable = idb2*stepDb2 + j/2;
        float Y = data[iStep + j];
        float U = -128 + data[(int)(size + variable)];
        float V = -128 + data[(int)(sizeMb1d25 + variable)];
        float R = Y + 1.402 * V;
        float G = Y - 0.344 * U - 0.714 * V;
        float B = Y + 1.772 * U;
        // Branchless clamping to [0, 255].
        R = R * !(R < 0);
        G = G * !(G < 0);
        B = B * !(B < 0);
        R = R * (!(R > 255)) + 255 * (R > 255);
        G = G * (!(G > 255)) + 255 * (G > 255);
        B = B * (!(B > 255)) + 255 * (B > 255);
        cvSet2D(img1, i, j, cvScalar(B, G, R));
    }
}
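Beyond these micro-optimizations, most of the remaining time is probably spent in cvSet2D, which performs a per-pixel scalar conversion. If the input is a plain contiguous I420 buffer, OpenCV's built-in converter should get much closer to ffmpeg's speed. A sketch, assuming data points to width*height luma bytes followed by the two quarter-size chroma planes with no row padding (i.e. step == width):

cv::Mat yuv(origImage->height * 3 / 2, origImage->width, CV_8UC1, data);
cv::Mat bgr;
cv::cvtColor(yuv, bgr, cv::COLOR_YUV2BGR_I420); // single call into OpenCV's optimized converter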
I am going to process a region of pixels defined by a RotatedRect in OpenCV. Although I know the rectangle's center, size, and angle, I am not sure how to store all the x and y coordinates of this region in another matrix. I have checked some other posts; some suggest rotating the image, but that will crop part of it. Can you please help me out?
Try this (not sure I understand the problem perfectly):
#include "opencv2/opencv.hpp"
#include <vector>
using namespace std;
using namespace cv;
//----------------------------------------------------------
//
//----------------------------------------------------------
void getQuadrangleSubPix_8u32f_CnR( const uchar* src, size_t src_step, Size src_size,
                                    float* dst, size_t dst_step, Size win_size,
                                    const double *matrix, int cn )
{
    int x, y, k;
    double A11 = matrix[0], A12 = matrix[1], A13 = matrix[2];
    double A21 = matrix[3], A22 = matrix[4], A23 = matrix[5];

    src_step /= sizeof(src[0]);
    dst_step /= sizeof(dst[0]);

    for( y = 0; y < win_size.height; y++, dst += dst_step )
    {
        double xs = A12*y + A13;
        double ys = A22*y + A23;
        double xe = A11*(win_size.width-1) + A12*y + A13;
        double ye = A21*(win_size.width-1) + A22*y + A23;

        if( (unsigned)(cvFloor(xs)-1) < (unsigned)(src_size.width - 3) &&
            (unsigned)(cvFloor(ys)-1) < (unsigned)(src_size.height - 3) &&
            (unsigned)(cvFloor(xe)-1) < (unsigned)(src_size.width - 3) &&
            (unsigned)(cvFloor(ye)-1) < (unsigned)(src_size.height - 3) )
        {
            for( x = 0; x < win_size.width; x++ )
            {
                int ixs = cvFloor( xs );
                int iys = cvFloor( ys );
                const uchar *ptr = src + src_step*iys;
                float a = (float)(xs - ixs), b = (float)(ys - iys), a1 = 1.f - a, b1 = 1.f - b;
                float w00 = a1*b1, w01 = a*b1, w10 = a1*b, w11 = a*b;
                xs += A11;
                ys += A21;

                if( cn == 1 )
                {
                    ptr += ixs;
                    dst[x] = ptr[0]*w00 + ptr[1]*w01 + ptr[src_step]*w10 + ptr[src_step+1]*w11;
                }
                else if( cn == 3 )
                {
                    ptr += ixs*3;
                    float t0 = ptr[0]*w00 + ptr[3]*w01 + ptr[src_step]*w10 + ptr[src_step+3]*w11;
                    float t1 = ptr[1]*w00 + ptr[4]*w01 + ptr[src_step+1]*w10 + ptr[src_step+4]*w11;
                    float t2 = ptr[2]*w00 + ptr[5]*w01 + ptr[src_step+2]*w10 + ptr[src_step+5]*w11;
                    dst[x*3] = t0;
                    dst[x*3+1] = t1;
                    dst[x*3+2] = t2;
                }
                else
                {
                    ptr += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn+k] = ptr[k]*w00 + ptr[k+cn]*w01 +
                                      ptr[src_step+k]*w10 + ptr[src_step+k+cn]*w11;
                }
            }
        }
        else
        {
            for( x = 0; x < win_size.width; x++ )
            {
                int ixs = cvFloor( xs ), iys = cvFloor( ys );
                float a = (float)(xs - ixs), b = (float)(ys - iys), a1 = 1.f - a, b1 = 1.f - b;
                float w00 = a1*b1, w01 = a*b1, w10 = a1*b, w11 = a*b;
                const uchar *ptr0, *ptr1;
                xs += A11; ys += A21;

                if( (unsigned)iys < (unsigned)(src_size.height-1) )
                    ptr0 = src + src_step*iys, ptr1 = ptr0 + src_step;
                else
                    ptr0 = ptr1 = src + (iys < 0 ? 0 : src_size.height-1)*src_step;

                if( (unsigned)ixs < (unsigned)(src_size.width-1) )
                {
                    ptr0 += ixs*cn; ptr1 += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn + k] = ptr0[k]*w00 + ptr0[k+cn]*w01 + ptr1[k]*w10 + ptr1[k+cn]*w11;
                }
                else
                {
                    ixs = ixs < 0 ? 0 : src_size.width - 1;
                    ptr0 += ixs*cn; ptr1 += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn + k] = ptr0[k]*b1 + ptr1[k]*b;
                }
            }
        }
    }
}
//----------------------------------------------------------
//
//----------------------------------------------------------
void myGetQuadrangleSubPix(const Mat& src, Mat& dst, Mat& m )
{
    CV_Assert( src.channels() == dst.channels() );

    cv::Size win_size = dst.size();
    double matrix[6];
    cv::Mat M(2, 3, CV_64F, matrix);
    m.convertTo(M, CV_64F);
    double dx = (win_size.width - 1)*0.5;
    double dy = (win_size.height - 1)*0.5;
    matrix[2] -= matrix[0]*dx + matrix[1]*dy;
    matrix[5] -= matrix[3]*dx + matrix[4]*dy;

    if( src.depth() == CV_8U && dst.depth() == CV_32F )
        getQuadrangleSubPix_8u32f_CnR( src.data, src.step, src.size(),
                                       (float*)dst.data, dst.step, dst.size(),
                                       matrix, src.channels());
    else
    {
        CV_Assert( src.depth() == dst.depth() );
        cv::warpAffine(src, dst, M, dst.size(),
                       cv::INTER_LINEAR + cv::WARP_INVERSE_MAP,
                       cv::BORDER_REPLICATE);
    }
}
//----------------------------------------------------------
//
//----------------------------------------------------------
void getRotRectImg(cv::RotatedRect rr, Mat &img, Mat& dst)
{
    Mat m(2, 3, CV_64FC1);
    float ang = rr.angle*CV_PI/180.0;
    m.at<double>(0,0) = cos(ang);
    m.at<double>(1,0) = sin(ang);
    m.at<double>(0,1) = -sin(ang);
    m.at<double>(1,1) = cos(ang);
    m.at<double>(0,2) = rr.center.x;
    m.at<double>(1,2) = rr.center.y;
    myGetQuadrangleSubPix(img, dst, m);
}
//----------------------------------------------------------
//
//----------------------------------------------------------
int main(int argc, char* argv[])
{
    Mat img = imread("D:\\ImagesForTest\\lena.jpg");
    img.convertTo(img, CV_32FC3, 1.0/255.0);
    cv::RotatedRect rr(cv::Point2f(200,200), Size(50,50), -30);
    // rotated rectangle
    Point2f rect_points[4];
    rr.points( rect_points );
    for( int j = 0; j < 4; j++ )
    {
        line( img, rect_points[j], rect_points[(j+1)%4], Scalar(0,1,0), 1, CV_AA );
    }
    imshow("colImg", img);
    Mat dst(rr.size, CV_32FC3);
    getRotRectImg(rr, img, dst);
    imshow("rotImg", dst);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
The result:
An alternative implementation with OpenCV's warpAffine:
Mat getAffineTransformForRotatedRect(RotatedRect rr) {
    float angle = rr.angle * M_PI / 180.0;
    // angle += M_PI; // you may want to rotate it upside down
    float sinA = sin(angle), cosA = cos(angle);
    float data[6] = {
        cosA, sinA, rr.size.width/2.0f - cosA * rr.center.x - sinA * rr.center.y,
        -sinA, cosA, rr.size.height/2.0f - cosA * rr.center.y + sinA * rr.center.x };
    Mat rot_mat(2, 3, CV_32FC1, data);
    return rot_mat.clone();
}

Mat getRotatedRectImg(const cv::Mat &mat, RotatedRect rr) {
    Mat M, result;
    M = getAffineTransformForRotatedRect(rr);
    warpAffine(mat, result, M, rr.size, INTER_CUBIC);
    return result;
}
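Usage would be the same as in the first answer; for example (hypothetical image path and rectangle):

cv::Mat img = cv::imread("image.jpg");
cv::RotatedRect rr(cv::Point2f(200, 200), cv::Size(50, 50), -30);
cv::Mat patch = getRotatedRectImg(img, rr); // upright 50x50 crop of the rotated region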
I want to port the following code to C++:
gaussFilter = fspecial('gaussian', 2*neighSize+1, 0.5*neighSize);
pointFeature = imfilter(pointFeature, gaussFilter, 'symmetric');
where pointFeature is a [height, width, 24] array.
I tried to use filter2D, but it only supports 2D arrays, so I want to know if there are functions in OpenCV that can filter a multi-dimensional array.
You can use separable kernels to build a filter of any dimensionality.
If you are using OpenCV, you could try this for a 3-dimensional MatND:
void Smooth3DHist(cv::MatND &hist, const int& kernDimension)
{
    assert(hist.dims == 3);
    int x_size = hist.size[0];
    int y_size = hist.size[1];
    int z_size = hist.size[2];
    int xy_size = x_size*y_size;
    cv::Mat kernel = cv::getGaussianKernel(kernDimension, -1, CV_32F);

    // Filter XY dimensions for every Z
    for (int z = 0; z < z_size; z++)
    {
        float *ind = (float*)hist.data + z * xy_size; // sub-matrix pointer
        cv::Mat subMatrix(2, hist.size, CV_32F, ind);
        cv::sepFilter2D(subMatrix, subMatrix, CV_32F, kernel.t(), kernel, Point(-1,-1), 0.0, cv::BORDER_REPLICATE);
    }

    // Filter Z dimension
    float* kernGauss = (float *)kernel.data;
    unsigned kernSize = kernel.total();
    int kernMargin = (kernSize - 1)/2;
    float* lineBuffer = new float[z_size + 2*kernMargin];

    for (int y = 0; y < y_size; y++)
    {
        for (int x = 0; x < x_size; x++)
        {
            // Copy along Z dimension into a line buffer
            float* z_ptr = (float*)hist.data + y * x_size + x; // same as hist.ptr<float>(0, y, x)
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                lineBuffer[z + kernMargin] = *z_ptr;
            }

            // Replicate borders
            for (int m = 0; m < kernMargin; m++)
            {
                lineBuffer[m] = lineBuffer[kernMargin]; // replicate left side
                lineBuffer[z_size + 2*kernMargin - 1 - m] = lineBuffer[kernMargin + z_size - 1]; // replicate right side
            }

            // Filter the line buffer with a 1D convolution
            z_ptr = (float*)hist.data + y * x_size + x;
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                *z_ptr = 0.0f;
                for (unsigned k = 0; k < kernSize; k++)
                {
                    *z_ptr += lineBuffer[z+k]*kernGauss[k];
                }
            }
        }
    }

    delete [] lineBuffer;
}
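For the original fspecial/imfilter case, where pointFeature is a [height, width, 24] feature array, a simpler route may be to keep the 24 feature planes as separate 2D Mats and blur each one; MATLAB's 'symmetric' padding corresponds to cv::BORDER_REFLECT in OpenCV. A sketch, with the plane-per-feature layout being my assumption:

#include <opencv2/opencv.hpp>
#include <vector>

void smoothPointFeature(std::vector<cv::Mat>& planes, int neighSize)
{
    int ksize = 2 * neighSize + 1;  // fspecial('gaussian', 2*neighSize+1, ...)
    double sigma = 0.5 * neighSize; // ..., 0.5*neighSize)
    for (auto& plane : planes)
        cv::GaussianBlur(plane, plane, cv::Size(ksize, ksize), sigma, sigma,
                         cv::BORDER_REFLECT); // 'symmetric' in imfilter terms
}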