cv::getPerspectiveTransform throws Error after cv::eigen2cv - c++

My Code:
cv::Mat
getPerspectiveTransform(Eigen::MatrixXd quadrangle, Eigen::MatrixXd warpedQuadrangle) {
cv::Mat transMat;
cv::Mat quad(4,2,CV_32FC1);
cv::Mat warpedQuad(4,2,CV_32FC1);
cv::eigen2cv(quadrangle,quad);
cv::eigen2cv(warpedQuadrangle,warpedQuad);
std::cout << "[ ] quadrangle in cv::Mat " << quad << std::endl;
transMat = cv::getPerspectiveTransform(quad,warpedQuad);
return transMat;
}
Error:
C++ exception with description "OpenCV(4.6.0) /home/ci/opencv/modules/imgproc/src/imgwarp.cpp:3392: error: (-215:Assertion failed) src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4 in function 'getPerspectiveTransform'
Suspected Issue:
eigen2cv is converting my CV_32FC1 matrices to CV_64F. getPerspectiveTransform is expecting CV_32F as its input.
What should be the ideal solution to this?

eigen2cv sets the type of the output matrix according to the type of the input matrix, as can be seen from its source. So, e.g., if your Eigen matrices use 64-bit floats, the output Mats will have CV_64F depth. In this case, the simplest solution is to convert the output matrices to CV_32F using Mat::convertTo (documentation):
Mat quadF, warpedQuadF;
quad.convertTo(quadF, CV_32F);
warpedQuad.convertTo(warpedQuadF, CV_32F);
transMat = cv::getPerspectiveTransform(quadF, warpedQuadF);
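Applied to the function from the question, the whole thing would look roughly like this (a sketch, not tested):
// needs <opencv2/imgproc.hpp> and <opencv2/core/eigen.hpp>
cv::Mat
getPerspectiveTransform(Eigen::MatrixXd quadrangle, Eigen::MatrixXd warpedQuadrangle) {
    cv::Mat quad, warpedQuad;
    cv::eigen2cv(quadrangle, quad);             // becomes CV_64F, because MatrixXd holds doubles
    cv::eigen2cv(warpedQuadrangle, warpedQuad);
    cv::Mat quadF, warpedQuadF;
    quad.convertTo(quadF, CV_32F);
    warpedQuad.convertTo(warpedQuadF, CV_32F);
    return cv::getPerspectiveTransform(quadF, warpedQuadF);
}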
Since the matrices are pretty small, this conversion is unlikely to be a performance issue, but it's possible to avoid it by rewriting the 2 overloads of the getPerspectiveTransform function (source1 and source2) to work with 64-bit floats. The second overload just delegates to the first, and the first works with doubles internally anyway, so it's pretty trivial:
cv::Mat getPerspectiveTransform64(const Point2d src[], const Point2d dst[], int solveMethod)
{
    CV_INSTRUMENT_REGION();
    Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.ptr());
    double a[8][8], b[8];
    Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);
    for( int i = 0; i < 4; ++i )
    {
        a[i][0] = a[i+4][3] = src[i].x;
        a[i][1] = a[i+4][4] = src[i].y;
        a[i][2] = a[i+4][5] = 1;
        a[i][3] = a[i][4] = a[i][5] = a[i+4][0] = a[i+4][1] = a[i+4][2] = 0;
        a[i][6] = -src[i].x*dst[i].x;
        a[i][7] = -src[i].y*dst[i].x;
        a[i+4][6] = -src[i].x*dst[i].y;
        a[i+4][7] = -src[i].y*dst[i].y;
        b[i] = dst[i].x;
        b[i+4] = dst[i].y;
    }
    solve(A, B, X, solveMethod);
    M.ptr<double>()[8] = 1.;
    return M;
}
cv::Mat getPerspectiveTransform64(InputArray _src, InputArray _dst, int solveMethod)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_64F) == 4 && dst.checkVector(2, CV_64F) == 4);
    return getPerspectiveTransform64((const Point2d*)src.data, (const Point2d*)dst.data, solveMethod);
}
This can now be used directly without additional conversion:
transMat = getPerspectiveTransform64(quad, warpedQuad);
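Alternatively (a sketch, assuming you are free to choose the Eigen scalar type upstream): if the quadrangles are stored as single-precision Eigen matrices, eigen2cv already produces CV_32F data and neither the conversion nor the rewrite is needed:
// needs <opencv2/imgproc.hpp> and <opencv2/core/eigen.hpp>
cv::Mat
getPerspectiveTransformF(const Eigen::MatrixXf &quadrangle, const Eigen::MatrixXf &warpedQuadrangle) {
    // eigen2cv picks the output depth from the Eigen scalar type, so float
    // matrices give 4x2 CV_32F Mats, which getPerspectiveTransform accepts.
    cv::Mat quad, warpedQuad;
    cv::eigen2cv(quadrangle, quad);
    cv::eigen2cv(warpedQuadrangle, warpedQuad);
    return cv::getPerspectiveTransform(quad, warpedQuad);
}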

Related

Convolution using FFT gives a bad result

I'm trying to convolve an image using the FFT. I use OpenCV, so the images are in Mat containers. I convert the color image to a gray image, then add a second channel for the imaginary numbers that is all zeros. Then I take this 2-channel Mat and convolve it with Prewitt's kernel. I get a result very different from the result I get when I use the normal convolution algorithm. The left image is the output I get using the FFT and the right image is the output of normal convolution.
Below is the pseudo-algorithm of how I do the operation:
1. Convert the image Mat and kernel Mat to complex Mats by adding a second channel (result Mat type is CV_32FC2)
2. Assign all Mat elements to complex vectors
3. Zero pad the vectors to the same next power of 2
4. FFT the vectors
5. Signal multiply both vectors elementwise and assign the result to the result vector
6. Inverse FFT the result vector
7. Convert the result vector to Mat
I think the FFT algorithm is not the problem, because when I take an image, FFT it, then inverse FFT it, I get the original image back just fine. But I could be wrong. So here is the FFT algorithm. Notice how there are two of them; I use the second one. I also tried other FFT algorithms and they all output the same. FFT'ing and IFFT'ing the same image only skips the signal multiplication step above, so I think that's where the problem is. Here is the code of the operation:
std::vector<cf> signalMultiplication(std::vector<cf> lh, std::vector<cf> rh) {
std::vector<cf> imVec = lh, kerVec = rh, resultVec;
resultVec.resize(imVec.size());
std::transform(imVec.begin(), imVec.end(), kerVec.begin(), resultVec.begin(), std::multiplies<cf>());
return resultVec;
}
I tried multiplying them using a for loop but the result was the same. I don't know the problem and I can't type the whole code here since it is too long, so tell me where you think the problem is and I'll give the code of that part.
@Paul, below is the main body of the code:
cv::Mat convolution2D(cv::Mat image, cv::Mat kernel) {
    cv::Mat imMat, kerMat;
    imMat = convertToComplexMat(image);
    kerMat = convertToComplexMat(kernel);
    std::vector<cf> imVec, kerVec, resultVec;
    imVec = matElementsToVector<cf>(imMat);
    kerVec = matElementsToVector<cf>(kerMat);
    float power = log2f(imVec.size());
    if (abs(power - (int)power) == 0)
        power++;
    else
        power = ceil(power);
    zeroPadding(imVec, power);
    zeroPadding(kerVec, power);
    //FFT code I linked takes valarray as argument so I convert vectors to valarray and back
    std::valarray<cf> imCArr(imVec.data(), imVec.size());
    std::valarray<cf> kerCArr(kerVec.data(), kerVec.size());
    fftRosetta(imCArr);
    fftRosetta(kerCArr);
    imVec.assign(std::begin(imCArr), std::end(imCArr));
    kerVec.assign(std::begin(kerCArr), std::end(kerCArr));
    resultVec = signalMultiplication(imVec, kerVec);
    std::valarray<cf> resCArr(resultVec.data(), resultVec.size());
    ifftRosetta(resCArr);
    resultVec.assign(std::begin(resCArr), std::end(resCArr));
    cv::Mat resultMat;
    resultMat = vectorToMatElementsRowMajor(resultVec, imMat.rows, imMat.cols, imMat.type());
    std::vector<cv::Mat> matVec;
    cv::split(resultMat, matVec);
    return matVec[0];
}
These are the custom functions:
convertToComplexMat, matElementsToVector, zeroPadding, fftRosetta, ifftRosetta, signalMultiplication, vectorToMatElementsRowMajor
signalMultiplication is posted above and fftRosetta and ifftRosetta are linked, so here are the rest of the functions:
using cf = std::complex<float>;
cv::Mat convertToComplexMat(cv::Mat imageMat) {
    cv::Mat matOper;
    if (imageMat.channels() == 3)
        cv::cvtColor(imageMat, matOper, cv::COLOR_BGR2GRAY);
    else
        matOper = imageMat.clone();
    matOper.convertTo(matOper, CV_32FC1);
    cv::Mat compChannel = cv::Mat::zeros(matOper.rows, matOper.cols, CV_32FC1);
    std::vector<cv::Mat> channels;
    channels.push_back(matOper);
    channels.push_back(compChannel);
    cv::merge(channels, matOper);
    return matOper;
}
template <typename T>
std::vector<T> matElementsToVector(cv::Mat operand) {
    std::vector<T> vecOper;
    int cn = operand.channels();
    int lele = operand.total();
    for (int i = 0; i < operand.total(); i++) {
        if (cn == 1)
            vecOper.push_back(operand.at<cv::Vec<T, 1>>(i)[0]);
        else if (cn == 2) {
            if (typeid(T) == typeid(cf)) {
                T xd = operand.at<T>(i);
                vecOper.push_back(xd);
            }
            else
                for (int k = 0; k < cn; k++)
                    vecOper.push_back(operand.at<cv::Vec<T, 2>>(i)[k]);
        }
        else if (cn == 3)
            for (int k = 0; k < cn; k++)
                vecOper.push_back(operand.at<cv::Vec<T,3>>(i)[k]);
    }
    return vecOper;
}
void zeroPadding(std::vector<cf>& a, int power) {
    int p, ioper;
    if (power == -1)
        p = ceil(log2f(a.size()));
    else
        p = power;
    ioper = pow(2, p);
    int size = a.size();
    for (int i = 0; i < ioper - size; i++) {
        a.push_back(0);
    }
}
template <typename T>
cv::Mat vectorToMatElementsRowMajor(std::vector<T> operand, int mrows, int mcols, int mtype) {
    cv::Mat matoper(mrows, mcols, mtype);
    for (int j = 0; j < matoper.total(); j++) {
        matoper.at<T>(j) = operand[j];
    }
    return matoper;
}
@Cris, I tried it again with the OpenCV DFT like you said, following the directions here. I applied the DFT to the image and the kernel, then element-wise multiplied them, then applied the IDFT. But the result is something very different now. I can see a resemblance of the original image in there, but there are multiple shadows of it at different angles. I think the problem is how I do the signal multiplication, but I can't find any answers on how to multiply 2D signals. Here is the code; the output image is below it:
cv::Mat convolution2DopenCV(cv::Mat image, cv::Mat kernel) {
    cv::Mat paddedImage, paddedKernel, imgOper, kerOper;
    if (image.channels() == 3)
        cv::cvtColor(image, imgOper, cv::COLOR_BGR2GRAY);
    else
        imgOper = image.clone();
    kerOper = kernel;
    int m = cv::getOptimalDFTSize(imgOper.rows);
    int n = cv::getOptimalDFTSize(imgOper.cols);
    cv::copyMakeBorder(imgOper, paddedImage, 0, m - imgOper.rows, 0, n - imgOper.cols, cv::BORDER_CONSTANT, cv::Scalar::all(0));
    cv::copyMakeBorder(kerOper, paddedKernel, 0, m - kerOper.rows, 0, n - kerOper.cols, cv::BORDER_CONSTANT, cv::Scalar::all(0));
    cv::Mat planesImage[] = { cv::Mat_<float>(paddedImage), cv::Mat::zeros(paddedImage.size(), CV_32F) };
    cv::Mat cmpImgMat;
    cv::merge(planesImage, 2, cmpImgMat);
    cv::dft(cmpImgMat, cmpImgMat);
    cv::Mat planesKernel[] = { cv::Mat_<float>(paddedKernel), cv::Mat::zeros(paddedKernel.size(), CV_32F) };
    cv::Mat cmpKerMat;
    cv::merge(planesKernel, 2, cmpKerMat);
    cv::dft(cmpKerMat, cmpKerMat);
    cv::Mat resultMat = cmpImgMat.mul(cmpKerMat);
    cv::Mat planes[2];
    cv::idft(resultMat, resultMat);
    cv::split(resultMat, planes);
    cv::normalize(planes[0], planes[0], 0, 255, cv::NORM_MINMAX);
    return planes[0];
}
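One hedged observation on the multiplication step: Mat::mul() multiplies the two channels independently (real*real and imag*imag), which is not complex multiplication, so the two spectra are not actually being combined correctly. cv::mulSpectrums performs a true element-wise complex product of two DFT results; under that assumption, the product-and-inverse part would look roughly like this:
// Element-wise complex multiplication of the two spectra (flags = 0, conjB defaults to false),
// followed by the inverse DFT; DFT_SCALE undoes the scaling of the forward transform.
cv::Mat resultMat;
cv::mulSpectrums(cmpImgMat, cmpKerMat, resultMat, 0);
cv::idft(resultMat, resultMat, cv::DFT_SCALE);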
That's everything; if there is something I'm missing, let me know.

Convert pytorch tensor to opencv mat and vice versa in C++

I want to convert pytorch tensors to opencv mat and vice versa in C++. I have these two functions:
cv::Mat TensorToCVMat(torch::Tensor tensor)
{
std::cout << "converting tensor to cvmat\n";
tensor = tensor.squeeze().detach().permute({1, 2, 0});
tensor = tensor.mul(255).clamp(0, 255).to(torch::kU8);
tensor = tensor.to(torch::kCPU);
int64_t height = tensor.size(0);
int64_t width = tensor.size(1);
cv::Mat mat(width, height, CV_8UC3);
std::memcpy((void *)mat.data, tensor.data_ptr(), sizeof(torch::kU8) * tensor.numel());
return mat.clone();
}
torch::Tensor CVMatToTensor(cv::Mat mat)
{
std::cout << "converting cvmat to tensor\n";
cv::cvtColor(mat, mat, cv::COLOR_BGR2RGB);
cv::Mat matFloat;
mat.convertTo(matFloat, CV_32F, 1.0 / 255);
auto size = matFloat.size();
auto nChannels = matFloat.channels();
auto tensor = torch::from_blob(matFloat.data, {1, size.height, size.width, nChannels});
return tensor.permute({0, 3, 1, 2});
}
In my code I load two images (image1 and image2) and I want to convert them to pytorch tensors and then back to opencv mat to check if it works. The problem is that I get a memory access error on the first call of TensorToCVMat and I can't figure out what's wrong, as I do not have much experience with C++ programming.
cv::Mat image1;
image1 = cv::imread(argv[1]);
if (!image1.data)
{
    std::cout << "no image data\n";
    return -1;
}
cv::Mat image2;
image2 = cv::imread(argv[2]);
if (!image2.data)
{
    std::cout << "no image data\n";
    return -1;
}
torch::Tensor tensor1 = CVMatToTensor(image1);
cv::Mat new_image1 = TensorToCVMat(tensor1); // <<< this is where the memory access error is thrown
torch::Tensor tensor2 = CVMatToTensor(image2);
cv::Mat new_image2 = TensorToCVMat(tensor2);
It would be great if you could give me hints or an explanation to solve this problem.
I'm not sure if the error is happening at the memcpy step, but you can use the void* data variant of the Mat constructor
Mat (int rows, int cols, int type, void *data, size_t step=AUTO_STEP)
and you can skip the memcpy step
tensor = uint8_tensor //shape: (h, w, 3)
cv::Mat mat = cv::Mat(height, width, CV_8UC3, tensor.data_ptr());
return mat;
I am using torch>=1.7.0. For a tensor of dtype=float and size [1, 3, height, width], this is what worked for me:
cv::Mat torchTensortoCVMat(torch::Tensor& tensor)
{
tensor = tensor.squeeze().detach();
tensor = tensor.permute({1, 2, 0}).contiguous();
tensor = tensor.mul(255).clamp(0, 255).to(torch::kU8);
tensor = tensor.to(torch::kCPU);
int64_t height = tensor.size(0);
int64_t width = tensor.size(1);
cv::Mat mat = cv::Mat(cv::Size(width, height), CV_8UC3, tensor.data_ptr<uchar>());
return mat.clone();
}
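A short usage sketch (the sizes are hypothetical, just to illustrate the expected layout):
// The tensor is modified in place by the function (it takes a non-const reference),
// and the returned Mat owns its data thanks to the clone() inside.
torch::Tensor t = torch::rand({1, 3, 480, 640});   // NCHW float tensor in [0, 1]
cv::Mat img = torchTensortoCVMat(t);               // 480x640 CV_8UC3
cv::imwrite("out.png", img);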
My tensor shape was 500x500x3, and I had to add tensor.reshape({width * height * 3}) to get the actual image
cv::Mat TensorToCVMat(torch::Tensor tensor)
{
// torch.squeeze(input, dim=None, *, out=None) → Tensor
// Returns a tensor with all the dimensions of input of size 1 removed.
// tensor.detach
// Returns a new Tensor, detached from the current graph.
// permute dimension, 3x700x700 => 700x700x3
tensor = tensor.detach().permute({1, 2, 0});
// float to 255 range
tensor = tensor.mul(255).clamp(0, 255).to(torch::kU8);
// GPU to CPU?, may not needed
tensor = tensor.to(torch::kCPU);
// shape of tensor
int64_t height = tensor.size(0);
int64_t width = tensor.size(1);
// Mat takes data form like {0,0,255,0,0,255,...} ({B,G,R,B,G,R,...})
// so we must reshape tensor, otherwise we get a 3x3 grid
tensor = tensor.reshape({width * height * 3});
// CV_8UC3 is an 8-bit unsigned integer matrix/image with 3 channels
cv::Mat imgbin(cv::Size(width, height), CV_8UC3, tensor.data_ptr());
return imgbin;
}
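One more hedged observation, in case the crash actually originates on the Mat-to-tensor side: torch::from_blob does not copy the Mat's memory, so the tensor returned by CVMatToTensor still points into the local matFloat buffer after the function returns, which is a common source of memory access errors. A sketch under that assumption (the function name is hypothetical):
torch::Tensor CVMatToTensorCopy(const cv::Mat& mat)
{
    cv::Mat rgb;
    cv::cvtColor(mat, rgb, cv::COLOR_BGR2RGB);
    cv::Mat matFloat;
    rgb.convertTo(matFloat, CV_32F, 1.0 / 255);
    // from_blob only wraps the existing buffer; clone() gives the tensor its own
    // storage so it stays valid after matFloat goes out of scope.
    auto tensor = torch::from_blob(matFloat.data,
                                   {1, matFloat.rows, matFloat.cols, matFloat.channels()},
                                   torch::kFloat32).clone();
    return tensor.permute({0, 3, 1, 2}).contiguous();
}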

Multiply std::vector<cv::Point3f> by a cv::Mat?

As above, I have a std::vector of cv::Point3f and a transformation matrix. I need to multiply the vector by the inverse of the Mat.
My Mat: (T is the resulting transformation)
cv::Mat R(3,3,rvec.type());
cv::Rodrigues(rvec, R); // R is 3x3
cv::Mat T(4, 4, R.type()); // T is 4x4
T(cv::Range(0, 3), cv::Range(0, 3)) = R * 1; // copies R into T
T(cv::Range(0, 3), cv::Range(3, 4)) = tvec * 1; // copies tvec into T
float *p = T.ptr<float>(3);
p[0] = p[1] = p[2] = 0; p[3] = 1;
my vector:
std::vector<cv::Point3f> objectPoints;
I have tried:
cv::Mat V = T.inv() * cv::Mat(objectPoints, false)
V.copyTo(cv::Mat(objectPoints, false));
(Assertion failed, type error)
for (int i = 0; i < objectPoints.size(); i++)
{
    cv::Mat dst = cv::Mat(objectPoints[i], false);
    dst = -T*dst; //USE MATRIX ALGEBRA
    // cv::Point3f tmp3 = cv::Point3f(dst(0, 0), dst(1, 0), dst(2, 0));
}
(Assertion failed)
std::vector<cv::Point3f> p3d;
perspectiveTransform(objectPoints, p3d, -T);
(runs, but the values are very incorrect)
cv::transform(objectPoints, p3d, -T);
(Assertion error)
What is the correct way (if there is a way!) to do this?
Thank you.
As Rick M. pointed out, you're trying to multiply a 4x4 matrix with a length-3 point. To perform a transformation with just one matrix multiplication (i.e. with the 4x4 combined R-T matrix), you first have to represent the point in homogeneous coordinates, which essentially just involves tacking on a 1 as the 4th element of your point; after the transformation, you divide the new point by the 4th element to maintain its value as a 1. Here's a nice source on 3D-3D transformations, with homogeneous coordinates discussed on slide 14.
Since OpenCV doesn't have a Point4f class, you'll have to add this 1 when you're creating the Mat form of the point. This is untested but might work:
std::vector<cv::Point3f> dstPoint;
for (int i = 0; i < objectPoints.size(); i++) {
    // Convert Point3f to 4x1 Mat (in homogeneous coordinates, with 1 as 4th element)
    cv::Point3f pt = objectPoints[i];
    cv::Mat ptMat = (cv::Mat_<float>(4,1) << pt.x, pt.y, pt.z, 1);
    // Perform matrix multiplication and store as Mat_ for easy element access
    cv::Mat_<float> dstMat(T.inv() * ptMat);
    // Divide first three resulting elements by the 4th (homogenizing
    // the point) and store as Point3f
    float scale = dstMat(0,3);
    cv::Point3f dst(dstMat(0,0)/scale, dstMat(0,1)/scale, dstMat(0,2)/scale);
    dstPoint.push_back(dst);
}
Would test, but I'm at work and don't have OpenCV on this computer.
UPDATE:
When copying to T, try this instead:
cv::Mat T(4, 4, cv::DataType<float>::type);
cv::Mat rot = T(cv::Range(0, 3), cv::Range(0, 3));
cv::Mat trans = T(cv::Range(0, 3), cv::Range(3, 4));
R.copyTo(rot);
tvec.copyTo(trans);
Based on the answer by DCSmith, I have it working. I had to make this small change:
cv::Mat T(4, 4, cv::DataType<float>::type);
R.copyTo(T(cv::Rect(0, 0, 3, 3)));
tvec.copyTo(T(cv::Rect(3, 0, 1, 3)));
To make the entire function look like:
std::vector<cv::Point3f> p3d;
cv::Mat R(3,3, cv::DataType<float>::type);
cv::Rodrigues(rvec, R); // R is 3x3
cv::Mat T(4, 4, cv::DataType<float>::type);
R.copyTo(T(cv::Rect(0, 0, 3, 3)));
tvec.copyTo(T(cv::Rect(3, 0, 1, 3)));
float *p = T.ptr<float>(3);
p[0] = p[1] = p[2] = 0; p[3] = 1;
std::vector<cv::Point3f> dstPoint;
for (int i = 0; i < objectPoints.size(); i++) {
    cv::Point3f pt = objectPoints[i];
    cv::Mat ptMat = (cv::Mat_<float>(4, 1) << pt.x, pt.y, pt.z, 1);
    // Perform matrix multiplication and store as Mat_ for easy element access
    cv::Mat_<float> dstMat = T.inv() * ptMat;
    // Divide first three resulting elements by the 4th (homogenizing
    // the point) and store as Point3f
    float scale = dstMat(0, 3);
    cv::Point3f dst(dstMat(0, 0) / scale, dstMat(0, 1) / scale, dstMat(0, 2) / scale);
    p3d.push_back(dst);
}
Thank you for your help!
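For completeness, a hedged alternative (untested here): cv::perspectiveTransform accepts a std::vector<cv::Point3f> together with a 4x4 floating-point matrix and performs exactly this multiply-by-homogeneous-matrix-then-divide step, so the earlier attempt from the question should also work once -T is replaced by T.inv():
// perspectiveTransform treats each Point3f as (x, y, z, 1), multiplies it by the
// 4x4 matrix and divides the first three components by the resulting 4th one.
std::vector<cv::Point3f> p3d;
cv::perspectiveTransform(objectPoints, p3d, T.inv());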

Convert boost array to open cv mat

I am trying to convert a boost::array rightCamInfo.K to an OpenCV Mat cv::Mat K. I didn't find any function for this, so I wrote an iterative approach:
float tempK[9];
cv::Mat K;
for (int i = 0; i < 9; i++) {
tempK[i] = rightCamInfo.K[i];
}
K = cv::Mat(3, 3, CV_64F, &tempK);
But this is giving me strange results. The given data lies between 0 and 400, yet the values in the result matrix are around 5 * 10^(-315). So obviously there are some conversion errors. What is wrong? Did I choose the wrong type for the matrix, or is this array type not suitable?
You should use CV_32F, not CV_64F, and point to the first element of tempK:
K = cv::Mat(3, 3, CV_32F, &tempK[0]);
or
Mat K(3, 3, CV_32F, &tempK[0]);
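As a side note, a sketch of a more direct route, assuming rightCamInfo.K holds 9 doubles (as in a ROS sensor_msgs/CameraInfo message): build the Mat over the array's memory with the matching CV_64F depth and clone it so it owns its data:
// CV_64F matches the double elements; clone() copies the data so K stays valid
// after rightCamInfo (or the temporary Mat header) goes away.
cv::Mat K = cv::Mat(3, 3, CV_64F, (void*)rightCamInfo.K.data()).clone();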

fourier transform gaussian filter error

I'm new to OpenCV and I'm trying to filter an image using a Gaussian filter in the frequency domain. But there is a runtime error:
"assertion failed (type == srcB.type() && srcA.size() == srcB.size()) in cv::mulSpectrum"
I know it is caused by the return type of my filter; the types don't match and I don't know how to make it right.
Here is the filter function (my guess is that the return value from this function is wrong):
cv::Mat createGaussianHighPassFilter(cv::Size size, double cutoffInPixels){
    Mat ghpf(size, CV_64F);
    cv::Point center(size.width / 2, size.height / 2);
    for(int u = 0; u < ghpf.rows; u++)
    {
        for(int v = 0; v < ghpf.cols; v++)
        {
            ghpf.at<double>(u, v) = gaussianCoeff(u - center.x, v - center.y, cutoffInPixels); // kernel for the 128x128 Gaussian filter
        }
    }
    return ghpf;
}
and this is the main function:
Mat mask = createGaussianHighPassFilter(complexI.size(),16);
shift(mask);
Mat AX[] = {Mat::zeros(complexI.size(), CV_32F), Mat::zeros(complexI.size(), CV_32F)};
Mat kernel_spec;
AX[0] = mask; // real
AX[1] = mask; // imaginar
merge(AX, 2, kernel_spec);
cout<<complexI.type()<<endl<<kernel_spec.type(); //the result is 13 and 14, the type doesn't match
mulSpectrums(complexI, kernel_spec, complexI, DFT_ROWS); // only DFT_ROWS accepted
updateMag(complexI); // show spectrum
updateResult(complexI); // do inverse transform
Well of course they don't match. Your mask is created as CV_64F, so after the merge kernel_spec ends up as CV_64FC2 (type 14), while complexI is CV_32FC2 (type 13). Convert one of them with Mat::convertTo() so the depths match and it should work.
HTH
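A minimal sketch of that fix, assuming complexI really is CV_32FC2 (type 13 in the printout): convert the mask before merging so both spectra have the same depth.
Mat mask32;
mask.convertTo(mask32, CV_32F);        // CV_64F -> CV_32F
Mat AX[] = { mask32, mask32 };         // real and imaginary planes
Mat kernel_spec;
merge(AX, 2, kernel_spec);             // now CV_32FC2, same type as complexI
mulSpectrums(complexI, kernel_spec, complexI, DFT_ROWS);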