I am trying to convert a boost::array, rightCamInfo.K, to an OpenCV matrix, cv::Mat K. I didn't find a function for this conversion, so I wrote an iterative approach:
float tempK[9];
cv::Mat K;
for (int i = 0; i < 9; i++) {
    tempK[i] = rightCamInfo.K[i];
}
K = cv::Mat(3, 3, CV_64F, &tempK);
But this gives me strange results. The input data ranges between 0 and 400, while the values in the result matrix are around 5 * 10^(-315). So there is obviously a conversion error somewhere. What is wrong? Did I choose the wrong type for the matrix, or is this array type simply not suitable?
You should use CV_32F, not CV_64F, since tempK holds floats, and point at the first element of tempK:
K = cv::Mat(3, 3, CV_32F, &tempK[0]);
or
Mat K(3, 3, CV_32F, &tempK[0]);
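Note that this constructor does not copy the data; K just points at tempK, so it dangles once tempK goes out of scope. If rightCamInfo.K actually holds doubles (as the boost::array in ROS's sensor_msgs CameraInfo does, which is an assumption here), a sketch of an alternative that skips the float copy entirely:
// Sketch, assuming rightCamInfo.K is a boost::array<double, 9>.
// clone() gives K its own storage that outlives the source array.
cv::Mat K = cv::Mat(3, 3, CV_64F, rightCamInfo.K.data()).clone();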
My Code:
cv::Mat
getPerspectiveTransform(Eigen::MatrixXd quadrangle, Eigen::MatrixXd warpedQuadrangle) {
    cv::Mat transMat;
    cv::Mat quad(4, 2, CV_32FC1);
    cv::Mat warpedQuad(4, 2, CV_32FC1);
    cv::eigen2cv(quadrangle, quad);
    cv::eigen2cv(warpedQuadrangle, warpedQuad);
    std::cout << "[ ] quadrangle in cv::Mat " << quad << std::endl;
    transMat = cv::getPerspectiveTransform(quad, warpedQuad);
    return transMat;
}
Error:
C++ exception with description "OpenCV(4.6.0) /home/ci/opencv/modules/imgproc/src/imgwarp.cpp:3392: error: (-215:Assertion failed) src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4 in function 'getPerspectiveTransform'"
Suspected Issue:
eigen2cv is converting my CV_32FC1 to CV_64F. getPerspectiveTransform expects CV_32F as its input.
What should be the ideal solution to this?
eigen2cv changes the layout of the output matrix according to the type of the input matrix, as can be seen from its source. So, e.g., if your Eigen matrices use 64-bit floats, the output Mats will have CV_64F depth. In this case, the simplest solution is to convert the output matrices to CV_32F using Mat::convertTo (documentation):
Mat quadF, warpedQuadF;
quad.convertTo(quadF, CV_32F);
warpedQuad.convertTo(warpedQuadF, CV_32F);
transMat = cv::getPerspectiveTransform(quadF, warpedQuadF);
Since the matrices are pretty small, this conversion is unlikely to be a performance issue, but it's possible to avoid it by rewriting the two overloads of the getPerspectiveTransform function (source1 and source2) to work with 64-bit floats. The second overload just delegates to the first, and the first works with doubles internally anyway, so the change is trivial:
// Assumes using namespace cv, as in the OpenCV sources this is adapted from.
cv::Mat getPerspectiveTransform64(const Point2d src[], const Point2d dst[], int solveMethod)
{
    CV_INSTRUMENT_REGION(); // internal OpenCV macro; can be dropped outside the library
    Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.ptr());
    double a[8][8], b[8];
    Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);
    for( int i = 0; i < 4; ++i )
    {
        a[i][0] = a[i+4][3] = src[i].x;
        a[i][1] = a[i+4][4] = src[i].y;
        a[i][2] = a[i+4][5] = 1;
        a[i][3] = a[i][4] = a[i][5] = a[i+4][0] = a[i+4][1] = a[i+4][2] = 0;
        a[i][6] = -src[i].x*dst[i].x;
        a[i][7] = -src[i].y*dst[i].x;
        a[i+4][6] = -src[i].x*dst[i].y;
        a[i+4][7] = -src[i].y*dst[i].y;
        b[i] = dst[i].x;
        b[i+4] = dst[i].y;
    }
    solve(A, B, X, solveMethod);
    M.ptr<double>()[8] = 1.;
    return M;
}

cv::Mat getPerspectiveTransform64(InputArray _src, InputArray _dst, int solveMethod = DECOMP_LU) // default matches cv::getPerspectiveTransform
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_64F) == 4 && dst.checkVector(2, CV_64F) == 4);
    return getPerspectiveTransform64((const Point2d*)src.data, (const Point2d*)dst.data, solveMethod);
}
This can now be used directly without additional conversion:
transMat = getPerspectiveTransform64(quad, warpedQuad);
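Another option, assuming you control the Eigen side (a sketch, untested): cast the Eigen matrices to float before calling eigen2cv, so the output Mats come out as CV_32F in the first place:
Eigen::MatrixXf quadF = quadrangle.cast<float>();
Eigen::MatrixXf warpedF = warpedQuadrangle.cast<float>();
cv::Mat quad, warpedQuad;
cv::eigen2cv(quadF, quad); // quad is now CV_32F
cv::eigen2cv(warpedF, warpedQuad);
cv::Mat transMat = cv::getPerspectiveTransform(quad, warpedQuad);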
I am learning image processing with OpenCV in C++. To implement a basic down-sampling algorithm I need to work at the pixel level - to remove rows and columns. However, when I assign values with mat.at<>(i,j), other values get assigned instead - things like 1e-38.
Here is the code :
Mat src, dst;
src = imread("diw3.jpg", CV_32F); // src is a 479x359 grayscale image
// dst will contain src low-pass-filtered; I checked by displaying it, it works fine
Mat kernel;
kernel = Mat::ones(3, 3, CV_32F) / (float)(9);
filter2D(src, dst, -1, kernel, Point(-1, -1), 0, BORDER_DEFAULT);
// Now I try to remove half the rows/columns; the result is stored in downsampled
Mat downsampled = Mat::zeros(240, 180, CV_32F);
for (int i = 0; i < downsampled.rows; i++) {
    for (int j = 0; j < downsampled.cols; j++) {
        downsampled.at<uchar>(i,j) = dst.at<uchar>(2*i, 2*j);
    }
}
Since I read here (OpenCV outputting odd pixel values) that for cout I needed to cast, I tried writing downsampled.at<uchar>(i,j) = (int) dst.at<uchar>(2*i, 2*j), but that does not work either.
The second argument to cv::imread is a cv::ImreadModes flag, so the line:
src = imread("diw3.jpg", CV_32F);
is not correct; it should probably be:
cv::Mat src_8u = imread("diw3.jpg", cv::IMREAD_GRAYSCALE);
src_8u.convertTo(src, CV_32FC1);
which will read the image as 8-bit grayscale image, and will convert it to floating point values.
The loop should look something like this:
Mat downsampled = Mat::zeros(240, 180, CV_32FC1);
for (int i = 0; i < downsampled.rows; i++) {
    for (int j = 0; j < downsampled.cols; j++) {
        // make sure 2*i and 2*j stay within dst's bounds
        downsampled.at<float>(i,j) = dst.at<float>(2*i, 2*j);
    }
}
Note that the type argument to cv::Mat::zeros is CV_32FC1 (1 channel of 32-bit floating-point values), so the Mat::at<float> method should be used.
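As an aside (not part of the original question): OpenCV's cv::pyrDown implements this same blur-then-halve pipeline, Gaussian smoothing followed by rejecting every other row and column, in a single call:
cv::Mat downsampled;
cv::pyrDown(src, downsampled); // blurs and halves both dimensions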
I have point clouds in the form of a vector of Point3d (std::vector<cv::Point3d>). If I use the conversion provided by OpenCV, like
cv::Mat tmpMat = cv::Mat(pts); // here pts is std::vector<cv::Point3d>
it gets converted into a matrix with 3 channels. I want a single-channel matrix with dimensions nx3 (n being the number of elements in the vector). Is there any direct method to convert a vector of Point3d into an OpenCV Mat of size nx3?
Now I'm doing
cv::Mat1f tmpMat = cv::Mat::zeros((int)pts.size(), 3, CV_32FC1); // CV_32FC1 is a macro, not a member of the cv namespace
for (int i = 0; i < (int)pts.size(); ++i)
{
    tmpMat(i, 0) = pts[i].x; // note: Point3d's doubles are narrowed to float here
    tmpMat(i, 1) = pts[i].y;
    tmpMat(i, 2) = pts[i].z;
}
and from Mat back to a vector of Point3d:
vector<cv::Point3d> pts;
for (int i = 0; i < tmpMat.rows; ++i)
{
    pts.push_back(cv::Point3d(tmpMat(i, 0), tmpMat(i, 1), tmpMat(i, 2)));
}
I will be doing this repeatedly. Is there any faster method?
Found the method to convert a 3-channel Mat into a single-channel Mat of size nx3 at
http://docs.opencv.org/2.4/modules/core/doc/basic_structures.html#mat-reshape
cv::Mat tmpMat = cv::Mat(pts).reshape(1);
To convert from a Mat of size nx3 back to a vector:
vector<cv::Point3d> pts;
tmpMat.reshape(3, tmpMat.rows).copyTo(pts); // n x 3, 1 channel -> n x 1, 3 channels; tmpMat must be CV_64F to match Point3d
The size of the vector will be equal to the number of rows of the Mat.
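One caveat worth adding (a note on the API, not from the linked docs): by default cv::Mat(pts) wraps the vector's memory without copying (copyData is false), and reshape only creates a new Mat header on the same data. If the matrix has to outlive the vector, clone it:
cv::Mat tmpMat = cv::Mat(pts).reshape(1).clone(); // owns its data; n x 3, CV_64F for Point3d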
As above, I have a std::vector of cv::Point3f and a transformation matrix. I need to multiply each point in the vector by the inverse of the Mat.
My Mat: (T is the resulting transformation)
cv::Mat R(3,3,rvec.type());
cv::Rodrigues(rvec, R); // R is 3x3
cv::Mat T(4, 4, R.type()); // T is 4x4
T(cv::Range(0, 3), cv::Range(0, 3)) = R * 1; // copies R into T
T(cv::Range(0, 3), cv::Range(3, 4)) = tvec * 1; // copies tvec into T
float *p = T.ptr<float>(3);
p[0] = p[1] = p[2] = 0; p[3] = 1;
my vector:
std::vector<cv::Point3f> objectPoints;
I have tried:
cv::Mat V = T.inv() * cv::Mat(objectPoints, false);
V.copyTo(cv::Mat(objectPoints, false));
(Assertion failed, type error)
for (int i = 0; i < objectPoints.size(); i++)
{
    cv::Mat dst = cv::Mat(objectPoints[i], false);
    dst = -T*dst; // USE MATRIX ALGEBRA
    // cv::Point3f tmp3 = cv::Point3f(dst(0, 0), dst(1, 0), dst(2, 0));
}
(Assertion failed)
std::vector<cv::Point3f> p3d;
perspectiveTransform(objectPoints, p3d, -T);
(runs, but the values are very incorrect)
cv::transform(objectPoints, p3d, -T);
(Assertion error)
What is the correct way (if there is a way!) to do this?
Thank you.
As Rick M. pointed out, you're trying to multiply a 4x4 matrix with a length-3 point. To perform a transformation with just one matrix multiplication (i.e. with the 4x4 combined R-T matrix), you first have to represent the point in homogeneous coordinates, which essentially just involves tacking on a 1 as the 4th element of your point; after the transformation, you divide the new point by the 4th element to maintain its value as a 1. Here's a nice source on 3D-3D transformations, with homogeneous coordinates discussed on slide 14.
Since OpenCV doesn't have a Point4f class, you'll have to add this 1 when you're creating the Mat form of the point. This is untested but might work:
std::vector<cv::Point3f> dstPoints;
cv::Mat Tinv = T.inv(); // invert once, outside the loop
for (int i = 0; i < objectPoints.size(); i++) {
    // Convert Point3f to 4x1 Mat (in homogeneous coordinates, with 1 as 4th element)
    cv::Point3f pt = objectPoints[i];
    cv::Mat ptMat = (cv::Mat_<float>(4,1) << pt.x, pt.y, pt.z, 1);
    // Perform matrix multiplication and store as Mat_ for easy element access
    cv::Mat_<float> dstMat(Tinv * ptMat);
    // Divide the first three resulting elements by the 4th (homogenizing the
    // point) and store as Point3f; dstMat is 4x1, so it is indexed by row
    float scale = dstMat(3, 0);
    cv::Point3f dst(dstMat(0, 0) / scale, dstMat(1, 0) / scale, dstMat(2, 0) / scale);
    dstPoints.push_back(dst);
}
Would test, but I'm at work and don't have OpenCV on this computer.
UPDATE:
When copying to T, try this instead:
cv::Mat T(4, 4, cv::DataType<float>::type);
cv::Mat rot = T(cv::Range(0, 3), cv::Range(0, 3));
cv::Mat trans = T(cv::Range(0, 3), cv::Range(3, 4));
R.copyTo(rot);
tvec.copyTo(trans);
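A further note (untested, and an addition beyond the question): if many points have to be transformed, it can be cheaper to invert the rigid transform once ([R|t] inverts to [R^T | -R^T t]) and let cv::transform apply the resulting 3x4 matrix to the whole vector in one call. A sketch, assuming R is a 3x3 CV_32F rotation and tvec a 3x1 CV_32F translation:
cv::Mat Rt = R.t();          // inverse rotation
cv::Mat tInv = -(Rt * tvec); // inverse translation
cv::Mat Tinv(3, 4, CV_32F);
Rt.copyTo(Tinv(cv::Rect(0, 0, 3, 3)));
tInv.copyTo(Tinv(cv::Rect(3, 0, 1, 3)));
std::vector<cv::Point3f> p3d;
cv::transform(objectPoints, p3d, Tinv); // applies Tinv to every point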
Based on the answer by DCSmith, I have it working. I had to make this small change:
cv::Mat T(4, 4, cv::DataType<float>::type);
R.copyTo(T(cv::Rect(0, 0, 3, 3)));
tvec.copyTo(T(cv::Rect(3, 0, 1, 3)));
The entire function now looks like this:
std::vector<cv::Point3f> p3d;
cv::Mat R(3, 3, cv::DataType<float>::type);
cv::Rodrigues(rvec, R); // R is 3x3
cv::Mat T(4, 4, cv::DataType<float>::type);
R.copyTo(T(cv::Rect(0, 0, 3, 3)));
tvec.copyTo(T(cv::Rect(3, 0, 1, 3)));
float *p = T.ptr<float>(3);
p[0] = p[1] = p[2] = 0; p[3] = 1;
cv::Mat Tinv = T.inv(); // invert once, outside the loop
for (int i = 0; i < objectPoints.size(); i++) {
    cv::Point3f pt = objectPoints[i];
    cv::Mat ptMat = (cv::Mat_<float>(4, 1) << pt.x, pt.y, pt.z, 1);
    // Perform matrix multiplication and store as Mat_ for easy element access
    cv::Mat_<float> dstMat = Tinv * ptMat;
    // Divide the first three resulting elements by the 4th (homogenizing the
    // point) and store as Point3f; dstMat is 4x1, so it is indexed by row
    float scale = dstMat(3, 0);
    cv::Point3f dst(dstMat(0, 0) / scale, dstMat(1, 0) / scale, dstMat(2, 0) / scale);
    p3d.push_back(dst);
}
Thank you for your help!
I want to get the magnitude and orientation of all pixels in an image. This is what I have done so far:
cv::Sobel( dis, grad_x, dis.depth(), 1, 0, 3);
cv::Sobel( dis, grad_y, dis.depth(), 0, 1, 3);
Mat orientation(width, height, CV_32FC1);
#define PI 3.14159265
for(int i = 0; i < grad_y.rows; ++i){
    for(int j = 0; j < grad_y.cols; ++j){
        orientation = atan( grad_y / grad_x ) * 180/PI;
        magnitude = sqrt((grad_x)*(grad_x)+(grad_y)*(grad_y));
    }
}
but I get this error on the atan and sqrt lines:
error C2665: 'atan' : none of the 3 overloads could convert all the argument types
math.h(108): could be 'double atan(double)'
math.h(505): 'float atan(float)'
math.h(553): 'long double atan(long double)'
Does anyone know what the problem is?
Thanks in advance...
-----------After Editing-------------
#define PI 3.14159265
for(int i = 0; i < grad_y.rows; i++){
    for(int j = 0; j < grad_y.cols; j++){
        orientation = atan( grad_y.at<float>(i,j) / grad_x.at<float>(i,j) ) * 180/PI;
    }
}
I changed the sqrt part to this, but I still get an error:
Mat magnitude(m_pmdDevice->GetY(), m_pmdDevice->GetX(), CV_32FC1);
Mat gradAdd(m_pmdDevice->GetY(), m_pmdDevice->GetX(), CV_32FC1);
grad_x = grad_x.mul(grad_x);
grad_y = grad_y.mul(grad_y);
cv::add(grad_x, grad_y, gradAdd);
magnitude = sqrt(gradAdd);
atan takes a number (double, float, or long double), but you are providing a matrix. You should use:
orientation = atan( grad_y.at<float>(i,j) / grad_x.at<float>(i,j) ) * 180/PI ;
I wrote 'float' in my example since I don't know what type you are actually using. Replace it if necessary.
The same problem applies to sqrt.
Edit (after edit of question):
You are using sqrt in the wrong way. See its documentation. It should be:
sqrt(gradAdd, magnitude);
Even better would be to use the function cv::magnitude instead of all the multiplications and square roots:
magnitude(grad_x, grad_y, magnit);
Also, I recommend not giving your matrices the same names as OpenCV functions. This creates confusion.
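For completeness (an addition beyond the original answer): cv::cartToPolar computes the per-pixel magnitude and angle of a pair of floating-point gradient images in a single call; passing true as the last argument returns the angle in degrees:
cv::Mat magnit, orient;
cv::cartToPolar(grad_x, grad_y, magnit, orient, true); // grad_x, grad_y must be CV_32F or CV_64F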
the problem is in the following line:
orientation = atan( grad_y / grad_x ) * 180/PI ;
Because orientation is of Mat type and contains float values (you declared it as CV_32FC1), you cannot assign a single value directly to the whole matrix. You should do it as follows:
orientation.at<float>(i,j) = atan( grad_y.at<float>(i,j) / grad_x.at<float>(i,j) ) * 180/PI;
P.S. Just think about it: how could you put a single value into an entire matrix?
Use the following code for element-wise computation of orientation and magnitude. If you want to use a function instead, use cv::magnitude as described in the answer above.
cv::Sobel( dis, grad_x, CV_32F, 1, 0, 3);
cv::Sobel( dis, grad_y, CV_32F, 0, 1, 3);
Mat orientation(height, width, CV_32FC1); // note: Mat takes (rows, cols), i.e. (height, width)
Mat magnitude(height, width, CV_32FC1);
for(int i = 0; i < grad_y.rows; ++i)
    for(int j = 0; j < grad_y.cols; ++j)
    {
        orientation.at<float>(i,j) = atan2( grad_y.at<float>(i,j), grad_x.at<float>(i,j) ) * 180/PI;
        magnitude.at<float>(i,j) = sqrt( grad_x.at<float>(i,j)*grad_x.at<float>(i,j) + grad_y.at<float>(i,j)*grad_y.at<float>(i,j) );
    }
Note: if you define your matrices as CV_8UC1, typecast the arguments to sqrt to float or double, as it is only defined for floating-point types and not for integers.
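A minimal sketch of that cast (assuming 8-bit gradient Mats named grad_x and grad_y, indexed inside the same loop as above):
float gx = (float)grad_x.at<uchar>(i, j);
float gy = (float)grad_y.at<uchar>(i, j);
magnitude.at<float>(i, j) = sqrt(gx*gx + gy*gy); // sqrt now sees a float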