How to use the merge function properly? - c++

I've used the cv::merge() function at the end of the following code, but it throws an unhandled exception when execution reaches the cv::merge() call.
I've tried both a cv::Mat[] array and a std::vector<cv::Mat> as input, but it still throws a C++ exception.
The purpose of the code is to extract the red channel of an underwater image and apply new values to it in order to enhance the color distribution, according to equation 8 of this reference (Color Correction Based on CFA and Enhancement Based on Retinex With Dense Pixels for Underwater Images).
The only call that works is cv::merge(planes, 1, image2);, which puts just one plane into image2. It should merge all three planes in planes into image2 to give a color image, not a grayscale one.
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
int main()
{
//read an image
Mat image = imread("9554.png", 1);
//check for existence of data
if (!image.data)
{ printf("no image data.\n"); return -1; }
//planes is a vector for holding rgb channels separately
//std::vector<Mat> planes;
Mat planes[3];
//split the image into channels
//planes[2] is the red channel
split(image, planes);
// converting planes from uchar to double
planes[0].convertTo(planes[0], CV_64FC1);
planes[1].convertTo(planes[1], CV_64FC1);
planes[2].convertTo(planes[2], CV_64FC1);
// defining coefficients of green and blue channel for blending
double a = 0.05, b = 0.95;
//sum_im stores pixelwise sum of Red, Green and Blue planes
Mat imBlendNormal_B_G, sum_im;
//converting to double
imBlendNormal_B_G.convertTo(imBlendNormal_B_G, CV_64FC1);
sum_im.convertTo(sum_im, CV_64FC1);
//blending green and blue planes with a and b coefficients
// and 0.0 offset(or gamma)
addWeighted(planes[1], a, planes[0], b, 0.0, imBlendNormal_B_G);
// sum of red, green and blue pixel in two addWeighted calls
addWeighted(planes[2], 1.0, planes[1], 1.0, 0.0, sum_im);
addWeighted(planes[0], 1.0, sum_im, 1.0, 0.0, sum_im);
//dividing blended green and blue image to total RGB sum
divide(imBlendNormal_B_G, sum_im, imBlendNormal_B_G);
//defining average kernel 3x3
Mat avg3x3_kernel = (Mat_<double>(3, 3) << 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0);
//defining matrices for storing 3x3 average of blue and green planes
Mat blueAverage, greenAverage;
// converting to double type
blueAverage.convertTo(blueAverage, CV_64FC1);
greenAverage.convertTo(greenAverage, CV_64FC1);
// taking 3x3 average
filter2D(planes[0], blueAverage, planes[0].depth(), avg3x3_kernel);
filter2D(planes[1], greenAverage, planes[1].depth(), avg3x3_kernel);
//imBlendAverage_B_G_R: for blending of averaged green and blue channels
Mat imBlendAverage_B_G_R;
//convert to double
imBlendAverage_B_G_R.convertTo(imBlendAverage_B_G_R, CV_64FC1);
//blend averaged green and blue with a and b coeffs
addWeighted(greenAverage, a, blueAverage, b, 0.0, imBlendAverage_B_G_R);
//differentiate red values
addWeighted(imBlendAverage_B_G_R, 1.0, planes[2], -1.0, 0.0, imBlendAverage_B_G_R);
//CompensationTermRed: storing finally compensated red channel intensities
Mat CompensationTermRed;
//converting to double
CompensationTermRed.convertTo(CompensationTermRed, CV_64FC1);
//multiplication term
CompensationTermRed = imBlendAverage_B_G_R.mul(imBlendNormal_B_G);
//final add term
addWeighted(CompensationTermRed, 1.0, planes[2], 1.0, 0.0, CompensationTermRed);
//convert to uchar
Mat CompensationTermRed_uint8;
CompensationTermRed.convertTo(CompensationTermRed_uint8, CV_8UC1);
//imshow("CompensationTermRed_uint8", CompensationTermRed_uint8);
// assign new red channel values to planes[2]
planes[2] = CompensationTermRed_uint8;
Mat image2 = image;
cv::merge(planes, 1, image2);
image2.convertTo(image2, CV_8UC3);
imshow("merge",image2);
waitKey(0);
return 0;
}

Debugging your code, namely inspecting planes right before the cv::merge call, reveals that planes[0] and planes[1] are of type FLOAT64, whereas planes[2] is of type UINT8. From the documentation on cv::merge:
Parameters
mv input array of matrices to be merged; all the matrices in mv must have the same size and the same depth.
Therefore you get the exception, and that is also the reason why cv::merge(planes, 1, image2) works nevertheless: with only a single plane there are no differing depths to clash.
To fix that issue, just get rid of that part:
//convert to uchar
Mat CompensationTermRed_uint8;
CompensationTermRed.convertTo(CompensationTermRed_uint8, CV_8UC1);
and replace
planes[2] = CompensationTermRed_uint8;
with
planes[2] = CompensationTermRed;
Then, your code should work as expected.
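As a quick sanity check (a small diagnostic sketch of my own, not part of the original code), you can verify right before the merge that all planes really share the same depth and size:
// All matrices passed to cv::merge must have the same size and depth,
// otherwise the call throws an exception.
for (int i = 0; i < 3; ++i)
    std::cout << "planes[" << i << "]: depth=" << planes[i].depth()
              << ", rows=" << planes[i].rows << ", cols=" << planes[i].cols << std::endl;
CV_Assert(planes[0].depth() == planes[1].depth()
       && planes[1].depth() == planes[2].depth());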

Here is the corrected version of the code, which works fine; the merge() function no longer throws an unhandled C++ exception (thanks to the comments from HansHirse).
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
int main()
{
//read an image
Mat image = imread("9554.png", 1);
//check for existence of data
if (!image.data)
{ printf("no image data.\n"); return -1; }
//planes is a vector for holding rgb channels separately
Mat planes[3];
//split the image into channels
//planes[2] is the red channel
split(image, planes);
// converting planes from uchar to double
planes[0].convertTo(planes[0], CV_64FC1);
planes[1].convertTo(planes[1], CV_64FC1);
planes[2].convertTo(planes[2], CV_64FC1);
// defining coefficients of green and blue channel for blending
double a = 0.05, b = 0.95;
//sum_im stores pixelwise sum of Red, Green and Blue planes
Mat imBlendNormal_B_G, sum_im;
//converting to double
imBlendNormal_B_G.convertTo(imBlendNormal_B_G, CV_64FC1);
sum_im.convertTo(sum_im, CV_64FC1);
//blending green and blue planes with a and b coefficients
// and 0.0 offset(or gamma)
addWeighted(planes[1], a, planes[0], b, 0.0, imBlendNormal_B_G);
// sum of red, green and blue pixel in two addWeighted calls
addWeighted(planes[2], 1.0, planes[1], 1.0, 0.0, sum_im);
addWeighted(planes[0], 1.0, sum_im, 1.0, 0.0, sum_im);
//dividing blended green and blue image to total RGB sum
divide(imBlendNormal_B_G, sum_im, imBlendNormal_B_G);
//defining average kernel 3x3
Mat avg3x3_kernel = (Mat_<double>(3, 3) << 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0);
//defining matrices for storing 3x3 average of blue and green planes
Mat blueAverage, greenAverage;
// converting to double type
blueAverage.convertTo(blueAverage, CV_64FC1);
greenAverage.convertTo(greenAverage, CV_64FC1);
// taking 3x3 average
filter2D(planes[0], blueAverage, planes[0].depth(), avg3x3_kernel);
filter2D(planes[1], greenAverage, planes[1].depth(), avg3x3_kernel);
//imBlendAverage_B_G_R: for blending of averaged green and blue channels
Mat imBlendAverage_B_G_R;
//convert to double
imBlendAverage_B_G_R.convertTo(imBlendAverage_B_G_R, CV_64FC1);
//blend averaged green and blue with a and b coeffs
addWeighted(greenAverage, a, blueAverage, b, 0.0, imBlendAverage_B_G_R);
//differentiate red values
addWeighted(imBlendAverage_B_G_R, 1.0, planes[2], -1.0, 0.0, imBlendAverage_B_G_R);
//CompensationTermRed: storing finally compensated red channel intensities
Mat CompensationTermRed;
//converting to double
CompensationTermRed.convertTo(CompensationTermRed, CV_64FC1);
//multiplication term
CompensationTermRed = imBlendAverage_B_G_R.mul(imBlendNormal_B_G);
//final add term
addWeighted(CompensationTermRed, 1.0, planes[2], 1.0, 0.0, CompensationTermRed);
// assign new red channel values to planes[2]
planes[2] = CompensationTermRed;
Mat image2;
cv::merge(planes, 3, image2);
image2.convertTo(image2, CV_8UC3);
imshow("merge",image2);
waitKey(0);
return 0;
}
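If you prefer the std::vector<cv::Mat> variant mentioned in the question, the vector overload of merge works just as well once all planes share the same depth (a small sketch, assuming C++11 and the planes array from above):
#include <vector>  // if not already pulled in transitively
std::vector<cv::Mat> planesVec = { planes[0], planes[1], planes[2] };
cv::merge(planesVec, image2);  // equivalent to cv::merge(planes, 3, image2)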

Related

Fill circle with gradient

I want to fill a circle with a gradient color, like the example shown at the bottom. I can't figure out an easy way to do that.
I can draw more circles, but the transitions between them are visible.
cv::circle(img, center, circle_radius * 1.5, cv::Scalar(1.0, 1.0, 0.3), CV_FILLED);
cv::circle(img, center, circle_radius * 1.2, cv::Scalar(1.0, 1.0, 0.6), CV_FILLED);
cv::circle(img, center, circle_radius, cv::Scalar(1.0, 1.0, 1.0), CV_FILLED);
All you need to do is create a function which takes in a central point and a new point, calculates the distance, and returns a grayscale value for that point. Alternatively you could just return the distance, store the distance at that point, and then scale the whole thing later with cv::normalize().
So let's say you have the central point as (50, 50) in a (100, 100) image. Here's pseudocode for what you'd want to do:
function euclideanDistance(center, point)  # returns a float
    return sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )

center = (50, 50)
rows = 100
cols = 100
gradient = new Mat(rows, cols)  # should be of type float

for row < rows:
    for col < cols:
        point = (col, row)
        gradient[row, col] = euclideanDistance(center, point)

normalize(gradient, 0, 255, NORM_MINMAX, uint8)
gradient = 255 - gradient
Note the steps here:
Create the Euclidean distance function to calculate distance
Create a floating point matrix to hold the distance values
Loop through all rows and columns and assign a distance value
Normalize to the range you want (you could stick with a float here instead of casting to uint8, but you do you)
Flip the gradient, since larger distances come out brighter, but you want the opposite.
Now for your exact example image, there's a gradient in a circle, whereas this method just creates the whole image as a gradient. In your case, if you want a specific radius, just modify the function which calculates the Euclidean distance, and if it's beyond some distance, set it to 0 (the value at the center of the circle, which will be flipped eventually to white):
function euclideanDistance(center, point, radius)  # returns a float
    distance = sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )
    if distance > radius:
        return 0
    else:
        return distance
Here is the above in actual C++ code:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cmath>

float euclidean_distance(cv::Point center, cv::Point point, int radius){
    float distance = std::sqrt(
        std::pow(center.x - point.x, 2) + std::pow(center.y - point.y, 2));
    if (distance > radius) return 0;
    return distance;
}

int main(){
    int h = 400;
    int w = 400;
    int radius = 100;
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_32F);
    cv::Point center(150, 200);
    cv::Point point;
    for(int row=0; row<h; ++row){
        for(int col=0; col<w; ++col){
            point.x = col;
            point.y = row;
            gradient.at<float>(row, col) = euclidean_distance(center, point, radius);
        }
    }
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_not(gradient, gradient);
    cv::imshow("gradient", gradient);
    cv::waitKey();
}
A completely different method (though doing the same thing) would be to use distanceTransform(). This function maps each pixel of a white blob to its distance from the nearest black pixel, giving a grayscale value, like we were doing above. This code is more concise and does the same thing. However, it can work on arbitrary shapes, not just circles, so that's cool.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(){
    int h = 400;
    int w = 400;
    int radius = 100;
    cv::Point center(150, 200);
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_8U);
    cv::rectangle(gradient, cv::Point(115, 100), cv::Point(270, 350), cv::Scalar(255), -1, 8);
    cv::Mat gradient_padding;
    cv::bitwise_not(gradient, gradient_padding);
    cv::distanceTransform(gradient, gradient, CV_DIST_L2, CV_DIST_MASK_PRECISE);
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_or(gradient, gradient_padding, gradient);
    cv::imshow("gradient-distxform.png", gradient);
    cv::waitKey();
}
You have to draw many circles. The color of each circle depends on its distance from the center. Here is a simple example:
void printGradient(cv::Mat &_input, const cv::Point &_center, const double radius)
{
    cv::circle(_input, _center, radius, cv::Scalar(0, 0, 0), -1);
    for(double i=1; i<radius; i++)
    {
        const int color = 255 - int(i/radius * 255); //or some other color calculation
        cv::circle(_input, _center, i, cv::Scalar(color, color, color), 2);
    }
}
And result:
Another approach not mentioned yet is to precompute a circle gradient image (with one of the mentioned approaches, like the accepted solution) and use affine warping with linear interpolation to create other such circles of different sizes, as in the sketch below. This can be faster if the warping and interpolation are optimized and maybe hardware-accelerated.
The result might be a bit worse than perfect.
I once used this to create an individual vignetting mask circle for each frame in endoscopic imaging. It was faster than computing the distances "manually".
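A minimal sketch of that idea (my own illustration, not the poster's code): precompute one gradient disc with one of the approaches above, then rescale it to another radius with an affine warp and linear interpolation.
#include <opencv2/opencv.hpp>

// baseDisc: a precomputed grayscale gradient disc (e.g. from the accepted answer).
// scale: ratio of the desired radius to the radius of the precomputed disc.
cv::Mat scaleGradientDisc(const cv::Mat& baseDisc, double scale)
{
    cv::Point2f center(baseDisc.cols / 2.0f, baseDisc.rows / 2.0f);
    // Rotation of 0 degrees, uniform scaling about the disc center.
    cv::Mat M = cv::getRotationMatrix2D(center, 0.0, scale);
    cv::Mat warped;
    cv::warpAffine(baseDisc, warped, M, baseDisc.size(), cv::INTER_LINEAR);
    return warped;
}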

AR with OpenCV & OpenGL

Here's the problem: I wrote code to display the OpenGL teapot on a sheet of paper with a drawing. For this, I track the 4 corners of the paper (using SURF detection & matching, followed by computing the homography matrix, then a moving average of the corner positions to reduce the jitter). The corner coordinates are used to compute the intrinsic and extrinsic matrices of the camera (using calibrateCamera() and solvePnP(), respectively). The rotation matrix is then computed using Rodrigues(). Afterwards, I compute the rotation angles using decomposeProjectionMatrix(). Here's the OpenCV part of the code:
...
objPoints.push_back(objCorners);
scenePoints.push_back(sceneCorners);
calibrateCamera(objPoints, scenePoints, Size(640,480), camMtx, distortCoeff, RVecs, tVecs);
solvePnP(objCorners, sceneCorners, camMtx, distortCoeff, RVec, tVec);
Rodrigues(RVec, rotMtx);
getAngles(rotMtx, rotAngles);
objCorners are the corners coordinates in the template image ([1 1], [img width 1], [img width img height], [1 img height]). sceneCorners are the corners coordinates in the webcam frame, computed using the homography matrix. The function getAngles() is as follows:
void getAngles(Mat &rotCamMtx, Vec3d &angles)
{
Mat camMtx, rotMtx, transVec, rotMtxX, rotMtxY, rotMtxZ;
double *r = rotCamMtx.ptr<double>();
double projMtx[12] = {r[0], r[1], r[2], 0,
r[3], r[4], r[5], 0,
r[6], r[7], r[8], 0};
decomposeProjectionMatrix(Mat(3,4,CV_64FC1,projMtx), camMtx, rotMtx, transVec, rotMtxX, rotMtxY, rotMtxZ, angles);
}
Then I set the element of the OpenGL model view matrix as follows:
modelViewMat[0] = 1.0;
modelViewMat[1] = 0.0;
modelViewMat[2] = 0.0;
modelViewMat[3] = 0.0;
modelViewMat[4] = 0.0;
modelViewMat[5] = 1.0;
modelViewMat[6] = 0.0;
modelViewMat[7] = 0.0;
modelViewMat[8] = 0.0;
modelViewMat[9] = 0.0;
modelViewMat[10] = 1.0;
modelViewMat[11] = 0.0;
modelViewMat[12] = 2*matCenter.x/639 - 641/639;
modelViewMat[13] = 481/479 - 2*matCenter.y/479;
modelViewMat[14] = -0.25;
modelViewMat[15] = 1.0;
matCenter is the center coordinate of the paper, obtained by taking the average of the 4 corners. The values in modelViewMat[12] and modelViewMat[13] are obtained by mapping the pixel coordinates ([1 640], [1 480]) to ([-1 1], [1 -1]). The OpenGL part of the code:
...
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadMatrixd(modelViewMat);
glRotated(-45, 1.0, 0.0, 0.0);
glRotated(rotAngles[2], 0.0, 1.0, 0.0);
glShadeModel(GL_SMOOTH);
glColor3f(1.0, 1.0, 1.0);
glutSolidTeapot(0.3);
I rotated the teapot -45 degrees around the x-axis to make it appear to be "sitting" on the paper.
The result is this: if I translate the paper on the desk, the location of the teapot on the paper is more or less correct (on the same spot). If I rotate the paper, the teapot follows the rotation correctly (around the y-axis), but its location is no longer correct. The question is: how do I "pin" the teapot to the same spot on the paper? I've tried using the result of Rodrigues() and solvePnP() directly in the OpenGL model view matrix (as suggested in OpenCV + OpenGL: proper camera pose using solvePnP), but the result is incorrect.
I solved this problem several days ago, based on the code from http://blog.yarrago.com/2011/08/introduction-to-augmented-reality.html. To display the 3D object correctly, the OpenGL projection matrix is set first, followed by the OpenGL model view matrix. The elements of the projection matrix are computed from the intrinsic matrix of the camera as follows:
calibrateCamera(objPoints, scenePoints, Size(640,480), camMtx, distortCoeff, RVecs, tVecs);
...
projectionMat[0] = 2*camMtx.at<double>(0,0)/frameW;
projectionMat[1] = 0;
projectionMat[2] = 0;
projectionMat[3] = 0;
projectionMat[4] = 0;
projectionMat[5] = 2*camMtx.at<double>(1,1)/frameH;
projectionMat[6] = 0;
projectionMat[7] = 0;
projectionMat[8] = 1 - 2*camMtx.at<double>(0,2)/frameW;
projectionMat[9] = -1 + (2*camMtx.at<double>(1,2) + 2)/frameH;
projectionMat[10] = (zNear + zFar)/(zNear - zFar);
projectionMat[11] = -1;
projectionMat[12] = 0;
projectionMat[13] = 0;
projectionMat[14] = 2*zNear*zFar/(zNear - zFar);
projectionMat[15] = 0;
frameW and frameH are 640 and 480, respectively. zNear is 0.1 and zFar is 100.
The elements of the OpenGL model view matrix are computed from the rotation matrix and the translation vector (obtained from solvePnP() and Rodrigues()). To get a correct positioning of the 3D object, the translation vector needs to be transformed before computing the model view matrix.
// Offset value to move the translation vector
double offsetC[3][1] = {424, 600, 0};
Mat offset(3, 1, CV_64F, offsetC);
...
solvePnP(objCorners, sceneCorners, camMtx, distortCoeff, RVec, tVec);
Rodrigues(RVec, rotMtx);
tVec = tVec + rotMtx*offset; // Move tVec to refer to the center of the paper
tVec = tVec / 250.0; // Converting pixel coordinates to OpenGL world coordinates
...
modelviewMat[0] = rotMtx.at<double>(0,0);
modelviewMat[1] = -rotMtx.at<double>(1,0);
modelviewMat[2] = -rotMtx.at<double>(2,0);
modelviewMat[3] = 0;
modelviewMat[4] = rotMtx.at<double>(0,1);
modelviewMat[5] = -rotMtx.at<double>(1,1);
modelviewMat[6] = -rotMtx.at<double>(2,1);
modelviewMat[7] = 0;
modelviewMat[8] = rotMtx.at<double>(0,2);
modelviewMat[9] = -rotMtx.at<double>(1,2);
modelviewMat[10] = -rotMtx.at<double>(2,2);
modelviewMat[11] = 0;
modelviewMat[12] = tVec.at<double>(0,0);
modelviewMat[13] = -tVec.at<double>(1,0);
modelviewMat[14] = -tVec.at<double>(2,0);
modelviewMat[15] = 1;
The numerical values in offsetC are the pixel coordinates of the center of the paper. The OpenGL part of the code is then:
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMat);
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelviewMat);
glRotatef(90, -1.0, 0.0, 0.0); // Rotate the teapot first so that it will be displayed correctly on the paper
glutSolidTeapot(1.0);
One important thing for the correct positioning of the teapot is the transformation of tVec.
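As a side note (my own sketch, not part of the original answer), the sign pattern in modelviewMat is simply the OpenCV-to-OpenGL axis flip written out element by element; it is equivalent to left-multiplying the pose by diag(1, -1, -1):
// OpenCV camera coordinates (x right, y down, z forward) become OpenGL eye
// coordinates (x right, y up, z toward the viewer) by flipping the y and z axes.
cv::Mat cvToGl = (cv::Mat_<double>(3, 3) <<
    1.0,  0.0,  0.0,
    0.0, -1.0,  0.0,
    0.0,  0.0, -1.0);
cv::Mat rotGl = cvToGl * rotMtx;  // same as negating rows 1 and 2 of rotMtx above
cv::Mat tGl   = cvToGl * tVec;    // same as negating the y and z components of tVec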

I want to add Gaussian noise to a colour image, with the standard deviation of the noise varied from 0.2 to 2 at 0.2 intervals, in OpenCV

I have tried to separate the colour channels, add noise to each of them, and merge them, and I succeeded in doing so. But I want to separate the Y, Cr and Cb components, add noise only to Y, and finally merge the components back, which I could not get to work. My code for the colour channels:
for (int i=0 ; i<3 ; i++){
    Mat noise = Mat(channel[i].size(), CV_64F);
    normalize(channel[i], result[i], 0.0, 1.0, CV_MINMAX, CV_64F);
    randn(noise, 0, .2);
    result[i] += noise;
    normalize(result[i], result[i], 0.0, 1.0, CV_MINMAX, CV_64F);
    result[i].convertTo(result[i], CV_32F, 255, 0);
}
merge(result, 3, input);
imwrite("/Users/hossainmdshakhawat/developer/subimg/Noisy/merge.jpg", input);

How to correct the image according to the camera's matrix with the distortion coefficients?

I'm not sure about the units of the camera matrix (mm, etc.):
camera matrix = {0.0074209246, 0.0, 0.026450, 0.0, 0.0074209246, -0.056039, 0.0, 0.0, 1.0}
distortion coefficients = {-4.179306e-005, -4.179306e-005, 2.008752e-005, -2.959854e-005}
undistort(image, imageUndistorted, cameraMatrix, distCoeffs);
where image is the captured image and imageUndistorted is a Mat to store the undistorted image.
The units are pixels.
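A minimal sketch of how those values could be plugged in (the numbers are just the ones from the question, stored as CV_64F matrices):
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) <<
    0.0074209246, 0.0,          0.026450,
    0.0,          0.0074209246, -0.056039,
    0.0,          0.0,          1.0);
cv::Mat distCoeffs = (cv::Mat_<double>(1, 4) <<
    -4.179306e-005, -4.179306e-005, 2.008752e-005, -2.959854e-005);
cv::Mat imageUndistorted;
cv::undistort(image, imageUndistorted, cameraMatrix, distCoeffs);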

OpenCV Stereo rectification from manually created matrices

I am currently working on a 3D reconstruction from X-ray images, and therefore I need to stereo-rectify images of two views before I can match some features with the help of the epilines. I am using OpenCV 2.4 with C++.
For this purpose I have a set of pairs of X-ray images (cone-beam X-ray images, no real cameras with distortion parameters or a real focal length), one from the anteroposterior view (looking directly at the chest) and one from the lateral view (looking at the chest from the side). I know some parameters, like a virtual focal length (equal for both views), that I can use, and the images have a resolution of 512x512 px, hence the camera projection center in the images is at (255, 255) for both views. I also know that the cameras are perpendicular. From this information I developed a rotation matrix R and a translation vector t (both verified with the help of a 3D plot in Matlab).
Problem: R and t are actually enough for a stereo rectification in OpenCV, but the resulting images after rectification are black. Googling led me to a supposed bug in stereoRectify, but I doubt that is the cause, since I can run the OpenCV stereoRectification example and it works. When trying a stereo rectification in Matlab I can at least see some distorted rectification results.
Here is my C++ code:
float camera_matrix_ap_data[] = {1207*2.0, 0.0, 255.0,
0.0, 1207*2, 255.0,
0.0, 0.0, 1.0};
cv::Mat camera_matrix_ap(3, 3, CV_64F, camera_matrix_ap_data);
float camera_matrix_lat_data[] = {1207*2, 0.0, 255.0,
0.0, 1207*2, 255.0,
0.0, 0.0, 1.0};
cv::Mat camera_matrix_lat(3, 3, CV_64F, camera_matrix_lat_data);
///
/// @brief the distortion matrices
///
cv::Mat distortion_ap(4, 1, CV_64F, 0.0);
cv::Mat distortion_lat(4, 1, CV_64F, 0.0);
///
/// @brief Translation and Rotation matrices
///
float R_data[] = {0.0, 0.0, 1.0,
0.0, 1.0, 0.0,
-1.0, 0.0, 0.0};
float T_data[] = {-(1207.0*2 + 255), 0.0, 1207.0*2 + 255};
cv::Mat R(3, 3, CV_64F, R_data);
cv::Mat T(3, 1, CV_64F, T_data);
for (int i=1; i<=20; i++) {
std::stringstream filenameAP_tmp;
std::stringstream filenameLAT_tmp;
filenameAP_tmp << "imageAP"<< i <<".jpg";
filenameAP = filenameAP_tmp.str();
filenameLAT_tmp << "imageLAT"<< i <<".jpg";
filenameLAT = filenameLAT_tmp.str();
rectimg_ap = cv::imread(filenameAP);
rectimg_lat = cv::imread(filenameLAT);
// Yes, these images are grayscale
/// Experimental
/// Stereo rectify both images
cv::Mat R1(3, 3, CV_64F);
cv::Mat R2(3, 3, CV_64F);
cv::Mat P1(3, 4, CV_64F);
cv::Mat P2(3, 4, CV_64F);
cv::Mat Q(4, 4, CV_64F);
cv::Rect validRoi[2];
// buggy?
cv::stereoRectify(camera_matrix_ap, distortion_ap, camera_matrix_lat, distortion_lat, rectimg_ap.size(), R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, rectimg_ap.size(), &validRoi[0], &validRoi[1] );
// Maps for AP View
cv::Mat map1x(rectimg_ap.size(), CV_32FC1, 255.0);
cv::Mat map2x(rectimg_ap.size(), CV_32FC1, 255.0);
// Maps for LAT View
cv::Mat map1y(rectimg_ap.size(), CV_32FC1, 255.0);
cv::Mat map2y(rectimg_ap.size(), CV_32FC1, 255.0);
cv::initUndistortRectifyMap(camera_matrix_ap, distortion_ap, R1, P1, rectimg_ap.size(), CV_32FC1, map1x, map1y);
cv::initUndistortRectifyMap(camera_matrix_lat, distortion_lat, R2, P2, rectimg_lat.size(), CV_32FC1, map2x, map2y);
cv::Mat tmp1, tmp2;
cv::remap(rectimg_ap, tmp1, map1x, map1y, INTER_LINEAR);
cv::remap(rectimg_lat, tmp2, map2x, map2y, INTER_LINEAR);
//findHomography(rectimg_ap, rectimg_lat, CV_RANSAC);
}
So I am wondering what is wrong with this code or my matrices, since the rectified images after remap are completely black. Is there a difference in the coordinate system axes between OpenCV and Matlab? As far as I have read, in OpenCV the z-axis points toward the image plane, and it is the same for Matlab.
I'd be glad if someone could help me; I've been stuck on this problem for weeks now. Thank you very much!
Try changing the "float" variable types to "double". CV_64F corresponds to a double, not to a float, since it is 8 bytes (= 64 bits). I tried your code with my own matrix values, and that did the trick.
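A minimal sketch of that fix applied to the matrices from the question: back each CV_64F header with a double array so the element size matches.
double camera_matrix_ap_data[9] = {1207*2.0, 0.0, 255.0,
                                   0.0, 1207*2.0, 255.0,
                                   0.0, 0.0, 1.0};
cv::Mat camera_matrix_ap(3, 3, CV_64F, camera_matrix_ap_data);
double R_data[9] = { 0.0, 0.0, 1.0,
                     0.0, 1.0, 0.0,
                    -1.0, 0.0, 0.0};
double T_data[3] = {-(1207.0*2 + 255), 0.0, 1207.0*2 + 255};
cv::Mat R(3, 3, CV_64F, R_data);
cv::Mat T(3, 1, CV_64F, T_data);
// Alternatively, keep float arrays and construct the Mats with CV_32F instead.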