OpenCV stereo vision depth map, code does not work - C++

I am studying stereo vision depth maps and I am using the OpenCV library. I wrote a program to obtain a depth map, but when I run it the depth map frame comes out empty. Can anybody help me figure out what is wrong? The code is shown below:
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <math.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/contrib/contrib.hpp>
int main()
{
IplImage* img1 = cvLoadImage("/home/sezen/Masaüstü/imR.png");
IplImage* img2 = cvLoadImage("/home/sezen/Masaüstü/imL.png");
IplImage *rimage = cvCreateImage(
cvSize( img1->width, img1->height ), IPL_DEPTH_8U, 1 );
cvCvtColor( img1, rimage, CV_RGB2GRAY );
IplImage *limage = cvCreateImage(
cvSize( img2->width, img2->height ), IPL_DEPTH_8U, 1 );
cvCvtColor( img2, limage, CV_RGB2GRAY );
cvNamedWindow( "Right", CV_WINDOW_AUTOSIZE );
cvShowImage( "Right", rimage );
cvNamedWindow( "Left", CV_WINDOW_AUTOSIZE );
cvShowImage("Left", limage);
CvMat *matr = cvCreateMat(rimage->height,rimage->width,CV_8UC1 );
CvMat *matl = cvCreateMat(limage->height,limage->width,CV_8UC1 );
CvMat* disp = cvCreateMat(rimage->height,rimage->width,CV_16S);
CvMat* vdisp = cvCreateMat(rimage->height,rimage->width,CV_16S);
cvConvert( rimage, matr );
cvConvert( limage, matl );
CvStereoBMState *BMState = cvCreateStereoBMState();
assert(BMState != 0);
BMState->preFilterSize=21;
BMState->preFilterCap=31;
BMState->SADWindowSize=21;
BMState->minDisparity=0;
BMState->numberOfDisparities=128;
BMState->textureThreshold=10;
BMState->uniquenessRatio=15;
cvFindStereoCorrespondenceBM( matr, matl, disp, BMState);
cvNormalize(disp, vdisp, 0, 255, CV_MINMAX);
cvShowImage("depthmap", vdisp);
cvWaitKey(0);
return 0;
}

Here's code for a disparity map using the C++ API. The final image that you normalize should be of type CV_8UC1.
Mat img1, img2, g1, g2;
Mat disp, disp8;
img1 = imread("leftImage.jpg");
img2 = imread("rightImage.jpg");
cvtColor(img1, g1, CV_BGR2GRAY);
cvtColor(img2, g2, CV_BGR2GRAY);
StereoBM sbm;
sbm.state->SADWindowSize = 9;
sbm.state->numberOfDisparities = 112;
sbm.state->preFilterSize = 5;
sbm.state->preFilterCap = 61;
sbm.state->minDisparity = -39;
sbm.state->textureThreshold = 507;
sbm.state->uniquenessRatio = 0;
sbm.state->speckleWindowSize = 0;
sbm.state->speckleRange = 8;
sbm.state->disp12MaxDiff = 1;
sbm(g1, g2, disp);
normalize(disp, disp8, 0, 255, CV_MINMAX, CV_8U);
imshow("left", img1);
imshow("right", img2);
imshow("disp", disp8);

I can only add that the structure of OpenCV namespaces and classes changes every year.
Below is working source code for OpenCV 3.4.0.
#include <Windows.h>
#include <Vfw.h>
#include <tchar.h>
#include <string>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
Mat im_left=imread("left.png");   // make sure this file really is the left view
Mat im_right=imread("right.png"); // and this one the right view
cv::Size imagesize = im_left.size();
cv::Mat disparity_left=cv::Mat(imagesize.height,imagesize.width,CV_16S);
cv::Mat disparity_right=cv::Mat(imagesize.height,imagesize.width,CV_16S);
cv::Mat g1,g2,disp,disp8;
cv::cvtColor(im_left,g1,cv::COLOR_BGR2GRAY);
cv::cvtColor(im_right,g2,cv::COLOR_BGR2GRAY);
cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(0,21);
sbm->setDisp12MaxDiff(1);
sbm->setSpeckleRange(8);
sbm->setSpeckleWindowSize(9);
sbm->setUniquenessRatio(0);
sbm->setTextureThreshold(507);
sbm->setMinDisparity(-39);
sbm->setPreFilterCap(61);
sbm->setPreFilterSize(5);
sbm->compute(g1,g2,disparity_left);
normalize(disparity_left, disp8, 0, 255, cv::NORM_MINMAX, CV_8U);
cv::namedWindow("Left",cv::WINDOW_FREERATIO);
cv::imshow("Left", im_left);
cv::namedWindow("Right",cv::WINDOW_FREERATIO);
cv::imshow("Right", im_right);
cv::namedWindow("Depth map",cv::WINDOW_FREERATIO);
cv::imshow("Depth map", disp8);
cv::waitKey(0);
return 0;
}
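A note I would add here (mine, not the answerer's): StereoBM::compute expects the left view as the first argument and produces a disparity map referenced to the left image. Swapping the inputs does not raise an error, it just yields an unusable map, so it pays to keep the camera roles explicit. A tiny hedged sketch with the variables above:
// Hedged sketch: g1 must come from the physical left camera, g2 from the right one.
sbm->compute(g1, g2, disparity_left); // disparity of the left image, CV_16S, values scaled by 16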

For me it worked slightly differently with the initialization of the StereoBM object:
Ptr<StereoBM> sbm = cv::StereoBM::create(16, 5);
sbm->setDisp12MaxDiff(1);
sbm->setSpeckleRange(8);
sbm->setSpeckleWindowSize(0);
sbm->setUniquenessRatio(0);
sbm->setTextureThreshold(507);
sbm->setMinDisparity(-39);
sbm->setPreFilterCap(61);
sbm->setPreFilterSize(5);
sbm->compute(src1, src2, disp);
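Two constraints to keep in mind here (my note, not the answerer's): the first argument of StereoBM::create is numDisparities, which must be a positive multiple of 16, and the second is the block size, which must be an odd number of at least 5. A hedged sketch of a complete minimal program around the snippet above; the file names are placeholders:
// Hedged sketch: wrap the snippet in a runnable program.
#include <opencv2/opencv.hpp>
int main()
{
    cv::Mat src1 = cv::imread("left.png", cv::IMREAD_GRAYSCALE);
    cv::Mat src2 = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
    if (src1.empty() || src2.empty()) return 1;
    cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(16, 5); // numDisparities (multiple of 16), odd block size
    cv::Mat disp, disp8;
    sbm->compute(src1, src2, disp);                          // CV_16S output, values scaled by 16
    cv::normalize(disp, disp8, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::imshow("disp", disp8);
    cv::waitKey(0);
    return 0;
}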

I edited the code to my needs: I added cameras one and two and read from them, then built the depth map. Thanks, I hope it's helpful.
#include <string>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main()
{
VideoCapture leftCam(0); //lets say 0 is left, 1 is right
if (leftCam.isOpened() == false){cout << "error: Webcam connect unsuccessful\n"; return(0); }
VideoCapture rightCam(1); //lets say 0 is left, 1 is right
if (rightCam.isOpened() == false){cout << "error: Webcam connect unsuccessful\n"; return(0); }
Mat left, right;
Mat leftClone, rightClone;
char charCheckForEscKey = 0;
while ( charCheckForEscKey != 27 && leftCam.isOpened() )
{
leftCam.read(left);
if (left.empty()){cout << "No frame to read" << endl; break;}
leftClone = left.clone(); // copy the frame from the left camera
imwrite("left.png", leftClone); // write it to left.png in this directory
rightCam.read(right);
if (right.empty()){cout << "No frame to read" << endl; break;}
rightClone = right.clone(); // copy the frame from the right camera
imwrite("right.png", rightClone); // write it to right.png in this directory
Mat im_left = imread("left.png"); //left cam picture
Mat im_right = imread("right.png"); // right cam picture
Size imagesize = im_left.size();
Mat disparity_left= Mat(imagesize.height,imagesize.width,CV_16S);
Mat disparity_right=Mat(imagesize.height,imagesize.width,CV_16S);
Mat g1,g2,disp,disp8;
cvtColor(im_left,g1, COLOR_BGR2GRAY);
cvtColor(im_right,g2, COLOR_BGR2GRAY);
Ptr<cv::StereoBM> sbm = StereoBM::create(0,21);
sbm->setDisp12MaxDiff(1);
sbm->setSpeckleRange(8);
sbm->setSpeckleWindowSize(9);
sbm->setUniquenessRatio(0);
sbm->setTextureThreshold(507);
sbm->setMinDisparity(-39);
sbm->setPreFilterCap(61);
sbm->setPreFilterSize(5);
sbm->compute(g1,g2,disparity_left);
normalize(disparity_left, disp8, 0, 255, NORM_MINMAX, CV_8U);
namedWindow("Left", WINDOW_AUTOSIZE);
imshow("Left", im_left);
namedWindow("Right", WINDOW_AUTOSIZE);
imshow("Right", im_right);
namedWindow("Depth map", WINDOW_AUTOSIZE);
imshow("Depth map", disp8);
namedWindow("Left Cloned", WINDOW_FREERATIO);
imshow("Left Cloned", leftClone); // left is the left pic taken from camera 0
charCheckForEscKey = waitKey(1);
}
return(0);
}
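One simplification you might consider (my suggestion, not part of the original answer): the imwrite/imread round trip through left.png and right.png inside the loop is not needed; the captured frames can be fed to StereoBM directly, which avoids disk I/O on every iteration. A hedged sketch using the variables above:
// Hedged sketch: use the captured frames directly instead of re-reading them from disk.
cvtColor(left, g1, COLOR_BGR2GRAY);   // "left" is the frame read from leftCam
cvtColor(right, g2, COLOR_BGR2GRAY);  // "right" is the frame read from rightCam
sbm->compute(g1, g2, disparity_left);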

#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2/calib3d.hpp>
int main()
{
cv::Mat leftimg =cv::imread("leftimage.jpg");
cv::Mat rightimg = cv::imread("rightimage.jpg");
cv::Mat disparity_left=cv::Mat(leftimg.size(),leftimg.type());
cv::Mat disparity_right=cv::Mat(rightimg.size(),rightimg.type());
cv::Mat g1,g2,disp,disp8;
cv::cvtColor(leftimg,g1,cv::COLOR_BGR2GRAY);
cv::cvtColor(rightimg,g2,cv::COLOR_BGR2GRAY);
cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(16,21);
sbm->setDisp12MaxDiff(1);
sbm->setSpeckleRange(8);
sbm->setSpeckleWindowSize(9);
sbm->setUniquenessRatio(0);
sbm->setTextureThreshold(507);
sbm->setMinDisparity(-39);
sbm->setPreFilterCap(61);
sbm->setPreFilterSize(5);
sbm->compute(g1,g2,disparity_left);
normalize(disparity_left, disp8, 0, 255, cv::NORM_MINMAX, CV_8U);
cv::imshow("disparity", disp8);
cv::waitKey(0);
return 0;
}

Related

How to use the Map class to implement image registration?

Actually, I have read the official documentation here about the Map class in OpenCV, to try to use the reg module. This is my test image:
This is my code:
#include<opencv.hpp>
#include "opencv2/reg/mapshift.hpp"
#include "opencv2/reg/mappergradshift.hpp"
#include "opencv2/reg/mapperpyramid.hpp"
using namespace cv;
using namespace std;
using namespace cv::reg;
Mat highlight1(const Mat src, const Mat t_mask) {
Mat srcImg = src.clone(), mask = t_mask.clone();
threshold(mask, mask, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
cvtColor(mask, mask, COLOR_GRAY2BGR);
cvtColor(srcImg, srcImg, COLOR_GRAY2BGR);
dilate(mask - Scalar(0, 0, 255), mask, Mat(), Point(-1, -1), 1);
return srcImg - mask;
}
int main() {
Mat img1 = imread("img.jpg", 0);
Mat img2;
// Warp original image
Vec<double, 2> shift(5., 5.);
MapShift mapTest(shift);
mapTest.warp(img1, img2);
// Register
Ptr<MapperGradShift> mapper = makePtr<MapperGradShift>();
MapperPyramid mappPyr(mapper);
Ptr<Map> mapPtr = mappPyr.calculate(img1, img2);
MapShift* mapShift = dynamic_cast<MapShift*>(mapPtr.get());
// Display registration result
Mat result;
mapShift->inverseWarp(img2, result);
Mat registration_before = highlight1(img1, img2);
Mat registration_after = highlight1(img1, result);
return 0;
}
But as you can see, registration_after is even worse than registration_before. What have I missed?
This is registration_before:
This is registration_after:

OpenCV getPerspectiveTransform not working

So I am working on an assignment in which I have to classify road signs based on input images. Naturally I used the Canny function and findContours, followed by approxPolyDP, in order to get the corners of the image that I will be transforming.
However, for some reason I keep getting an error when I attempt to use getPerspectiveTransform for the next step. Please help.
Error:
OpenCV Error: Assertion failed (0 <= i && i < (int)vv.size()) in getMat_, file /home/path_to_opencv/opencv/modules/core/src/matrix.cpp, line 1192
terminate called after throwing an instance of 'cv::Exception'
what(): /home/path_to_opencv/opencv/modules/core/src/matrix.cpp:1192: error: (-215) 0 <= i && i < (int)vv.size() in function getMat_
Aborted (core dumped)
Code used:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#define WARPED_XSIZE 200
#define WARPED_YSIZE 300
using namespace cv;
using namespace std;
Mat src; Mat src_gray, warped_result; Mat dst;
Mat speed_80, speed_40;
int canny_thresh = 154;
#define VERY_LARGE_VALUE 100000
#define NO_MATCH 0
#define STOP_SIGN 1
#define SPEED_LIMIT_40_SIGN 2
#define SPEED_LIMIT_80_SIGN 3
RNG rng(12345);
/** @function main */
int main(int argc, char** argv)
{
int sign_recog_result = NO_MATCH;
speed_40 = imread("speed_40.bmp", 0);
speed_80 = imread("speed_80.bmp", 0);
// you run your program on these three examples (uncomment the two lines below)
//string sign_name = "stop4";
string sign_name = "speedsign12";
//string sign_name = "speedsign3";
//string sign_name = "speedsign4";
string final_sign_input_name = sign_name + ".jpg";
string final_sign_output_name = sign_name + "_result" + ".jpg";
/// Load source image and convert it to gray
src = imread(final_sign_input_name, 1);
/// Convert image to gray and blur it
cvtColor(src, src_gray, COLOR_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
warped_result = Mat(Size(WARPED_XSIZE, WARPED_YSIZE), src_gray.type());
// here you add the code to do the recognition, and set the variable
// sign_recog_result to one of STOP_SIGN, SPEED_LIMIT_40_SIGN, SPEED_LIMIT_80_SIGN, or NO_MATCH
// PART 1 of Assignment 2
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Canny(src_gray, canny_output, canny_thresh, canny_thresh*2, 3);
findContours(canny_output, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point(0, 0));
vector<vector<Point> > contours_poly(contours.size());
for (unsigned int i = 0; i < contours.size(); ++i) {
approxPolyDP(Mat(contours[i]), contours_poly[i], contours_poly[i].size()*.02, true);
}
// Part 2 of Assignment 2
vector<vector<Point> > transform_result(contours_poly.size());
warped_result = getPerspectiveTransform(contours_poly, transform_result);
warpPerspective(src, dst, warped_result, dst.size());
//imshow("input", src);
//imshow("output", dst);
/*
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for(unsigned int i = 0; i< contours_poly.size(); i++ ) {
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, i, color, 2, 8, hierarchy, 0, Point() );
}
// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
//*/
// Returning to the predetermined code.
string text;
if (sign_recog_result == SPEED_LIMIT_40_SIGN) text = "Speed 40";
else if (sign_recog_result == SPEED_LIMIT_80_SIGN) text = "Speed 80";
else if (sign_recog_result == STOP_SIGN) text = "Stop";
else if (sign_recog_result == NO_MATCH) text = "Fail";
int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
double fontScale = 2;
int thickness = 3;
cv::Point textOrg(10, 130);
cv::putText(src, text, textOrg, fontFace, fontScale, Scalar::all(255), thickness, 8);
/// Create Window
char* source_window = "Result";
namedWindow(source_window, WINDOW_AUTOSIZE);
imshow(source_window, src);
imwrite(final_sign_output_name, src);
waitKey(0);
return(0);
}
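For reference (this is my note, not an answer from the original thread): getPerspectiveTransform expects exactly four corresponding 2D points for the source and four for the destination, typically as std::vector<cv::Point2f>; passing a vector of whole contours, as the code above does, is what triggers the getMat_ assertion. A hedged sketch of the expected call shape, assuming some contours_poly[i] has been reduced to the four corners of the sign and that the corner order matches the destination order:
// Hedged sketch: 4 source corners from one approximated contour, 4 destination corners
// forming the target rectangle, then a 3x3 perspective transform between them.
std::vector<cv::Point2f> src_corners;
for (const cv::Point& p : contours_poly[i])        // assumes contours_poly[i].size() == 4
    src_corners.push_back(cv::Point2f((float)p.x, (float)p.y));
std::vector<cv::Point2f> dst_corners = {
    {0.f, 0.f}, {WARPED_XSIZE - 1.f, 0.f},
    {WARPED_XSIZE - 1.f, WARPED_YSIZE - 1.f}, {0.f, WARPED_YSIZE - 1.f}
};
cv::Mat H = cv::getPerspectiveTransform(src_corners, dst_corners); // 3x3, CV_64F
cv::warpPerspective(src, dst, H, cv::Size(WARPED_XSIZE, WARPED_YSIZE));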

Displaying Multiple Images on Single window with OpenCV 3.0 and MSVS 12

I am new to OpenCV as well as to C++, and I am currently working on a program that requires me to display multiple images with OpenCV 3.0 and Visual Studio 12. I am working on the following code, but it does not work. I would like to ask how I can solve the problem.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
//Image Reading
IplImage* img1 = cvLoadImage( "ball.jpg" );
IplImage* img2 = cvLoadImage( "ball.jpg" );
IplImage* img3 = cvLoadImage( "ball.jpg" );
IplImage* img4 = cvLoadImage( "ball.jpg" );
int dstWidth=img1->width+img1->width;
int dstHeight=img1->height+img1->height;
IplImage* dst=cvCreateImage(cvSize(dstWidth,dstHeight),IPL_DEPTH_8U,3);
// Copy first image to dst
cvSetImageROI(dst, cvRect(0, 0,img1->width,img1->height) );
cvCopy(img1,dst,NULL);
cvResetImageROI(dst);
// Copy second image to dst
cvSetImageROI(dst, cvRect(img2->width, 0,img2->width,img2->height) );
cvCopy(img2,dst,NULL);
cvResetImageROI(dst);
// Copy third image to dst
cvSetImageROI(dst, cvRect(0, img3->height,img3->width,img3->height) );
cvCopy(img3,dst,NULL);
cvResetImageROI(dst);
// Copy fourth image to dst
cvSetImageROI(dst, cvRect(img4->width, img4->height,img4->width,img4->height) );
cvCopy(img4,dst,NULL);
cvResetImageROI(dst);
//show all in a single window
cvNamedWindow( "Example1", CV_WINDOW_AUTOSIZE );
cvShowImage( "Example1", dst );
cvWaitKey(0);
}
Here's how you can do it with the C++ API, if all images have the same size:
int main(int argc, char* argv[])
{
cv::Mat input1 = cv::imread("C:/StackOverflow/Input/Lenna.png");
cv::Mat input2 = cv::imread("C:/StackOverflow/Input/Lenna.png");
cv::Mat input3 = cv::imread("C:/StackOverflow/Input/Lenna.png");
cv::Mat input4 = cv::imread("C:/StackOverflow/Input/Lenna.png");
int width = 2*input1.cols; // width of 2 images next to each other
int height = 2*input1.rows; // height of 2 images over each other
cv::Mat inputAll = cv::Mat(height, width, input1.type());
cv::Rect subImageROI = cv::Rect(0, 0, input1.cols, input1.rows);
// copy to subimage:
input1.copyTo(inputAll(subImageROI));
// move to 2nd image ROI position:
subImageROI.x = input1.cols;
input2.copyTo(inputAll(subImageROI));
subImageROI.x = 0;
subImageROI.y = input1.rows;
input3.copyTo(inputAll(subImageROI));
subImageROI.x = input1.cols;
subImageROI.y = input1.rows;
input4.copyTo(inputAll(subImageROI));
cv::imshow("input", inputAll);
cv::waitKey(0);
return 0;
}
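As an alternative to the manual ROIs (my addition, again assuming all four images share the same size and type), cv::hconcat and cv::vconcat can build the same 2x2 mosaic with a bit less bookkeeping:
// Hedged sketch: stack the images with hconcat/vconcat instead of ROI copies.
cv::Mat topRow, bottomRow, mosaic;
cv::hconcat(input1, input2, topRow);     // left to right
cv::hconcat(input3, input4, bottomRow);
cv::vconcat(topRow, bottomRow, mosaic);  // top to bottom
cv::imshow("input", mosaic);
cv::waitKey(0);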

Stereo with OpenCV

I am trying to find the distance between the camera and an object, so I use two different cameras (stereo). I use the program from the OpenCV samples.
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/contrib/contrib.hpp"
#include <stdio.h>
#include <string.h>
#include<iostream>
using namespace cv;
const char *windowDisparity = "Disparity";
int main( )
{
Size S(640,360);
Mat threshold2,threshold1;
Mat imgLeft = imread( "lift.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat imgRight = imread("right.jpg", CV_LOAD_IMAGE_GRAYSCALE );
resize(imgLeft,imgLeft,S);
Mat imgDisparity16S = Mat( imgLeft.rows, imgLeft.cols, CV_16S );
Mat imgDisparity8U = Mat( imgRight.rows, imgRight.cols, CV_8UC1 );
int ndisparities = 16*5;
int SADWindowSize = 21;
StereoBM sbm( StereoBM::BASIC_PRESET,ndisparities,SADWindowSize );
sbm( imgLeft, imgRight, imgDisparity16S, CV_16S );
double minVal; double maxVal;
minMaxLoc( imgDisparity16S, &minVal, &maxVal );
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal));
namedWindow( windowDisparity, WINDOW_NORMAL );
imshow( windowDisparity, imgDisparity8U );
imshow( "left", imgLeft );
imshow( "right", imgRight );
imwrite("SBM_sample.png", imgDisparity16S);
waitKey(0);
return 0;
}
My problem is that I can't find the depth Z between the object and the camera.
Maybe try to convert the disparity to 3D points using cv::reprojectImageTo3D(), or cv::perspectiveTransform() for a sparse set of points.
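To expand on that with a hedged sketch: the Q matrix, focal length f, and baseline B below are placeholders that have to come from your own stereo calibration (e.g. from cv::stereoRectify); they are not in the original code. Depth per pixel follows Z = f * B / disparity, and cv::reprojectImageTo3D applies the equivalent reprojection to the whole image:
// Hedged sketch: disparity (CV_16S from StereoBM, scaled by 16) -> 3D points / depth.
// Q is the 4x4 disparity-to-depth matrix produced by cv::stereoRectify during calibration.
cv::Mat disp32f, xyz;
imgDisparity16S.convertTo(disp32f, CV_32F, 1.0 / 16.0); // true disparities in pixels
cv::reprojectImageTo3D(disp32f, xyz, Q, true);          // xyz is CV_32FC3; channel 2 is Z
// Or, for a single pixel with known focal length f (pixels) and baseline B (meters):
// double Z = f * B / disp32f.at<float>(y, x);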

Error in implementation of DCT in OpenCV

Why does the following error occur when I try to implement DCT in OpenCV?
Assertion failed (type == CV_32FC1 || type == CV_64FC1) in dct
Here is the code:
#include <cv.h>
#include "opencv2/imgproc/imgproc.hpp"
#include <highgui.h>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image,dimage(image);
image = imread( "lena.jpg", 1 );
if( !image.data )
{
printf( "No image data \n" );
return -1;
}
namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
imshow( "Display Image", image );
dimage=Mat::zeros(image.rows,image.cols,CV_32F);
dct(image, dimage, DCT_INVERSE == 0 );
namedWindow( "DCT Image", CV_WINDOW_AUTOSIZE );
imshow( "DCT image", dimage );
waitKey(0);
return 0;
}
You have to convert your image from uchar to float first (and maybe back to uchar later):
// also, please use the C++ headers, not the old C ones!
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
Mat image = imread( "lena.jpg", 0 ); // grayscale
Mat fimage;
image.convertTo(fimage, CV_32F, 1.0/255); // also scale to [0..1] range (not mandatory)
// you're lucky with lena (512x512), for odd sizes, you have to
// pad it to pow2 size, or use dft() instead:
Mat dimage;
dct( fimage, dimage );
// process dimage,
// then same way back:
dct( dimage, dimage, DCT_INVERSE );
dimage.convertTo(image, CV_8U); // maybe scale back to [0..255] range (depending on your processing)
imshow("result",image);
waitKey();
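If your image does not have a DCT-friendly size, here is a hedged sketch of the padding the comment above alludes to; per the cv::dct() documentation, a good padded size is 2*getOptimalDFTSize((N+1)/2) in each dimension:
// Hedged sketch: pad fimage to an even, DFT-friendly size before calling dct().
int padRows = 2 * getOptimalDFTSize((fimage.rows + 1) / 2);
int padCols = 2 * getOptimalDFTSize((fimage.cols + 1) / 2);
Mat padded;
copyMakeBorder(fimage, padded, 0, padRows - fimage.rows,
               0, padCols - fimage.cols, BORDER_CONSTANT, Scalar::all(0));
dct(padded, dimage);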