Hello, I am trying to get a Mat with the XYZ coordinates from a disparity map that I have already generated with a cuda::StereoBM object; this is a screenshot of the disparity map I created: "disparity map"
but when I add the function cuda::reprojectImageTo3D(d_disp,d_xyz,d_Q,3,Stream); I get an error I can't understand. If I comment out this line of code no error is shown, but the moment I try to use this function I get the following error window
and the error code is:
OpenCV Error: The function/feature is not implemented (You should explicitly call download method for cuda::GpuMat object) in cv::_InputArray::getMat_, file C:\opencv\sources\modules\core\src\matrix.cpp, line 1211
Below is my code; note that most of my Mats are declared as global, so their declarations are not in this slot function
void MainWindow::on_botonMostrarDispSGBMCUDA_clicked()
{
if(map_l1.empty()&&map_l2.empty()&&map_r1.empty() &&map_r2.empty()){
FileStorage fs(xmlname,FileStorage::READ);
fs["map_l1"]>>map_l1;
fs["map_l2"]>>map_l2;
fs["map_r1"]>>map_r1;
fs["map_r2"]>>map_r2;
fs["Q"] >> Q;
fs.release();
}
Mat img1, img2;
img1 = imread("C:\\Users\\Diego\\Pictures\\calibstereo\\camleft\\photoleft_1.jpg");
img2 = imread("C:\\Users\\Diego\\Pictures\\calibstereo\\camright\\photoright_1.jpg");
cvtColor(img1, frameLGray, CV_BGR2GRAY);
cvtColor(img2, frameRGray, CV_BGR2GRAY);
remap(frameLGray,framel_rect,map_l1,map_l2,INTER_LINEAR);
remap(frameRGray,framer_rect,map_r1,map_r2,INTER_LINEAR);
bool ok1, ok2, ok3, ok4, ok5, ok6 , ok7,ok8,ok9,ok10 ;
int preFilterSize = ui->editPrefilterSizeBM->text().toInt(&ok1);
int prefilterCap = ui->editFilterCapBM->text().toInt(&ok2);
int blockSize = ui->editBlockSBM->text().toInt(&ok3);
int minDisp = ui->editMinDispBM->text().toInt(&ok4);
int numDisp = ui->editNumDispBM->text().toInt(&ok5);
int texturethresh = ui->editTextureBM->text().toInt(&ok6);
int uniqueness = ui->editUniquenesBM->text().toInt(&ok7);
int speckleWindow = ui->editSpeckleWindowBM->text().toInt(&ok8);
int speckleRange = ui->editSpeckleRangeBM->text().toInt(&ok9);
int maxDiff = ui->editMaxDiffBM->text().toInt(&ok10);
if (!ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || !ok9 || !ok10){
QMessageBox messageBox;
messageBox.setIconPixmap(QPixmap(":/icon.svg"));
messageBox.setText("One of your entries is not a valid number.");
messageBox.setWindowTitle("Error");
messageBox.exec();
}
d_left.upload(framel_rect);
d_right.upload(framer_rect);
Ptr<cuda::StereoBM> CUSBM;
CUSBM = cuda::createStereoBM(64, 5);
CUSBM->setPreFilterSize(preFilterSize);
CUSBM->setPreFilterCap(prefilterCap);
CUSBM->setBlockSize(blockSize);
CUSBM->setMinDisparity(minDisp);
CUSBM->setNumDisparities(numDisp);
CUSBM->setTextureThreshold(texturethresh);
CUSBM->setUniquenessRatio(uniqueness);
CUSBM->setSpeckleWindowSize(speckleWindow);
CUSBM->setSpeckleRange(speckleRange);
CUSBM->setDisp12MaxDiff(maxDiff);
cuda::Stream myStream = cuda::Stream::Stream();
CUSBM->compute(d_left, d_right, d_disp, myStream);
if(!d_disp.empty()){
cuda::GpuMat d_xyz(d_disp.size(), CV_32FC4);
cuda::GpuMat d_Q(4,4,CV_32F);
d_Q.upload(Q);
cuda::Stream mySecondStream = cuda::Stream::Stream();
cuda::reprojectImageTo3D(d_disp,d_xyz,d_Q,3,mySecondStream);
d_xyz.download(xyz);
}
cuda::drawColorDisp(d_disp, d_disp,numDisp); //64
d_disp.download(disp);
namedWindow("left");
namedWindow("right");
namedWindow("disp");
imshow("left", framel_rect);
imshow("right", framer_rect);
imshow("disp", disp);
//imshow("xyz", xyz);
waitKey(0);
cv::destroyAllWindows();
}
I am using Windows 8.1, QtCreator as my IDE, OpenCV 3.0.0 with cuda 7.0
Are you sure that d_disp in line cuda::reprojectImageTo3D(d_disp,d_xyz,d_Q,3,mySecondStream); is a GpuMat
Your problem may from cv::Mat class, it has no overloaded assignment operator or copy constructor which takes argument of type cv::gpu::GpuMat, so you are not able to mix them in the code.
I checked the argument list for the function from cudastereo/src/util.cpp.
The function seems to take d_disp and d_xyz as cuda::GpuMat, but the Q matrix needs to be a cv::Mat.
cuda::reprojectImageTo3D(d_disp,d_xyz,Q,3,mySecondStream); seems to work fine.
Sorry for the late answer.
The problem lies in cuda::drawColorDisp(d_disp, d_disp,numDisp); //64. You are supplying the same source and destination GpuMat, which should not be the case. You may like to have a look here for the detailed constructor arguments for drawColorDisp.
Related
I'm not familiar with opencv, but I need to use the function ‘remap’ to rectify the image.
I have an image of 960x1280, and a remap file called 'remap.bin' of 9.8 MB (which equals 960x1280x4x2, i.e. two floats per position (x, y));
Applies a generic geometrical transformation to an image.
C++: void remap(InputArray src, OutputArray dst, InputArray map1, InputArray map2, int interpolation, int borderMode=BORDER_CONSTANT, const Scalar& borderValue=Scalar())
map1 – The first map of either (x,y) points or just x values having the type CV_16SC2 , CV_32FC1 , or CV_32FC2 . See convertMaps() for details on converting a floating point representation to fixed-point for speed.
map2 – The second map of y values having the type CV_16UC1 , CV_32FC1 , or none (empty map if map1 is (x,y) points), respectively.
According to the explain,
I code like this:
// Rectifies an image using a binary remap file that stores, for every pixel,
// an (x, y) pair of 32-bit floats (rows * cols * 2 floats in total).
// Usage: prog <image_path> <mapdata_path>
int main(int argc, char* argv[]){
    if(argc != 3){
        printf("Please enter one path of image and one path of mapdata!\n");
        return 0;
    }
    std::string image_path = argv[1];
    char* remap_path = argv[2];
    cv::Mat src = cv::imread(image_path);
    if(src.empty()){
        printf("Could not read the input image!\n");
        return 1;
    }

    // BUGFIX: remap() asserts map1.type() is CV_32FC2/CV_16SC2 (with empty
    // map2) or CV_32FC1 (with a CV_32FC1 map2). The file stores two floats
    // per pixel, so read it straight into a CV_32FC2 map and pass an empty
    // map2. The original read into CV_64FC1 and passed a bogus map2 whose
    // data pointer was set to NULL, which fired the assertion.
    cv::Mat mapXY(src.rows, src.cols, CV_32FC2);
    FILE *fp = fopen(remap_path, "rb");
    if(fp == NULL){
        printf("Could not open the mapdata file!\n");
        return 1;
    }
    size_t expected = (size_t)mapXY.rows * mapXY.cols * 2;
    size_t got = fread(mapXY.data, sizeof(float), expected, fp);
    fclose(fp);
    if(got != expected){
        printf("Mapdata file is truncated!\n");
        return 1;
    }

    imshow("src", src);
    printf("remap!\n");
    cv::Mat dst; // remap() allocates dst itself
    cv::remap(src, dst, mapXY, cv::Mat(), cv::INTER_LINEAR);
    imshow("dst", dst);
    cv::waitKey(0);
    return 0;
}
But when I run the program I get this error:
OpenCV Error: Assertion failed (((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) || (map1.type() == CV_32FC1 && map2.type() == CV_32FC1)) in remap, file /home/liliming/opencv-2.4.13/modules/imgproc/src/imgwarp.cpp, line 3262 terminate called after throwing an instance of 'cv::Exception' what(): /home/liliming/opencv-2.4.13/modules/imgproc/src/imgwarp.cpp:3262: error: (-215) ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) || (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) in function remap Aborted (core dumped)
I have no idea about it.
Could anyone help me? or give some sample codes?
Thank you very much!
The documentation for OpenCV 3.1 says:
map1 The first map of either (x,y) points or just x values having the type
CV_16SC2 , CV_32FC1, or CV_32FC2.
The assert says that map1 doesn't have a type of CV_32FC2
This is because you are creating and reading it with a type of CV_64FC1.
You need to convert it to the correct type: array of two dimensions of type CV_32FC2 (two 32-bit floats per element.)
The documentation goes on to say:
See `convertMaps` for details on converting a
floating point representation to fixed-point for speed.
Documentation can be found here: https://docs.opencv.org/3.1.0/da/d54/group__imgproc__transform.html#gab75ef31ce5cdfb5c44b6da5f3b908ea4
I separate the remap table into two tables remapX, remapY.
Like this:
// Split the packed (x, y) remap file into the two separate CV_32FC1 tables
// map_x / map_y (declared elsewhere); remap() accepts that pair directly.
// On-disk layout: per pixel, x float then y float, row-major, 960 wide,
// 1280 tall.
float *data_xy = (float *)malloc(sizeof(float)*960*1280*2);
FILE *fp;
fp = fopen(remap_path, "rb");
fread(data_xy, sizeof(float), 960*1280*2, fp);
fclose(fp);
for(int y=0; y<1280; ++y){
    for(int x=0; x<960; ++x){
        map_x.at<float>(y, x) = data_xy[(y*960+x)*2];
        map_y.at<float>(y, x) = data_xy[(y*960+x)*2+1];
    }
}
free(data_xy); // BUGFIX: the original snippet leaked this buffer
And then use the
cv::remap(src, dst, map_x, map_y, cv::INTER_LINEAR);
It works well.
But I don't know how to use one parameter map1 to finish remap.
I want to implement a OCR feature.
I have collected some samples and i want to use K-Nearest to implement it.
So, i use the below code to load data and initialize KNearest
// Build the KNearest training set: one row per digit template image.
// Every sample row must have exactly the same element count and type as the
// query rows passed to find_nearest() later.
KNearest* knn = new KNearest; // BUGFIX: 'new' returns a pointer; 'KNearest knn = new KNearest;' does not compile
Mat mData, mClass;
for (int i = 0; i <= 9; ++i)
{
    Mat mImage = imread( FILENAME ); // the filename format is '%d.bmp', presenting a 15x15 image
    Mat mFloat;
    if (mImage.empty()) break; // if the file doesn't exist
    // BUGFIX: imread() returns a 3-channel BGR image by default; without this
    // conversion the flattened sample is 15*15*3 elements wide, while the
    // grayscale query samples are 15*15 wide, and find_nearest() crashes on
    // the column-count mismatch (the asker's own later fix).
    cvtColor(mImage, mImage, COLOR_BGR2GRAY);
    mImage.convertTo(mFloat, CV_32FC1);
    mData.push_back(mFloat.reshape(1, 1));
    mClass.push_back( '0' + i );
}
knn->train(mData, mClass);
Then, i call the code to find best result
// Classify each character image: flatten to a single float row (the same
// layout as the training samples) and query the k nearest neighbours.
// NOTE(review): this crashes if the training rows were built from 3-channel
// images — the query row width must match the training row width exactly.
for (vector<Mat>::iterator it = charset.begin(); it != charset.end(); ++it)
{
Mat mFloat;
it->convertTo(mFloat, CV_32FC1); // 'it' presents a 15x15 gray image
float result = knn->find_nearest(mFloat.reshape(1, 1), knn->get_max_k());
}
But, my application crashes at find_nearest.
Anyone could help me?
I seemed to find the problem...
My sample image is a converted gray image by cvtColor, but my input image isn't.
After i add
cvtColor(mImage, mImage, COLOR_BGR2GRAY);
between
if (mImage.empty()) break;
mImage.convertTo(mFloat, CV_32FC1);
find_nearest() return a value and my application is fine.
I am working on a project that uses opencv on raspberry pi. I have run into a hurdle which looks simple, but I am unable to solve the issue.
First of all, here a part of my code:
// Fragment of the capture loop; 'py', 'gray', 'diffImage' and 'img_num' are
// declared outside this block (img1/img2 here are locals).
{
gray=cvarrToMat(py);
///cvShowImage("camcvWin", py); // display only gray channel
// Odd frame: just save the current gray image to disk.
if(img_num%2 == 1)
{
cv::imwrite("/home/pi/test/Gray_2Image1.jpg",gray);
}
// Even frame: save it, re-read both saved images, and store their absolute
// difference. Both reads happen here, so img1 and img2 are always valid.
else if (img_num%2 == 0)
{
cv::imwrite( "/home/pi/test/Gray_2Image2.jpg", gray );
cv::Mat img2 = cv::imread("/home/pi/test/Gray_2Image2.jpg", 0);
cv::Mat img1 = cv::imread("/home/pi/test/Gray_2Image1.jpg", 0);
diffImage = abs(img1-img2);
imshow("diffImage", diffImage);
cv::imwrite( "/home/pi/test/Diffimage.jpg", diffImage );
}
img_num++;
This code has no problem.
However, if I edit the code to make a slight modification as follows:
// Modified fragment that triggers the "Sizes of input arguments do not match"
// error — kept as-is to illustrate the scoping bug (see notes below).
{
gray=cvarrToMat(py);
///cvShowImage("camcvWin", py); // display only gray channel
if(img_num%2 == 1)
{
cv::imwrite("/home/pi/test/Gray_2Image1.jpg",gray);
// NOTE(review): this 'img1' is local to the if-block and shadows the global
// one, so the global img1 is never assigned and stays empty.
cv::Mat img1 = cv::imread("/home/pi/test/Gray_2Image1.jpg", 0);
}
else if (img_num%2 == 0)
{
cv::imwrite( "/home/pi/test/Gray_2Image2.jpg", gray );
// NOTE(review): likewise this 'img2' is local to the else-branch.
cv::Mat img2 = cv::imread("/home/pi/test/Gray_2Image2.jpg", 0);
// Here img1 resolves to the (empty) global Mat, so img1-img2 subtracts an
// empty array from a real one -> arithm_op's size assertion fires.
diffImage = abs(img1-img2);
imshow("diffImage", diffImage);
cv::imwrite( "/home/pi/test/Diffimage.jpg", diffImage );
}
img_num++;
I get the following error:
OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in arithm_op, file /home/pi/OpenCV-2.3.1/modules/core/src/arithm.cpp, line 1253
terminate called after throwing an instance of 'cv::Exception'
what(): /home/pi/OpenCV-2.3.1/modules/core/src/arithm.cpp:1253: error: (-209) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function arithm_op
I am not really able to understand what is going on. img1 and img2 are declared globally as Mat.
This might be a simple issue, but I am still a novice. Please help me solve the issue.
Thank you for your time.
In the first block of code, img1 and img2 are declared and contain two gray valid images (since you do imread). Note that these img1 and img2 are not global variables, but local. If you have global variables with the same names, the local ones shadow them.
In the second block, you define img2 in the else and later do img1 - img2, but you don't show to us the value of img1 (in this case, the global variable). The img1 that is in the if is local to that if and is not visible in the else. Probably, you defined cv::Mat img1 in the global scope but you didn't give it any value. This would cause an error in img1-img2 because they are of different size (img1 would be empty).
Update: Something like this should fix it.
// global scope
cv::Mat img1, img2, diffImage;
// Assign into the *global* Mats (no 'cv::Mat' re-declaration), so the
// subtraction below sees both loaded images.
void yourFunction()
{
...
img1 = cv::imread("/home/pi/test/Gray_2Image1.jpg", 0);
img2 = cv::imread("/home/pi/test/Gray_2Image2.jpg", 0);
diffImage = abs(img1-img2);
...
}
Update again: you can load the images in different if-else blocks as long as their declaration is visible.
This is ok:
// global scope
cv::Mat img1, img2, diffImage;
void yourFunction()
{
...
// Assigning (not re-declaring) inside the branches updates the globals, so
// loading in different if/else blocks is fine.
if(condition)
{
img1 = cv::imread("/home/pi/test/Gray_2Image1.jpg", 0);
}
else
{
img2 = cv::imread("/home/pi/test/Gray_2Image2.jpg", 0);
}
...
diffImage = abs(img1-img2); // make sure img1 and img2 are loaded first
...
}
And this is wrong:
// global scope
cv::Mat img1, img2, diffImage;
void yourFunction()
{
...
if(condition)
{
// wrong: you are creating a local variable that shadows the global one
cv::Mat img1 = cv::imread("/home/pi/test/Gray_2Image1.jpg", 0);
}
// The local img1 above was destroyed at the closing brace; the global img1
// is still empty at this point.
...
diffImage = abs(img1-img2); // img1 is the global variable and not the local one in the previous if block!
...
}
In the else if block - img1 is not defined or empty - to give a better diagnosis, more code is required.
Also: why do you store gray to disk just in order to read it again?
I'm trying to use thresholding on my video stream but it is not working.
My video stream:
Mat *depthImage = new Mat(480, 640, CV_8UC1, Scalar::all(0));
Then i try to do the adaptive thresholding, (also doesn't work with regular thresholding)
// Capture/threshold loop from the question.
// NOTE(review): cvAdaptiveThreshold is the legacy C API and expects a CvArr*
// (IplImage*/CvMat*); passing a cv::Mat* makes cvarrToMat fail with
// "Unknown array type". The C++ cv::adaptiveThreshold on *depthImage is the
// fix (see the answer below in the thread).
for(;;){
if( wrapper->update()){
wrapper->getDisplayDepth(depthImage);
cvAdaptiveThreshold(depthImage, depthImage,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,75,10);
imshow("Depth", *depthImage);
}
int k = waitKey(25);
if(k == 27 ) exit(0); // ESC quits
}
I get this error :
OpenCV Error: Bad argument (Unknown array type) in cvarrToMat, file /Users/olivierjanssens/source/OpenCV-2.3.1/modules/core/src/matrix.cpp, line 646
terminate called throwing an exception
What am i doing wrong, i can get display and see the stream perfectly.But when i add this thresholding i get the error previously mentioned. (i'm rather new to opencv btw).
Thx in advance !
Your depthImage is a pointer to a cv::Mat, which to me seems strange...
...but, if you're using the C++ syntax then you'll want to use the C++ version of adaptiveThreshold, which deals with cv::Mat, with the following definition:
void adaptiveThreshold(InputArray src, OutputArray dst, double maxValue,
int adaptiveMethod, int thresholdType, int blockSize, double C);
which will need prefixed by cv:: if you're not using that namespace already.
For example:
// Dereference the pointer once to get a cv::Mat header, then use the C++
// adaptiveThreshold (in-place src == dst is allowed here).
Mat *depthImage; // Obtain this using your method
Mat image = *depthImage; // Obtain a regular Mat to use (doesn't copy data, just headers)
adaptiveThreshold(image, image,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,75,10);
imshow("Depth Image", *depthImage);
// OR
imshow("Depth Image", image);
The documentation on this seems incredibly spotty.
I've basically got an empty array of IplImage*s (IplImage** imageArray) and I'm calling a function to import an array of cv::Mats - I want to convert my cv::Mat into an IplImage* so I can copy it into the array.
Currently I'm trying this:
// Pseudocode from the question: convert each cv::Mat to an IplImage* and copy.
// NOTE(review): 'xyz' points at a *temporary* IplImage header that dies at the
// end of the statement; cvCopy also requires iplimagearray[i] to be an already
// allocated image of matching size/type — either can cause the segfault.
while(loop over cv::Mat array)
{
IplImage* xyz = &(IplImage(array[i]));
cvCopy(iplimagearray[i], xyz);
}
Which generates a segfault.
Also trying:
// Second attempt from the question: does not compile, because a cv::Mat*
// cannot be assigned to an IplImage* (they are unrelated types).
while(loop over cv::Mat array)
{
IplImage* xyz;
xyz = &array[i];
cvCopy(iplimagearray[i], xyz);
}
Which gives me a compile time error of:
error: cannot convert ‘cv::Mat*’ to ‘IplImage*’ in assignment
Stuck as to how I can go further and would appreciate some advice :)
cv::Mat is the new type introduce in OpenCV2.X while the IplImage* is the "legacy" image structure.
Although, cv::Mat does support the usage of IplImage in the constructor parameters, the default library does not provide function for the other way. You will need to extract the image header information manually. (Do remember that you need to allocate the IplImage structure, which is lack in your example).
// Clone via a temporary IplImage header that shares image1's data; the clone
// then owns its own pixel copy. NOTE(review): taking the address of the
// temporary is rejected by newer compilers — see the workaround further down
// in the thread.
Mat image1;
IplImage* image2=cvCloneImage(&(IplImage)image1);
Edit: If you face compilation errors, try this way:
// Workaround: allocate a real IplImage, then copy through a named stack
// header that shares image1's data (no temporary involved).
// NOTE(review): cvCopy requires matching size/depth/channels, so this as
// written assumes image1 is 8-bit 3-channel — adjust the cvCreateImage
// arguments to your data.
cv::Mat image1;
IplImage* image2;
image2 = cvCreateImage(cvSize(image1.cols,image1.rows),8,3);
IplImage ipltemp=image1;
cvCopy(&ipltemp,image2);
(you have cv::Mat old)
IplImage copy = old;
IplImage* new_image = &copy;
you work with new as an originally declared IplImage*.
Here is the recent fix for dlib users link
cv::Mat img = ...
IplImage iplImage = cvIplImage(img);
Personaly I think it's not the problem caused by type casting but a buffer overflow problem; it is this line
cvCopy(iplimagearray[i], xyz);
that I think will cause segment fault, I suggest that you confirm the array iplimagearray[i] have enough size of buffer to receive copyed data
According to OpenCV cheat-sheet this can be done as follows:
IplImage* oldC0 = cvCreateImage(cvSize(320,240),16,1);
Mat newC = cvarrToMat(oldC0);
The cv::cvarrToMat function takes care of the conversion issues.
In case of gray image, I am using this function and it works fine! however you must take care about the function features ;)
// Convert a CvMat to an IplImage via cvConvertScale (gray data).
// The destination must match the source in size and channel count.
CvMat * src= cvCreateMat(300,300,CV_32FC1);
// BUGFIX: the original passed cvGetSize(dist) — reading 'dist' before it was
// initialized — and requested 3 channels although src is single-channel
// (cvConvertScale requires matching channel counts).
IplImage *dist= cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);
cvConvertScale(src, dist, 1, 0);
One problem might be: when using external ipl and defining HAVE_IPL in your project, the ctor
// Quoted from OpenCV's modules/core/src/matrix.cpp: the IplImage-from-Mat
// constructor only initializes a header and points it at m's data — no pixel
// copy is made, so the Mat must outlive the IplImage.
_IplImage::_IplImage(const cv::Mat& m)
{
CV_Assert( m.dims <= 2 );
cvInitImageHeader(this, m.size(), cvIplDepth(m.flags), m.channels());
cvSetData(this, m.data, (int)m.step[0]);
}
found in ../OpenCV/modules/core/src/matrix.cpp is not used/instanciated and conversion fails.
You may reimplement it in a way similar to :
// Builds an IplImage header over the Mat's data — no pixels are copied, so
// 'm' must outlive every use of 'img'. Only 1- or 2-dimensional Mats are
// supported (the assert below).
IplImage& FromMat(IplImage& img, const cv::Mat& m)
{
CV_Assert(m.dims <= 2);
cvInitImageHeader(&img, m.size(), cvIplDepth(m.flags), m.channels());
cvSetData(&img, m.data, (int)m.step[0]);
return img;
}
// Usage: fill a stack-allocated header from an existing cv::Mat.
IplImage img;
FromMat(img,myMat);