I am trying to get OpenCV 2.4.5 to recognize a checkerboard pattern from my webcam. I couldn't get that working, so I decided to try to get it working just using a "perfect" image:
but it still won't work--patternfound returns false every time. Does anyone have any idea what I'm doing wrong?
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

int main(){
    Size patternsize(8,8); //number of centers
    Mat frame = imread("perfect.png"); //source image
    vector<Point2f> centers; //this will be filled by the detected centers

    bool patternfound = findChessboardCorners(frame, patternsize, centers);
    cout << patternfound << endl;
    drawChessboardCorners(frame, patternsize, Mat(centers), patternfound);

    namedWindow("window");
    while(1){
        imshow("window", frame);
        waitKey(33);
    }
}
Through trial and error, I realized that patternsize should be 7x7 since it is counting internal corners. This parameter has to be exact--8x8 won't work, but neither will anything less than 7x7.
Instead of using
Size patternsize(8,8);
use
Size patternsize(7,7);
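For reference, a minimal sketch of the corrected detection (the flags are an optional addition of mine; CALIB_CB_ADAPTIVE_THRESH and CALIB_CB_NORMALIZE_IMAGE tend to help with real webcam frames):

Size patternsize(7,7); //interior corners of a board with 8x8 squares
vector<Point2f> corners;
bool found = findChessboardCorners(frame, patternsize, corners,
                                   CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if(found)
    drawChessboardCorners(frame, patternsize, Mat(corners), found);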
The width and height of the chessboard can't be the same, i.e. the pattern needs to be asymmetric. This might be the source of your problem.
Here is a very good tutorial about camera calibration with OpenCV.
Just below is the code I use for my calibration (tested and fully functional). Note, HOWEVER, that I call it from a processing thread of my own; you should call it from your processing loop or whatever you are using to catch your frames:
void MyCalibration::execute(IplImage* in, bool debug)
{
    const int CHESSBOARD_WIDTH = 8;
    const int CHESSBOARD_HEIGHT = 5;
    const int CHESSBOARD_INTERSECTION_COUNT = CHESSBOARD_WIDTH * CHESSBOARD_HEIGHT;
    //const bool DO_CALIBRATION = ((BoolProperty*)getProperty("DoCalibration"))->getValue();

    if(in->nChannels == 1)
        cvCopy(in, gray_image);
    else
        cvCvtColor(in, gray_image, CV_BGR2GRAY);

    int corner_count;
    CvPoint2D32f* corners = new CvPoint2D32f[CHESSBOARD_INTERSECTION_COUNT];
    int wasChessboardFound = cvFindChessboardCorners(gray_image, cvSize(CHESSBOARD_WIDTH, CHESSBOARD_HEIGHT), corners, &corner_count);
    if(wasChessboardFound) {
        // Refine the found corners
        cvFindCornerSubPix(gray_image, corners, corner_count, cvSize(5, 5), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1));
        // Add the corners to the array of calibration points
        calibrationPoints.push_back(corners);
        cvDrawChessboardCorners(in, cvSize(CHESSBOARD_WIDTH, CHESSBOARD_HEIGHT), corners, corner_count, wasChessboardFound);
    }
}
Just in case you wondered about the class members, here is my class (IplImage was still around at the time I wrote it):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv/cv.h>

class MyCalibration
{
private:
    std::vector<CvPoint2D32f*> calibrationPoints;
    IplImage *gray_image;

public:
    MyCalibration(IplImage* in);
    void execute(IplImage* in, bool debug=false);
    ~MyCalibration(void);
};
And finally, the constructor:
MyCalibration::MyCalibration(IplImage* in)
{
    gray_image = cvCreateImage(cvSize(in->width, in->height), 8, 1);
}
I am having a bit of trouble trying to get the OpenCV face detection to work in Qt with my Basler cam; I have tried many different approaches, using many different sample codes online, and I just can't seem to get it to work at all. In addition, the attempts I have made have lowered my frame rate.
The code I use to capture video with the Basler cam works great; I'm just having trouble implementing the face detection part. I will paste the code I have so far for the camera and OpenCV below. The code does get me a few red boxes appearing now and then, but it isn't stable. I am also getting this error:
Failed to load OpenCL runtime
I'm not sure what I am doing wrong. Also, is there a way to implement the face detection without lowering the frame rate, as it is already slow?
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <opencv2/opencv.hpp>
#include <pylon/PylonIncludes.h>
//#include <pylon/PylonGUI.h>
//#ifdef PYLON_WIN_BUILD
//#include <pylon/PylonGUI.h>
//#endif
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include<time.h>
#include<stdlib.h>
using namespace cv;
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
static const uint32_t c_countOfImagesToGrab = 100;
cv::CascadeClassifier faceCade;
String faceCascadeName = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
String FaceDetectWindow = "Face Detector Window";
String FaceDetectGrayWindow = "Face Detector Gray Window";
size_t i;
vector<Rect> faces;
cv::Mat camFrames, grayFrames;
int main()
{
// The exit code of the sample application.
int exitCode = 0;
// Automagically call PylonInitialize and PylonTerminate to ensure
// the pylon runtime system is initialized during the lifetime of this object.
Pylon::PylonAutoInitTerm autoInitTerm;
faceCade.load( faceCascadeName );
CGrabResultPtr ptrGrabResult;
namedWindow("CV_Image",WINDOW_AUTOSIZE);
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
camera.Open();
GenApi::CIntegerPtr width(camera.GetNodeMap().GetNode("Width"));
GenApi::CIntegerPtr height(camera.GetNodeMap().GetNode("Height"));
Mat cv_img(width->GetValue(), height->GetValue(), CV_8UC3);
camera.StartGrabbing();
CPylonImage image;
CImageFormatConverter fc;
fc.OutputPixelFormat = PixelType_BGR8packed;
while(camera.IsGrabbing()){
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
if (ptrGrabResult->GrabSucceeded()){
fc.Convert(image, ptrGrabResult);
cv_img = cv::Mat(ptrGrabResult->GetHeight(), ptrGrabResult->GetWidth(), CV_8UC3,(uint8_t*)image.GetBuffer());
//cvtColor(cv_img, grayFrames, cv::COLOR_BGR2GRAY);
//equalizeHist(grayFrames, grayFrames);
faceCade.detectMultiScale(cv_img, faces, 1.1, 2, 0, Size(160, 160));
for (int i = 0; i < faces.size(); i++)
{
//Mat faceROI = grayFrames(faces[i]);
rectangle(cv_img, Rect(faces[i].x - 25,faces[i].y - 25,faces[i].width + 35 ,faces[i].height + 35), Scalar(0, 0, 255), 1, 1, 0);
Point center(faces[i].x + faces[i].width * 0.5,faces[i].y + faces[i].height * 0.5);
}
imshow("CV_Image",cv_img);
//imshow("FaceDetectGrayWindow", grayFrames);
waitKey(1);
if(waitKey(30)==27){
camera.StopGrabbing();
}
}
}
}
}
Thank you
I'm not quite sure about this, but the detectMultiScale function works with images of type CV_8U, and as far as I can see you are using CV_8UC3. As I understand it, CV_8U is an 8-bit pixel with one channel, while CV_8UC3 is also 8-bit but with three channels. You need to convert your image to grayscale; I saw you did that, but you commented it out!
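Here is a minimal sketch of what the detection step could look like with the grayscale conversion re-enabled (this just reuses your own commented-out grayFrames lines; equalizeHist is optional but usually helps):

cvtColor(cv_img, grayFrames, cv::COLOR_BGR2GRAY);
equalizeHist(grayFrames, grayFrames);
faceCade.detectMultiScale(grayFrames, faces, 1.1, 2, 0, Size(160, 160));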
Have a look at this link: opencv_face_detection.
Maybe that will fix your problem. Also, some people advise installing OpenCL:
sudo apt-get install ocl-icd-opencl-dev
I've been able to find/create some code that allows me to open the depth and color streams from an OpenNI-enabled camera (an Orbbec Astra S, to be specific). However, unlike the standard OpenNI Viewer, my stream displays the closest points as darkest and the further points as lighter colors.
How would I be able to change this around so that the points closest to the camera are shown as lighter (whites) and those further away are shown as dark?
#include "stdafx.h"
#include "OpenNI.h"
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include <array>
// OpenCV Header
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;
using namespace openni;
//Recorder
int main(int argc, char** argv)
{
Device device;
VideoStream DepthStream,ColorStream;
VideoFrameRef DepthFrameRead,ColorFrameRead;
const char* deviceURI = openni::ANY_DEVICE;
if (argc > 1)
{
deviceURI = argv[1];
}
Status result = STATUS_OK;
result = OpenNI::initialize();
result = device.open(deviceURI);
result = DepthStream.create(device, openni::SENSOR_DEPTH);
result = DepthStream.start();
result = ColorStream.create(device, openni::SENSOR_COLOR);
result = ColorStream.start();
device.setImageRegistrationMode(ImageRegistrationMode::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
int framenum = 0;
Mat frame;
while (true)
{
if (DepthStream.readFrame(&DepthFrameRead) == STATUS_OK)
{
cv::Mat cDepthImg(DepthFrameRead.getHeight(), DepthFrameRead.getWidth(),
CV_16UC1, (void*)DepthFrameRead.getData());
cv::Mat c8BitDepth;
cDepthImg.convertTo(c8BitDepth, CV_8U, 255.0 / (8000));
cv::imshow("Orbbec", c8BitDepth);
}
if (ColorStream.readFrame(&ColorFrameRead) == STATUS_OK)
{
ColorStream.readFrame(&ColorFrameRead);
const openni::RGB888Pixel* imageBuffer = (const openni::RGB888Pixel*)ColorFrameRead.getData();
frame.create(ColorFrameRead.getHeight(), ColorFrameRead.getWidth(), CV_8UC3);
memcpy(frame.data, imageBuffer, 3 * ColorFrameRead.getHeight()*ColorFrameRead.getWidth() * sizeof(uint8_t));
cv::cvtColor(frame, frame, CV_BGR2RGB); //this will put colors right
cv::imshow("frame", frame);
framenum++;
}
if (cvWaitKey(30) >= 0)
{
break;
}
}
DepthStream.destroy();
ColorStream.destroy();
device.close();
OpenNI::shutdown();
return 0;
}
-------------------EDIT-------------------
These images are originally read in as 16-bit images, which look like this (note how dark it is):
But after converting to an 8-bit image, they look as follows:
The image you attached shows that the sensor captures the data by directly encoding the distance (in mm) of the object in the depth value. This is quite normal for such depth cameras. What we want for display instead is higher values for objects closer to the sensor (the exact opposite of the depth-image encoding, but useful for displaying).
One can devise a simple depth-adjustment function if the operating range of the sensor is known. For the Astra S, the operating range is 0.35m to 2.5m, so what we want is a function that maps 0.35m -> 2.5m and 2.5m -> 0.35m.
This is pretty straightforward; the only caveat is that you have to take care of invalid depth pixels (depth == 0) yourself. Here is the code for doing this:
#include "include\opencv\cv.h"
#include "include\opencv\highgui.h"
cv::Mat adjustDepth(const cv::Mat& inImage)
{
// from https://orbbec3d.com/product-astra/
// Astra S has a depth in the range 0.35m to 2.5m
int maxDepth = 2500;
int minDepth = 350; // in mm
cv::Mat retImage = inImage;
for(int j = 0; j < retImage.rows; j++)
for(int i = 0; i < retImage.cols; i++)
{
if(retImage.at<ushort>(j, i))
retImage.at<ushort>(j, i) = maxDepth - (retImage.at<ushort>(j, i) - minDepth);
}
return retImage;
}
int main()
{
    cv::Mat inImage;
    inImage = cv::imread("testImage.png", CV_LOAD_IMAGE_UNCHANGED);

    cv::Mat adjustedDepth = adjustDepth(inImage);

    cv::Mat dispImage;
    adjustedDepth.convertTo(dispImage, CV_8UC1, 255.0f/2500.0f);

    cv::imshow(" ", dispImage);
    //cv::imwrite("testImageAdjusted.png", adjustedDepth);
    //cv::imwrite("savedImage.png", dispImage);
    cv::waitKey(0);
    return 0;
}
Here is the output renormalized depth image:
If one wants to explore further what happens in such a readjustment function, one can have a look at the histograms of the image both before and after applying the adjustment.
Histogram for input depth image (D):
Histogram for negative input depth image (-D):
Histogram for (maxVal-(D-minVal)):
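If you want to compute these histograms yourself, here is a minimal sketch using cv::calcHist; it assumes the 16-bit depth image is in a cv::Mat named depth and that the depth values stay below 8000 mm:

int histSize = 256;                 // number of bins
float range[] = { 0.0f, 8000.0f }; // value range, in mm (assumed sensor maximum)
const float* histRange = { range };
int channels[] = { 0 };
cv::Mat hist;
cv::calcHist(&depth, 1, channels, cv::Mat(), hist, 1, &histSize, &histRange);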
Hope this answers your question.
I'm trying to test some motion estimation in Visual Studio 2013, using OpenCV v3.0 (which is probably my first mistake!). I got an unhandled exception trying to use createOptFlow_DualTVL1() and createOptFlow_Farneback(), and then, for testing, tried cv::accumulate(), which threw the same exception.
It seems that OpenCV can't write to the Mat object that I'm passing to these functions. I can't read the actual cv::Exception because I don't have the PDB files, since I didn't compile this version myself. That might be my next stop, but before I do, I figured I'd see if anyone has seen this behaviour before.
Here's a minimal working example:
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/videoio/videoio.hpp>
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <string>
#include <regex>

using namespace std;
using namespace cv;

int main(int argc, const char** argv)
{
    VideoCapture captureDevice;
    std::string videoname = "example.mp4";
    captureDevice.open(videoname);

    //setup image files used in the capture process
    Mat currFrame, dst;
    captureDevice >> currFrame;

    accumulate(currFrame, dst, cv::noArray());
    imshow("outputCapture", dst);

    //pause for 33ms
    waitKey(33);
    return 0;
}
dst should be of the same size as currFrame and of type CV_32FC3, and it needs to be initialized before accumulate can add to it.
So, add this line of code before calling accumulate, so the accumulator starts out zero-filled:
dst = Mat::zeros(currFrame.size(), CV_32FC3);
Since dst is of float type, you will need to convert it to uchar to display it. For that, convert as shown below:
Mat dst_disp;
dst.convertTo(dst_disp, CV_8UC3);
imshow("outputCapture", dst_disp);
Additionally, as you accumulate more frames in dst, you will need to normalize by the number of frames (say, N) accumulated in dst: simply divide dst by N, then convert the result to CV_8UC3 and display it. For example, if you accumulated 1000 frames in dst, do as shown below:
// Accumulate 1000 frames
for(int i = 0; i < 1000; i++)
    accumulate(currFrame, dst, cv::noArray());

// Normalize
dst = dst / 1000;

// Display the frame
Mat dst_disp;
dst.convertTo(dst_disp, CV_8UC3);
imshow("outputCapture", dst_disp);
Otherwise, you might get an all-white image.
UPDATE
From @berak's comment below.
For normalization, simply use
dst.convertTo(dst_disp, CV_8UC3, 1.0/N);
where N in the example above would be 1000.
Hey, can someone please help me find the solution to this error:
Assertion failed in unknown function ../../ocv/opencv/modules/imgproc/src/imgwarp.cpp
I am trying to compile the code from this link: http://ipwithopencv.googlecode.com/svn/trunk/ThinPlateSpline/ThinPlateSpline/
I'm using two classes and the main. Here is my main code:
#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include "CThinPlateSpline.h"
#include "iostream"
using namespace std;
int _tmain(int argc, _TCHAR* argv[])
{
// load a nice picture
cv::Mat img = cv::imread("lena.jpg");
// generate some generic points
// usually you would use a interest point detector such as SURF or SIFT
std::vector<cv::Point> iP, iiP;
// push some points into the vector for the source image
iP.push_back(cv::Point(50,50));
iP.push_back(cv::Point(400,50));
iP.push_back(cv::Point(50,400));
iP.push_back(cv::Point(400,400));
iP.push_back(cv::Point(256,256));
iP.push_back(cv::Point(150,256));
// push some point into the vector for the dst image
iiP.push_back(cv::Point(70,70));
iiP.push_back(cv::Point(430,60));
iiP.push_back(cv::Point(60,410));
iiP.push_back(cv::Point(430,420));
iiP.push_back(cv::Point(220,280));
iiP.push_back(cv::Point(180,240));
// create thin plate spline object and put the vectors into the constructor
CThinPlateSpline tps(iP,iiP);
// warp the image to dst
Mat dst;
tps.warpImage(img,dst,0.01,INTER_CUBIC,BACK_WARP);
// show images
cv::imshow("original",img);
cv::imshow("distorted",dst);
//cv::waitKey(0);
//Sleep(5);
cv::waitKey(5000);
return 0;
}
Here is the warpImage method:
void CThinPlateSpline::warpImage(const Mat& src, Mat& dst, float lambda, const int interpolation, const TPS_INTERPOLATION tpsInter)
{
    Size size = src.size();
    dst = Mat(size, src.type());

    // only compute the coefficients anew if they weren't already computed,
    // or if there have been changes to the points
    if(tpsInter == BACK_WARP && !FLAG_COEFFS_BACK_WARP_SET)
    {
        computeSplineCoeffs(pSrc, pDst, lambda, tpsInter);
    }
    else if(tpsInter == FORWARD_WARP && !FLAG_COEFFS_FORWARD_WARP_SET)
    {
        computeSplineCoeffs(pSrc, pDst, lambda, tpsInter);
    }

    computeMaps(size, mapx, mapy);
    remap(src, dst, mapx, mapy, interpolation);
}
There are two other classes from the link as well, CThinPlateSpline.cpp and CThinPlateSpline.h. Please, I really need help, and sorry for my bad English.
"Assertion failed" in OpenCV usually happens when your input Mat has incorrect size, e.g. zero (empty Mat) or less than the requirement of the function.
Would you mind to check img after cv::Mat img = cv::imread("lena.jpg");
by img.empty() or display img by imshow?
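For example, a minimal check could look like this (it just guards your existing imread call, assuming lena.jpg should be in the working directory):

cv::Mat img = cv::imread("lena.jpg");
if (img.empty())
{
    std::cout << "Could not load lena.jpg; check the working directory" << std::endl;
    return -1;
}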
I can run this on my Mac with OpenCV 2.4.7.
The resulting image is a twisted Lena.
I just changed two places, as follows.
First of all, I changed the includes in main.cpp to:
#include <opencv2/opencv.hpp>
#include "CThinPlateSpline.h"
#include <iostream>
#include <vector>
And the includes in CThinPlateSpline.cpp to:
#include <vector>
#include <opencv2/opencv.hpp>
#include "CThinPlateSpline.h"
I use only three files: main.cpp, CThinPlateSpline.cpp, and CThinPlateSpline.h.
The libraries linked are:
-lopencv_imgproc -lopencv_core -lopencv_highgui
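For completeness, a build command along these lines should work on Mac or Linux (the output name tps is my own choice, and it assumes the OpenCV headers and libraries are on the default search paths):

g++ main.cpp CThinPlateSpline.cpp -o tps -lopencv_imgproc -lopencv_core -lopencv_highgui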
Verify that the image was loaded correctly. In my case (OpenCV for Python), my image was not loaded at all.
Try to verify exactly what image you are passing to the function.
I am currently learning from the OpenCV cookbook (OpenCV 2 Computer Vision Application Programming Cookbook): Chapter 5, Segmenting images using watersheds, page 131.
Here is my main code:
#include "opencv2/opencv.hpp"
#include <string>
using namespace cv;
using namespace std;
class WatershedSegmenter {
private:
cv::Mat markers;
public:
void setMarkers(const cv::Mat& markerImage){
markerImage.convertTo(markers, CV_32S);
}
cv::Mat process(const cv::Mat &image){
cv::watershed(image,markers);
return markers;
}
};
int main ()
{
cv::Mat image = cv::imread("/Users/yaozhongsong/Pictures/IMG_1648.JPG");
// Eliminate noise and smaller objects
cv::Mat fg;
cv::erode(binary,fg,cv::Mat(),cv::Point(-1,-1),6);
// Identify image pixels without objects
cv::Mat bg;
cv::dilate(binary,bg,cv::Mat(),cv::Point(-1,-1),6);
cv::threshold(bg,bg,1,128,cv::THRESH_BINARY_INV);
// Create markers image
cv::Mat markers(binary.size(),CV_8U,cv::Scalar(0));
markers= fg+bg;
// Create watershed segmentation object
WatershedSegmenter segmenter;
// Set markers and process
segmenter.setMarkers(markers);
segmenter.process(image);
imshow("a",image);
std::cout<<".";
cv::waitKey(0);
}
However, it doesn't work. How could I initialize a binary image? And how could I make this segmentation code work?
I am not very clear about this part of the book.
Thanks in advance!
There are a couple of things that should be mentioned about your code:
Watershed expects the input and the output image to have the same size;
You probably want to get rid of the const parameters in the methods;
Notice that the result of watershed is actually markers and not image, as your code suggests. Because of that, you need to grab the return value of process()!
This is your code, with the fixes above:
// Usage: ./app input.jpg
#include "opencv2/opencv.hpp"
#include <string>

using namespace cv;
using namespace std;

class WatershedSegmenter{
private:
    cv::Mat markers;
public:
    void setMarkers(cv::Mat& markerImage)
    {
        markerImage.convertTo(markers, CV_32S);
    }

    cv::Mat process(cv::Mat &image)
    {
        cv::watershed(image, markers);
        markers.convertTo(markers, CV_8U);
        return markers;
    }
};

int main(int argc, char* argv[])
{
    cv::Mat image = cv::imread(argv[1]);
    cv::Mat binary;// = cv::imread(argv[2], 0);
    cv::cvtColor(image, binary, CV_BGR2GRAY);
    cv::threshold(binary, binary, 100, 255, THRESH_BINARY);

    imshow("originalimage", image);
    imshow("originalbinary", binary);

    // Eliminate noise and smaller objects
    cv::Mat fg;
    cv::erode(binary, fg, cv::Mat(), cv::Point(-1,-1), 2);
    imshow("fg", fg);

    // Identify image pixels without objects
    cv::Mat bg;
    cv::dilate(binary, bg, cv::Mat(), cv::Point(-1,-1), 3);
    cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);
    imshow("bg", bg);

    // Create markers image
    cv::Mat markers(binary.size(), CV_8U, cv::Scalar(0));
    markers = fg + bg;
    imshow("markers", markers);

    // Create watershed segmentation object
    WatershedSegmenter segmenter;
    segmenter.setMarkers(markers);

    cv::Mat result = segmenter.process(image);
    result.convertTo(result, CV_8U);
    imshow("final_result", result);

    cv::waitKey(0);
    return 0;
}
I took the liberty of using Abid's input image for testing and this is what I got:
Below is a simplified version of your code, and it works fine for me. Check it out:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
int main ()
{
Mat image = imread("sofwatershed.jpg");
Mat binary = imread("sofwsthresh.png",0);
// Eliminate noise and smaller objects
Mat fg;
erode(binary,fg,Mat(),Point(-1,-1),2);
// Identify image pixels without objects
Mat bg;
dilate(binary,bg,Mat(),Point(-1,-1),3);
threshold(bg,bg,1,128,THRESH_BINARY_INV);
// Create markers image
Mat markers(binary.size(),CV_8U,Scalar(0));
markers= fg+bg;
markers.convertTo(markers, CV_32S);
watershed(image,markers);
markers.convertTo(markers,CV_8U);
imshow("a",markers);
waitKey(0);
}
Below is my input image:
Below is my output image:
See the code explanation here: Simple watershed Sample in OpenCV
I had the same problem as you, following the exact same code sample from the cookbook (great book, by the way).
For context, I was coding under Visual Studio 2013 and OpenCV 2.4.8. After a lot of searching and no solutions, I decided to change the IDE.
It's still Visual Studio, BUT it's 2010! And boom, it works!
Be careful about how you configure Visual Studio with OpenCV. Here's a great tutorial for the installation here.
Good day to all