Heap corruption exception in knnMatch opencv - c++

I'm getting a heap corruption exception while running this code, which I found on the internet. I'm a beginner and I don't know how to solve the problem.
the exception occurred in the following line:
matcher.knnMatch(desObject, des_image, matches, 2);
Could someone tell me what should I replace this line of code with?
The complete source code:
// NOTE(review): this fragment appears to be the body of a main() whose
// header is not visible in this chunk.
// Enable the nonfree module (required for SURF in OpenCV 2.x).
cv::initModule_nonfree();
// turn performance analysis functions on if testing = true
bool testing = false;
double t; // timing variable
// load training image
Mat object = imread("boo2.png", CV_LOAD_IMAGE_GRAYSCALE);
if (!object.data) {
cout << "Can't open image";
return -1;
}
namedWindow("Good Matches", CV_WINDOW_AUTOSIZE);
// SURF Detector, and descriptor parameters
int minHess = 3000;
// Keypoints and descriptors for the training image (Object) and, later,
// for each camera frame (Image).
vector<KeyPoint> kpObject, kpImage;
Mat desObject, desImage;
// Performance measures calculations for report: times SURF detection and
// descriptor extraction at a range of Hessian thresholds and writes the
// results to a CSV file, then displays the detected keypoints.
if (testing) {
  cout << object.rows << " " << object.cols << endl;

  // Calculate and display the integral image of the training image.
  Mat iObject;
  integral(object, iObject);
  imshow("Good Matches", iObject);
  // NOTE(review): this overwrites the training image file on disk with its
  // integral image -- confirm that is intended.
  imwrite("boo2.png", iObject);
  cvWaitKey(0);

  // Measure interest-point count and detection/extraction time as a
  // function of the SURF Hessian threshold.
  int minHessVector[] = {100, 500, 1000, 1500, 2000, 2500, 3000,
                         3500, 4000, 4500, 5000, 5500, 6000, 6500,
                         7000, 7500, 8000, 8500, 9000, 9500, 10000};
  // BUG FIX: derive the bound from the array; the original "i < 20"
  // silently skipped the last threshold (10000).
  const int numThresholds =
      (int)(sizeof(minHessVector) / sizeof(minHessVector[0]));
  int minH;
  std::ofstream file;
  file.open("C:/School/Image Processing/TimingC.csv", std::ofstream::out);
  for (int i = 0; i < numThresholds; i++) {
    minH = minHessVector[i];
    t = (double)getTickCount();
    SurfFeatureDetector detector(minH);
    detector.detect(object, kpObject);
    t = ((double)getTickCount() - t) / getTickFrequency();
    // BUG FIX: log the threshold actually used this iteration (minH); the
    // original wrote the constant minHess (3000) on every row.
    file << minH << "," << kpObject.size() << "," << t << ",";
    cout << t << " " << kpObject.size() << " " << desObject.size() << endl;
    t = (double)getTickCount();
    SurfDescriptorExtractor extractor;
    extractor.compute(object, kpObject, desObject);
    t = ((double)getTickCount() - t) / getTickFrequency();
    file << t << endl;
  }
  file.close();

  // Display keypoints (from the last threshold tested) on the training
  // image, labelled with their octave.
  // NOTE(review): Mat assignment shares pixel data, so drawing below also
  // modifies `object` itself.
  Mat interestPointObject = object;
  for (unsigned int i = 0; i < kpObject.size(); i++) {
    if (kpObject[i].octave) {
      circle(interestPointObject, kpObject[i].pt, kpObject[i].size, 0);
      string octaveS;
      switch (kpObject[i].octave) {
        case 0:
          octaveS = "0";
          break;
        case 1:
          octaveS = "1";
          break;
        case 2:
          octaveS = "2";
          break;
        default:
          break;
      }
      putText(interestPointObject, octaveS, kpObject[i].pt,
              FONT_HERSHEY_COMPLEX_SMALL, 1, cvScalar(0, 0, 250), 1, CV_AA);
    }
  }
  imshow("Good Matches", interestPointObject);
  imwrite("C:/School/Image Processing/bookIP2.jpg", interestPointObject);
  cvWaitKey(0);
}
// SURF Detector, and descriptor parameters, match object
// initialization
// Build the production detector/extractor (Hessian threshold 2000) and
// compute keypoints + descriptors for the training image once, up front.
minHess = 2000;
SurfFeatureDetector detector(minHess);
detector.detect(object, kpObject);
SurfDescriptorExtractor extractor;
extractor.compute(object, kpObject, desObject);
// FLANN-based matcher reused against every camera frame below.
FlannBasedMatcher matcher;
// Initialize video and display window
VideoCapture cap(1); // camera 1 is webcam
if (!cap.isOpened()) return -1;
// Object corner points for plotting box
vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(object.cols, 0);
obj_corners[2] = cvPoint(object.cols, object.rows);
obj_corners[3] = cvPoint(0, object.rows);
// video loop
char escapeKey = 'k';
double frameCount = 0;
// Lowe ratio-test threshold for the nearest-neighbour match, and the
// minimum number of good matches needed to declare the object found.
float thresholdMatchingNN = 0.7;
unsigned int thresholdGoodMatches = 4;
unsigned int thresholdGoodMatchesV[] = {4, 5, 6, 7, 8, 9, 10};
// For each candidate good-match threshold, run the detection loop for ~10
// frames, drawing matches and (when enough good matches exist) the
// homography-projected outline of the training object.
for (int j = 0; j < 7; j++) {
  thresholdGoodMatches = thresholdGoodMatchesV[j];
  cout << thresholdGoodMatches << endl;
  if (true) {
    t = (double)getTickCount();
  }
  while (escapeKey != 'q') {
    frameCount++;
    Mat frame;
    Mat image;
    cap >> frame;
    cvtColor(frame, image, CV_RGB2GRAY);
    Mat des_image, img_matches, H;
    vector<KeyPoint> kp_image;
    vector<vector<DMatch>> matches;
    vector<DMatch> good_matches;
    vector<Point2f> obj;
    vector<Point2f> scene;
    vector<Point2f> scene_corners(4);
    detector.detect(image, kp_image);
    extractor.compute(image, kp_image, des_image);
    // BUG FIX (heap corruption): knnMatch must not be fed an empty
    // descriptor matrix -- skip frames where nothing was detected.
    if (desObject.empty() || des_image.empty()) {
      imshow("Good Matches", image);
      escapeKey = cvWaitKey(10);
      if (frameCount > 10) escapeKey = 'q';
      continue;
    }
    matcher.knnMatch(desObject, des_image, matches, 2);
    // BUG FIX (heap corruption): test matches[i].size() BEFORE indexing
    // matches[i][0] / matches[i][1]. The original read both elements first
    // and only then checked the size, reading past the end of the inner
    // vector whenever knnMatch returned fewer than 2 neighbours.
    for (int i = 0; i < (int)matches.size(); i++) {
      if (matches[i].size() >= 2 &&
          matches[i][0].distance <
              thresholdMatchingNN * matches[i][1].distance) {
        good_matches.push_back(matches[i][0]);
      }
    }
    // Draw only "good" matches
    drawMatches(object, kpObject, image, kp_image, good_matches, img_matches,
                Scalar::all(-1), Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    if (good_matches.size() >= thresholdGoodMatches) {
      // Display that the object is found
      putText(img_matches, "Object Found", cvPoint(10, 50),
              FONT_HERSHEY_COMPLEX_SMALL, 2, cvScalar(0, 0, 250), 1, CV_AA);
      for (unsigned int i = 0; i < good_matches.size(); i++) {
        // Get the keypoints from the good matches
        obj.push_back(kpObject[good_matches[i].queryIdx].pt);
        scene.push_back(kp_image[good_matches[i].trainIdx].pt);
      }
      // Map the training-image corners into the scene and draw the box.
      H = findHomography(obj, scene, CV_RANSAC);
      perspectiveTransform(obj_corners, scene_corners, H);
      line(img_matches, scene_corners[0] + Point2f(object.cols, 0),
           scene_corners[1] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
      line(img_matches, scene_corners[1] + Point2f(object.cols, 0),
           scene_corners[2] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
      line(img_matches, scene_corners[2] + Point2f(object.cols, 0),
           scene_corners[3] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
      line(img_matches, scene_corners[3] + Point2f(object.cols, 0),
           scene_corners[0] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
    } else {
      putText(img_matches, "", cvPoint(10, 50), FONT_HERSHEY_COMPLEX_SMALL, 3,
              cvScalar(0, 0, 250), 1, CV_AA);
    }
    // Show detected matches
    imshow("Good Matches", img_matches);
    escapeKey = cvWaitKey(10);
    if (frameCount > 10) escapeKey = 'q';
  }
  // average frames per second
  if (true) {
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << t << " " << frameCount / t << endl;
    cvWaitKey(0);
  }
  frameCount = 0;
  escapeKey = 'a';
}
// Release camera and exit
cap.release();

Related

Add a button with functionality in c++ using Visual Studio

I am a newbie to c++ and the IDE I am using is Visual Studio '22. I have written a code to detect a face (eyes and mouth too) and save the roi to a folder on the pc. Now what it does can be thought of as an auto-capture of the roi as soon as the face is detected.
I now want to create the function for "force capture", for which I will need to have a button and add pretty much the same code I wrote for auto-capture to give it functionality.
How do I add the button and make it perform its task?
I found related answers but they use Qt not sure how to apply that here.
Thanks a ton! Really need help.
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
//(1) include face header
#include "opencv2/face.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
//(2) include face header
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <opencv2/imgproc/types_c.h>
//file handling
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
using namespace ml;
using namespace cv::face;
//(3) Global variables shared between main() and process().
Ptr<Facemark> facemark; //mark detection (loaded in main from lbfmodel.yml)
// Haar cascades for face/mouth/eye detection.
// NOTE(review): eye1 is declared but never loaded in the visible code; its
// only use is commented out.
CascadeClassifier faceDetector, mouth, eye, eye1; //face detection
string name, filename; // user-entered name and last saved crop path
// Detect faces in `img`, outline them on `imgcol`, fit facemark landmarks
// on a 5x-scaled crop of the (last) detected face, and show that crop in
// its own window. Prints a message when no face is found.
// img:    grayscale frame used for cascade detection
// imgcol: colour frame that rectangles are drawn onto
void process(Mat img, Mat imgcol) {
  vector<Rect> detections;
  faceDetector.detectMultiScale(img, detections);
  if (detections.empty()) {
    cout << "Faces not detected." << endl;
    return;
  }
  Mat zoomedFace;
  for (size_t idx = 0; idx < detections.size(); idx++) {
    cv::rectangle(imgcol, detections[idx], Scalar(255, 0, 0));
    zoomedFace = imgcol(detections[idx]);
    resize(zoomedFace, zoomedFace,
           Size(zoomedFace.cols * 5, zoomedFace.rows * 5));
    // Rebase the rect to the origin of the 5x-scaled crop (the embedded
    // assignments reproduce the original code's behaviour exactly).
    detections[idx] = Rect(detections[idx].x = 0, detections[idx].y = 0,
                           detections[idx].width * 5,
                           (detections[idx].height) * 5);
  }
  vector< vector<Point2f> > shapes;
  // Fit the landmark model on the scaled crop of the last face.
  if (facemark->fit(zoomedFace, detections, shapes)) {
    for (unsigned long a = 0; a < detections.size(); a++) {
      for (unsigned long b = 0; b < shapes[a].size(); b++) {
        cv::circle(zoomedFace, shapes[a][b], 5, cv::Scalar(0, 0, 255), FILLED);
      }
    }
  }
  namedWindow("Detected_shape");
  imshow("Detected_shape", zoomedFace);
  waitKey(5);
}
// Capture webcam frames, detect faces (plus mouth and eyes inside each
// face), save a 128x128 crop of each face to disk, and run the facemark
// pipeline via process(). Exits on ESC.
int main()
{
    // Load the facemark model and the Haar cascades (paths are
    // machine-specific).
    facemark = FacemarkLBF::create();
    facemark->loadModel("C:/Dev/HeadPose/HeadPose/lbfmodel.yml");
    faceDetector.load("D:/opencv/build/install/etc/haarcascades/haarcascade_frontalface_alt2.xml");
    mouth.load("D:/opencv/build/install/etc/haarcascades/haarcascade_smile.xml");
    eye.load("D:/opencv/build/install/etc/haarcascades/haarcascade_eye.xml");
    cout << "Loaded model" << endl;
    Mat frame, grayframe, testframe, faceROI;
    int x_axis, y_axis;
    namedWindow("Detecting");
    VideoCapture cap(0); //1 for diff cam
    while (1)
    {
        // BUG FIX: the original called cap.read(frame) twice per iteration
        // (dropping every other frame) and kept going after a failed read;
        // read once and skip the iteration on failure.
        if (!cap.read(frame))
        {
            cout << "an error while taking the frame from cap" << endl;
            continue;
        }
        //face
        vector<Rect> faces;
        Mat crop;
        Mat res;
        Mat gray;
        string text;
        stringstream sstm;
        cvtColor(frame, grayframe, CV_BGR2GRAY);
        equalizeHist(grayframe, testframe);
        faceDetector.detectMultiScale(testframe, faces, 1.1, 3, CASCADE_SCALE_IMAGE, Size(30, 30));
        Rect roi_b;
        for (size_t ic = 0; ic < faces.size(); ic++)
        {
            // BUG FIX: the original indexed faces[ib] with ib fixed at 0, so
            // with several detections every crop came from the first face.
            roi_b = faces[ic];
            crop = frame(roi_b);
            resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
            cvtColor(crop, gray, COLOR_BGR2GRAY);
            // Save the crop as <name>_<w>_<h>.jpg in the Faces folder.
            stringstream ssfn;
            filename = "C:\\Users\\Hp\\Desktop\\Faces\\";
            ssfn << filename.c_str() << name <<"_"<< roi_b.width<<"_"<< roi_b.height << ".jpg";
            filename = ssfn.str();
            imwrite(filename, res);
            rectangle(frame, faces[ic], Scalar(255, 0, 255), 2, 8, 0);
            Mat face = frame(faces[ic]);
            cvtColor(face, face, CV_BGR2GRAY);
            //mouth
            vector <Rect> mouthi;
            mouth.detectMultiScale(face, mouthi);
            for (size_t k = 0; k < mouthi.size(); k++)
            {
                // BUG FIX: use mouthi[k]; the original drew mouthi[0] on
                // every pass of this loop.
                Point pt1(mouthi[k].x + faces[ic].x, mouthi[k].y + faces[ic].y);
                Point pt2(pt1.x + mouthi[k].width, pt1.y + mouthi[k].height);
                rectangle(frame, pt1, pt2, Scalar(255, 0, 0), 1, 8, 0);
            }
            //eyes
            faceROI = frame(faces[ic]);//Taking area of the face as Region of Interest for eyes//
            vector<Rect>eyes;//declaring a vector named eyes//
            eye.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5)); //detect eyes in every face//
            for (size_t j = 0; j < eyes.size(); j++)
            { //for locating eyes//
                Point center(faces[ic].x + eyes[j].x + eyes[j].width * 0.5, faces[ic].y + eyes[j].y + eyes[j].height * 0.5);//getting the centers of both eyes//
                int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25); //declaring radius of the eye enclosing circles//
                circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);//drawing circle around both eyes//
                x_axis = eyes[j].x;//storing x axis location of eyes in x_axis//
                y_axis = eyes[j].y;//storing y axis location of eyes in y_axis//
                cout << "Position of the eyes is:" << "(" << x_axis << "," << y_axis << ")" << endl;//showing co-ordinate values//
            }
        }
        sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
        text = sstm.str();
        if (!crop.empty()) {
            imshow("detected", crop);
        }
        else destroyWindow("detected");
        // NOTE(review): prompting for a name every frame blocks the loop on
        // console input -- kept as in the original; confirm it is intended.
        cout << "Name\n";
        cin >> name;
        Mat img; //image containers
        Mat imgbw;
        cap >> img; //image from webcam
        resize(img, img, Size(460, 460), 0, 0, INTER_LINEAR_EXACT);
        cvtColor(img, imgbw, COLOR_BGR2GRAY);
        process(imgbw, img);
        imshow("Detecting", frame);
        if (waitKey(30) == 27) {
            break;
        }
    }
    return 0;
}

change size of rectangle after object detection

I use below code for object detection.
// Capture webcam frames, detect Harris corners, collect them into matrix S,
// and draw a marker rectangle; corners falling inside the 430..460 x
// 285..315 window trigger the smaller rectangle discussed in the question.
int main(int argc, char* argv[]){
    VideoCapture cap(0);
    if (!cap.isOpened()){
        cout << "Cannot open the video cam" << endl;
        return -1;}
    Mat frame;
    namedWindow("MyVideo", CV_WINDOW_AUTOSIZE);
    while (1)
    {
        bool bSuccess = cap.read(frame); // read a new frame from video
        if (!bSuccess)
        {
            cout << "Cannot read a frame from video stream" << endl;
            break;
        }
        Rect rectangle2(420,280, 40, 40);
        rectangle(frame, rectangle2, Scalar(255, 255, 255));
        // Harris corner strength map.
        Mat cornerstrength;
        cornerHarris(frame, cornerstrength, 3, 3, 0.1);
        //threshold the corner strength
        Mat harriscorners;
        double th = 0.00001;
        threshold(cornerstrength, harriscorners, th, 255, THRESH_BINARY);
        morphologyEx(harriscorners, harriscorners, MORPH_CLOSE, Mat(), Point(-1, -1), 6);
        //local maxima detection
        Mat dilated, localMax;
        dilate(cornerstrength, dilated, Mat());
        compare(cornerstrength, dilated, localMax, CMP_EQ);
        threshold(cornerstrength, harriscorners, th, 255, THRESH_BINARY);
        harriscorners.convertTo(harriscorners, CV_8U);
        bitwise_and(harriscorners, localMax, harriscorners);
        harriscorners.convertTo(harriscorners, CV_32F);
        Mat S(0, 2, CV_32SC1);
        //drawing a circle around corners
        for (int j = 0; j < harriscorners.rows; j++)
            for (int i = 0; i < harriscorners.cols; i++)
            {
                if (harriscorners.at<float>(j, i) > 0)
                {
                    circle(frame, Point(i, j), 5, Scalar(255), 2, 8);
                    // BUG FIX: pt is a 1x2 matrix, so the only valid
                    // elements are (0,0) and (0,1); the original wrote
                    // pt.at<int>(1,0), which is out of bounds.
                    Mat pt(1, 2, CV_32SC1);
                    pt.at<int>(0, 0) = i;
                    pt.at<int>(0, 1) = j;
                    // Add the point to S
                    S.push_back(pt);
                    // BUG FIX: the original condition did not compile (an
                    // extra ')') and used '=' where '==' was intended,
                    // overwriting the stored point instead of comparing it.
                    // The x/y double loop was just a range-membership test.
                    if (i >= 430 && i < 460 && j >= 285 && j < 315)
                    {
                        // NOTE(review): this local Rect shadows rectangle2
                        // above and has no visible effect -- kept to match
                        // the intent discussed in the surrounding text.
                        Rect rectangle2(430, 285, 30, 30);
                    }
                }
            }
        imshow("MyVideo", frame);
        if (waitKey(30) == 27)
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
I want that, when the condition if ((pt.at(1, 0) = i) == x && (pt.at(0, 1) = j) == y)) is met, the size of rectangle2 changes from
rectangle2(420,280,40,40) to rectangle2(430,285,30,30). I can make this change, but when I use my code both rectangles (the previous rectangle2 and the new rectangle2) are displayed in the picture. I want to display only the new rectangle2. Do you have any idea how to solve my problem? Thanks a lot.
As I said in my comments, you need to create a clone and keep frame almost a constant.
So, in this part:
Rect rectangle2(420,280, 40, 40);
rectangle(frame, rectangle2, Scalar(255, 255, 255));
Mat cornerstrength;
cornerHarris(frame, cornerstrength, 3, 3, 0.1);
Write something more like this
cv::Mat frameCopy = frame.clone();
Rect rectangle2(420,280, 40, 40);
rectangle(frameCopy , rectangle2, Scalar(255, 255, 255));
Mat cornerstrength;
cornerHarris(frameCopy , cornerstrength, 3, 3, 0.1);
Then, in this part, I am not sure what it is intended to do
for (int x = 430; x < 460; x++)
for (int y = 285; y < 315; y++)
if ((pt.at<int>(1, 0) = i) == x && (pt.at<int>(0, 1) = j) == y))
{
Rect rectangle2(430, 285, 30,30);
}
}
}
imshow("MyVideo", frame);
But probably you want to show the new image with a new rectangle, so You can do something like before again:
cv::Mat anotherCopy= frame.clone();
Rect rectangleInLoop(430,280,30,30);
rectangle(anotherCopy, rectangleInLoop, Scalar(255, 255, 255));
imshow("MyVideo", anotherCopy);

hough transform in a limited frame height of a video

This is my code:
int main() {
VideoCapture cap;
cap.open("D:/01.avi");
if (!cap.isOpened()) {
cout << "Video cannot be opened" << endl;
system("pause");
return -1;
}
VideoWriter cap_output("D:/01_ouput.avi",
cap.get(CV_CAP_PROP_FOURCC),
cap.get(CV_CAP_PROP_FPS),
Size(cap.get(CV_CAP_PROP_FRAME_WIDTH),
cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!cap_output.isOpened()) {
cout << "Output video cannot be opened" << endl;
return -1;
}
Mat frame, canny_frame, frame_ouput;
while (true) {
if (!cap.read(frame))
break;
Canny(frame, canny_frame, 50, 200, 3);
cvtColor(canny_frame, frame_ouput, CV_GRAY2BGR);
vector<Vec4i> lines;
HoughLinesP(canny_frame, lines, 1, CV_PI / 180, 100, 100, 10);
for (size_t i = 0; i < lines.size(); i++) {
Vec4i l = lines[i];
line(frame, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, 8);
}
imshow("ahoho", frame);
//cap_output.write(frame_ouput);
if (waitKey(30) >= 0) {
break;
}
}
cap.release();
cap_output.release();
return 0;
}
I want to detect the lines in only 1/2 height of the frame of the video (to avoid different objects). How can I do it?

How can I track many blobs in occlusion, Opencv C++

I am trying to develop an algorithm which tracks two hands. I need to handle the occlusion problem. I have used the CvBlobs for this.
Do you have a solution ?
I think you need to use some machine learning algorithm
Try cvStartFindContours() and cvFindNextContour()
Found a few good links:
OpenCV HAND DETECTION code: http://download.andol.info/hsv.cpp
color object tracking in openCV keeps detecting the skin
Hand gesture: http://www.andol.info/hci/895.htm
//VERSION: HAND DETECTION 1.0
//AUTHOR: ANDOL LI#CW3/18, Live:lab
//PROJECT: HAND DETECTION PROTOTYPE
//LAST UPDATED: 03/2009
//
// Legacy OpenCV C-API demo: grabs webcam frames, thresholds skin-coloured
// pixels in HSV, keeps the largest contour, and classifies the hand pose
// (fist / open palm / closed palm) from the number of convexity defects.
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include "math.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <conio.h>
#include <sstream>
#include <time.h>
using namespace std;
/*
--------------------------------------------*/
int main()
{
int c = 0; // last pressed key; the main loop exits on ESC (27)
CvSeq* a = 0; // unused
CvCapture* capture = cvCaptureFromCAM(0);
if(!cvQueryFrame(capture)){ cout<<"Video capture failed, please check the camera."<<endl;}else{cout<<"Video camera capture status: OK"<<endl;};
CvSize sz = cvGetSize(cvQueryFrame( capture));
IplImage* src = cvCreateImage( sz, 8, 3 );
IplImage* hsv_image = cvCreateImage( sz, 8, 3);
IplImage* hsv_mask = cvCreateImage( sz, 8, 1);
IplImage* hsv_edge = cvCreateImage( sz, 8, 1);
// HSV range used as a skin-colour segmentation threshold.
CvScalar hsv_min = cvScalar(0, 30, 80, 0);
CvScalar hsv_max = cvScalar(20, 150, 255, 0);
//
CvMemStorage* storage = cvCreateMemStorage(0);
CvMemStorage* areastorage = cvCreateMemStorage(0);
CvMemStorage* minStorage = cvCreateMemStorage(0);
CvMemStorage* dftStorage = cvCreateMemStorage(0);
CvSeq* contours = NULL;
//
cvNamedWindow( "src",1);
//cvNamedWindow( "hsv-msk",1);
//cvNamedWindow( "contour",1);
//////
// Main capture loop: one iteration per frame until ESC is pressed.
while( c != 27)
{
// Fresh white background with a light grey grid, redrawn every frame.
IplImage* bg = cvCreateImage( sz, 8, 3);
cvRectangle( bg, cvPoint(0,0), cvPoint(bg->width,bg->height), CV_RGB( 255, 255, 255), -1, 8, 0 );
bg->origin = 1;
for(int b = 0; b< int(bg->width/10); b++)
{
cvLine( bg, cvPoint(b*20, 0), cvPoint(b*20, bg->height), CV_RGB( 200, 200, 200), 1, 8, 0 );
cvLine( bg, cvPoint(0, b*20), cvPoint(bg->width, b*20), CV_RGB( 200, 200, 200), 1, 8, 0 );
}
src = cvQueryFrame( capture);
cvCvtColor(src, hsv_image, CV_BGR2HSV);
cvInRangeS (hsv_image, hsv_min, hsv_max, hsv_mask);
cvSmooth( hsv_mask, hsv_mask, CV_MEDIAN, 27, 0, 0, 0 );
cvCanny(hsv_mask, hsv_edge, 1, 3, 5);
// Contours come from the smoothed skin mask; hsv_edge is computed but not
// used after this point.
cvFindContours( hsv_mask, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
CvSeq* contours2 = NULL;
double result = 0, result2 = 0;
// Keep the contour with the largest absolute area.
while(contours)
{
result = fabs( cvContourArea( contours, CV_WHOLE_SEQ ) );
if ( result > result2) {result2 = result; contours2 = contours;};
contours = contours->h_next;
}
if ( contours2 )
{
//cout << "contours2: " << contours2->total << endl;
CvRect rect = cvBoundingRect( contours2, 0 );
cvRectangle( bg, cvPoint(rect.x, rect.y + rect.height), cvPoint(rect.x + rect.width, rect.y), CV_RGB(200, 0, 200), 1, 8, 0 );
//cout << "Ratio: " << rect.width << ", " << rect.height << ", " << (float)rect.width / rect.height << endl;
int checkcxt = cvCheckContourConvexity( contours2 );
//cout << checkcxt <<endl;
CvSeq* hull = cvConvexHull2( contours2, 0, CV_CLOCKWISE, 0 );
CvSeq* defect = cvConvexityDefects( contours2, hull, dftStorage );
// Pose classification by convexity-defect count; thresholds are
// empirical values from the original author.
if( defect->total >=40 ) {cout << " Closed Palm " << endl;}
else if( defect->total >=30 && defect->total <40 ) {cout << " Open Palm " << endl;}
else{ cout << " Fist " << endl;}
cout << "defet: " << defect->total << endl;
// Minimum-area rotated rectangle: mark its centre and approximate it
// with an ellipse on the grid background.
CvBox2D box = cvMinAreaRect2( contours2, minStorage );
//cout << "box angle: " << (int)box.angle << endl;
cvCircle( bg, cvPoint(box.center.x, box.center.y), 3, CV_RGB(200, 0, 200), 2, 8, 0 );
cvEllipse( bg, cvPoint(box.center.x, box.center.y), cvSize(box.size.height/2, box.size.width/2), box.angle, 0, 360, CV_RGB(220, 0, 220), 1, 8, 0 );
//cout << "Ratio: " << (float)box.size.width/box.size.height <<endl;
}
//cvShowImage( "hsv-msk", hsv_mask); hsv_mask->origin = 1;
//IplImage* contour = cvCreateImage( sz, 8, 3 );
// NOTE(review): contours2 may still be NULL here when no contour was
// found -- confirm cvDrawContours tolerates a NULL contour pointer.
cvDrawContours( bg, contours2, CV_RGB( 0, 200, 0), CV_RGB( 0, 100, 0), 1, 1, 8, cvPoint(0,0));
cvShowImage( "src", src);
//contour->origin = 1; cvShowImage( "contour", contour);
//cvReleaseImage( &contour);
cvNamedWindow("bg",0);
cvShowImage("bg",bg);
cvReleaseImage( &bg);
c = cvWaitKey( 10);
}
//////
// NOTE(review): the CvMemStorage pools and IplImages created above are
// never released; tolerable for a demo that runs until process exit.
cvReleaseCapture( &capture);
cvDestroyAllWindows();
}

How to find the ruler position from an image by using opencv?

I have to find the ruler position in an image by using OpenCV. I am able to detect the color of the ruler (green). How can I read all the pixels of an image and get the upper and lower positions of the ruler?
// Locate the (green) ruler in `image`: convert to HSV, threshold the green
// hue range into a binary mask, draw the filled external contours, and save
// several intermediate images (suffixed with `indx`) into OUTPUT_FOLDER.
// NOTE(review): the body uses this->cmpddst, so this must be a member
// function of a class not visible in this chunk; as a free function it will
// not compile.
void findrulerPosition(cv::Mat image, int indx) {
std::stringstream ss;//create a stringstream
ss << indx;//add number to the stream
cv::Mat hsv;
cvtColor(image, hsv, CV_BGR2HSV);
String filename = OUTPUT_FOLDER + "hsv" + ss.str() + ".png";
imwrite(filename, hsv );
cv::Mat hsvbw;
// Hue 30..80 (OpenCV's 0-179 scale) selects green-ish pixels; saturation
// and value are left unconstrained.
inRange(hsv, cv::Scalar(30,0,0), cv::Scalar(80, 255, 255), hsvbw);
//inRange(hsv, cv::Scalar(12,255,255), cv::Scalar(23, 245, 255), hsvbw);
//inRange(image, cv::Scalar(0,64,255), cv::Scalar(0, 207, 255), hsvbw);
filename = OUTPUT_FOLDER + "hsvbw" + ss.str() + ".png";
imwrite(filename, hsvbw );
vector<vector<Point> > contours;
// clone() because findContours modifies its input in OpenCV 2.x.
findContours(hsvbw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// Build a white-filled contour mask, then intersect it with the input
// image and with the accumulated this->cmpddst composite.
cv::Mat dst = Mat::zeros(image.size(), image.type());
drawContours(dst, contours, -1, Scalar::all(255), CV_FILLED);
this->cmpddst &= dst;
dst &= image;
this->cmpddst &= image;
filename = OUTPUT_FOLDER + "cmpddst" + ss.str() + ".png";
imwrite(filename, this->cmpddst );
filename = OUTPUT_FOLDER + "dst" + ss.str() + ".png";
imwrite(filename, dst );
}
Here's what I've done:
A bit improved your green range because yours is not detecting green color - it's detecting many other colors.
Find contours on image.
Find contour with area bigger than 100.
Find up and low points of contour.
Draw these 2 points.
// Answer snippet: segment green pixels, find the first contour with area
// greater than 100, and mark its topmost and bottommost points.
Mat src = imread("input.png"), tmp;
cvtColor(src, tmp, CV_BGR2HSV_FULL);
inRange(tmp, Scalar(50, 50, 50), Scalar(70, 255, 255), tmp);
vector<vector<Point> > contours;
findContours(tmp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// BUG FIX: upX/lowX were left uninitialized; when no contour has area > 100
// they were read uninitialized (undefined behaviour) by the circle() calls.
int upY = INT_MAX, lowY = 0, upX = 0, lowX = 0;
for (size_t i = 0; i < contours.size(); i++)
{
    if (contourArea(contours[i]) > 100)
    {
        // Scan the contour's vertices for the extreme y coordinates.
        for (size_t j = 0; j < contours[i].size(); j++)
        {
            if (contours[i][j].y > lowY)
            {
                lowY = contours[i][j].y;
                lowX = contours[i][j].x;
            }
            if (contours[i][j].y < upY)
            {
                upY = contours[i][j].y;
                upX = contours[i][j].x;
            }
        }
        cout << "low = (" << lowX << ", " << lowY << ")"<< endl
             << "up = (" << upX << ", " << upY << ")"<< endl;
        break; // only the first sufficiently large contour is used
    }
}
circle(src, Point(lowX, lowY), 3, Scalar(255, 0, 255));
circle(src, Point(upX, upY), 3, Scalar(255, 0, 255));
imshow("Window", src);
waitKey();