I'm teaching myself OpenCV and wrote the following code today to track a ball rolling across my computer's webcam feed and (attempt to) draw a filled-in grey circle at its centroid:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Point getBlobCentroid(Mat blobImage);
int main()
{
Mat bGround, fGround, diff;
Point p = (500, 280);
VideoCapture cap(0);
while (true)
{
cap >> fGround; //assign frame from camera to newest image
cvtColor(fGround, fGround, CV_BGR2GRAY); //convert to grayscale
bGround.create(fGround.size(), fGround.type());
absdiff(bGround, fGround, diff); //subtract current frame from old frame
threshold(diff, diff, 50, 255, CV_THRESH_BINARY); //convert to binary
erode(diff, diff, NULL, Point(-1,-1), 3, 0, BORDER_DEFAULT);
imshow("Thresholded", diff);
circle(fGround, getBlobCentroid(diff), 6, 127, -1, 8, 16);
imshow("Natural Image with Tracking", fGround);
fGround.copyTo(bGround); //move forward in time
waitKey(1);
}
return 0;
}
Point getBlobCentroid(Mat blobImage)
{
int rowSum=0, colSum=0, count = 1;
for(int i=0; i<blobImage.rows; i++)
{
for (int j=0; j<blobImage.cols; j++)
{
if (blobImage.at<uchar>(i,j) == 255)
{
rowSum+=i;
colSum+=j;
count++;
}
}
}
Point centroid = (rowSum, colSum)/count;
return centroid;
}
However, as evidenced by the attached image, the circle never moves away from the top of the screen; in other words, the centroid.y component is always zero. I printed several intermediate steps of the calculation to the screen, and the searching and the additions to rowSum, colSum, and count all seem to work - those are nonzero. But as soon as the centroid is computed or passed to circle(), it doesn't work. Even weirder, I tried giving the circle a constant center, Point p = (285, 285), and using that as the argument, and that didn't work either. Help? Thanks!
-Tony
fGround.copyTo(bGround); //move forward in time
// so, that's your idea. compare bg & fg, get the centroid of the diff.
// but then, if you follow your while loop there, (waitkey, back to the top ... )
bGround.create(fGround.size(), fGround.type());
// aww, so you're never using what you copied before
absdiff(bGround, fGround, diff);
// so, in the end, you're always comparing fGround to an empty bGround img
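A minimal reordering along those lines (just a sketch, not tested against the rest of your code) would be to seed bGround once before the loop and only update it at the end of each iteration:
// sketch: seed the background once, then reuse the copy made at the end of each pass
cap >> fGround;
cvtColor(fGround, fGround, CV_BGR2GRAY);
fGround.copyTo(bGround);                      // initial background
while (true)
{
    cap >> fGround;
    cvtColor(fGround, fGround, CV_BGR2GRAY);
    absdiff(bGround, fGround, diff);          // now bGround really is the previous frame
    threshold(diff, diff, 50, 255, CV_THRESH_BINARY);
    // ... erode, centroid, drawing, imshow as before ...
    fGround.copyTo(bGround);                  // move forward in time
    waitKey(1);
}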
Hi guys, I've written code to describe the motion of interest points over a .avi video file.
Here is the code:
#include "opencv2/video/tracking.hpp"
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace cv;
using namespace std;
int main() {
VideoCapture capture("video.avi");
if (!capture.isOpened()) {
cout << "ERROR OPENING VIDEO\n\n";
return(0);
}
double rate = capture.get(CV_CAP_PROP_FPS);
unsigned int numberFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
int width = (unsigned int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
int height = (unsigned int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
unsigned int codec = (unsigned int) capture.get(CV_CAP_PROP_FOURCC);
Mat currentGray;
Mat previousGray;
vector< Point2f > points[2];
vector< Point2f > initial;
vector< Point2f > features;
vector< uchar > status;
vector< float > error;
int maxCorners = 500; // maximum number of features to detect
double qualityLevel = 0.01; // quality level for feature detection
double minDistance = 10; // min distance between two points
Mat frame, output;
VideoWriter createdVideo("output.avi", codec, rate, Size(width,height), 1);
for (unsigned frameCounter = 0; frameCounter < numberFrames; frameCounter++) {
capture >> frame;
if (frame.empty())
break;
imshow("Video", frame);
cvtColor(frame, currentGray, CV_BGR2GRAY);
frame.copyTo(output);
if (points[0].size() <= 10){
goodFeaturesToTrack(currentGray, // the image
features, // the output detected features
maxCorners, // the maximum number of features
qualityLevel, // quality level
minDistance); // min distance between two features
// add the detected features to
// the currently tracked features
points[0].insert(points[0].end(),
features.begin(), features.end());
initial.insert(initial.end(),
features.begin(), features.end());
}
if (previousGray.empty())
currentGray.copyTo(previousGray);
calcOpticalFlowPyrLK(previousGray, currentGray, // 2 consecutive images
points[0], // input point positions in first image
points[1], // output point positions in the 2nd image
status, // tracking success
error); // tracking error
int k = 0;
for (int i = 0; i < points[1].size(); i++) {
// do we keep this point?
if (status[i] && // if point has moved
(abs(points[0][i].x - points[1][i].x) +
(abs(points[0][i].y - points[1][i].y)) > 2))
initial[k] = initial[i];
points[1][k++] = points[1][i];
}
points[1].resize(k);
initial.resize(k);
for (int i = 0; i < points[1].size(); i++) {
// draw line and circle
line(output,
initial[i], // initial position
points[1][i],// new position
Scalar(0, 255, 0), 2);
circle(output,
points[1][i],
2,
Scalar(0, 0, 255), -1);
}
std::swap(points[1], points[0]);
cv::swap(previousGray, currentGray);
createdVideo.write(output);
}
waitKey(0);
return(0);
}
My code tracks the displacement of points frame by frame and keeps their first locations until the end of the video.
However, I would like not to keep the point locations from the first frame, but to update them over time, i.e. replace the first point location with the second point location, and so on, so that huge lines no longer appear and only the displacement between two consecutive frames is drawn.
Is there any possibility of doing this?
Since you only want the position of points in two frames, just use two vectors: one holding the keypoints from the current frame, and one holding the keypoints from the previous frame. At the end of each iteration, just set the previous points to the current points. Something like this pseudocode:
// first frame
// detect keypoints
prev_frame_points = keypoints
// rest of the frames
for frame in frames:
// detect keypoints
curr_frame_points = keypoints
line(..., prev_frame_points, curr_frame_points, ...)
prev_frame_points = curr_frame_points
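Mapped onto your code (a sketch; it reuses your existing variables and assumes the rest of the loop stays as it is), that just means drawing from points[0][i] to points[1][i] and letting the std::swap at the end of the loop promote the current points to "previous":
// in the filtering loop, keep points[0] and points[1] aligned:
//     points[0][k] = points[0][i];
//     points[1][k++] = points[1][i];
// then points[0].resize(k); points[1].resize(k);
for (size_t i = 0; i < points[1].size(); i++) {
    // draw only the displacement between the previous and the current frame
    line(output, points[0][i], points[1][i], Scalar(0, 255, 0), 2);
    circle(output, points[1][i], 2, Scalar(0, 0, 255), -1);
}
// std::swap(points[1], points[0]) at the end of the frame already promotes the
// current points to "previous", so the `initial` vector is no longer needed.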
To gain a better understanding of computer vision, I am trying to write a Canny edge detection algorithm "from scratch" (not just using the canny function from the cv lib). To do this, I need to apply a hysteresis threshold to the image. However, I get an error when I try to modify an element in my matrix. Here is my function:
cv::Mat hysteresisThresh(cv::Mat originalImage, int lowThresh, int highThresh){
//initialize a 2d vector to keep track of whether or not the pixel is "in" the image
std::vector<std::vector<bool>>isIn(
originalImage.rows,
std::vector<bool>(originalImage.cols, true));
//loop through every row and col in the original image
//create a variable to hold the element being checked
unsigned short curElementValue;
for(int curRow = 0; curRow<originalImage.rows; curRow++){
for(int curCol = 0; curCol<originalImage.cols; curCol++){
curElementValue = originalImage.at<unsigned short>(curRow, curCol);
if(curElementValue > highThresh){
isIn.at(curRow).at(curCol) = true;
//do nothing to the returnImage since the correct value is already stored
}
else if(curElementValue < highThresh && curElementValue > lowThresh){
/* I need to check that this pixel is adjacent to another edge pixel
thus, I have 8 possibilities. Think of them as:
123
4*5
678
with the * being the current pixel. However, I can cut the possibilities
in half since pixels 5-8 (inclusive) have not been checked yet and will be
checked in the future. So, I will only check pixels 1-4
*/
//TODO there may be a more efficient way to check these values. Find out
//The first stage of this if is to be sure that you are checking values in the array that actually exist
if((curRow!=0 && curCol!=0 && curRow!=originalImage.rows-1 && curCol!=originalImage.cols-1)&&
(isIn[curRow][curCol-1] || isIn[curRow-1][curCol-1]|| isIn[curRow-1][curCol] || isIn[curRow-1][curCol+1])){
isIn.at(curRow).at(curCol) = true;
//do nothing to the returnImage since the correct value is already stored
}
else{
//none of the adjacent pixels are in, so it is not an edge
isIn.at(curRow).at(curCol) = false;
originalImage.at<unsigned short>(curRow, curCol) = 0; //same as above
}
}
else{
isIn.at(curRow).at(curCol) = false;
originalImage.at<unsigned short>(curRow, curCol) = 0; //same as above
}
}
}
return originalImage;
}
The output upon running the program is a classic seg fault:
Segmentation fault (core dumped)
I've explored in gdb a bit and found that the fault occurs at the line:
originalImage.at<unsigned short>(curRow, curCol) = 0;
What am I doing wrong? Thanks!
EDIT: I was asked in the comments to provide context for the function. This is the code surrounding the function call. I am calling all of the operations on a depth map (yes, I am aware Canny is designed for use with intensity/color images, but after researching I think the same general principles would apply to finding edges in a depth map). Anyway, here is the code:
void displayDepthEdges(cv::VideoCapture* capture){
//initialize the matricies for holding images
cv::Mat depthImage;
cv::Mat edgeImage;
cv::Mat bgrImage;
//initialize variables for sobel operations
cv::Mat yGradient;
cv::Mat xGradient;
//double theta;
//initialize variable for hysteresis thresh operation
cv::Mat threshedImage;
int cannyLowThresh = 0;
int cannyHighThresh = 500;
//initialize variables for erosion and dilation based noise cancelation.
//CURRENTLY NOT IN USE
//int erosionFactor = 0;
//int dilationFactor = 0;
//initialize the windows and trackbars used to control everything
//cv::namedWindow("EroDilMenu", CV_WINDOW_NORMAL);
cv::namedWindow("CannyMenu", CV_WINDOW_NORMAL);
cvCreateTrackbar("Low Thresh", "CannyMenu",&cannyLowThresh, 500);
cvCreateTrackbar("High Thresh", "CannyMenu",&cannyHighThresh, 500);
//cvCreateTrackbar("Dil Fac", "EroDilMenu",&dilationFactor, 25);
//cvCreateTrackbar("Ero Fac", "EroDilMenu", &erosionFactor, 25);
cv::namedWindow("Edges", CV_WINDOW_NORMAL);
for(;;){
if(!capture->grab()){
std::cerr << "Error: Could not grab an image\n";
}
else{
//initialization here will stop totalGradient from carrying values over (hopefully)
cv::Mat totalGradient;
capture->retrieve(depthImage, CV_CAP_OPENNI_DEPTH_MAP);
//blur the image so that only major edges are detected
GaussianBlur(depthImage, depthImage, cv::Size(3,3), 1, 0, cv::BORDER_DEFAULT);
//apply the sobel operator in the x and y directions, then average to approximate gradient
//arguments are(input, output, dataType, x order derivative, y order derivative
//matrix size, scale, delta)
Sobel(depthImage, xGradient, CV_16U, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
convertScaleAbs(xGradient, xGradient);
Sobel(depthImage, yGradient, CV_16U, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
convertScaleAbs(yGradient, yGradient);
addWeighted(yGradient, 0.5, xGradient, 0.5, 0.0, totalGradient);
//it is not clear that noise canceling does anything here...
//it seemed to only blur or sharpen edges in testing
//totalGradient = noiseCancel(totalGradient ,erosionFactor, dilationFactor);
//TODO implement direction-based edge priority
//theta = atan2(xGradient, yGradient);//this is returned in radians
//times 10 since the threshes are in mm and trackbars are in cm
totalGradient = hysteresisThresh(totalGradient,cannyLowThresh*10, cannyHighThresh*10);
imshow("Edges", totalGradient);
if(cv::waitKey(30) == 27){break;}
}
}
}
Your mistake is in the surrounding code, not in the algorithm itself. According to the documentation (this also holds for version 3.0.0), cv::convertScaleAbs converts its result to an 8-bit image, i.e. CV_8UC1. As a result your totalGradient is an 8-bit image, and its elements can't be accessed as unsigned short.
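For example (a sketch against the function above, assuming you keep convertScaleAbs and therefore CV_8UC1 data), reading and writing the pixels with the matching element type avoids addressing past the end of each row:
// hysteresisThresh receives the CV_8UC1 output of convertScaleAbs,
// so index it as uchar rather than unsigned short
CV_Assert(originalImage.type() == CV_8UC1);   // catches a type mismatch early
uchar curElementValue = originalImage.at<uchar>(curRow, curCol);
if (curElementValue <= lowThresh)
    originalImage.at<uchar>(curRow, curCol) = 0;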
I'm playing around with OpenCV and I want to know how you would build a simple version of a perspective-transform program. I have an image of a parallelogram, and each corner of it consists of a pixel with a specific color which appears nowhere else in the image. I want to iterate through all pixels and find these 4 pixels. Then I want to use them as corner points in a new image in order to warp the perspective of the original image. In the end I should have a zoomed-in square.
Point2f src[4]; //Is this the right datatype to use here?
int lineNumber=0;
//iterating through the pixels
for(int y = 0; y < image.rows; y++)
{
for(int x = 0; x < image.cols; x++)
{
Vec3b colour = image.at<Vec3b>(Point(x, y));
if(colour.val[1]==245 && colour.val[2]==111 && colour.val[0]==10) {
src[lineNumber]=this pixel // something like Point2f(x,y) I guess
lineNumber++;
}
}
}
/* I also need to get the dst points for getPerspectiveTransform
and afterwards warpPerspective, how do I get those? Take the other
points, check the biggest distance somehow and use it as the maxlength to calculate
the rest? */
How would you use OpenCV to solve this problem? (I just guess I'm not doing it the "normal and clever" way.) Also, how do I do the next step, which would be using more than one pixel as a "marker" and calculating the average point in the middle of those pixels? Is there something more efficient than running through each pixel?
Something like this basically:
Starting from an image with colored circles as markers, like:
Note that this is a PNG image, i.e. with lossless compression which preserves the actual colors. If you use a lossy compression like JPEG, the colors will change a little, and you cannot segment them with an exact match, as done here.
You need to find the center of each marker.
Segment the (known) color, using inRange
Find all connected components with the given color, with findContours
Find the largest blob, here done with max_element with a lambda function and distance; a plain for loop would work too.
Find the center of mass of the largest blob, here done with moments; you could also compute this with a loop if you prefer.
Add the center to your source vertices.
Your destination vertices are just the four corners of the destination image.
You can then use getPerspectiveTransform and warpPerspective to find and apply the warping.
The resulting image is:
Code:
#include <opencv2/opencv.hpp>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
int main()
{
// Load image
Mat3b img = imread("path_to_image");
// Create a black output image
Mat3b out(300,300,Vec3b(0,0,0));
// The color of your markers, in order
vector<Scalar> colors{ Scalar(0, 0, 255), Scalar(0, 255, 0), Scalar(255, 0, 0), Scalar(0, 255, 255) }; // red, green, blue, yellow
vector<Point2f> src_vertices(colors.size());
vector<Point2f> dst_vertices = { Point2f(0, 0), Point2f(0, out.rows - 1), Point2f(out.cols - 1, out.rows - 1), Point2f(out.cols - 1, 0) };
for (int idx_color = 0; idx_color < colors.size(); ++idx_color)
{
// Detect color
Mat1b mask;
inRange(img, colors[idx_color], colors[idx_color], mask);
// Find connected components
vector<vector<Point>> contours;
findContours(mask, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
// Find largest
int idx_largest = distance(contours.begin(), max_element(contours.begin(), contours.end(), [](const vector<Point>& lhs, const vector<Point>& rhs) {
return lhs.size() < rhs.size();
}));
// Find centroid of largest component
Moments m = moments(contours[idx_largest]);
Point2f center(m.m10 / m.m00, m.m01 / m.m00);
// Found marker center, add to source vertices
src_vertices[idx_color] = center;
}
// Find transformation
Mat M = getPerspectiveTransform(src_vertices, dst_vertices);
// Apply transformation
warpPerspective(img, out, M, out.size());
imshow("Image", img);
imshow("Warped", out);
waitKey();
return 0;
}
I am trying to use OpenCV to detect a red round object and draw a circle around it. However, a segmentation fault occurs when I use the circle function to draw the circle. I don't know why it is happening or how to fix it. Thanks!!
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <stdlib.h>
#include <cv.hpp>
#include <cxcore.hpp>
#include <highgui.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include<stdio.h>
#include<math.h>
#include<opencv/cv.h>
#include<opencv/highgui.h>
#include<opencv2/objdetect/objdetect.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<vector>
using namespace cv; // if you don want to use scope resolution operator(::) in the code to call the classes or functions from cv namespace, you need this line
using namespace std; // if you don want to use scope resolution operator(::) in the code to call the classes or functions from std namespace, you need this line
int main(int argc, char* argv[]){
VideoCapture capWebcam(0); //use scope resolution operator :: because VideoCapture is a class under namespace of cv
//use VideoCapture class to instantiate an object called capWebcam; here used the constructor of the object immediately to
//grab the only (0) camera
if(capWebcam.isOpened()==false){ //check whether the camera is detected and successfully grabbed
printf("Error: camera not detected!!\n");
cout<<"Error: camera not detected!!\n"<<endl;
return(1);
}
Mat matOriginal; // matrix object used to store image from webcam
Mat matProcessed;
vector<Vec3f> vecCircles; //declare a 3-element vector of type floats, this will be the pass by reference(i.e. a pointer) output of HoughCicles()
vector<Vec3f>::iterator itrCircles; //iterator for circles vector just a counter, but has the same data type from the itrCircles' data member
namedWindow("Original"); //window for original image
namedWindow("Processed"); //window for Processed image
char charCheckForEscKey =0;
while(charCheckForEscKey!=27){ //as long as ESC is not pressed, stays in the while
if(capWebcam.read(matOriginal) == false){ //check to see whether the image read from webcam correctly
cout<<"Error: image frame not read!!\n"<<endl;
break;
} //
inRange(matOriginal, //this time we don't need to pass a pointer; we pass the image as an object instead
Scalar(0,0,175), //specify the lower bound of BGR we want to keep
Scalar(100,100,256), //upper bound of BGR
matProcessed); //return the processed image to another object
GaussianBlur(matProcessed,matProcessed,Size(9,9),1.5,1.5); //take matProcessed image and blur by Gaussian filter(9x9 window with std of 1.5 in both x,y direction) and return to same object
HoughCircles(matProcessed,
vecCircles, //use vector element to receive the x,y,radius of the detected circle
CV_HOUGH_GRADIENT, //algorithms used to detect circles
2, //size of image divided by this value = "accumulator resolution"
matProcessed.rows/4, //min distance between the centers of two detected circles
100, //upper pixel value threshold for canny edge detection to interpret as edge
50, //lower pixel value threshold for canny edge detection to interpret as edge
10, //min radius of a circle can be detected
400); //max radius of a circle can be detected
for(itrCircles = vecCircles.begin();itrCircles != vecCircles.end();itrCircles++) //retrieve the x,y and radius of the detected circles from vecCircles object one by one
cout<< "circle position x = " << (*itrCircles)[0] //because itrCircles is a pointer(pass by reference), to get the value need to use * to dereference
<< ",y = " << (*itrCircles)[1]
<< ",r = " << (*itrCircles)[2] << "\n" << endl;
// draw the center of detected circle in green
circle(matOriginal,
Point((int)(*itrCircles)[0],(int)(*itrCircles)[1]),
3,
Scalar(0,255,0),
CV_FILLED);
// draw the circumference of detected circle
circle(matOriginal,
Point((int)(*itrCircles)[0],(int)(*itrCircles)[1]),
(int)(*itrCircles)[2],
Scalar(0,0,255),
3);
imshow("Original",matOriginal); //show the original mat(image) in Original window
imshow("Processed",matProcessed);// show the processed mat(image) in Processed window
charCheckForEscKey = waitKey(10); // delay 10 ms to allow a time gap to listen to any key pressed
} // end while
return(0);
} // end main
The crash is caused by the missing braces on the for loop: without them, only the cout statement is inside the loop body, so the drawing code runs after the loop has finished and dereferences the end iterator. You should do:
for(itrCircles = vecCircles.begin();itrCircles != vecCircles.end();itrCircles++)
{
// your functions
}
May I suggest dropping the iterators and using a range-based for loop?
for (const auto& circ : vecCircles)
{
// your functions
}
Here is the full example, cleaned of all the useless stuff (especially the unnecessary headers).
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
int main(){
VideoCapture capWebcam(0);
if (capWebcam.isOpened() == false){
cout << "Error: camera not detected!!\n" << endl;
return -1;
}
Mat matOriginal; // matrix object used to store image from webcam
Mat matProcessed;
vector<Vec3f> vecCircles;
namedWindow("Original"); //window for original image
namedWindow("Processed"); //window for Processed image
char charCheckForEscKey = 0;
while (charCheckForEscKey != 27){ //as long as ESC is not pressed, stays in the while
if (!capWebcam.read(matOriginal)){
cout << "Error: image frame not read!!" << endl;
break;
} //
inRange(matOriginal, //this time we don't need to pass a pointer; we pass the image as an object instead
Scalar(0, 0, 175), //specify the lower bound of BGR we want to keep
Scalar(100, 100, 256), //upper bound of BGR
matProcessed); //return the processed image to another object
GaussianBlur(matProcessed, matProcessed, Size(9, 9), 1.5, 1.5); //take matProcessed image and blur by Gaussian filter(9x9 window with std of 1.5 in both x,y direction) and return to same object
HoughCircles(matProcessed,
vecCircles, //use vector element to receive the x,y,radius of the detected circle
CV_HOUGH_GRADIENT, //algorithms used to detect circles
2, //size of image divided by this value = "accumulator resolution"
matProcessed.rows / 4, //min distance between the centers of two detected circles
100, //upper pixel value threshold for canny edge detection to interpret as edge
50, //lower pixel value threshold for canny edge detection to interpret as edge
10, //min radius of a circle can be detected
400); //max radius of a circle can be detected
for (const auto& circ : vecCircles) //retrieve the x,y and radius of the detected circles from vecCircles object one by one
{
cout << "circle position x = " << circ[0] //because itrCircles is a pointer(pass by reference), to get the value need to use * to dereference
<< ",y = " << circ[1]
<< ",r = " << circ[2] << "\n" << endl;
// draw the center of detected circle in green
circle(matOriginal, Point(circ[0], circ[1]), 3, Scalar(0, 255, 0), CV_FILLED);
// draw the circumference of detected circle
circle(matOriginal, Point(circ[0], circ[1]), circ[2], Scalar(0, 0, 255), 3);
}
imshow("Original", matOriginal); //show the original mat(image) in Original window
imshow("Processed", matProcessed);// show the processed mat(image) in Processed window
charCheckForEscKey = waitKey(10); // delay 10 ms to allow a time gap to listen to any key pressed
} // end while
return(0);
} // end main
I want to find out how to get the difference between 2 similar grayscale images, for use in a security system. I want to check whether any difference has occurred between them. For object tracking, I have implemented Canny detection in the program below. I easily get the outlines of structured objects, which can later be subtracted to give only the outline of the difference in the delta image. But what if there's a non-structural difference, such as smoke or fire, in the second image? I have increased the contrast for clearer edge detection and also modified the threshold values in the Canny parameters, yet got no suitable results.
Also, Canny detects shadow edges too. If my two similar images were taken at different times during the day, the shadows will vary, so the edges will vary and give undesirable false alarms.
How should I work around this? Can anyone help? Thanks!
I am using the C API of OpenCV 2.4 in Visual Studio 2010.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"
#include <math.h>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
IplImage* img1 = NULL;
if ((img1 = cvLoadImage("libertyH1.jpg"))== 0)
{
printf("cvLoadImage failed\n");
}
IplImage* gray1 = cvCreateImage(cvGetSize(img1), IPL_DEPTH_8U, 1); //contains greyscale //image
CvMemStorage* storage1 = cvCreateMemStorage(0); //struct for storage
cvCvtColor(img1, gray1, CV_BGR2GRAY); //convert to greyscale
cvSmooth(gray1, gray1, CV_GAUSSIAN, 7, 7); // This is done so as to //prevent a lot of false circles from being detected
IplImage* canny1 = cvCreateImage(cvGetSize(gray1),IPL_DEPTH_8U,1);
IplImage* rgbcanny1 = cvCreateImage(cvGetSize(gray1),IPL_DEPTH_8U,3);
cvCanny(gray1, canny1, 50, 100, 3); //cvCanny( const //CvArr* image, CvArr* edges(output edge map), double threshold1, double threshold2, int //aperture_size CV_DEFAULT(3) );
cvNamedWindow("Canny before hough");
cvShowImage("Canny before hough", canny1);
CvSeq* circles1 = cvHoughCircles(gray1, storage1, CV_HOUGH_GRADIENT, 1, gray1->height/3, 250, 100);
cvCvtColor(canny1, rgbcanny1, CV_GRAY2BGR);
cvNamedWindow("Canny after hough");
cvShowImage("Canny after hough", rgbcanny1);
for (size_t i = 0; i < circles1->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles1, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
// draw the circle center
cvCircle(rgbcanny1, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(rgbcanny1, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
IplImage* img2 = NULL;
if ((img2 = cvLoadImage("liberty_wth_obj.jpg"))== 0)
{
printf("cvLoadImage failed\n");
}
IplImage* gray2 = cvCreateImage(cvGetSize(img2), IPL_DEPTH_8U, 1);
CvMemStorage* storage = cvCreateMemStorage(0);
cvCvtColor(img2, gray2, CV_BGR2GRAY);
// This is done so as to prevent a lot of false circles from being detected
cvSmooth(gray2, gray2, CV_GAUSSIAN, 7, 7);
IplImage* canny2 = cvCreateImage(cvGetSize(img2),IPL_DEPTH_8U,1);
IplImage* rgbcanny2 = cvCreateImage(cvGetSize(img2),IPL_DEPTH_8U,3);
cvCanny(gray2, canny2, 50, 100, 3);
CvSeq* circles2 = cvHoughCircles(gray2, storage, CV_HOUGH_GRADIENT, 1, gray2->height/3, 250, 100);
cvCvtColor(canny2, rgbcanny2, CV_GRAY2BGR);
for (size_t i = 0; i < circles2->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles2, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
// draw the circle center
cvCircle(rgbcanny2, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(rgbcanny2, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
You want code help here? This is not an easy task. There are a few algorithms available on the internet, or you can try to invent a new one; a lot of research is going on in this area. I have some idea of a process: you can find the edges using the Y channel of the YCbCr color space. Subtract this Y value from the blurred image's Y value and you will get the edges. Then make an array representation: divide the image into blocks and compare blocks against blocks, since the content may slide, rotate, twist, etc. Compare with array matching. Object tracking is difficult due to the background, so take care to omit unnecessary objects.
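A rough sketch of the first part of that idea (convert to YCrCb, take the Y plane, and difference it against a blurred copy; the variable names here are just placeholders, not from your code):
Mat ycrcb, yBlurred, edges;
Mat channels[3];
cvtColor(img, ycrcb, CV_BGR2YCrCb);        // img: a BGR input frame
split(ycrcb, channels);                    // channels[0] is the Y (luma) plane
GaussianBlur(channels[0], yBlurred, Size(7, 7), 1.5);
absdiff(channels[0], yBlurred, edges);     // Y minus blurred Y approximates the edges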
I think the way to go could be background subtraction. It lets you cope with changes in lighting conditions.
See the Wikipedia entry for an intro. The basic idea is that you build a model of the scene background, and then all differences are computed relative to that background.
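A minimal sketch with OpenCV's built-in MOG2 background subtractor (OpenCV 2.4 C++ API; the names here are placeholders, not from your program):
#include <opencv2/opencv.hpp>
int main()
{
    cv::VideoCapture cap(0);
    cv::BackgroundSubtractorMOG2 bgModel;     // learns the background model frame by frame
    cv::Mat frame, fgMask;
    while (cap.read(frame))
    {
        bgModel(frame, fgMask);               // update the model and get the foreground mask
        // fgMask is non-zero where the current frame differs from the learned background
        cv::imshow("Foreground", fgMask);
        if (cv::waitKey(30) == 27) break;
    }
    return 0;
}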
I have done some analysis on image differencing, but the code was written in Java. Kindly look at the link below; it may help:
How to find rectangle of difference between two images
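The gist of that approach in OpenCV C++ (a sketch, not the Java code from the link; grayA and grayB stand for your two aligned, same-size grayscale images):
cv::Mat diff, mask;
cv::absdiff(grayA, grayB, diff);                        // per-pixel difference
cv::threshold(diff, mask, 30, 255, CV_THRESH_BINARY);   // keep only significant changes
std::vector<std::vector<cv::Point> > contours;
cv::findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++)
{
    cv::Rect box = cv::boundingRect(contours[i]);       // rectangle around each changed region
    cv::rectangle(grayB, box, cv::Scalar(255), 2);
}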
Cheers !