I am trying to use OpenCV to detect a red round object and draw a circle around it. However, a segmentation fault occurs when I use the circle function to draw the circle. I don't know why it is happening or how to fix it. Thanks!!
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <stdlib.h>
#include <cv.hpp>
#include <cxcore.hpp>
#include <highgui.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include<stdio.h>
#include<math.h>
#include<opencv/cv.h>
#include<opencv/highgui.h>
#include<opencv2/objdetect/objdetect.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<vector>
using namespace cv; // if you don't want to use the scope resolution operator (::) to call classes or functions from the cv namespace, you need this line
using namespace std; // if you don't want to use the scope resolution operator (::) to call classes or functions from the std namespace, you need this line
int main(int argc, char* argv[]){
VideoCapture capWebcam(0); //use scope resolution operator :: because VideoCapture is a class under namespace of cv
//use VideoCapture class to instantiate an object called capWebcam; here used the constructor of the object immediately to
//grab the only (0) camera
if(capWebcam.isOpened()==false){ //check whether the camera is detected and successfully grabbed
printf("Error: camera not detected!!\n");
cout<<"Error: camera not detected!!\n"<<endl;
return(1);
}
Mat matOriginal; // matrix object used to store image from webcam
Mat matProcessed;
vector<Vec3f> vecCircles; //declare a 3-element vector of type floats, this will be the pass by reference(i.e. a pointer) output of HoughCicles()
vector<Vec3f>::iterator itrCircles; //iterator for circles vector just a counter, but has the same data type from the itrCircles' data member
namedWindow("Original"); //window for original image
namedWindow("Processed"); //window for Processed image
char charCheckForEscKey =0;
while(charCheckForEscKey!=27){ //as long as ESC is not pressed, stays in the while
if(capWebcam.read(matOriginal) == false){ //check to see whether the image read from webcam correctly
cout<<"Error: image frame not read!!\n"<<endl;
break;
} //
inRange(matOriginal, //this time we don't need to pass a pointer; we pass the image as an object instead
Scalar(0,0,175), //specify the lower bound of BGR we want to keep
Scalar(100,100,256), //upper bound of BGR
matProcessed); //return the processed image to another object
GaussianBlur(matProcessed,matProcessed,Size(9,9),1.5,1.5); //take matProcessed image and blur by Gaussian filter(9x9 window with std of 1.5 in both x,y direction) and return to same object
HoughCircles(matProcessed,
vecCircles, //use vector element to receive the x,y,radius of the detected circle
CV_HOUGH_GRADIENT, //algorithms used to detect circles
2, //size of image divided by this value = "accumulator resolution"
matProcessed.rows/4, //min distance between the centers of two detected circles
100, //upper pixel value threshold for canny edge detection to interpret as edge
50, //lower pixel value threshold for canny edge detection to interpret as edge
10, //min radius of a circle can be detected
400); //max radius of a circle can be detected
for(itrCircles = vecCircles.begin();itrCircles != vecCircles.end();itrCircles++) //retrieve the x,y and radius of the detected circles from vecCircles object one by one
cout<< "circle position x = " << (*itrCircles)[0] //because itrCircles is a pointer(pass by reference), to get the value need to use * to dereference
<< ",y = " << (*itrCircles)[1]
<< ",r = " << (*itrCircles)[2] << "\n" << endl;
// draw the center of detected circle in green
circle(matOriginal,
Point((int)(*itrCircles)[0],(int)(*itrCircles)[1]),
3,
Scalar(0,255,0),
CV_FILLED);
// draw the circumference of detected circle
circle(matOriginal,
Point((int)(*itrCircles)[0],(int)(*itrCircles)[1]),
(int)(*itrCircles)[2],
Scalar(0,0,255),
3);
imshow("Original",matOriginal); //show the original mat(image) in Original window
imshow("Processed",matProcessed);// show the processed mat(image) in Processed window
charCheckForEscKey = waitKey(10); // delay 10 ms to allow a time gap to listen to any key pressed
} // end while
return(0);
} // end main
The crash is caused by the missing braces on the for loop body: only the cout statement is actually inside the loop, so the iterator you then use to draw is the end() iterator, and dereferencing it is undefined behavior. You should do:
for(itrCircles = vecCircles.begin();itrCircles != vecCircles.end();itrCircles++)
{
// your functions
}
May I suggest dropping the iterators and using a range-based for loop instead?
for (const auto& circ : vecCircles)
{
// your functions
}
Here the full example, cleaned from all useless stuff (especially useless headers).
#include <opencv2\opencv.hpp>
#include <iostream>
#include<vector>
using namespace cv;
using namespace std;
int main(){
VideoCapture capWebcam(0);
if (capWebcam.isOpened() == false){
cout << "Error: camera not detected!!\n" << endl;
return -1;
}
Mat matOriginal; // matrix object used to store image from webcam
Mat matProcessed;
vector<Vec3f> vecCircles;
namedWindow("Original"); //window for original image
namedWindow("Processed"); //window for Processed image
char charCheckForEscKey = 0;
while (charCheckForEscKey != 27){ //as long as ESC is not pressed, stays in the while
if (!capWebcam.read(matOriginal)){
cout << "Error: image frame not read!!" << endl;
break;
} //
inRange(matOriginal, //this time we don't need to pass a pointer; we pass the image as an object instead
Scalar(0, 0, 175), //specify the lower bound of BGR we want to keep
Scalar(100, 100, 256), //upper bound of BGR
matProcessed); //return the processed image to another object
GaussianBlur(matProcessed, matProcessed, Size(9, 9), 1.5, 1.5); //take matProcessed image and blur by Gaussian filter(9x9 window with std of 1.5 in both x,y direction) and return to same object
HoughCircles(matProcessed,
vecCircles, //use vector element to receive the x,y,radius of the detected circle
CV_HOUGH_GRADIENT, //algorithms used to detect circles
2, //size of image divided by this value = "accumulator resolution"
matProcessed.rows / 4, //min distance between the centers of two detected circles
100, //upper pixel value threshold for canny edge detection to interpret as edge
50, //lower pixel value threshold for canny edge detection to interpret as edge
10, //min radius of a circle can be detected
400); //max radius of a circle can be detected
for (const auto& circ : vecCircles) //retrieve the x,y and radius of the detected circles from vecCircles object one by one
{
cout << "circle position x = " << circ[0] //because itrCircles is a pointer(pass by reference), to get the value need to use * to dereference
<< ",y = " << circ[1]
<< ",r = " << circ[2] << "\n" << endl;
// draw the center of detected circle in green
circle(matOriginal, Point(circ[0], circ[1]), 3, Scalar(0, 255, 0), CV_FILLED);
// draw the circumference of detected circle
circle(matOriginal, Point(circ[0], circ[1]), circ[2], Scalar(0, 0, 255), 3);
}
imshow("Original", matOriginal); //show the original mat(image) in Original window
imshow("Processed", matProcessed);// show the processed mat(image) in Processed window
charCheckForEscKey = waitKey(10); // delay 10 ms to allow a time gap to listen to any key pressed
} // end while
return(0);
} // end main
Related
I'm a beginner in OpenCV with C++. I have to draw a filled rectangle (10x10) in the middle of an image where every 5th pixel is black.
I know how to create a rectangle, but how can I fill it and change the color of every 5th pixel?
It would be nice if someone could help. :/
void cv::rectangle ( InputOutputArray img,
Point pt1,
Point pt2,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
My code so far:
#include "opencv2/opencv.hpp"
#include<sstream>
using namespace std;
using namespace cv;
int main(void)
{
    // Load the image from disk and bail out if it cannot be read.
    Mat img = imread("C:\\Users\\Mehmet\\Desktop\\yoshi.png");
    if (!img.data)
    {
        cout << "Could not find the image";
        return -1;
    }

    // Display the image and write a copy back to disk.
    namedWindow("window");
    imshow("window", img);
    imwrite("C:\\Users\\Max Mustermann\\Desktop\\11.png", img);

    // Report basic image properties (channel count, width, height, type).
    cv::Size sz = img.size();
    int imageWidth = sz.width;
    int imageHeight = sz.height;
    cout << "Es gibt " << img.channels() << " Farbkanäle" << endl;
    cout << "Die Breite betreagt: " << sz.width << endl;
    cout << "Die Hoehe betreagt: " << sz.height << endl;
    std::cout << img.type();

    // Convert to a 3-channel float image with values scaled into [0, 1].
    Mat img1;
    img.convertTo(img1, CV_32FC3, 1 / 255.0);

    waitKey(0); // wait for a key press before closing the window
    return 0;
}
```
You may be able to find the answer to your question in the OpenCV documentation.
To fill the rectangle, you can change the parameter 'thickness'
==> 'thickness Thickness of lines that make up the rectangle. Negative values, like FILLED, mean that the function has to draw a filled rectangle.'
Link:
https://docs.opencv.org/4.5.2/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9
And, changing color can be done by the color parameter. Controlling this parameter is easy with using cv::Scalar(BLUE, GREEN, RED).
For example, Rectangle(~~~,cv::Scalar(255,0,0),~~~); will make blue colorized rectangle with depending other parameters. So, if you want to change the color, change these values as what you want.
Consequently, if you want to change the color of rectangle repeatably, I think you can surely make the loop with this two parameters.
How do I read numbers in an image when the lines of the characters aren't aligned with the image? Do I need to rotate the entire image, or can I give KNN character recognition an axis to read from?
In the included image are several numbers angled. If I attempt to read using the current code, it will not produce accurate results because the objects it attempts to match with a character are not straight with respect to the image.
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/ml/ml.hpp>
#include<stdio.h>
#include<opencv2\opencv.hpp>
#include<opencv\highgui.h>
#include<iostream>
#include<sstream>
// global variables ///////////////////////////////////////////////////////////////////////////////
const int MIN_CONTOUR_AREA = 60;
const int RESIZED_IMAGE_WIDTH = 20;
const int RESIZED_IMAGE_HEIGHT = 30;
bool Does_image_contain_barcode = 1;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Bundles a contour with its bounding rect and area so the contours can be
// filtered by size and sorted left-to-right before character recognition.
class ContourWithData {
public:
    // member variables ///////////////////////////////////////////////////////////////////////////
    std::vector<cv::Point> ptContour; // points making up the contour
    cv::Rect boundingRect;            // bounding rect for the contour
    float fltArea = 0.0f;             // contour area; initialized so an unset value is never read as garbage

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // A contour counts as valid (i.e. plausibly a character) when its area reaches
    // MIN_CONTOUR_AREA. A production-grade program would use a more robust test.
    bool checkIfContourIsValid() {
        return fltArea >= MIN_CONTOUR_AREA;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Comparator for std::sort: orders contours left to right by bounding-rect x position.
    static bool sortByBoundingRectXPosition(const ContourWithData& cwdLeft, const ContourWithData& cwdRight) {
        return (cwdLeft.boundingRect.x < cwdRight.boundingRect.x);
    }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main() {
std::vector<ContourWithData> allContoursWithData; // declare empty vectors,
std::vector<ContourWithData> validContoursWithData; // we will fill these shortly
// read in training classifications ///////////////////////////////////////////////////
cv::Mat matClassificationInts; // we will read the classification numbers into this variable as though it is a vector
cv::FileStorage fsClassifications("classifications.xml", cv::FileStorage::READ); // open the classifications file
if (fsClassifications.isOpened() == false) { // if the file was not opened successfully
std::cout << "error, unable to open training classifications file, exiting program\n\n"; // show error message
return(0); // and exit program
}
fsClassifications\["classifications"\] >> matClassificationInts; // read classifications section into Mat classifications variable
fsClassifications.release(); // close the classifications file
// read in training images ////////////////////////////////////////////////////////////
cv::Mat matTrainingImagesAsFlattenedFloats; // we will read multiple images into this single image variable as though it is a vector
cv::FileStorage fsTrainingImages("images.xml", cv::FileStorage::READ); // open the training images file
if (fsTrainingImages.isOpened() == false) { // if the file was not opened successfully
std::cout << "error, unable to open training images file, exiting program\n\n"; // show error message
return(0); // and exit program
}
fsTrainingImages\["images"\] >> matTrainingImagesAsFlattenedFloats; // read images section into Mat training images variable
fsTrainingImages.release(); // close the traning images file
// train //////////////////////////////////////////////////////////////////////////////
cv::Ptr<cv::ml::KNearest> kNearest(cv::ml::KNearest::create()); // instantiate the KNN object
// finally we get to the call to train, note that both parameters have to be of type Mat (a single Mat)
// even though in reality they are multiple images / numbers
kNearest->train(matTrainingImagesAsFlattenedFloats, cv::ml::ROW_SAMPLE, matClassificationInts);
cv::Mat matTestingNumbers = cv::imread("bc_sick_12_c.jpg"); // read in the test numbers image
if (matTestingNumbers.empty()) { // if unable to open image
std::cout << "error: image not read from file\n\n"; // show error message on command line
return(0); // and exit program
}
cv::Mat matGrayscale; //
cv::Mat matBlurred; // declare more image variables
cv::Mat matThresh; //
cv::Mat matThreshCopy; //
cv::cvtColor(matTestingNumbers, matGrayscale, CV_BGR2GRAY); // convert to grayscale
// blur
cv::GaussianBlur(matGrayscale, // input image
matBlurred, // output image
cv::Size(5, 5), // smoothing window width and height in pixels
0); // sigma value, determines how much the image will be blurred, zero makes function choose the sigma value
// filter image from grayscale to black and white
cv::adaptiveThreshold(matBlurred, // input image
matThresh, // output image
255, // make pixels that pass the threshold full white
cv::ADAPTIVE_THRESH_GAUSSIAN_C, // use gaussian rather than mean, seems to give better results
cv::THRESH_BINARY_INV, // invert so foreground will be white, background will be black
11, // size of a pixel neighborhood used to calculate threshold value
4); // constant subtracted from the mean or weighted mean (default 2)
matThreshCopy = matThresh.clone(); // make a copy of the thresh image, this in necessary b/c findContours modifies the image
std::vector<std::vector<cv::Point> > ptContours; // declare a vector for the contours
std::vector<cv::Vec4i> v4iHierarchy; // declare a vector for the hierarchy (we won't use this in this program but this may be helpful for reference)
cv::findContours(matThreshCopy, // input image, make sure to use a copy since the function will modify this image in the course of finding contours
ptContours, // output contours
v4iHierarchy, // output hierarchy
cv::RETR_EXTERNAL, // retrieve the outermost contours only
cv::CHAIN_APPROX_SIMPLE); // compress horizontal, vertical, and diagonal segments and leave only their end points
for (int i = 0; i < ptContours.size(); i++) { // for each contour
ContourWithData contourWithData; // instantiate a contour with data object
contourWithData.ptContour = ptContours\[i\]; // assign contour to contour with data
contourWithData.boundingRect = cv::boundingRect(contourWithData.ptContour); // get the bounding rect
contourWithData.fltArea = cv::contourArea(contourWithData.ptContour); // calculate the contour area
allContoursWithData.push_back(contourWithData); // add contour with data object to list of all contours with data
}
for (int i = 0; i < allContoursWithData.size(); i++) { // for all contours
if (allContoursWithData\[i\].checkIfContourIsValid()) { // check if valid
validContoursWithData.push_back(allContoursWithData\[i\]); // if so, append to valid contour list
}
}
// sort contours from left to right
std::sort(validContoursWithData.begin(), validContoursWithData.end(), ContourWithData::sortByBoundingRectXPosition);
std::string strFinalString; // declare final string, this will have the final number sequence by the end of the program
for (int i = 0; i < validContoursWithData.size(); i++) { // for each contour
// draw a green rect around the current char
cv::rectangle(matTestingNumbers, // draw rectangle on original image
validContoursWithData\[i\].boundingRect, // rect to draw
cv::Scalar(0, 255, 0), // green
2); // thickness
cv::Mat matROI = matThresh(validContoursWithData\[i\].boundingRect); // get ROI image of bounding rect
cv::Mat matROIResized;
cv::resize(matROI, matROIResized, cv::Size(RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT)); // resize image, this will be more consistent for recognition and storage
cv::Mat matROIFloat;
matROIResized.convertTo(matROIFloat, CV_32FC1); // convert Mat to float, necessary for call to find_nearest
cv::Mat matROIFlattenedFloat = matROIFloat.reshape(1, 1);
cv::Mat matCurrentChar(0, 0, CV_32F);
kNearest->findNearest(matROIFlattenedFloat, 1, matCurrentChar); // finally we can call find_nearest !!!
float fltCurrentChar = (float)matCurrentChar.at<float>(0, 0);
strFinalString = strFinalString + char(int(fltCurrentChar)); // append current char to full string
}
std::cout << "\n\n" << "numbers read = " << strFinalString << "\n\n"; // show the full string
cv::imshow("matTestingNumbers", matTestingNumbers); // show input image with green boxes drawn around found digits
//cv::imshow("matTestingNumbers", matThreshCopy);
cv::waitKey(0); // wait for user key press
return(0);
}][1]
I'm currently using opencv library with c++, and my goal is to cancel a fisheye effect on an image ("make it plane")
I'm using the function "undistortImage" to cancel the effect but I need before to perform camera calibration in order to find the parameters K, Knew, and D, but I didn't understand exactly the documentation ( link: http://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gga37375a2741e88052ce346884dfc9c6a0a0899eaa2f96d6eed9927c4b4f4464e05).
From my understanding, I should give two lists of points and the function "calibrate" is supposed to return the arrays I need. So my question is the following: given a fisheye image, how am I supposed to pick the two lists of points to get the result ? This is for the moment my code, very basic, just takes the picture, display it, performs the undistortion and displays the new image. The elements in the matrix are random, so currently the result is not as expected. Thanks for the answers.
#include "opencv2\core\core.hpp"
#include "opencv2\highgui\highgui.hpp"
#include "opencv2\calib3d\calib3d.hpp"
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
int main(){
    cout << " Usage: display_image ImageToLoadAndDisplay" << endl;

    // Load the input picture; bail out if it cannot be read.
    Mat image = imread("C:/Users/Administrator/Downloads/eiffel.jpg", CV_LOAD_IMAGE_COLOR);
    if (!image.data)
    {
        cout << "Could not open or find the image" << endl;
        return -1;
    }
    cout << "Input image depth: " << image.depth() << endl;

    namedWindow("Display window", WINDOW_AUTOSIZE); // window for the original image
    imshow("Display window", image);

    // Camera matrix K, distortion coefficients D, and the new camera matrix,
    // all double precision as the fisheye API expects.
    Mat Ka = Mat::eye(3, 3, CV_64F);
    Mat Da = Mat::ones(1, 4, CV_64F);
    Mat dstImage(image.rows, image.cols, CV_32F); // destination for the undistorted image
    cout << "K matrix depth: " << Ka.depth() << endl;
    cout << "D matrix depth: " << Da.depth() << endl;
    Mat Knew = Mat::eye(3, 3, CV_64F);

    std::vector<cv::Vec3d> rvec; // per-view rotation vectors (output of calibrate)
    std::vector<cv::Vec3d> tvec; // per-view translation vectors (output of calibrate)
    int flag = 0;                // no calibration flags set

    // Hand-picked 3D pattern points and their corresponding 2D projections in the image.
    std::vector<Point3d> objectPoints1 = { Point3d(0,0,0), Point3d(1,1,0), Point3d(2,2,0), Point3d(3,3,0), Point3d(4,4,0), Point3d(5,5,0),
        Point3d(6,6,0), Point3d(7,7,0), Point3d(3,0,0), Point3d(4,1,0), Point3d(5,2,0), Point3d(6,3,0), Point3d(7,4,0), Point3d(8,5,0), Point3d(5,4,0), Point3d(0,7,0), Point3d(9,7,0), Point3d(9,0,0), Point3d(4,3,0), Point3d(7,2,0)};
    std::vector<Point2d> imagePoints1 = { Point(107,84), Point(110,90), Point(116,96), Point(126,107), Point(142,123), Point(168,147),
        Point(202,173), Point(232,192), Point(135,69), Point(148,73), Point(165,81), Point(189,93), Point(219,112), Point(248,133), Point(166,119), Point(96,183), Point(270,174), Point(226,56), Point(144,102), Point(206,75) };

    // calibrate() expects one inner vector per calibration view; we have one view.
    std::vector<std::vector<cv::Point2d> > imagePoints{ imagePoints1 };
    std::vector<std::vector<cv::Point3d> > objectPoints{ objectPoints1 };

    fisheye::calibrate(objectPoints, imagePoints, image.size(), Ka, Da, rvec, tvec, flag); // Calibration
    cout << Ka << endl;
    cout << Da << endl;

    fisheye::undistortImage(image, dstImage, Ka, Da, Knew); // remove the fisheye distortion

    namedWindow("Display window 2", WINDOW_AUTOSIZE); // window for the undistorted result
    imshow("Display window 2", dstImage);
    waitKey(0); // wait for a keystroke in the window
    return 0;
}
For calibration with cv::fisheye::calibrate you must provide
objectPoints vector of vectors of calibration pattern points in the calibration pattern coordinate space.
This means you must provide KNOWN real-world coordinates of the points (they must correspond to the points in imagePoints), but you can choose the coordinate system position arbitrarily (as long as it is Cartesian), so you must know your object — e.g. a planar test pattern.
imagePoints vector of vectors of the projections of calibration pattern points
These must be the same points as in objectPoints, but given in image coordinates, so where the projection of the object points hit your image (read/extract the coordinates from your image).
For example, if your camera did capture this image (taken from here ):
you must know the dimension of your testpattern (up to a scale), for example you could choose the top-left corner of the top-left square to be position (0,0,0), the top-right corner of the top-left square to be (1,0,0), and the bottom-left corner of the top-left square to be (1,1,0), so your whole testpattern would be placed on the xy-plane.
Then you could extract these correspondences:
pixel real-world
(144,103) (4,3,0)
(206,75) (7,2,0)
(109,151) (2,5,0)
(253,159) (8,6,0)
for these points (marked red):
The pixel position could be your imagePoints list while the real-world positions could be your objectPoints list.
Does this answer your question?
I'm pretty new to OpenCV, so bear with me. I'm running a Mac Mini with OSX 10.8. I have a program that recognizes colors and displays them in binary picture (black and white). However, I want to store the number of white pixels as an integer (or float, etc.) to compare with other number of pixels. How can I do this? Here is my current code-
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
using namespace cv;
using namespace std;
// Captures webcam frames, crops the left half, and builds binary masks for two
// color ranges in HSV space; the goal (per the question) is to compare the
// white-pixel counts of the two masks.
int main( int argc, char** argv )
{
VideoCapture cap(0); //capture the video from webcam
if ( !cap.isOpened() ) // if not success, exit program
{
cout << "Cannot open the web cam" << endl;
return -1;
}
namedWindow("HSVLeftRed", CV_WINDOW_AUTOSIZE);
namedWindow("HSVLeftGreen", CV_WINDOW_AUTOSIZE);
while (true) {
Mat image;
cap.read(image);
Mat HSV;
Mat leftgreen;
Mat leftred;
//Left Cropping
// NOTE(review): assumes the captured frame is at least 640x720 pixels;
// Rect(0, 0, 640, 720) will fail on smaller frames — confirm the capture resolution.
Mat leftimg = image(Rect(0, 0, 640, 720));
//Left Red Detection
cvtColor(leftimg,HSV,CV_BGR2HSV);
// Pixels whose HSV values fall inside [lower, upper] become white (255) in the mask.
inRange(HSV,Scalar(0,0,150),Scalar(0,0,255), leftgreen);
//imshow("HSVLeftRed", leftgreen);
//print pixel type
//Left Green Detection
cvtColor(leftimg,HSV,CV_BGR2HSV);
// PLACEHOLDER: the author has not chosen bounds yet — this line does not compile
// as written; real Scalar(min...) / Scalar(max...) values still need to be filled in.
inRange(HSV,Scalar(still need to find proper min values),Scalar(still need to find proper max values), leftgreen);
//imshow("HSVLeftGreen", leftgreen);
//compare pixel types
// (cv::countNonZero(mask) would give the white-pixel count to compare here.)
}
return 0;
}
Thanks in advance!
To count the non-zero pixels, OpenCV has this function cv::countNonZero. It takes input the image, whose number of non-zero pixels, we want to calculate and output is number of non-zero pixels(int). Here is the documentation.
In your case, since all the pixels are either black or white, all the non zero pixels will be white pixels.
This is how to use it,
int cal = countNonZero(image);
Change image, as per your code.
I'm teaching myself OpenCV and wrote the following code today to track a ball rolling across my computer webcam feed and (attempt to) draw a filled-in grey circle onto its centroid:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Point getBlobCentroid(Mat blobImage);
// Tracks motion between consecutive webcam frames and marks the centroid of the
// changed (white) pixels with a grey filled circle on the live feed.
int main()
{
    Mat bGround, fGround, diff;
    // BUG FIX: "Point p = (500, 280);" used the comma operator, so p was not
    // initialized to (500, 280) at all. (p is currently unused.)
    Point p(500, 280);

    VideoCapture cap(0);

    // BUG FIX: the original called bGround.create(...) inside the loop and
    // compared against it before anything was ever stored, so the first
    // absdiff() ran against uninitialized memory. Prime the background with a
    // real first frame instead.
    cap >> bGround;
    cvtColor(bGround, bGround, CV_BGR2GRAY);

    while (true)
    {
        cap >> fGround;                          // newest frame from the camera
        cvtColor(fGround, fGround, CV_BGR2GRAY); // convert to grayscale
        absdiff(bGround, fGround, diff);         // difference against the previous frame
        threshold(diff, diff, 50, 255, CV_THRESH_BINARY); // binarize the difference
        // BUG FIX: the C++ erode() takes a Mat kernel, not NULL; an empty Mat()
        // selects the default 3x3 structuring element. 3 iterations as before.
        erode(diff, diff, Mat(), Point(-1, -1), 3);
        imshow("Thresholded", diff);
        // mark the centroid of the white blob on the live image
        circle(fGround, getBlobCentroid(diff), 6, 127, -1, 8, 16);
        imshow("Natural Image with Tracking", fGround);
        fGround.copyTo(bGround); // move forward in time
        waitKey(1);              // 1 ms delay; pumps the HighGUI event loop
    }
    return 0;
}
// Computes the centroid of all pure-white (255) pixels in a single-channel
// 8-bit image. Returns Point(0, 0) when the image contains no white pixel.
Point getBlobCentroid(Mat blobImage)
{
    int rowSum = 0, colSum = 0, count = 0;
    for (int i = 0; i < blobImage.rows; i++)     // i indexes rows (the y coordinate)
    {
        for (int j = 0; j < blobImage.cols; j++) // j indexes columns (the x coordinate)
        {
            if (blobImage.at<uchar>(i, j) == 255)
            {
                rowSum += i;
                colSum += j;
                count++;
            }
        }
    }
    if (count == 0)
    {
        return Point(0, 0); // no blob found; also removes the old "count = 1" divide-by-zero hack
    }
    // BUG FIX: "Point centroid = (rowSum, colSum)/count;" used the comma
    // operator, discarding rowSum entirely — that is why the y coordinate was
    // always wrong. Construct the point explicitly, remembering that
    // x = column average and y = row average.
    return Point(colSum / count, rowSum / count);
}
However, as evidenced by the attached image - the circle never moves away from the top of the screen - in other words, the centroid.y component is always zero. I wrote a bunch of steps of the calculation to the screen, and it appears as though the searching and additions to rowSum and count and such work - those are nonzero. However, as soon as you calculate the centroid or call it in the circle, that's a no go. Even weirder, I tried making a constant center for the circle Point p = (285, 285) and using that as an argument, and that was a no go as well. Help? Thanks!
-Tony
fGround.copyTo(bGround); //move forward in time
// so, that's your idea. compare bg & fg, get the centroid of the diff.
// but then, if you follow your while loop there, (waitkey, back to the top ... )
bGround.create(fGround.size(), fGround.type());
// aww, so you're never using, what you copied before
absdiff(bGround, fGround, diff);
// so, in the end, you're always comparing fGround to an empty bGround img