Real-time object tracking in bird's-eye view with OpenCV - C++

This program is my real-time color tracking in bird's-eye view system:
#include <sstream>
#include <string>
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include "stdafx.h"
#include "Fruit.h"
#include "opencv2\\opencv.hpp"
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_core249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_imgproc249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_highgui249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_objdetect249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_ml249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_video249d.lib")
#pragma comment(lib,"C:\\opencv\\build\\x86\\vc10\\lib\\opencv_calib3d249d.lib")
using namespace cv;
//initial min and max HSV filter values.
//these will be changed using trackbars
int H_MIN = 0;
int H_MAX = 256;
int S_MIN = 0;
int S_MAX = 256;
int V_MIN = 0;
int V_MAX = 256;
//default capture width and height
const int FRAME_WIDTH = 640;
const int FRAME_HEIGHT = 480;
//max number of objects to be detected in frame
const int MAX_NUM_OBJECTS=50;
//minimum and maximum object area
const int MIN_OBJECT_AREA = 7*7;
const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
//names that will appear at the top of each window
const string windowName = "Original Image";
const string windowName1 = "HSV Image";
const string windowName2 = "Thresholded Image";
const string windowName3 = "After Morphological Operations";
const string trackbarWindowName = "Trackbars";
void on_trackbar( int, void* )
{
//This function gets called whenever a
// trackbar position is changed
}
string intToString(int number){
std::stringstream ss;
ss << number;
return ss.str();
}
void createTrackbars(){
//create window for trackbars
namedWindow(trackbarWindowName,0);
//create memory to store trackbar name on window
char TrackbarName[50];
sprintf( TrackbarName, "H_MIN", H_MIN);
sprintf( TrackbarName, "H_MAX", H_MAX);
sprintf( TrackbarName, "S_MIN", S_MIN);
sprintf( TrackbarName, "S_MAX", S_MAX);
sprintf( TrackbarName, "V_MIN", V_MIN);
sprintf( TrackbarName, "V_MAX", V_MAX);
createTrackbar( "H_MIN", trackbarWindowName, &H_MIN, H_MAX, on_trackbar );
createTrackbar( "H_MAX", trackbarWindowName, &H_MAX, H_MAX, on_trackbar );
createTrackbar( "S_MIN", trackbarWindowName, &S_MIN, S_MAX, on_trackbar );
createTrackbar( "S_MAX", trackbarWindowName, &S_MAX, S_MAX, on_trackbar );
createTrackbar( "V_MIN", trackbarWindowName, &V_MIN, V_MAX, on_trackbar );
createTrackbar( "V_MAX", trackbarWindowName, &V_MAX, V_MAX, on_trackbar );
}
void drawObject(vector<Fruit> theFruits,Mat &frame){
for(int i =0; i<theFruits.size(); i++){
cv::circle(frame,cv::Point(theFruits.at(i).getXPos(),theFruits.at(i).getYPos()),10,cv::Scalar(0,0,255));
cv::putText(frame,intToString(theFruits.at(i).getXPos())+ " , " + intToString(theFruits.at(i).getYPos()),cv::Point(theFruits.at(i).getXPos(),theFruits.at(i).getYPos()+20),1,1,Scalar(0,255,0));
}
}
void morphOps(Mat &thresh){
//create structuring element that will be used to "dilate" and "erode" image.
//the element chosen here is a 3px by 3px rectangle
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(3,3));
//dilate with a larger element to make sure the object is nicely visible
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(8,8));
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
void trackFilteredObject(Mat threshold,Mat HSV, Mat &cameraFeed_BE){
vector <Fruit> apples;
Mat temp;
threshold.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
bool objectFound = false;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
if(numObjects<MAX_NUM_OBJECTS){
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
if(area>MIN_OBJECT_AREA){
Fruit apple;
apple.setXPos(moment.m10/area);
apple.setYPos(moment.m01/area);
apples.push_back(apple);
objectFound = true;
}else objectFound = false;
}
//let user know you found an object
if(objectFound ==true){
//draw object location on screen
drawObject(apples,cameraFeed_BE);
}
}else putText(cameraFeed_BE,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
}
}
int main(int argc, char* argv[])
{
//if we would like to calibrate our filter values, set to true.
bool calibrationMode = true;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
Mat threshold;
Mat HSV;
if(calibrationMode){
//create slider bars for HSV filtering
createTrackbars();
}
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
if(!capture.isOpened()) //confirm camera opened
{
return -1;
}
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
do{
capture >> cameraFeed;
}while(cameraFeed.empty());
int key = 0;
IplImage* Frame ;
IplImage* birds_image;
CvMat *H;
float Z;
cvNamedWindow("Birds eye");
Frame =new IplImage(cameraFeed);
birds_image = cvCloneImage(Frame);
// EXAMPLE OF LOADING THESE MATRICES BACK IN:
CvMat *intrinsic = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
// Build the undistort map which we will use for all subsequent frames.
IplImage* mapx = cvCreateImage( cvGetSize(Frame), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvGetSize(Frame), IPL_DEPTH_32F, 1 );
cvInitUndistortMap(
intrinsic,
distortion,
mapx,
mapy
);
IplImage *t = cvCloneImage(Frame);
//cvShowImage( "Calibration", image ); // Show raw image
cvRemap( t, Frame, mapx, mapy ); // Undistort image
cvReleaseImage(&t);
H = (CvMat*)cvLoad("H.xml");
cvWarpPerspective(
Frame,
birds_image,
H,
CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
);
cvShowImage("Birds eye", birds_image);
Mat cameraFeed_BE = cvarrToMat(birds_image);
//store image to matrix
capture.read(cameraFeed_BE);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed_BE,HSV,COLOR_BGR2HSV);
if(calibrationMode==true){
//if in calibration mode, we track objects based on the HSV slider values.
cvtColor(cameraFeed_BE,HSV,COLOR_BGR2HSV);
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(windowName2,threshold);
trackFilteredObject(threshold,HSV,cameraFeed_BE);
}
//show frames
imshow(windowName2,threshold);
imshow(windowName,cameraFeed_BE);
imshow(windowName1,HSV);
//delay 30ms so that screen can refresh.
//image will not appear without this waitKey() command
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
I think the Original Image, HSV Image, and Thresholded Image windows should show the bird's-eye image, but they don't, and I don't know why.
Also, after I run the program for about a minute, this error appears:
[screenshot of the error message]
I think this error appears because of a memory leak, so I added this line:
cvReleaseImage(&birds_image);
but it didn't fix the problem.
Could you give me your ideas on these two issues?

As you noted, the error message is related to a memory error. You have a memory allocation line
Frame = new IplImage(cameraFeed);
inside your infinite loop. This image (an RGB frame from the webcam!) is never released, and each iteration of the loop overwrites the pointer, losing the reference and making it impossible to free that allocation.
It might also be a good idea to move the reading of the .xml files out of the infinite loop:
CvMat *intrinsic = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
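For reference, here is a minimal sketch of how the loop could be restructured, assuming the same file names and variables as in the question (the mapx/mapy undistort maps should also be built once up front, for the same reason):
// Load calibration data once, before the loop (a sketch, not a drop-in fix)
CvMat *intrinsic = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
CvMat *H = (CvMat*)cvLoad("H.xml");
while(1){
    capture >> cameraFeed;
    if(cameraFeed.empty()) continue;
    IplImage frameHeader = cameraFeed; // header over the Mat's data, no new allocation
    IplImage* birds_image = cvCloneImage(&frameHeader);
    // ... undistort, warp, and track as before ...
    cvShowImage("Birds eye", birds_image);
    cvReleaseImage(&birds_image); // free the clone every frame
    if(cv::waitKey(30) >= 0) break;
}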

Related

Changing displacement over a sequence of frames

Hi guys. I've written some code to describe the motion of interest points over a .avi video file.
Here is the code:
#include "opencv2/video/tracking.hpp"
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace cv;
using namespace std;
int main() {
VideoCapture capture("video.avi");
if (!capture.isOpened()) {
cout << "ERROR OPENING VIDEO\n\n";
return(0);
}
double rate = capture.get(CV_CAP_PROP_FPS);
unsigned int numberFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
int width = (unsigned int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
int height = (unsigned int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
unsigned int codec = (unsigned int) capture.get(CV_CAP_PROP_FOURCC);
Mat currentGray;
Mat previousGray;
vector< Point2f > points[2];
vector< Point2f > initial;
vector< Point2f > features;
vector< uchar > status;
vector< float > error;
int maxCorners = 500; // maximum number of features to detect
double qualityLevel = 0.01; // quality level for feature detection
double minDistance = 10; // min distance between two points
Mat frame, output;
VideoWriter createdVideo("output.avi", codec, rate, Size(width,height), 1);
for (unsigned frameCounter = 0; frameCounter < numberFrames; frameCounter++) {
capture >> frame;
if (frame.empty())
break;
imshow("Video", frame);
cvtColor(frame, currentGray, CV_BGR2GRAY);
frame.copyTo(output);
if (points[0].size() <= 10){
goodFeaturesToTrack(currentGray, // the image
features, // the output detected features
maxCorners, // the maximum number of features
qualityLevel, // quality level
minDistance); // min distance between two features
// add the detected features to
// the currently tracked features
points[0].insert(points[0].end(),
features.begin(), features.end());
initial.insert(initial.end(),
features.begin(), features.end());
}
if (previousGray.empty())
currentGray.copyTo(previousGray);
calcOpticalFlowPyrLK(previousGray, currentGray, // 2 consecutive images
points[0], // input point positions in first image
points[1], // output point positions in the 2nd image
status, // tracking success
error); // tracking error
int k = 0;
for (size_t i = 0; i < points[1].size(); i++) {
// do we keep this point?
if (status[i] && // the point was tracked successfully
(abs(points[0][i].x - points[1][i].x) +
abs(points[0][i].y - points[1][i].y)) > 2) { // and it has moved
initial[k] = initial[i];
points[1][k++] = points[1][i];
}
}
points[1].resize(k);
initial.resize(k);
for (int i = 0; i < points[1].size(); i++) {
// draw line and circle
line(output,
initial[i], // initial position
points[1][i],// new position
Scalar(0, 255, 0), 2);
circle(output,
points[1][i],
2,
Scalar(0, 0, 255), -1);
}
std::swap(points[1], points[0]);
cv::swap(previousGray, currentGray);
createdVideo.write(output);
}
waitKey(0);
return(0);
}
My code tracks the displacement of points frame by frame and keeps their initial locations until the end of the video.
However, I don't want to keep the point locations from the first frame; I want them to change over time, i.e. replace the first point location with the second one, and so on, so that huge lines don't appear, only the displacement between two points in two consecutive frames.
Is there any possibility of doing this?
Since you only want the positions of points in two frames, just use two vectors: one holding the keypoints from the current frame, and one holding the keypoints from the previous frame. At the end of each iteration, just set the previous points to the current points. Something like this pseudocode:
// first frame
// detect keypoints
prev_frame_points = keypoints
// rest of the frames
for frame in frames:
// detect keypoints
curr_frame_points = keypoints
line(..., prev_frame_points, curr_frame_points, ...)
prev_frame_points = curr_frame_points
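Applied to the code in the question, a rough sketch of the drawing loop might look like this (same points, status, and output variables as above; the long-lived initial vector is no longer needed):
// draw only the frame-to-frame displacement by pairing
// points[0] (previous frame) with points[1] (current frame)
for (size_t i = 0; i < points[1].size(); i++) {
    if (!status[i])
        continue; // skip points that were lost
    line(output, points[0][i], points[1][i], Scalar(0, 255, 0), 2);
    circle(output, points[1][i], 2, Scalar(0, 0, 255), -1);
}
std::swap(points[1], points[0]); // current becomes previous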

How to count white objects in a binary image?

I'm trying to count objects in an image. I'm using a photo of logs, and I apply several steps to get a binary image.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
//load image
Mat img = imread("kayu.jpg", CV_LOAD_IMAGE_COLOR);
if(img.empty())
return -1;
//namedWindow( "kayu", CV_WINDOW_AUTOSIZE );
imshow("kayu", img);
//convert to b/w
Mat bw;
cvtColor(img, bw, CV_BGR2GRAY);
imshow("bw1", bw);
threshold(bw, bw, 40, 255, CV_THRESH_BINARY);
imshow("bw", bw);
//distance transform & normalisation
Mat dist;
distanceTransform(bw, dist, CV_DIST_L2, 3);
normalize(dist, dist, 0, 2., NORM_MINMAX);
imshow("dist", dist);
//threshold to draw line
threshold(dist, dist, .5, 1., CV_THRESH_BINARY);
imshow("dist2", dist);
//dist = bw;
//dilation
Mat dilation, erotion, element;
int dilation_type = MORPH_ELLIPSE;
int dilation_size = 17;
element = getStructuringElement(dilation_type, Size(2*dilation_size + 1, 2*dilation_size+1), Point(dilation_size, dilation_size ));
erode(dist, erotion, element);
int erotionCount = 0;
for(int i=0; i<erotionCount; i++){
erode(erotion, erotion, element);
}
imshow("erotion", erotion);
dilate(erotion, dilation, element);
imshow("dilation", dilation);
waitKey(0);
return 0;
}
As you can see, I use erosion and dilation to get better circular log shapes. My problem is that I'm stuck at counting the objects. I tried SimpleBlobDetector but got nothing, because when I convert the result of the "dilation" step to CV_8U, the white objects disappear. I also got an error when I used findContours(); it said something about the image's channels. I can't show the error here, because there were too many steps and I already deleted them from my code.
By the way, at the end I have a single-channel image. Can I just use it for counting, or do I have to convert it first, and what is the best method to do that?
Two simple steps:
Find contours for the binarized image.
Get the count of the contours.
Code:
int count_trees(const cv::Mat& bin_image){
cv::Mat img;
if(bin_image.channels()>1){
cv::cvtColor(bin_image,img,cv::COLOR_BGR2GRAY);
}
else{
img=bin_image.clone();
}
if(img.type()!=CV_8UC1){
img*=255.f; //This could be stupid, but I do not have an environment to try it
img.convertTo(img,CV_8UC1);
}
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours( img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
return contours.size();
}
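If I'm reading your pipeline correctly, you could then call this on the final image from your code, for example:
// usage with the "dilation" Mat from the question's code
int numLogs = count_trees(dilation);
std::cout << "Number of logs: " << numLogs << std::endl;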
I have the same problem, here's an idea I'm about to implement.
1) Represent your image as an array of integers; 0 = black, 1 = white.
2) set N = 2;
3) Scan your image, pixel-by-pixel. Whenever you find a white pixel, activate a flood-fill algorithm, starting at the pixel just found; paint the region with the value of N++;
4) Repeat step 3 until you reach the last pixel. (N-2) is the number of regions found.
This method depends on the shape of the objects; mine are more chaotic than yours (wish me luck..). I'll make use of a recursive flood-fill recipe found somewhere (maybe Rosetta Code).
This solution also makes it easy to compute the size of each region.
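As a rough sketch of this idea using OpenCV's built-in flood fill rather than a hand-rolled recursive one (this assumes a CV_8UC1 image with white objects at 255 on a black background):
int countRegions(const cv::Mat& bin){
    cv::Mat work = bin.clone(); // floodFill paints in place, so work on a copy
    int regions = 0;
    for (int y = 0; y < work.rows; y++)
        for (int x = 0; x < work.cols; x++)
            if (work.at<uchar>(y, x) == 255) {
                // paint the whole region gray so it is not counted again
                cv::floodFill(work, cv::Point(x, y), cv::Scalar(100));
                regions++;
            }
    return regions; // number of connected white regions
}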
Try applying this to the findContours() output you deleted:
// count
int count = 0;
for (int i = 0; i < contours.size(); i = hierarchy[i][0]) // iterate over each top-level contour
{
Rect r = boundingRect(contours[i]);
if (hierarchy[i][2]<0) {
rectangle(canny_output, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(20, 50, 255), 3, 8, 0);
count++;
}
}
cout << "Numeber of contour = " << count << endl;
imshow("src", src);
imshow("contour", dst);
waitKey(0);

Object tracking delay in color tracking with OpenCV

I am trying to detect colored balls, like PS3 Move controller balls, from a distance of 2 m. I have 10 cameras in the same room, hanging from the ceiling. The room is dark and the balls have LEDs inside. I have 4-5 balls (red, blue, green, yellow, pink). I want to track their positions with OpenCV. What's the right method for doing this in OpenCV? Can you give a link or example for this?
I use this code, but I have a latency problem. When I comment out my trackFilteredObject line there is no lag, but with it there is a lot of latency. I can't understand why, because my normal CPU usage is ~15% and RAM usage is 6.3 GB/15 GB (40%); when I run this code, CPU usage is ~20-23% and RAM usage is 6.4 GB. I don't think it's a CPU/RAM performance issue. What am I doing wrong?
Video: https://www.youtube.com/watch?v=_BKtJpPrkO4 (You can see the lag in the first 10 seconds; after 10 seconds I comment out the tracking code.)
Note: kameraSayisi means camera count. My track function:
void trackFilteredObject(Object theObject,Mat threshold,Mat HSV, Mat &cameraFeed){
//max number of objects to be detected in frame
const int FRAME_WIDTH = 5120;
const int FRAME_HEIGHT = 480;
const int MAX_NUM_OBJECTS=50;
//minimum and maximum object area
const int MIN_OBJECT_AREA = 10*10;
const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
vector <Object> objects;
Mat temp;
threshold.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
bool objectFound = false;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
if(numObjects<MAX_NUM_OBJECTS){
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
//if the area is less than 20 px by 20px then it is probably just noise
//if the area is the same as the 3/2 of the image size, probably just a bad filter
//we only want the object with the largest area, so we save a reference area each
//iteration and compare it to the area in the next iteration.
if(area>MIN_OBJECT_AREA){
Object object;
object.setXPos(moment.m10/area);
object.setYPos(moment.m01/area);
object.setType(theObject.getType());
object.setColor(theObject.getColor());
objects.push_back(object);
objectFound = true;
}else objectFound = false;
}
//let user know you found an object
if(objectFound ==true){
//draw object location on screen
drawObject(objects,cameraFeed,temp,contours,hierarchy);}
}else putText(cameraFeed,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
}
}
Main Code:
void Run()
{
int w, h;
_fps = 30;
IplImage *pCapImage[kameraSayisi];
IplImage *pDisplayImage;
PBYTE pCapBuffer = NULL;
// Create camera instance
for(int i = 0; i < kameraSayisi; i++)
{
_cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
if(_cam[i] == NULL) return;
// Get camera frame dimensions
CLEyeCameraGetFrameDimensions(_cam[i], w, h);
// Create the OpenCV images
pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
// Set some camera parameters
CLEyeSetCameraParameter(_cam[i], CLEYE_GAIN, 0);
CLEyeSetCameraParameter(_cam[i], CLEYE_EXPOSURE, 511);
// Start capturing
CLEyeCameraStart(_cam[i]);
}
pDisplayImage = cvCreateImage(cvSize(w*kameraSayisi / 2, h * kameraSayisi/4 ), IPL_DEPTH_8U ,1);
if(_cam == NULL) return;
int iLastX = -1;
int iLastY = -1;
//Capture a temporary image from the camera
//program
bool trackObjects = true;
bool useMorphOps = true;
Mat HSV;
//Create a black image with the size as the camera output
Mat imgLines;
// imgLines = Mat::zeros( cvarrToMat(image).size(), CV_8UC3 );;
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
bool calibrationMode = false;
if(calibrationMode){
//create slider bars for HSV filtering
createTrackbars();
}
// image capturing loop
while(_running)
{
PBYTE pCapBuffer;
// Capture camera images
for(int i = 0; i < kameraSayisi; i++)
{
cvGetImageRawData(pCapImage[i], &pCapBuffer);
CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0)?2000:0);
}
// Display stereo image
for(int i = 0; i < kameraSayisi; i++)
{
cvSetImageROI(pDisplayImage, cvRect(w * (i%4) ,i/4 * h, w, h));
cvCopy(pCapImage[i], pDisplayImage);
}
cvResetImageROI(pDisplayImage);
Mat imgOriginal;
Mat imgConverted = cvarrToMat(pDisplayImage);
if(calibrationMode==true)
{
//need to find the appropriate color range values
// calibrationMode must be false
//if in calibration mode, we track objects based on the HSV slider values.
//cvtColor(imgOriginal,imgOriginal,CV_BayerRG2RGB);
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
cvtColor(imgOriginal,HSV,CV_BGR2HSV);
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(_windowName + 'T',threshold);
//the following is for Canny edge detection
/// Create a matrix of the same type and size as src (for dst)
dst.create( imgOriginal.size(), src.type() );
/// Convert the image to grayscale
cvtColor( imgOriginal, src_gray, CV_BGR2GRAY );
/// Create a window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Create a Trackbar for user to enter threshold
// createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
/// Show the image
Object a = Object(H_MIN,S_MIN,V_MIN,H_MAX,S_MAX,V_MAX);
trackFilteredObject(a,threshold,HSV,imgOriginal);
}
else{
//we can use their member functions/information
Object blue("blue"), yellow("yellow"), red("red"), orange("orange"),white("white");
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
//first find blue objects
cvtColor(imgOriginal,HSV,CV_RGB2HSV);
inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
morphOps(threshold);
//then yellows
inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
//then reds
inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
//then white
inRange(HSV,white.getHSVmin(),white.getHSVmax(),threshold);
//then orange
inRange(HSV,orange.getHSVmin(),orange.getHSVmax(),threshold);
trackFilteredObject(yellow,threshold,HSV,imgOriginal);
trackFilteredObject(white,threshold,HSV,imgOriginal);
trackFilteredObject(red,threshold,HSV,imgOriginal);
trackFilteredObject(blue,threshold,HSV,imgOriginal);
trackFilteredObject(orange,threshold,HSV,imgOriginal);
}
//delay 10ms so that screen can refresh.
//image will not appear without this waitKey() command
if (cvWaitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
// cvShowImage(_windowName, image);
imshow(_windowName,imgOriginal);
}
for(int i = 0; i < kameraSayisi; i++)
{
// Stop camera capture
CLEyeCameraStop(_cam[i]);
// Destroy camera object
CLEyeDestroyCamera(_cam[i]);
// Destroy the allocated OpenCV image
cvReleaseImage(&pCapImage[i]);
_cam[i] = NULL;
}
}

OpenCV splitting camera feed into grid and determining colour

I've written a piece of code to take my camera feed, split it into a grid (like a chess board) and evaluate each square for colour.
The code I currently have looks like this:
using namespace std;
using namespace cv;
//Standard Dilate and erode functions to improve white/black areas in Binary Image
// Pointer &thresh used so it affects threshImg so it can be used in tracking.
void morphOps(Mat &thresh){
//Increases size of black to remove unwanted white specks outside of object
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(3,3));
//Increases white-area size to remove holes in object
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(8,8));
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
//Tracking for the Filtered Object
void trackFilteredObject(int noteNum, string colourtype, Mat &thresh ,Mat HSVImage, Mat &cam){
vector<Brick> Bricks;
Mat temp;
thresh.copyTo(temp);
threshold(temp, thresh, 120, 255, 3); //3 = Threshold to Zero
int whitePixs = countNonZero(thresh);
int cols = thresh.cols;
int rows = thresh.rows;
int imgSize = (rows*cols)/0.75;
if(whitePixs > imgSize){
Brick Brick;
Brick.setColour(colourtype);
Brick.setnoteNum(noteNum);
Bricks.push_back(Brick);
}
}
int main(int argc, char* argv[])
{
/// Create a window
namedWindow("window", CV_WINDOW_AUTOSIZE );
while(1){
//initialtes camera, sets capture resolution
VideoCapture capture;
capture.open(1);
capture.set(CV_CAP_PROP_FPS, 30);
capture.set(CV_CAP_PROP_FRAME_WIDTH,640);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,480);
Mat cam;
// Saves camera image to Matrix "cam"
capture.read(cam);
//Sets Widths and Heights based on camera resolution (cam.cols/cam.rows retrieves this)
int Width = cam.cols;
int gridWidth = Width/16;
int Height = cam.rows;
int gridHeight = Height/16;
//Splits image into 256 squares going left to right through rows and descending vertically. (16 squares per row for 4/4 pattern)
Mat BigImage;
Mat HSVImage;
// Converts cam to HSV pallete
cvtColor(cam, HSVImage, COLOR_BGR2HSV);
Size smallSize(gridWidth,gridHeight);
std::vector<Mat> smallImages;
for (int y = 0; y < HSVImage.rows; y += smallSize.height)
{
for (int x = 0; x < HSVImage.cols; x += smallSize.width)
{
cv::Rect rect = cv::Rect(x,y, smallSize.width, smallSize.height);
//Saves the matrix to vector
smallImages.push_back(cv::Mat(HSVImage, rect));
}
}
for (int i = 0; i < smallImages.size(); i++){
Mat HSV;
smallImages.at(i).copyTo(HSV);
int noteNum = i;
Mat threshImg;
inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); //erodes image
string colour = "Red";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);
inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "yellow";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);
inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg);
colour = "Black";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);
inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "White";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);
inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "Green";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);
}
imshow("window", cam);
}
return 0;
}
At the moment the code takes quite a long time to execute a full loop (about 1.5 seconds), but I ideally need it to run as close to real time as possible for a music application.
Could anyone suggest why it takes so long to execute? Is there a better way to evaluate the colour of each square?
My class is as follows:
//Brick.h
#include <string>
using namespace std;
class Brick{
public:
Brick(void);
~Brick(void);
string getColour();
void setColour(string whatColour);
int getnoteNum();
void setnoteNum(int whatnoteNum);
private:
int noteNum;
string colour;
};
// Brick.cpp
#include <stdio.h>
#include "Brick.h"
Brick::Brick(void){
}
Brick::~Brick(void){
}
// get/set Colour
////////////////////////////////
string Brick::getColour(){
return Brick::colour;
}
void Brick::setColour(string whatColour){
Brick::colour = whatColour;
}
// get/set Note Number
////////////////////////////////
int Brick::getnoteNum(){
return Brick::noteNum;
}
void Brick::setnoteNum(int whatnoteNum){
Brick::noteNum = whatnoteNum;
}
I will be so grateful to anyone who replies!
Thank you.
Try hard to not use erode and dilate. These operations are extremely time intensive. I'm quite confident that they are the bottleneck in your program.
There are some measures you can take:
Downscaling (or downsampling) the image. Ideally, you want each pixel of the downscaled image to be on the same order of magnitude as a grid square's size.
Remove dilate and erode.
Off-topic bugfix: fix the inRange() parameters you use. Consult the HSV color space diagram and normalize to OpenCV's ranges (for 8-bit images, OpenCV stores H in [0,180) and S, V in [0,255]). E.g. extracting "green pixels" (hues of roughly 80-160 degrees) would correspond to inRange(HSV, Scalar(80*180/360, 0.3*255, 0.3*255), Scalar(160*180/360, 255, 255), threshImg);
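To illustrate the downscaling point, here is a hedged sketch (variable names follow the question's code): shrink the HSV image so each grid square collapses to one averaged pixel, which replaces the per-square inRange/erode/dilate work entirely:
// INTER_AREA averaging makes each output pixel the mean of one grid square
cv::Mat hsv, grid;
cv::cvtColor(cam, hsv, cv::COLOR_BGR2HSV);
cv::resize(hsv, grid, cv::Size(16, 16), 0, 0, cv::INTER_AREA);
for (int y = 0; y < grid.rows; y++) {
    for (int x = 0; x < grid.cols; x++) {
        cv::Vec3b px = grid.at<cv::Vec3b>(y, x); // mean H, S, V of one square
        int noteNum = y * grid.cols + x; // same indexing as the question
        // classify px against each colour's HSV range here
    }
}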

Need only one edge in Canny edge algorithm

When I use the Canny edge algorithm, it produces two edges on either side of the thick colored line, as expected, but I want only one edge to be displayed, to make my line and curve detection algorithm much less complicated. Any ideas on how I can make that happen?
Here is the code :
bool CannyEdgeDetection(DataStructure& col)
{
Mat src, src_gray;
Mat dst, detected_edges, fin;
int WhiteCount = 0, BCount = 0;
char szFil1[32] = "ocv.bmp";
char szFil2[32] = "dst.bmp";
src = imread(szFil1);
dst = imread(szFil1);
cvtColor( src, src_gray, CV_BGR2GRAY ); // convert to grayscale before blurring
blur( src_gray, detected_edges, Size(3,3) );
Canny( src, dst, 100, 200, 3 );
imwrite(szFil2, dst );
IplImage* img = cvLoadImage(szFil2);
int height = img->height;
int width = img->width;
int step = img->widthStep;
int channels = img->nChannels;
uchar * datau = (uchar *)img->imageData;
for(int i=0;i<height;i++){
for(int j=0;j<width;j++){
for(int k=0;k<channels;k++){
datau[i*step+j*channels+k] = 255 - datau[i*step+j*channels+k];
if (datau[i*step+j*channels+k]==0){
WhiteCount++;
col.pixel_col [i][j] = 2;
}
else{BCount++;
col.pixel_col[i][j] = 0;
}
}
}
}
cvSaveImage("img.bmp" ,img);
return 0;
}
This is not the original image, but it is similar:
Which part do I comment out to be able to read black images on white backgrounds, or any colored image?
// assumes img is a CV_8UC1 image with white lines on a black background
cv::Mat skel(img.size(), CV_8UC1, cv::Scalar(0));
cv::Mat temp;
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
bool done;
do
{
cv::morphologyEx(img, temp, cv::MORPH_OPEN, element);
cv::bitwise_not(temp, temp);
cv::bitwise_and(img, temp, temp);
cv::bitwise_or(skel, temp, skel);
cv::erode(img, img, element);
double max;
cv::minMaxLoc(img, 0, &max);
done = (max == 0);
} while (!done);
That process is called skeletonization or thinning. You can google for that.
Here is a simple method for skeletonization : skeletonization OpenCV In C#
Below is the output I got when I applied the above method to your image (the image is inverted before skeletonization, because the method works for white shapes on a black background, which is the opposite of your input image).
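For the black-on-white case asked about above, rather than commenting anything out, it may be simplest to invert the image before and after; a short sketch, using the img and skel Mats from the snippet above:
cv::bitwise_not(img, img); // black-on-white -> white-on-black
// ... run the skeletonization loop above ...
cv::bitwise_not(skel, skel); // invert the skeleton back if needed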