Video gets stuck in meanshift tracking (OpenCV, C++)

I want to track any moving object using the meanshift algorithm. To do this, I first subtract frames from the background, then apply erosion, dilation and smoothing. To detect a moving object and find its coordinates, I use corner point detection.
I then calculate the mean of the corner points and pass it to the meanshift search window. When an object appears on screen, the program leaves corner point detection and enters meanshift tracking. It keeps running meanshift until the object leaves the screen.
Once the object leaves the screen, I want to activate corner point detection again, so I take the program out of meanshift and jump back to corner point detection. The program runs fine, but when it leaves meanshift and re-enters corner point detection, it gets stuck for a few seconds.
After that it runs smoothly. The problem occurs only during the transition from meanshift back to corner point detection. I don't know what the possible reason could be. Kindly suggest a solution.
Here is my code:
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
#include "cvaux.h"
#include <iostream>
using namespace std;
const int MAX_CORNERS = 500;
inline static void allocateOnDemand(IplImage** img, CvSize size, int depth, int channels) {
if (*img != NULL)
return;
*img = cvCreateImage(size, depth, channels);
if (*img == NULL) {
fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
exit(-1);
}
}
int main() {
CvCapture* capture = cvCaptureFromCAM(CV_CAP_V4L2);
IplImage* pFrame[10];
IplImage* bg;
IplImage* img_A;
IplImage* img_B;
IplImage* eig_image;
IplImage* tmp_image;
img_A = cvQueryFrame(capture);
CvSize img_sz = cvGetSize(img_A);
int c1 = 0, c2 = 0;
int xarr[40];
//A temporary replacement for background averaging
int j = 0;
for (j = 0; j < 10; j++) {
pFrame[j] = cvQueryFrame(capture);
cvWaitKey(200);
}
cvSaveImage("10.jpg", pFrame[9], 0); //saving the background
IplImage* imgA = cvCreateImage(cvGetSize(img_A), 8, 1);
IplImage* imgB = cvCreateImage(cvGetSize(img_A), 8, 1);
IplImage* imgB1 = cvCreateImage(cvGetSize(img_A), 8, 1);
IplImage* imgb = cvCreateImage(cvGetSize(img_A), 8, 1);
cvNamedWindow("LKpyr_OpticalFlow", CV_WINDOW_AUTOSIZE);
bg = cvLoadImage("10.jpg", CV_LOAD_IMAGE_GRAYSCALE); //loading the saved background
int flag = 0;
int index = 0;
char keypress;
bool quit = false;
CvConnectedComp* out = new CvConnectedComp(); //output window for meanshift
int win_size = 25; //window size for corner point detection
int x1 = 0;
int y1 = 0;
int x;
int y;
int cc;
line3:
while (quit == false) { //line3:
IplImage* imgC = cvCreateImage(cvGetSize(img_A), 8, 1); //creating output image
cvZero(imgC);
img_B = cvQueryFrame(capture);
imgC = cvQueryFrame(capture);
// line3:
int corner_count = MAX_CORNERS; //total no of corners found in frame
cvCvtColor(img_B, imgb, CV_BGR2GRAY);
//line3:
CvPoint2D32f* cornersA = new CvPoint2D32f[MAX_CORNERS];
CvPoint2D32f* cornersB = new CvPoint2D32f[MAX_CORNERS];
if (index % 2 == 0) {
cvSub(imgb, bg, imgB, NULL); //background subtraction and stuff
cvErode(imgB, imgB, NULL, 4);
cvDilate(imgB, imgB, 0, 2);
cvSmooth(imgB, imgB, 0, 1);
cvThreshold(imgB, imgB, 50, 255, CV_THRESH_BINARY);
//line3:
if (flag == 1) goto line1; //Go to Meanshift
allocateOnDemand(&eig_image, img_sz, IPL_DEPTH_32F, 1);
allocateOnDemand(&tmp_image, img_sz, IPL_DEPTH_32F, 1);
cvCvtColor(img_A, imgA, CV_BGR2GRAY);
//line3:
cvGoodFeaturesToTrack(imgB, eig_image, tmp_image, cornersA, &corner_count,
0.05, 5.0, 0, 3, 0, 0.04); //detects corners; the number found is stored in corner_count
cvFindCornerSubPix(imgB, cornersA, corner_count, cvSize(12, 12),
cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER |
CV_TERMCRIT_EPS, 20, 0.03));
CvPoint p0;
CvPoint p1;
CvPoint acc;
cc = corner_count + 20;
acc.x = 0;
acc.y = 0;
for (int i = 0; i < corner_count; i++) {
p0 = cvPoint(cvRound(cornersA[i].x), cvRound(cornersA[i].y));
p1 = cvPoint(cvRound(cornersB[i].x), cvRound(cornersB[i].y));
acc.x = acc.x + p0.x; //calculating mean of corner points
acc.y = acc.y + p0.y;
}
delete[] cornersA;
delete[] cornersB;
cout << "Corner Count is" << corner_count << endl;
cout << "Flag status: " << flag << endl;
if (corner_count > 0) {
flag = 1;
cvWaitKey(20);
}
x1 = cvRound(acc.x / (corner_count + 1));
y1 = cvRound(acc.y / (corner_count + 1));
cout << "x is " << x1 << " y is " << y1 << endl;
cout << "Flag status: " << flag << endl;
x = x1;
y = y1;
if (flag == 0) goto line2; //Go back to Corner Point detection
line1: CvRect window = cvRect(x, y, 80, 90); //Creates window for meanshift algo
cvMeanShift(imgB, window, cvTermCriteria(CV_TERMCRIT_EPS |
CV_TERMCRIT_ITER, 200, 1), out);
window = out->rect;
x = out->rect.x;
y = out->rect.y;
cout << "Now x is " << x << " y is " << y << endl;
cout << "Flag status: " << flag << endl;
if (out->area > 200) {
cvRectangle(imgC, cvPoint(x + 50, y + 100), cvPoint(x - 20, y - 90),
cvScalar(0, 0, 255), 3, 8, 0);
} else {}
xarr[c1] = x;
c1++;
if (c1 > 39) c1 = 0;
if (xarr[0] == xarr[39]) {
c2 = 1;
cout << "c2 is now " << c2 << endl;
}
}
if (x == 0 || y == 0 || x < 7 || x > 572 || c2 == 1) {
flag = 0;
c2 = 0;
goto line3;
break;
}
line2: cvShowImage("LKpyr_OpticalFlow", imgC);
keypress = cvWaitKey(20);
// Set the flag to quit if escape was pressed
if (keypress == 27) {
quit = true;
}
//index++;
} //end of while
return 0;
}
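A structural note, not a diagnosis: the goto-based jumps between detection and tracking make the hand-off hard to reason about, and any hidden wait inside the transition is easy to miss. For comparison, here is a minimal sketch of the same detect-then-track loop written against the modern C++ API with an explicit state variable instead of gotos. The camera index, threshold and 80x90 window are placeholders loosely taken from the question, not a drop-in replacement.

// Sketch: detect-then-track as an explicit state machine (OpenCV 3+/4).
#include <opencv2/opencv.hpp>
#include <vector>

int main() {
    cv::VideoCapture cap(0);                      // placeholder camera index
    if (!cap.isOpened()) return -1;

    cv::Mat frame, gray, bg, diff;
    cap >> frame;
    if (frame.empty()) return -1;
    cv::cvtColor(frame, bg, cv::COLOR_BGR2GRAY);  // crude static background

    enum State { DETECT, TRACK } state = DETECT;
    cv::Rect window;

    while (true) {
        cap >> frame;
        if (frame.empty()) break;
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        cv::absdiff(gray, bg, diff);              // background subtraction
        cv::erode(diff, diff, cv::Mat(), cv::Point(-1, -1), 4);
        cv::dilate(diff, diff, cv::Mat(), cv::Point(-1, -1), 2);
        cv::threshold(diff, diff, 50, 255, cv::THRESH_BINARY);

        if (state == DETECT) {
            std::vector<cv::Point2f> corners;
            cv::goodFeaturesToTrack(diff, corners, 500, 0.05, 5.0);
            if (!corners.empty()) {
                cv::Point2f mean(0, 0);
                for (const cv::Point2f& c : corners) mean += c;
                mean *= 1.0f / corners.size();    // mean of the corner points
                window = cv::Rect(cvRound(mean.x), cvRound(mean.y), 80, 90)
                         & cv::Rect(0, 0, diff.cols, diff.rows);
                if (window.area() > 0) state = TRACK;  // object found: track it
            }
        } else {                                  // TRACK
            cv::meanShift(diff, window, cv::TermCriteria(
                cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 200, 1));
            cv::rectangle(frame, window, cv::Scalar(0, 0, 255), 3);
            if (cv::countNonZero(diff(window)) == 0)
                state = DETECT;                   // object gone: detect again
        }

        cv::imshow("tracking", frame);
        if (cv::waitKey(20) == 27) break;         // ESC quits
    }
    return 0;
}

With a single state variable there is exactly one transition in each direction, so a stall like the one described can be pinned down by timing the two branches separately.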

Related

Get image(rect on a plane) with perspective correction from camera (2d image)

I want to get the original image back from my camera.
This is the image that my camera gets. The image that I want is the purple rectangle.
I want to crop the purple rectangle and correct the perspective. This is the image I expect to get.
The image size is unknown; it can be wide or tall.
How can I do this in OpenCV? Any tips or guides? Note that for each marker, I already have the coordinates of each marker corner (this info might help).
Edit: some progress.
I learnt that the functions I need are getPerspectiveTransform and warpPerspective.
I use both of them like this:
if (ids.size() == 4)
{
array<Point2f, 4> srcCorners; // corner that we want
array<Point2f, 4> srcCornersSmall;
array<Point2f, 4> dstCorners; // destination corner
//id 8 14 18 47
for (size_t i = 0; i < ids.size(); i++)
{
// first corner
if (ids[i] == 8)
{
srcCorners[0] = corners[i][0]; // get the first point
srcCornersSmall[0] = corners[i][2];
}
// second corner
else if (ids[i] == 14)
{
srcCorners[1] = corners[i][1]; // get the second point
srcCornersSmall[1] = corners[i][3];
}
// third corner
else if (ids[i] == 18)
{
srcCorners[2] = corners[i][2]; // get the third point
srcCornersSmall[2] = corners[i][0];
}
// fourth corner
else if (ids[i] == 47)
{
srcCorners[3] = corners[i][3]; // get the fourth point
srcCornersSmall[3] = corners[i][1];
}
}
dstCorners[0] = Point2f(0.0f, 0.0f);
dstCorners[1] = Point2f(256.0f, 0.0f);
dstCorners[2] = Point2f(256.0f, 256.0f);
dstCorners[3] = Point2f(0.0f, 256.0f);
// get perspectivetransform
Mat M = getPerspectiveTransform(srcCorners, dstCorners);
// warp perspective
Mat dst;
Size dsize = Size(cvRound(dstCorners[2].x), cvRound(dstCorners[2].y));
warpPerspective(imageCopy, dst, M, dsize);
// show
imshow("perspective transformed", dst);
}
While I do get the image that I want (almost), the image is not in the correct width/height ratio.
This is the output that I get.
How do I correct the width/height ratio?
Finally got it.
The idea is to draw the markers as white boxes on a black image, then crop the region we want and warp it into a new image. Since the correct size for the new image is unknown, we just make it square. The new image should then be a black image with white boxes at the corners. Starting from (0,0), we walk diagonally across the image and check the pixel value, which should be white. If the pixel value is black, we have stepped outside the white box, so we trace back along x and y, because the white box might be tall or wide. Once we find the bottom-right corner of the white box, we have its size. Since the physical marker is square, rescaling this white box back to a square gives the correction, and the same scaling is then applied to the whole image.
This is the image captured by the camera.
The markers drawn as white boxes on a black image.
Cropped and warped into a square.
Get the width and height of the white box in the top-left corner.
Once we have the scale function, apply it.
In case anyone is interested, here is the code.
// Get3dRectFrom2d.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include "pch.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#define CAMERA_WINDOW "Simple ArUco"
using namespace std;
using namespace cv;
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
return false;
fs["camera_matrix"] >> camMatrix;
fs["distortion_coefficients"] >> distCoeffs;
return true;
}
int main()
{
Mat camMatrix, distCoeffs;
string cameraSettings = "camera.txt";
bool estimatePose = false;
bool showRejected = true;
if (readCameraParameters(cameraSettings, camMatrix, distCoeffs))
{
estimatePose = true;
}
Ptr<aruco::Dictionary> dictionary =
aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(aruco::DICT_4X4_50));
Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
float markerLength = 3.75f;
float markerSeparation = 0.5f;
double totalTime = 0;
int totalIterations = 0;
VideoCapture inputVideo(0);
if (!inputVideo.isOpened())
{
cout << "cannot open camera";
}
double prevW = -1, prevH = -1;
double increment = 0.1;
while (inputVideo.grab())
{
Mat image, imageCopy;
inputVideo.retrieve(image);
double tick = (double)getTickCount();
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
vector< Vec3d > rvecs, tvecs;
// detect markers and estimate pose
aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
if (estimatePose && ids.size() > 0)
aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
tvecs);
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;
totalIterations++;
if (totalIterations % 30 == 0) {
cout << "Detection Time = " << currentTime * 1000 << " ms "
<< "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
}
// draw results
image.copyTo(imageCopy);
if (ids.size() > 0) {
aruco::drawDetectedMarkers(imageCopy, corners, ids);
if (estimatePose) {
for (unsigned int i = 0; i < ids.size(); i++)
aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
markerLength * 0.5f);
}
}
if (ids.size() == 4)
{
if (true)
{
// process the image
array<Point2f, 4> srcCorners; // corner that we want
array<Point2f, 4> dstCorners; // destination corner
vector<Point> marker0; // marker corner
vector<Point> marker1; // marker corner
vector<Point> marker2; // marker corner
vector<Point> marker3; // marker corner
//id 8 14 18 47
for (size_t i = 0; i < ids.size(); i++)
{
// first corner
if (ids[i] == 8)
{
srcCorners[0] = corners[i][0]; // get the first point
//srcCornersSmall[0] = corners[i][2];
marker0.push_back(corners[i][0]);
marker0.push_back(corners[i][1]);
marker0.push_back(corners[i][2]);
marker0.push_back(corners[i][3]);
}
// second corner
else if (ids[i] == 14)
{
srcCorners[1] = corners[i][1]; // get the second point
//srcCornersSmall[1] = corners[i][3];
marker1.push_back(corners[i][0]);
marker1.push_back(corners[i][1]);
marker1.push_back(corners[i][2]);
marker1.push_back(corners[i][3]);
}
// third corner
else if (ids[i] == 18)
{
srcCorners[2] = corners[i][2]; // get the third point
//srcCornersSmall[2] = corners[i][0];
marker2.push_back(corners[i][0]);
marker2.push_back(corners[i][1]);
marker2.push_back(corners[i][2]);
marker2.push_back(corners[i][3]);
}
// fourth corner
else if (ids[i] == 47)
{
srcCorners[3] = corners[i][3]; // get the fourth point
//srcCornersSmall[3] = corners[i][1];
marker3.push_back(corners[i][0]);
marker3.push_back(corners[i][1]);
marker3.push_back(corners[i][2]);
marker3.push_back(corners[i][3]);
}
}
// create a black image with the same size of cam image
Mat mask = Mat::zeros(imageCopy.size(), CV_8UC1);
Mat dstImage = Mat::zeros(imageCopy.size(), CV_8UC1);
// draw white fill on marker corners
{
int num = (int)marker0.size();
if (num != 0)
{
const Point * pt4 = &(marker0[0]);
fillPoly(mask, &pt4, &num, 1, Scalar(255, 255, 255), 8);
}
}
{
int num = (int)marker1.size();
if (num != 0)
{
const Point * pt4 = &(marker1[0]);
fillPoly(mask, &pt4, &num, 1, Scalar(255, 255, 255), 8);
}
}
{
int num = (int)marker2.size();
if (num != 0)
{
const Point * pt4 = &(marker2[0]);
fillPoly(mask, &pt4, &num, 1, Scalar(255, 255, 255), 8);
}
}
{
int num = (int)marker3.size();
if (num != 0)
{
const Point * pt4 = &(marker3[0]);
fillPoly(mask, &pt4, &num, 1, Scalar(255, 255, 255), 8);
}
}
// draw the mask
imshow("black white lines", mask);
// we dont have the correct size/aspect ratio
double width = 256.0f, height = 256.0f;
dstCorners[0] = Point2f(0.0f, 0.0f);
dstCorners[1] = Point2f(width, 0.0f);
dstCorners[2] = Point2f(width, height);
dstCorners[3] = Point2f(0.0f, height);
// get perspectivetransform
Mat M = getPerspectiveTransform(srcCorners, dstCorners);
// warp perspective
Mat dst;
Size dsize = Size(cvRound(dstCorners[2].x), cvRound(dstCorners[2].y));
warpPerspective(mask, dst, M, dsize);
// show warped image
imshow("perspective transformed", dst);
// get width and length of the first marker
// start from (0,0) and cross
int cx = 0, cy = 0; // track our current coordinate
Scalar v, vx, vy; // pixel value at coordinate
bool cont = true;
while (cont)
{
v = dst.at<uchar>(cx, cy); // get pixel value at current coordinate
if (cx > 1 && cy > 1)
{
vx = dst.at<uchar>(cx - 1, cy);
vy = dst.at<uchar>(cx, cy - 1);
}
// if pixel not black, continue crossing
if ((int)v.val[0] != 0)
{
cx++;
cy++;
}
// current pixel is black
// if previous y pixel is not black, means that we need to walk the pixel right
else if ((int)((Scalar)dst.at<uchar>(cx, cy - 1)).val[0] != 0)
{
cx = cx + 1;
}
// if previous x pixel is not black, means that we need to walk the pixel down
else if ((int)((Scalar)dst.at<uchar>(cx - 1, cy)).val[0] != 0)
{
cy = cy + 1;
}
// the rest is the same with previous 2, only with higher previous pixel to check
// need to do this because sometimes pixels is jagged
else if ((int)((Scalar)dst.at<uchar>(cx, cy - 2)).val[0] != 0)
{
cx = cx + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx - 2, cy)).val[0] != 0)
{
cy = cy + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx, cy - 3)).val[0] != 0)
{
cx = cx + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx - 3, cy)).val[0] != 0)
{
cy = cy + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx, cy - 4)).val[0] != 0)
{
cx = cx + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx - 4, cy)).val[0] != 0)
{
cy = cy + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx, cy - 5)).val[0] != 0)
{
cx = cx + 1;
}
else if ((int)((Scalar)dst.at<uchar>(cx - 5, cy)).val[0] != 0)
{
cy = cy + 1;
}
else
{
cx = cx - 1;
cy = cy - 1;
cont = false;
}
// reached the end of the picture
if (cx >= dst.cols)
{
cont = false;
}
else if (cy >= dst.rows)
{
cont = false;
}
}
if (cx == cy)
{
//we have perfect square
}
if (cx > cy)
{
// wide
width = (height * ((double)cx / (double)cy));
}
else
{
// tall
height = (width * ((double)cy / (double)cx));
}
// we dont want the size varied too much every frame,
// so limits the increment or decrement for every frame
// initialize first usage
if (prevW<0)
{
prevW = width;
}
if (prevH<0)
{
prevH = height;
}
if (width > prevW + increment)
{
width = prevW + increment;
}
else if (width < prevW - increment)
{
width = prevW - increment;
}
prevW = width;
if (height > prevH + increment)
{
height = prevH + increment;
}
else if (height < prevH - increment)
{
height = prevH - increment;
}
prevH = height;
// show resized image
Size s(width, height);
Mat resized;
resize(dst, resized, s);
imshow("resized", resized);
}
}
if (showRejected && rejected.size() > 0)
aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));
imshow("out", imageCopy);
if (waitKey(1) == 27) {
break;
}
}
cout << "Hello World!\n";
cin.ignore();
return 0;
}
I'm more interested in a mathematical solution, but for now this suffices. If you know a much better (faster) approach, let me know; one possibility is sketched below.
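A more mathematical shortcut, sketched under the same assumptions as the code above (four physically square ArUco markers, and the homography M from getPerspectiveTransform): instead of rasterising the markers into a mask and walking pixels, push one marker's corners through M with perspectiveTransform and compare the warped side lengths. Because the real marker is square, the width-to-height ratio of the warped marker is exactly the horizontal-to-vertical scale error of the square warp. The helper below is hypothetical, not part of the original code:

// Sketch: derive the aspect correction from one square marker and the
// homography M computed above. Corners are ordered as ArUco returns them:
// top-left, top-right, bottom-right, bottom-left.
#include <opencv2/opencv.hpp>
#include <vector>

static double warpAspectFromSquareMarker(const std::vector<cv::Point2f>& marker,
                                         const cv::Mat& M)
{
    std::vector<cv::Point2f> warped;
    cv::perspectiveTransform(marker, warped, M); // marker corners in dst space
    double w = cv::norm(warped[1] - warped[0]);  // warped top edge
    double h = cv::norm(warped[3] - warped[0]);  // warped left edge
    return w / h;                                // > 1 means the output is too wide
}

Mirroring the pixel-walk logic above: with r = warpAspectFromSquareMarker(...), keep the height at 256 and set width = 256 * r when r > 1, otherwise keep the width and set height = 256 / r. No mask image, second warp or scan loop is needed.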

OpenCV Multi-threading giving errors

I am running OpenCV code on multiple threads in the following fashion:
std::thread t1(runOnSingleCamera, alphaFile, featureToUse, classifier,0);
std::thread t2(runOnSingleCamera, betaFile, featureToUse, classifier,1);
std::thread t3(runOnSingleCamera, gammaFile, featureToUse, classifier,2);
std::thread t4(runOnSingleCamera, deltaFile, featureToUse, classifier,3);
t1.join();
t2.join();
t3.join();
t4.join();
This compiles fine, but when I run it I get a variety of errors, and it even occasionally works...
Here is a sample of some of the errors I get:
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
Segmentation fault (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed
(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed
Segmentation fault (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
(alphaInput.webm:8593): GLib-GObject-WARNING **: invalid cast from 'CvImageWidget' to 'CvImageWidget'
** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_size_allocate(GtkWidget*, GtkAllocation*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed
** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_realize(GtkWidget*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed
**
Gtk:ERROR:/build/gtk+2.0-KsZKkB/gtk+2.0-2.24.30/gtk/gtkwidget.c:8861:gtk_widget_real_map: assertion failed: (gtk_widget_get_realized (widget))
Aborted (core dumped)
tom@thinkpad:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1
/usr/share/themes/Ambiance/gtk-2.0/gtkrc:720: Unable to find include file: "apps/ff.rc"
(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget'
(betaInput.webm:8615): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed
Segmentation fault (core dumped)
Has anyone seen this before, or does anyone know what is going wrong and how to fix it?
Running under gdb gives the following:
Thread 4 "main" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fffdb7fe700 (LWP 29317)]
0x0000000000000000 in ?? ()
I am on the most up-to-date versions of Ubuntu and OpenCV at the time of asking.
Full code below, as requested. It is very long and made up of multiple elements; I imagine the issue is early on, in the declarations, or possibly an incompatibility with the imshow function:
int runOnSingleCamera(String file, int featureToUse, int classifier, int cameraID)
{
//enable velocity
int timeSteps = 0;
string windowName = file; // window name
Mat img, outputImage, foreground; // image objects
VideoCapture cap;
bool keepProcessing = true; // loop control flag
unsigned char key; // user input
int EVENT_LOOP_DELAY = 40; // delay for GUI window, 40 ms equates to 1000ms/25fps = 40ms per frame
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int width = 40;
int height = 100;
int learning = 1000;
int padding = 40;
// if command line arguments are provided try to read image/video_name
// otherwise default to capture from attached H/W camera
if((cap.open(file) == true))
{
// create window object (use flag=0 to allow resize, 1 to auto fix size)
namedWindow(windowName, 1);
// create background / foreground Mixture of Gaussian (MoG) model
Ptr<BackgroundSubtractorMOG2> MoG = createBackgroundSubtractorMOG2(500,25,false);
HOGDescriptor hog;
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
CascadeClassifier cascade = CascadeClassifier(CASCADE_TO_USE);
Ptr<SuperpixelSEEDS> seeds;
// start main loop
while(keepProcessing)
{
int64 timeStart = getTickCount();
if (cap.isOpened())
{
cap >> img;
if(img.empty())
{
std::cerr << "End of video file reached" << std::endl;
exit(0);
}
outputImage = img.clone();
cvtColor(img, img, CV_BGR2GRAY);
}
else
{
// if not a capture object set event delay to zero so it waits
// indefinitely (as single image file, no need to loop)
EVENT_LOOP_DELAY = 0;
}
// update background model and get background/foreground
MoG->apply(img, foreground, (double)(1.0/learning));
//imshow("old foreground", foreground);
/////////////////////////////////////////////////////////////////////////////////SUPERPIXELS
int useSuperpixels = 0;
if(useSuperpixels == 1)
{
Mat seedMask, labels, result;
result = img.clone();
int width = img.size().width;
int height = img.size().height;
seeds = createSuperpixelSEEDS(width, height, 1, 2000, 10, 2, 5, true);
seeds->iterate(img, 10);
seeds->getLabels(labels);
vector<int> counter(seeds->getNumberOfSuperpixels(),0);
vector<int> numberOfPixelsPerSuperpixel(seeds->getNumberOfSuperpixels(),0);
vector<bool> useSuperpixel(seeds->getNumberOfSuperpixels(),false);
for(int i = 0; i<foreground.rows; i++)
{
for(int j = 0; j<foreground.cols; j++)
{
numberOfPixelsPerSuperpixel[labels.at<int>(i,j)] += 1;
if(foreground.at<unsigned char>(i,j)==255)
{
counter[labels.at<int>(i,j)] += 1;
}
}
}
for(int i = 0; i<counter.size(); i++)
{
if((double)counter[i]/numberOfPixelsPerSuperpixel[i] > 0.0001) // cast to avoid integer division
{
useSuperpixel[i] = true;
}
}
for(int i = 0; i<foreground.rows; i++)
{
for(int j = 0; j<foreground.cols; j++)
{
if(useSuperpixel[labels.at<int>(i,j)] == true)
{
foreground.at<unsigned char>(i,j) = 255;
}
else
{
foreground.at<unsigned char>(i,j) = 0;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////
else
{
// perform erosion - removes boundaries of foreground object
erode(foreground, foreground, Mat(),Point(),1);
// perform morphological closing
dilate(foreground, foreground, Mat(),Point(),5);
erode(foreground, foreground, Mat(),Point(),1);
}
//imshow("foreground", foreground);
// get connected components from the foreground
findContours(foreground, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
// iterate through all the top-level contours,
// and get bounding rectangles for them (if larger than given value)
for(int idx = 0; idx >=0; idx = hierarchy[idx][0])
{
Rect r = boundingRect(contours[idx]);
// adjust bounding rectangle to be padding% larger
// around the object
r.x = max(0, r.x - (int) (padding/100.0 * (double) r.width));
r.y = max(0, r.y - (int) (padding/100.0 * (double) r.height));
r.width = min(img.cols - 1, (r.width + 2 * (int) (padding/100.0 * (double) r.width)));
r.height = min(img.rows - 1, (r.height + 2 * (int) (padding/100.0 * (double) r.height)));
// draw rectangle if greater than width/height constraints and if
// also still inside image
if ((r.width >= width) && (r.height >= height) && (r.x + r.width < img.cols) && (r.y + r.height < img.rows))
{
vector<Rect> found, found_filtered;
Mat roi = outputImage(r);
if (classifier == 1)
{
//changing last parameter helps deal with multiple rectangles per person
if (cameraID == 3)
{
hog.detectMultiScale(roi, found, 0, Size(8,8), Size(32,32), 1.05, 5);
}
else
{
hog.detectMultiScale(roi, found, 0, Size(8,8), Size(64,64), 1.05, 5);
}
}
else
{
if (cameraID == 3)
{
cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(32,32));
}
else
{
cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(64,64));
}
}
for(size_t i = 0; i < found.size(); i++ )
{
Rect rec = found[i];
rec.x += r.x;
rec.y += r.y;
size_t j;
// Do not add small detections inside a bigger detection.
for ( j = 0; j < found.size(); j++ )
{
if ( j != i && (rec & found[j]) == rec )
{
break;
}
}
if (j == found.size())
{
found_filtered.push_back(rec);
}
}
for (size_t i = 0; i < found_filtered.size(); i++)
{
Rect rec = found_filtered[i];
// The HOG/Cascade detector returns slightly larger rectangles than the real objects,
// so we slightly shrink the rectangles to get a nicer output.
rec.x += rec.width*0.1;
rec.width = rec.width*0.8;
rec.y += rec.height*0.1;
rec.height = rec.height*0.8;
// rectangle(img, rec.tl(), rec.br(), cv::Scalar(0,255,0), 3);
Point2f center = Point2f(float(rec.x + rec.width/2.0), float(rec.y + rec.height/2.0));
Mat regionOfInterest;
Mat regionOfInterestOriginal = img(rec);
//Mat regionOfInterestOriginal = img(r);
Mat regionOfInterestForeground = foreground(rec);
//Mat regionOfInterestForeground = foreground(r);
bitwise_and(regionOfInterestOriginal, regionOfInterestForeground, regionOfInterest);
Mat clone = regionOfInterest.clone();
resize(clone, regionOfInterest, Size(64,128), CV_INTER_CUBIC);
imshow("roi", regionOfInterest);
double huMoments[7];
vector<double> hu(7);
Mat hist;
vector<float> descriptorsValues;
Mat feature;
if(featureToUse == 1) //HuMoments
{
vector<vector<Point> > contoursHu;
vector<Vec4i> hierarchyHu;
findContours(regionOfInterest, contoursHu, hierarchyHu, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
double largestSize = 0, size; // initialize so the first comparison is defined
int largestContour = 0;
for(int i = 0; i < contoursHu.size(); i++)
{
size = contoursHu[i].size();
if(size > largestSize)
{
largestSize = size;
largestContour = i;
}
}
Moments contourMoments;
contourMoments = moments(contoursHu[largestContour]);
HuMoments(contourMoments, huMoments);
hu.assign(huMoments,huMoments+7);
feature = Mat(hu);
feature = feature.t();
}
else if(featureToUse == 2) //HistogramOfIntensities
{
int histSize = 16; // bin size - need to determine which pixel threshold to use
float range[] = {0,255};
const float *ranges[] = {range};
int channels[] = {0, 1};
calcHist(&regionOfInterest, 1, channels, Mat(), hist, 1, &histSize, ranges, true, false);
feature = hist.clone();
feature = feature.t();
}
else if(featureToUse == 3) //HOG
{
//play with these parameters to change HOG size
cv::HOGDescriptor descriptor(Size(64, 128), Size(16, 16), Size(16, 16), Size(16, 16), 4, -1, 0.2, true, 64);
descriptor.compute(regionOfInterest, descriptorsValues);
feature = Mat(descriptorsValues);
feature = feature.t();
}
else if(featureToUse == 4) //Correlogram
{
Mat correlogram(8,8,CV_64F);
Mat occurances(8,8,CV_8U);
int xIntensity, yIntensity;
for(int i = 0; i<regionOfInterest.rows; i++)
{
for(int j = 0; j<regionOfInterest.cols; j++)
{
xIntensity = floor(regionOfInterest.at<unsigned char>(i,j)/32);
for(int k = i; k<regionOfInterest.rows; k++)
{
for(int l = 0; l<regionOfInterest.cols; l++)
{
if((k == i && l > j) || k > i)
{
yIntensity = floor(regionOfInterest.at<unsigned char>(k,l)/32);
correlogram.at<double>(xIntensity,yIntensity) += (norm(Point(i,j)-Point(k,l)));
correlogram.at<double>(yIntensity,xIntensity) += (norm(Point(i,j)-Point(k,l)));
occurances.at<unsigned char>(xIntensity,yIntensity) += 1;
occurances.at<unsigned char>(yIntensity,xIntensity) += 1;
}
}
}
}
}
//average it out
for(int i = 0; i<correlogram.rows; i++)
{
for(int j = 0; j<correlogram.cols; j++)
{
if (occurances.at<unsigned char>(i,j) != 0) correlogram.at<double>(i,j) /= occurances.at<unsigned char>(i,j); // divide, not overwrite, to actually average
}
}
feature = correlogram.reshape(1,1);
}
else if(featureToUse == 5) //Flow
{
}
feature.convertTo(feature, CV_64F);
normalize(feature, feature, 1, 0, NORM_L1, -1, Mat());
cout << "New Feature" << endl << feature << endl;
//classify first target
if(targets.size() == 0) //if first target found
{
Person person(0, center.x, center.y, timeSteps, rec.width, rec.height);
person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);
Rect p = person.kalmanPredict();
person.updateFeatures(feature);
person.setCurrentCamera(cameraID);
rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);
char str[200];
sprintf(str,"Person %d",person.getIdentifier());
putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0));
targets.push_back(person);
}
else
{
vector<double> mDistances;
bool singleEntry = false;
for(int i = 0; i<targets.size(); i++)
{
if(targets[i].getFeatures().rows == 1)
{
singleEntry = true;
}
}
for(int i = 0; i<targets.size(); i++)
{
Mat covar, mean;
Mat data = targets[i].getFeatures();
calcCovarMatrix(data,covar,mean,CV_COVAR_NORMAL|CV_COVAR_ROWS);
// cout << i << " data" << endl << data << endl;
// cout << i << " Covar" << endl << covar << endl;
// cout << i << " mean" << endl << mean << endl;
double mDistance;
if(singleEntry == false)
{
Mat invCovar;
invert(covar,invCovar,DECOMP_SVD);
mDistance = Mahalanobis(feature,mean,invCovar);
cout << i << " Mahalanobis Distance" << endl << mDistance << endl;
}
else
{
mDistance = norm(feature,mean,NORM_L1);
cout << i << " Norm Distance" << endl << mDistance << endl;
}
mDistances.push_back(mDistance);
}
Mat test = Mat(mDistances);
cout << "Distances" << endl << test << endl;
double sum = 0.0;
for(int i = 0; i<mDistances.size(); i++)
{
sum += mDistances[i];
}
for(int i = 0; i<mDistances.size(); i++)
{
mDistances[i] = sum/mDistances[i];
}
normalize(mDistances,mDistances,1,0,NORM_L1,-1,Mat());
Mat probabilities = Mat(mDistances);
cout << "Probabilities" << endl << probabilities << endl;
//special case to classify second target
if(targets.size() == 1)
{
if(fabs(center.x-targets[0].getLastPosition().x)<100 and fabs(center.y-targets[0].getLastPosition().y)<100)
{
targets[0].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);
Rect p = targets[0].kalmanPredict();
targets[0].updateFeatures(feature);
targets[0].setCurrentCamera(cameraID);
rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);
char str[200];
sprintf(str,"Person %d",targets[0].getIdentifier());
putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0));
}
else
{
Person person(1, center.x, center.y, timeSteps, rec.width, rec.height);
person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);
Rect p = person.kalmanPredict();
person.updateFeatures(feature);
person.setCurrentCamera(cameraID);
rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);
char str[200];
sprintf(str,"Person %d",person.getIdentifier());
putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0));
targets.push_back(person);
}
}
else
{
double greatestProbability = 0.0;
int identifier = 0;
double min, max;
Point min_loc, max_loc;
minMaxLoc(probabilities, &min, &max, &min_loc, &max_loc);
greatestProbability = max;
identifier = max_loc.y;
cout << greatestProbability << " at " << identifier << endl;
if(greatestProbability >= 0.5)
{
targets[identifier].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);
Rect p = targets[identifier].kalmanPredict();
targets[identifier].updateFeatures(feature);
targets[identifier].setCurrentCamera(cameraID);
rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);
char str[200];
sprintf(str,"Person %d",targets[identifier].getIdentifier());
putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0));
}
else
{
int identifier = targets.size();
Person person(identifier, center.x, center.y, timeSteps, rec.width, rec.height);
person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height);
Rect p = person.kalmanPredict();
person.updateFeatures(feature);
person.setCurrentCamera(cameraID);
rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3);
char str[200];
sprintf(str,"Person %d",person.getIdentifier());
putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0));
targets.push_back(person);
}
}
}
}
rectangle(outputImage, r, Scalar(0,0,255), 2, 8, 0);
}
}
// display image in window
imshow(windowName, outputImage);
key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - (((getTickCount() - timeStart) / getTickFrequency())*1000)));
if (key == 'x')
{
// if user presses "x" then exit
std::cout << "Keyboard exit requested : exiting now - bye!" << std::endl;
keepProcessing = false;
}
timeSteps += 1;
}
// the camera will be deinitialized automatically in VideoCapture destructor
// all OK : main returns 0
return 0;
}
// not OK : main returns -1
return -1;
}
What's happening is that you're using some feature of OpenCV that doesn't support running in a multi-threaded environment, or else you're not making proper use of control mechanisms such as mutexes and monitors to restrict access to critical sections of your code to one thread at a time. We won't be able to tell you what you're doing wrong unless you share more of your code, though. From your log, it seems as though some sort of initialization is being run more than once.
From your code and your log, two things come to mind:
Are you accidentally trying to access video capture hardware on multiple threads?
Maybe creating a new window initializes something in GTK. Try creating your windows on the main thread and see if that helps. Note that, OpenCV or not, having more than one thread drive your UI is a bad idea.
If none of this helps, try adding some log output to your code so we can be sure which line is causing the errors.
So it was #2 after all. To fix it, you must move all the namedWindow calls to the main thread. Afterwards, if it still fails on the imshow calls, you'll have to move those to the main thread as well. You'll need a condition variable for each thread, and shared variables which the threads write to and the main thread reads to update the windows; a minimal sketch of that pattern follows. You can read more about this task here: waiting thread until a condition has been occurred
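A minimal sketch of that pattern, under stated assumptions: the workers only grab and process frames and never touch the GUI, the main thread owns every namedWindow and imshow call, and, for simplicity, one shared condition variable is used rather than one per thread. The file names and the latestFrame buffer are placeholders, not code from the question.

// Sketch: worker threads produce frames; the main thread does all GUI work.
#include <opencv2/opencv.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

std::mutex mtx;
std::condition_variable frameReady;
std::vector<cv::Mat> latestFrame(4);              // one slot per camera
std::atomic<bool> done{false};

void runOnSingleCamera(const std::string& file, int cameraID)
{
    cv::VideoCapture cap(file);
    cv::Mat frame;
    while (!done && cap.read(frame)) {
        // ... per-camera processing goes here; no imshow/namedWindow ...
        {
            std::lock_guard<std::mutex> lock(mtx);
            frame.copyTo(latestFrame[cameraID]);  // publish the latest frame
        }
        frameReady.notify_one();                  // wake the GUI thread
    }
}

int main()
{
    const char* files[] = {"alpha.webm", "beta.webm", "gamma.webm", "delta.webm"};
    for (int i = 0; i < 4; ++i)
        cv::namedWindow("cam" + std::to_string(i)); // windows live on main thread

    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i)
        workers.emplace_back(runOnSingleCamera, files[i], i);

    while (!done) {
        {
            std::unique_lock<std::mutex> lock(mtx);
            // wait with a timeout to survive spurious wakeups and ended videos
            frameReady.wait_for(lock, std::chrono::milliseconds(40));
            for (int i = 0; i < 4; ++i)
                if (!latestFrame[i].empty())
                    cv::imshow("cam" + std::to_string(i), latestFrame[i]);
        }
        if (cv::waitKey(1) == 27) done = true;    // ESC stops workers and GUI
    }
    for (auto& t : workers) t.join();
    return 0;
}

The design point that matters is the ownership rule, not the buffer layout: with the GTK backend, HighGUI windows are only safe when created and updated from a single thread, which is consistent with the repeated-initialization errors in the logs above.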

OpenCV fisheye undistort issues

EDIT: I found the cause of the problem: the fisheye::undistortImage() call was not working correctly, so I replaced it with estimateNewCameraMatrixForUndistortRectify(), initUndistortRectifyMap(), and remap(), as in the original calibrate-camera example. Not perfect yet, but going in the right direction. Output image: http://imgur.com/a/Xm5vq
Mat output;
Mat newK;
Mat view, map1, map2;
Size newSize(1200, 1200);
Mat rview(newSize, frame.type());
//resize(rview, rview, newSize);
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, frame.size(), Matx33d::eye(), newK, 1);
fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, frame.size(), CV_16SC2, map1, map2);
//fisheye::undistortImage(frame, output, K, D, identity);
remap(frame, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
imshow(window_name, frame);
if (waitKey(50) == 27) {
break;
}
Original post:
I'm trying to calibrate and undistort an image coming from a 180-degree fisheye USB camera. Most of this code is from existing examples that claim to be functional.
The code runs fine until fisheye::undistortImage, where the output image comes out badly distorted and squeezed toward the top-left corner of the window.
Screenshot of the "undistorted" chess board and the calibration matrix outputs:
http://imgur.com/a/RTIoT
What am I missing here?
int main(int argc, char** argv) {
VideoCapture camera;
camera.open(1);
if (!camera.isOpened()) {
cout << "Failed to open camera." << std::endl;
return -1;
}
double fWidth = camera.get(CAP_PROP_FRAME_WIDTH);
double fHeight = camera.get(CAP_PROP_FRAME_HEIGHT);
cout << fWidth << std::endl;
cout << fHeight << std::endl;
/*
640 320
480 240
*/
const char* window_name = "output";
namedWindow(window_name, WINDOW_NORMAL);
Mat frame;
Size boardSize;
boardSize.width = 9;
boardSize.height = 6;
int remaining_frames = 30;
Mat K;// = Mat(3, 3, CV_64F, vK);
Mat D;
Mat identity = Mat::eye(3, 3, CV_64F);
vector<vector<Point2f> > img_points;
vector<vector<Point3f> > obj_points(1);
int sq_sz = 25;
for (int i = 0; i < boardSize.height; i++) {
for (int j = 0; j < boardSize.width; j++) {
obj_points[0].push_back(Point3f(float(j * sq_sz), float(i * sq_sz), 0));
}
}
obj_points.resize(remaining_frames, obj_points[0]);
bool found = false;
clock_t prevTimestamp = 0;
int delay = 500;
while (1) {
frame = nextFrame(camera);
bool blinkOutput = false;
if (remaining_frames > 0) {
vector<Point2f> corners;
int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE;
found = findChessboardCorners(frame, boardSize, corners, chessBoardFlags);
if (found) {
drawChessboardCorners(frame, boardSize, corners, found);
if (clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) {
Mat viewGray;
cvtColor(frame, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
img_points.push_back(corners);
remaining_frames--;
cout << remaining_frames << " frames to calibration." << endl;
blinkOutput = true;
prevTimestamp = clock();
}
if (remaining_frames == 0) {
cout << "Computing distortion" << endl;
int flags = 0;
flags |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
flags |= cv::fisheye::CALIB_CHECK_COND;
flags |= cv::fisheye::CALIB_FIX_SKEW;
fisheye::calibrate(obj_points, img_points, frame.size(), K, D, noArray(), noArray(), flags);
cout << "Finished computing distortion" << endl;
cout << K << endl;
cout << D << endl;
}
}
if (blinkOutput) { bitwise_not(frame, frame); }
cv::imshow(window_name, frame);
if (waitKey(50) == 27) {
break;
}
}
else {
Mat output;
fisheye::undistortImage(frame, output, K, D, identity);
cv::imshow(window_name, output);
if (waitKey(50) == 27) {
break;
}
}
}
return 0;
}
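A note on the original symptom, which the edit at the top works around: the fifth parameter of fisheye::undistortImage is Knew, the camera matrix used to render the output image, and the code above passes an identity matrix there. With Knew = eye(3), the result is projected with a focal length of 1 around the point (0,0), which shows up as exactly the behaviour described: a distorted image squeezed into the top-left corner. Reusing K, or the newK estimated in the edit, keeps the undistorted image framed normally. A two-line sketch against the variables above:

// Knew (the 5th argument) is the camera matrix of the output; an identity
// matrix renders at focal length 1 around (0,0). Reuse the calibration instead.
Mat output;
fisheye::undistortImage(frame, output, K, D, K);   // Knew = K
// or, matching the edit at the top of this post:
// fisheye::undistortImage(frame, output, K, D, newK);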

How to determine the values of the 3 channels in RGB or BGR

This is the image that I need to detect.
// cdst is the image
// With this code I want to read the channel values at a certain pixel, but it only picks up the first channel; the 2nd and 3rd always come out as 0
void dec()
{
Mat em;
cdst="path of the image";
//this is the value of BGR per color that i wish to check
Vec3f red(0, 0, 255);
Vec3f blue(255, 0, 0);
Vec3f green(0, 128, 0);
Vec3f yellow(0, 255, 255);
Vec3f marron(0, 0,128);
Vec3f pink(147, 20, 255);
Vec3f indigo(130, 0, 75);
Vec3f midblue(112, 25, 25);
Vec3f magenta(139, 0, 139);
//em will hold the cdst image
em=cdst;
//for loop to determine what are the colors
for (int i = 0; i < l.size(); i++)
{
int x = l[i][0];
int y = l[i][1];
cout << x << " " << y<<endl;
Vec3f px = em.at<uchar>(y,x);
//I'm trying to print the values of all 3 channels,
//but only the first one has a value; the second and third are 0
cout << px.val[0] << " " << px.val[1] << " "<<px.val[2]<<endl;
if (px == pink)
{
cout<<"A";
}
}
}
How about trying this code? The key difference is reading the pixel with em.at<Vec3b>(i, j), which returns all three channels, rather than em.at<uchar>, which reads only a single byte.
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(){
Mat em(100, 100, CV_8UC3, Scalar::all(0));
rectangle(em, Rect(30, 30, 10, 10), Scalar(147, 20, 255), -1);
imshow("em", em);
waitKey(1);
Vec3b pink(147,20,255);
for (int i = 0; i < em.rows; i++){
for (int j = 0; j < em.cols; j++){
Vec3b px = em.at<Vec3b>(i, j);
cout << px << endl;
if (px == pink){
cout << "A" ;
}
}
}
waitKey(0);
destroyAllWindows();
return 0;
}

Watershed of MRI images

I want to apply a marker-based watershed segmentation algorithm, but I have a problem with this code: it only works with one image (lena.jpg).
I want to use it with MRI images, but it's not working.
I think the problem lies in the difference between RGB and grayscale images.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
using namespace cv;
using namespace std;
static void help()
{
cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
"Usage:\n"
"./watershed [image_name -- default is ../data/fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
"\tr - restore the original image\n"
"\tw or SPACE - run watershed segmentation algorithm\n"
"\t\t(before running it, *roughly* mark the areas to segment on the image)\n"
"\t (before that, roughly outline several markers on the image)\n";
}
Mat markerMask, img;
Point prevPt(-1, -1);
static void onMouse(int event, int x, int y, int flags, void*)
{
if (x < 0 || x >= img.cols || y < 0 || y >= img.rows)
return;
if (event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON))
prevPt = Point(-1, -1);
else if (event == EVENT_LBUTTONDOWN)
prevPt = Point(x, y);
else if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON))
{
Point pt(x, y);
if (prevPt.x < 0)
prevPt = pt;
line(markerMask, prevPt, pt, Scalar::all(255), 5, 8, 0);
line(img, prevPt, pt, Scalar::all(255), 5, 8, 0);
prevPt = pt;
imshow("image", img);
}
}
int main(int argc, char** argv)
{
char* filename = argc >= 2 ? argv[1] : (char*)"samah.png";
Mat img0 = imread(filename, 1), imgGray;
if (img0.empty())
{
cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
//system("wait");
return 1;
}
help();
namedWindow("image", 1);
img0.copyTo(img);
cvtColor(img, markerMask, COLOR_BGR2GRAY);
cvtColor(markerMask, imgGray, COLOR_GRAY2BGR);
markerMask = Scalar::all(0);
imshow("image", img);
setMouseCallback("image", onMouse, 0);
for (;;)
{
int c = waitKey(0);
if ((char)c == 27)
break;
if ((char)c == 'r')
{
markerMask = Scalar::all(0);
img0.copyTo(img);
imshow("image", img);
}
if ((char)c == 'w' || (char)c == ' ')
{
int i, j, compCount = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(markerMask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
if (contours.empty())
continue;
Mat markers(markerMask.size(), CV_32S);
markers = Scalar::all(0);
int idx = 0;
for (; idx >= 0; idx = hierarchy[idx][0], compCount++)
drawContours(markers, contours, idx, Scalar::all(compCount + 1), -1, 8, hierarchy, INT_MAX);
if (compCount == 0)
continue;
vector<Vec3b> colorTab;
for (i = 0; i < compCount; i++)
{
int b = theRNG().uniform(0, 255);
int g = theRNG().uniform(0, 255);
int r = theRNG().uniform(0, 255);
colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
}
double t = (double)getTickCount();
watershed(img0, markers);
t = (double)getTickCount() - t;
printf("execution time = %gms\n", t*1000. / getTickFrequency());
Mat wshed(markers.size(), CV_8UC3);
// paint the watershed image
for (i = 0; i < markers.rows; i++)
for (j = 0; j < markers.cols; j++)
{
int index = markers.at<int>(i, j);
if (index == -1)
wshed.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
else if (index <= 0 || index > compCount)
wshed.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
else
wshed.at<Vec3b>(i, j) = colorTab[index - 1];
}
wshed = wshed*0.5 + imgGray*0.5;
imshow("watershed transform", wshed);
}
}
system("wait");
return 0;
}
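A hedged guess about the MRI failure, since the sample itself is image-agnostic: watershed() requires an 8-bit 3-channel input, and medical scans are often stored as 16-bit single-channel files. imread(filename, 1) converts the depth blindly, which can leave an almost-black image if the scanner only uses the low bits. A small sketch of an explicit load-and-convert step that could replace the imread call above, using the same filename variable:

// Sketch: load a possibly 16-bit, single-channel MRI slice and convert it
// into the 8-bit 3-channel format that watershed() expects.
Mat raw = imread(filename, IMREAD_ANYDEPTH | IMREAD_GRAYSCALE);
if (raw.empty()) return 1;
Mat gray8, img0;
normalize(raw, gray8, 0, 255, NORM_MINMAX, CV_8U); // stretch to the full 8-bit range
cvtColor(gray8, img0, COLOR_GRAY2BGR);             // watershed needs CV_8UC3

If the scans are DICOM files, imread will not open them at all; they would first need to be exported to a standard format such as PNG or TIFF.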