I have removed non-maximum regions from the vector "bbox", but when I plot the result it still shows all regions, i.e. both suppressed and unsuppressed. If I measure bbox.size() before and after non-maximum suppression it reports 4677 and 3582 respectively.
Ptr<MSER> ms = MSER::create();
vector<Rect> bbox, filtered;
vector<vector<Point> > regions;
vector<int> vec;
ms->detectRegions(gray, regions, bbox);
cout << bbox.size() << " before delete" << endl;
// remove every box that is strictly nested inside a bigger box
for (int i = 0; i < bbox.size(); i++) {
    for (int j = 0; j < bbox.size(); j++) {
        if ((bbox[i].x > bbox[j].x) && (bbox[i].y > bbox[j].y) &&
            ((bbox[i].x + bbox[i].width) < (bbox[j].x + bbox[j].width)) &&
            ((bbox[i].y + bbox[i].height) < (bbox[j].y + bbox[j].height)) &&
            (bbox[i].area() < bbox[j].area())) {
            bbox.erase(bbox.begin() + i);
        }
    }
}
cout << bbox.size() << " after" << endl;
for (int k = 0; k < bbox.size(); k++) {
    rectangle(src, bbox[k], CV_RGB(0, 255, 0));
    //cout << k << endl;
}
imshow("mser", src);
imwrite("NoN.jpg", src);
I have a picture with a table in it, and I need to extract the boxes that contain handwriting and crop them, because I need to recognise the handwriting in those boxes one by one. The problem is that the findSquares function gives me far too many squares with the same coordinates (within about 5 pixels) because of the several threshold levels. I already reduced the number of squares from 300 to 90 by searching in only one channel. My teacher told me to filter the extra squares by their coordinates: if the difference between the square coordinates is less than 10 pixels, it is the same box.
The picture with the table is here: https://ibb.co/Ms8YP8n
Instead of 20 boxes it crops around 90, and most of them are the same.
So I was trying to go through the squares vector in a for loop and eliminate the squares that appear twice.
Here is the code that I was trying:
vector<vector<Point> > same;
same.clear();
same = squares;
int sq = squares.size();
for (int i = 0; i < sq; i++) {
    for (int j = 1; j < sq - 1; j++) {
        if (-5 < (same[i][0].x - same[j][0].x) < 5 && -5 < (same[i][0].y - same[j][0].y) < 5 &&
            -5 < (same[i][1].x - same[j][1].x) < 5 && -5 < (same[i][1].y - same[j][1].y) < 5 &&
            -5 < (same[i][2].x - same[j][2].x) < 5 && -5 < (same[i][2].y - same[j][2].y) < 5 &&
            -5 < (same[i][3].x - same[j][3].x) < 5 && -5 < (same[i][3].y - same[j][3].y) < 5) {
            squares.erase(same.begin() + j);
        }
    }
}
The "same" vector is just a copy of "squares".
The error that gives me is "Vector erase iterator outside range".
You can find this code in the drawSquare function bellow where you can see the full code too.
I need the pictures for future proccessing, I need to recognise the handwritten numbers.
Can anybody please help me?With another method or ideeas or some fixes in this code...
Thank you !
static void findSquares(const Mat& image, vector<vector<Point> >& squares)
{
    squares.clear();
    Mat pyr, timg, timb, gray0(image.size(), CV_8UC1), gray;
    pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;
    // find squares in every color plane of the image
    for (int c = 0; c < 1; c++)
    {
        int ch[] = { c, 0 };
        mixChannels(&timg, 1, &gray0, 1, ch, 1);
        // try several threshold levels
        for (int l = 0; l < N; l++)
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                // apply Canny. Take the upper threshold from the slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate Canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1, -1));
            }
            else
            {
                // apply threshold if l != 0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l + 1) * 255 / N;
                // imshow("graaay0", gray);
            }
            // find contours and store them all as a list
            findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
            vector<Point> approx;
            // test each contour
            for (int i = 0; i < contours.size(); i++)
            {
                // approximate the contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true);
                // square contours should have 4 vertices after approximation,
                // a relatively large area (to filter out noisy contours),
                // and be convex.
                // Note: the absolute value of the area is used because
                // the area may be positive or negative, in accordance with
                // the contour orientation
                if (approx.size() >= 4 && approx.size() <= 6 &&
                    fabs(contourArea(Mat(approx))) > 5000 && fabs(contourArea(Mat(approx))) < 8000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle
                    // vertices to the resultant sequence
                    if (maxCosine >= 0 && maxCosine < 0.2)
                        squares.push_back(approx);
                }
            }
        }
    }
}
// the function draws all the squares in the image
static void drawSquares(Mat& image, vector<vector<Point> > &squares)
{
    vector<vector<Point> > same;
    same.clear();
    same = squares;
    int sq = squares.size();
    for (int i = 0; i < sq; i++) {
        for (int j = 1; j < sq; j++) {
            if (-5 < (same[i][0].x - same[j][0].x) < 5 && -5 < (same[i][0].y - same[j][0].y) < 5 &&
                -5 < (same[i][1].x - same[j][1].x) < 5 && -5 < (same[i][1].y - same[j][1].y) < 5 &&
                -5 < (same[i][2].x - same[j][2].x) < 5 && -5 < (same[i][2].y - same[j][2].y) < 5 &&
                -5 < (same[i][3].x - same[j][3].x) < 5 && -5 < (same[i][3].y - same[j][3].y) < 5) {
                squares.erase(same.begin() + j);
            }
        }
    }
    for (int i = 0; i < squares.size(); i++)
    {
        const Point* p = &squares[i][0];
        int n = (int)squares[i].size();
        polylines(image, &p, &n, 1, true, Scalar(0, 255, 0), 3, LINE_AA);
    }
    imshow("Plate with green", image);
}
Mat ImageExtract(Mat& image, vector<vector<Point> >& squares)
{
    char file_name[100];
    int x = squares.size();
    printf("squares %d", x);
    Mat roi;
    for (int i = 0; i < squares.size(); i++)
    {
        sprintf(file_name, "cropped%d.jpg", i + 1);
        Rect r = boundingRect(squares[i]);
        roi = Mat(image, r);
        imwrite(file_name, roi);
    }
    return roi;
}
void Preprocessing() {
    char fname[MAX_PATH];
    openFileDlg(fname);
    Mat img = imread(fname, 1);
    std::vector<std::vector<Point> > squares;
    findSquares(img, squares);
    ImageExtract(img, squares);
    drawSquares(img, squares);
    waitKey(0);
}
int main() {
    Preprocessing();
    return 0;
}
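Two things in the duplicate filter are worth pointing out. First, a chained comparison like -5 < (a - b) < 5 does not do what it reads as in C++: it evaluates (-5 < (a - b)) first, which yields 0 or 1, and both are always < 5, so the whole condition is effectively always true. You need abs(a - b) < 5. Second, squares.erase(same.begin() + j) passes an iterator of same to the squares vector, which is exactly what triggers "Vector erase iterator outside range". A minimal corrected sketch, keeping the ±5 pixel tolerance from the question and erasing from one vector with its own iterators:

// returns true if two quads have all four corners within `tol` pixels
static bool sameSquare(const vector<Point>& a, const vector<Point>& b, int tol = 5)
{
    for (int k = 0; k < 4; k++)
        if (abs(a[k].x - b[k].x) >= tol || abs(a[k].y - b[k].y) >= tol)
            return false;
    return true;
}

// keep the first occurrence of each square, drop later near-duplicates
static void removeDuplicateSquares(vector<vector<Point> >& squares)
{
    for (size_t i = 0; i < squares.size(); i++)
        for (size_t j = i + 1; j < squares.size(); )
        {
            if (sameSquare(squares[i], squares[j]))
                squares.erase(squares.begin() + j); // erase from the same vector
            else
                ++j; // only advance when nothing was erased
        }
}

Note that this assumes, like the original, that duplicate quads list their corners in the same order; approxPolyDP may start at a different vertex for near-identical contours, so comparing boundingRect of each quad instead would be more robust.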
Mat m1 = Mat(500, 300, CV_64F, Vec3b(255, 255, 255));
for (int i = 0; i < m1.rows; ++i)
    for (int j = 0; j < m1.cols; ++j)
    {
        Vec3b color = m1.at<Vec3b>(Point(i, j));
    }
imshow("test2", m1);
waitKey();
The variable color should contain the color of the pixel, which is white, but when I run the code I get this error:
OpenCV Error: Assertion failed (((((sizeof(size_t)<<28)|0x8442211) >>
((traits::Depth<_Tp>::value) & ((1 << 3) - 1))*4) & 15) ==
elemSize1()) in cv::Mat::at, file
c:\opencv\build\include\opencv2\core\mat.inl.hpp, line 1118
The error message is telling you what you need to know. You've gone out of bounds!
Try:
Point(j, i)
Columns correspond to x. Rows correspond to y.
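For reference, a minimal sketch of the two indexing conventions. Note the assumption: the Mat must be created as CV_8UC3 for at<Vec3b> to be valid; with the CV_64F Mat from the question, the at<Vec3b> call fails the quoted element-size assertion regardless of the index order.

Mat m(500, 300, CV_8UC3, Scalar(255, 255, 255)); // 500 rows (y), 300 cols (x)
int row = 10, col = 20;
Vec3b a = m.at<Vec3b>(row, col);        // at(row, col), i.e. at(y, x)
Vec3b b = m.at<Vec3b>(Point(col, row)); // Point(x, y): the reversed order
CV_Assert(a[0] == b[0] && a[1] == b[1] && a[2] == b[2]); // same pixel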
Try this:
Mat m1 = Mat(500, 300, CV_8UC3, Vec3b(255, 255, 255)); // CV_8UC3 matches Vec3b; at<Vec3b> on CV_64F would still assert
for (int i = 0; i < m1.rows; ++i)
    for (int j = 0; j < m1.cols; ++j)
    {
        Vec3b color = m1.at<Vec3b>(i, j); // changed: at(row, col)
    }
imshow("test2", m1);
waitKey();
I just started learning OpenCV and I want to write a program that can detect the organs in a radiograph. The result I want looks like this.
I tried cv2.findContours, but it can't detect the correct one. Then I used the convex hull, and it returns this, which is not what I want either.
Is there another way to find contours in OpenCV that could help me here? I can only find the two ways above.
You should keep only the valid contours. You can use this code after findContours, changing the boundRect thresholds to match the area you want:
vector<vector<Point> > contours_poly(contourss.size());
vector<Rect> boundRect(contourss.size());
vector<Point2f> center(contourss.size());
vector<float> radius(contourss.size());
// get poly contours
for (int i = 0; i < contourss.size(); i++)
{
    approxPolyDP(Mat(contourss[i]), contours_poly[i], 3, true);
}
// keep only the important contours, dropping contours that lie within another
vector<vector<Point> > validContours;
for (int i = 0; i < contours_poly.size(); i++) {
    Rect r = boundingRect(Mat(contours_poly[i]));
    if (r.area() < 200) continue;
    bool inside = false;
    for (int j = 0; j < contours_poly.size(); j++) {
        if (j == i) continue;
        Rect r2 = boundingRect(Mat(contours_poly[j]));
        if (r2.area() < 200 || r2.area() < r.area()) continue;
        if (r.x > r2.x && r.x + r.width < r2.x + r2.width &&
            r.y > r2.y && r.y + r.height < r2.y + r2.height) {
            inside = true;
        }
    }
    if (inside) continue;
    validContours.push_back(contours_poly[i]);
}
// get the bounding rects
for (int i = 0; i < validContours.size(); i++) {
    boundRect[i] = boundingRect(Mat(validContours[i]));
}
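As a usage sketch (with hypothetical names: contourss is assumed to be the output of findContours on your binarized radiograph, and src the image you want to draw on), you could then visualize the surviving regions:

// draw each valid contour and its bounding rectangle
for (int i = 0; i < validContours.size(); i++) {
    drawContours(src, validContours, i, Scalar(0, 0, 255), 2);
    rectangle(src, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 2);
}
imshow("valid contours", src);
waitKey(0);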
I am trying to implement the Matlab function imquantize using OpenCV. Which OpenCV thresholding function should I use to implement the Matlab function multithresh? Once thresholding has been done, how do I label the pixels according to the thresholds? Is this the right way to implement imquantize? Are there any other functions I should include in the code?
There is an implementation based on OpenCV here, which should give you the idea:
cv::Mat
imquantize(const cv::Mat& in, const arma::fvec& thresholds) {
    BOOST_ASSERT_MSG(cv::DataType<float>::type == in.type(), "input is not of type float");
    cv::Mat index(in.size(), in.type(), cv::Scalar::all(1));
    for (int i = 0; i < thresholds.size(); i++) {
        cv::Mat temp = (in > thresholds(i)) / 255;
        temp.convertTo(temp, cv::DataType<float>::type);
        index += temp;
    }
    return index;
}
Update: thresholds is a vector of float threshold values (uniformly distributed over the number of levels you want to quantize to, within [0, 1]). This snippet shows how it is used:
const float step = 1./levels[i];
arma::fvec thresh = arma::linspace<arma::fvec>(step, 1.-step, levels[i]-1);
channels[i] = imquantize(channels[i], thresh);
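If you would rather not pull in the Armadillo dependency, here is a minimal sketch of the same idea with a plain std::vector<float>; the uniform threshold construction mirrors the linspace call above:

#include <opencv2/core.hpp>
#include <vector>

// index(x,y) = 1 + number of thresholds that in(x,y) exceeds
cv::Mat imquantize(const cv::Mat& in, const std::vector<float>& thresholds) {
    CV_Assert(in.type() == CV_32F);
    cv::Mat index(in.size(), CV_32F, cv::Scalar::all(1));
    for (size_t i = 0; i < thresholds.size(); i++) {
        cv::Mat temp = (in > thresholds[i]) / 255; // 0/1 mask
        temp.convertTo(temp, CV_32F);
        index += temp;
    }
    return index;
}

// uniform thresholds in (0, 1): i/levels for i = 1 .. levels-1,
// matching linspace(step, 1-step, levels-1) with step = 1/levels
std::vector<float> uniformThresholds(int levels) {
    std::vector<float> t;
    const float step = 1.f / levels;
    for (int i = 1; i < levels; i++)
        t.push_back(i * step);
    return t;
}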
I suppose you are looking for something like this:
/* function imquantize
 * 'inputImage' is the input image.
 * 'levels' is an array of thresholds.
 * 'quantizedImage' is the returned image
 * with quantized levels.
 */
Mat imquantize(Mat inputImage, vector<vector<int> > levels)
{
    // initialise the output label matrix
    Mat quantizedImage(inputImage.size(), inputImage.type(), Scalar::all(1));
    // apply labels to the pixels according to the thresholds
    for (int i = 0; i < inputImage.cols; i++)
    {
        for (int j = 0; j < inputImage.rows; j++)
        {
            // check whether the image is grayscale or BGR
            if (levels.size() == 1)
            {
                for (int k = 0; k < levels[0].size(); k++)
                {
                    // if pixel <= lowest threshold, then assign 0
                    if (inputImage.at<uchar>(j, i) <= levels[0][0])
                    {
                        quantizedImage.at<uchar>(j, i) = 0;
                    }
                    // if pixel >= highest threshold, then assign 255
                    else if (inputImage.at<uchar>(j, i) >= levels[0][levels[0].size() - 1])
                    {
                        quantizedImage.at<uchar>(j, i) = 255;
                    }
                    // check the level borders for the pixel and assign the
                    // corresponding upper-bound quantum to the pixel
                    else
                    {
                        if (levels[0][k] < inputImage.at<uchar>(j, i) && inputImage.at<uchar>(j, i) <= levels[0][k + 1])
                        {
                            quantizedImage.at<uchar>(j, i) = (k + 1) * 255 / (levels[0].size());
                        }
                    }
                }
            }
            else
            {
                Vec3b pixel = inputImage.at<Vec3b>(j, i);
                // process the blue channel
                for (int k = 0; k < levels[0].size(); k++)
                {
                    if (pixel.val[0] <= levels[0][0])
                    {
                        quantizedImage.at<Vec3b>(j, i)[0] = 0;
                    }
                    else if (pixel.val[0] >= levels[0][levels[0].size() - 1]) // levels[0].size(), not levels.size()
                    {
                        quantizedImage.at<Vec3b>(j, i)[0] = 255;
                    }
                    else
                    {
                        if (levels[0][k] < pixel.val[0] && pixel.val[0] <= levels[0][k + 1])
                        {
                            quantizedImage.at<Vec3b>(j, i)[0] = (k + 1) * 255 / (levels[0].size());
                        }
                    }
                }
                // process the green channel
                for (int k = 0; k < levels[1].size(); k++)
                {
                    if (pixel.val[1] <= levels[1][0])
                    {
                        quantizedImage.at<Vec3b>(j, i)[1] = 0;
                    }
                    else if (pixel.val[1] >= levels[1][levels[1].size() - 1])
                    {
                        quantizedImage.at<Vec3b>(j, i)[1] = 255;
                    }
                    else
                    {
                        if (levels[1][k] < pixel.val[1] && pixel.val[1] <= levels[1][k + 1])
                        {
                            quantizedImage.at<Vec3b>(j, i)[1] = (k + 1) * 255 / (levels[1].size());
                        }
                    }
                }
                // process the red channel
                for (int k = 0; k < levels[2].size(); k++)
                {
                    if (pixel.val[2] <= levels[2][0])
                    {
                        quantizedImage.at<Vec3b>(j, i)[2] = 0;
                    }
                    else if (pixel.val[2] >= levels[2][levels[2].size() - 1])
                    {
                        quantizedImage.at<Vec3b>(j, i)[2] = 255;
                    }
                    else
                    {
                        if (levels[2][k] < pixel.val[2] && pixel.val[2] <= levels[2][k + 1])
                        {
                            quantizedImage.at<Vec3b>(j, i)[2] = (k + 1) * 255 / (levels[2].size());
                        }
                    }
                }
            }
        }
    }
    return quantizedImage;
}
This function takes a Mat image and a 2D vector of thresholds, which can hold different levels for each channel.
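A possible usage sketch (the threshold values and "input.jpg" path are made up for illustration):

Mat img = imread("input.jpg", IMREAD_COLOR);
// one vector of thresholds per BGR channel
vector<vector<int> > levels = {
    { 64, 128, 192 },   // blue
    { 64, 128, 192 },   // green
    { 64, 128, 192 }    // red
};
Mat quantized = imquantize(img, levels);
imshow("quantized", quantized);
waitKey(0);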
I'm trying to get the number of differences between two pictures.
When I compare two images in grayscale, pixDiff != 0, but when it comes to RGB, pixDiff is always 0.
I used OpenCV's compare and also a custom loop.
Mat frame, oldFrame;
cap >> oldFrame;
if (analyseMod == MONOCHROME)
    cvtColor(oldFrame, oldFrame, CV_BGR2GRAY);
nbChannels = oldFrame.channels();
while (1)
{
    pixDiff = 0;
    cap >> frame;
    // test diff
    Mat diff;
    compare(oldFrame, frame, diff, CMP_NE);
    imshow("video 0", diff);
    imshow("video 1", frame);
    if (analyseMod == MONOCHROME)
    {
        cvtColor(frame, frame, CV_BGR2GRAY);
        for (int i = 0; i < frame.rows; i++)
            for (int j = 0; j < frame.cols; j++)
                if (frame.at<uchar>(i, j) < oldFrame.at<uchar>(i, j) - similarPixelTolerance || frame.at<uchar>(i, j) > oldFrame.at<uchar>(i, j) + similarPixelTolerance)
                    pixDiff++;
    }
    else if (analyseMod == RGB)
    {
        uint8_t *f = (uint8_t *)frame.data;
        uint8_t *o = (uint8_t *)oldFrame.data;
        for (int i = 0; i < frame.rows; i++)
        {
            for (int j = 0; j < frame.cols; j++)
            {
                if (f[nbChannels*i*frame.cols + j + RED] < o[nbChannels*i*oldFrame.cols + j + RED])
                    pixDiff++;
            }
        }
    }
    frame.copyTo(oldFrame);
    cout << pixDiff;
    if (waitKey(30) >= 0) break;
}
Thanks for any help.
I still don't get why you are not using your tolerance in the RGB case, but here is a solution for both cases, if you want to consider the color channels separately. Set CN to 1 for the monochrome case and to 3 for the RGB case.
const int CN = 3; // 3 for RGB, 1 for monochrome
uint8_t *f = frame.ptr<uint8_t>();
uint8_t *o = oldFrame.ptr<uint8_t>();
for (int i = 0; i < frame.rows; ++i)
{
    for (int j = 0; j < frame.cols; ++j)
    {
        for (int c = 0; c < CN; ++c)
        {
            if (abs(*f - *o) > similarPixelTolerance) ++pixDiff;
            ++f, ++o;
        }
    }
}
It is much more efficient to access pixels this way than to call at for each pixel. The only possible problem is if your images have padding, but by default OpenCV uses continuous allocation.
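Alternatively, a minimal sketch without an explicit loop, using cv::absdiff plus a threshold at the tolerance. Like the loop above, it counts each channel that differs by more than the tolerance; similarPixelTolerance is the variable from the question:

Mat diff, mask;
absdiff(frame, oldFrame, diff);          // per-channel |frame - oldFrame|
// reshape(1) flattens the channels into extra columns so countNonZero works
threshold(diff.reshape(1), mask, similarPixelTolerance, 255, THRESH_BINARY);
int pixDiff = countNonZero(mask);        // number of differing channel values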