OpenCV assertion failed - C++

I'm trying to extract from an image only the contours that have a specific size.
I proceed like this:
int offsetX;
int offsetY;

// here: read original image as 8UC3
cv::Mat original = cv::imread("0.png");
Mat imgx = original.clone();
cv::imshow("original", original);
cvtColor(imgx, imgx, CV_BGR2GRAY);

Mat thresh;
vector<Vec4i> hierarchy;
RNG rng(12345);
vector<vector<Point> > contours;
adaptiveThreshold(imgx, thresh, 255, 1, 1, 31, 2);
findContours(thresh, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

Mat drawing = Mat::zeros(thresh.size(), CV_8UC3);
cout << "drawing " << drawing.type() << endl;

cv::Mat image = cv::Mat(original.rows, original.cols, original.type());
image.setTo(cv::Scalar::all(255));

for (size_t i = 0; i < contours.size(); i++)
{
    vector<Point> cnt = contours[i];
    if (contourArea(cnt) > 0)
    {
        Rect rec = boundingRect(cnt);
        if ((rec.height > 20) && (3.5 * rec.height > rec.width) && (rec.width > 15) /*&& (rec.width < 40)*/)
        {
            cout << rec.x << " " << rec.y << endl;
            offsetX = rec.x;
            offsetY = rec.y;
            Mat roi = original(rec);
            int width = roi.cols;
            int height = roi.rows;
            cout << "h= " << height << " w= " << width << endl;

            cv::Rect characterLocation;
            if (height > 35)
                characterLocation = cv::Rect(offsetX + 3, offsetY, width, height);
            else
                characterLocation = cv::Rect(offsetX, offsetY, width, height);
            original(characterLocation).copyTo(image(characterLocation));

            imshow("jihedddd", roi);
            imwrite("xxxxxx.png", roi);

            Mat stagedImage;
            Mat img;
            Scalar color = Scalar(255, 255, 255);
            drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
            imshow("Contours", drawing);
            waitKey();
            GaussianBlur(stagedImage, img, Size(5, 5), 2, 2);
            medianBlur(img, stagedImage, 3);

            Mat copy = original.clone();
            rectangle(copy, Point(rec.x, rec.y),
                      Point(rec.x + rec.width, rec.y + rec.height),
                      CV_RGB(0x00, 0x00, 0xff), 3);
            cv::imshow("char copied", image);
        }
    }
}
medianBlur(image, image, 3);
cv::imshow("char copied", image);
cv::imwrite("characterC_result.tiff ", image);
cv::waitKey();
But when I run this code I get an error:
OpenCV Error: Assertion failed
This is an example of an image which gives me the error.

You have to be careful with this:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height);
If width is the image width, you are out of bounds here. You either have to subtract x from the width, or crop the rect to the image borders:
// get the Rect for the original image:
cv::Rect borders(Point(0,0), image.size());
// crop to the legal size:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height) & borders;
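Applied inside the loop from the question, the fix could look like this (a minimal sketch reusing the question's variable names; the empty-intersection guard is an addition of mine, not part of the original answer):

    // Rect covering the whole destination image
    cv::Rect borders(cv::Point(0, 0), image.size());

    // clip the shifted ROI so it can never leave the image
    cv::Rect characterLocation =
        cv::Rect(offsetX + 3, offsetY, width, height) & borders;

    // the intersection can be empty, so guard the copy
    if (characterLocation.area() > 0)
        original(characterLocation).copyTo(image(characterLocation));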

Related

OpenCV: false number of vertices of a circle

I'm struggling with shape detection using OpenCV for C++. Edged figures such as triangles and rectangles are detected trouble-free. But when it comes to a circle, the number of vertices is estimated at 6-8. Could somebody help me?
void getContours(Mat video){
    Mat grayscale, canny_output;
    cvtColor(video, grayscale, COLOR_RGB2GRAY); // converting image to grayscale
    GaussianBlur(grayscale, grayscale, Size(9, 9), 2, 2);
    threshold(grayscale, grayscale, 60, 255, THRESH_BINARY);

    vector<vector<Point>> contours, output_contour;
    vector<Vec4i> hierarchy;
    findContours(grayscale, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
    Mat drawing = Mat::zeros(grayscale.size(), CV_8UC3);

    vector<Point> c;
    for (size_t i = 0; i < contours.size(); i++){
        c = contours[i];
        Rect crect = boundingRect(c);
        // compute the center of the contour, then detect the name of the
        // shape using only the contour
        Moments M = moments(c);
        int cX, cY;
        cX = static_cast<int>(M.m10 / M.m00);
        cY = static_cast<int>(M.m01 / M.m00);
        string shape = detect(Mat(c));
        drawContours(drawing, contours, (int)i, Scalar(0, 255, 0), 2);
        Point pt(cX, cY);
        putText(drawing, shape, pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 2);
        imshow("contour", drawing);
    }
}

string detect(const Mat &curve){
    string shape = "unidentified";
    double peri = arcLength(curve, true);
    Mat approx;
    approxPolyDP(curve, approx, 0.04 * peri, true); // 0.01~0.05
    const int num_of_vertices = approx.rows;
    if (num_of_vertices == 0){
        shape = "circle";
    }
    if (num_of_vertices == 2){
        shape = "line";
    }
    cout << "\n" << num_of_vertices;
    return to_string(num_of_vertices);
}
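Not part of the question, but one common way to sidestep the vertex count entirely is to score each contour by its circularity, 4*pi*area / perimeter^2, which is close to 1.0 for a circle. A minimal sketch (the 0.85 threshold is an assumption, not a tested value):

    // Sketch: classify a contour as a circle by circularity instead of counting
    // approxPolyDP vertices. circularity is ~1.0 for a perfect circle.
    string detectByCircularity(const vector<Point> &curve){
        double area = contourArea(curve);
        double peri = arcLength(curve, true);
        if (peri <= 0)
            return "unidentified";
        double circularity = 4.0 * CV_PI * area / (peri * peri);
        return (circularity > 0.85) ? "circle" : "polygon";  // 0.85 is a guessed threshold
    }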

Is there a way to detect if a circle is connected to another circle with a line in OpenCV?

I'm trying to write a Maya plugin that recreates a 2D drawing of bones in UV space in 3D space. I'm starting with a simple plane with this image:
What I need is to find the circles and create a hierarchy.
I tried Nuzhny's approach but I'm getting horizontal lines like:
My code:
Mat image;
image = imread("c:/pjs/sk.jpg"); // Read the file

cv::Mat hsv_image;
cv::cvtColor(image, hsv_image, cv::COLOR_BGR2HSV);

cv::Mat lower_red_hue_range;
cv::Mat upper_red_hue_range;
cv::Mat white_hue_range;

// Separate the lines and circles
cv::inRange(hsv_image, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lower_red_hue_range);
cv::inRange(hsv_image, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upper_red_hue_range);
cv::inRange(hsv_image, cv::Scalar(0, 0, 20), cv::Scalar(0, 0, 255), white_hue_range);

cv::Mat red_hue_image;
cv::addWeighted(lower_red_hue_range, 1.0, upper_red_hue_range, 1.0, 0.0, red_hue_image);
cv::GaussianBlur(red_hue_image, red_hue_image, cv::Size(9, 9), 2, 2);

// Identify circles
std::vector<cv::Vec3f> circles;
cv::HoughCircles(red_hue_image, circles, HOUGH_GRADIENT, 1, red_hue_image.rows / 8, 100, 20, 0, 0);
if (circles.size() == 0) std::exit(-1);
for (size_t current_circle = 0; current_circle < circles.size(); ++current_circle) {
    cv::Point center(std::round(circles[current_circle][0]), std::round(circles[current_circle][1]));
    int radius = std::round(circles[current_circle][2]);
    cv::circle(image, center, radius, cv::Scalar(0, 255, 0), 5);
}

// Get the contours
cv::threshold(white_hue_range, white_hue_range, 11, 255, cv::THRESH_BINARY);
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(20, 20));
cv::dilate(white_hue_range, white_hue_range, element);
cv::dilate(white_hue_range, white_hue_range, element);
cv::erode(white_hue_range, white_hue_range, element);
cv::erode(white_hue_range, white_hue_range, element);
element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(5, 5));
cv::dilate(white_hue_range, white_hue_range, element);

Mat gray;
gray = white_hue_range;
Canny(gray, gray, 40, 100, 7);

/// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
RNG rng(12345);
findContours(gray, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

/// Draw contours
Mat drawing = Mat::zeros(gray.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
    Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
    drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
}

// Get the lines
vector<vector<Point2f> > lines;
vector<Point> approx;
for (unsigned int i = 0; i < contours.size(); i++)
{
    if (contours[i].size() > 4) {
        //cv::Rect box = cv::fitEllipse(contours[i]);
        cv::RotatedRect box = cv::fitEllipseAMS(contours[i]);
        cv::Point2f pts[4];
        box.points(pts);
        vector<cv::Point2f> line_pts;
        line_pts.resize(2);
        line_pts[0] = (pts[0] + pts[1]) / 2;
        line_pts[1] = (pts[2] + pts[3]) / 2;
        lines.push_back(line_pts);
    }
}
for (int i = 0; i < lines.size(); i++)
{
    line(image, lines[i].at(0), lines[i].at(1), 128, 4, LINE_8, 0);
}
imshow("Result window", image);
1. cvtColor to HSV.
2. inRange(redFrom, redTo) + findContours to find red circles.
3. inRange(whiteFrom, whiteTo) + findContours to find white lines.
4. Line contour to line:

    cv::RotatedRect box = cv::fitEllipse(line_contours[i]);
    cv::Point2f pts[4];
    box.points(pts);

    cv::Point2f line_pts[2];
    line_pts[0] = (pts[0] + pts[3]) / 2;
    line_pts[1] = (pts[1] + pts[2]) / 2;

5. Nested loops to find the nearest circle for each line point.
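A rough sketch of that last step, matching each line endpoint to its nearest circle (this is my illustration, not Nuzhny's code; circles and lines are assumed to be the vector<Vec3f> and vector<vector<Point2f>> from the code above):

    // For each endpoint of every line, find the closest detected circle centre.
    for (size_t i = 0; i < lines.size(); ++i)
    {
        for (int e = 0; e < 2; ++e)                  // both endpoints of the line
        {
            int bestCircle = -1;
            float bestDist = FLT_MAX;
            for (size_t c = 0; c < circles.size(); ++c)
            {
                cv::Point2f centre(circles[c][0], circles[c][1]);
                cv::Point2f d = lines[i][e] - centre;
                float dist = std::sqrt(d.x * d.x + d.y * d.y);
                if (dist < bestDist) { bestDist = dist; bestCircle = (int)c; }
            }
            // bestCircle is the joint this endpoint attaches to; the two endpoints
            // of one line then give the pair of circles connected by that bone.
        }
    }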

The centroid of vessels - OpenCV

I have this image of the vascular bundle.
My task is to find the centroid of the blood vessels.
I tried image moments but I get an error message.
My code is here:
int main() {
    cv::Mat img = imread("C:\\Users\\ASUS\\Desktop\\fond1.png ", CV_LOAD_IMAGE_COLOR);
    Mat blue, green, red, step1, otsu, step11, green1, blue1;
    Mat bgr[3]; // destination array
    split(img, bgr); // split source
    red.push_back(bgr[2]);

    Moments mu = moments(red, true);
    Point center;
    center.x = mu.m10 / mu.m00;
    center.y = mu.m01 / mu.m00;
    circle(red, center, 2, Scalar(0, 0, 255));
    imshow("Result", red);

    Mat mask(red.size(), CV_8UC1, Scalar::all(0));
    // Create Polygon from vertices
    vector<Point> ROI_Vertices(3);
    ROI_Vertices.push_back(Point(0, 0));
    ROI_Vertices.push_back(Point(center.x, center.y));
    ROI_Vertices.push_back(Point(0, red.rows - 1));
    vector<Point> ROI_Poly;
    approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, true);
    // Fill polygon white
    fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);

    Mat hide(red.size(), CV_8UC3);
    red.copyTo(hide, mask);
    imshow("mask", hide);

    Mat blackhat, tophat, dst;
    Mat element = getStructuringElement(MORPH_ELLIPSE, Size(6, 6));
    morphologyEx(hide, blackhat, MORPH_BLACKHAT, element);
    imshow("step1", blackhat);
    morphologyEx(blackhat, tophat, MORPH_TOPHAT, element);
    imshow("step2", tophat);

    cv::Mat r1 = cv::Mat::zeros(dst.rows, dst.cols, CV_8UC1);
    tophat.copyTo(r1);
    imshow("vessel", r1);
    threshold(r1, dst, 9, 255, THRESH_BINARY);

    // Find contours
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;

    /// Get the moments
    Mat canny_output;
    // detect edges using canny
    Canny(dst, canny_output, 50, 150, 3);
    // find contours
    findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
    // get the moments
    vector<Moments> mu(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        mu[i] = moments(contours[i], false);
    }
    // get the centroid of figures.
    vector<Point2f> mc(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
    }
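For what it's worth, the centroids computed above could be drawn like this (a sketch continuing the snippet, with a guard for degenerate contours whose m00 is zero; it assumes the contours, mu, mc and dst variables from above):

    // Sketch: draw each contour and its centroid on a blank canvas.
    Mat centroids = Mat::zeros(dst.size(), CV_8UC3);
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (mu[i].m00 == 0) continue;   // skip degenerate contours
        drawContours(centroids, contours, (int)i, Scalar(0, 255, 0), 1);
        circle(centroids, Point(cvRound(mc[i].x), cvRound(mc[i].y)), 3, Scalar(0, 0, 255), -1);
    }
    imshow("centroids", centroids);
    waitKey();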

Number plate localization in bulk of images

I have got a bulk of car images and want to perform automatic number plate recognition, but I am stuck at the localization phase. I want to get each license plate individually as output, so that I can perform recognition on it. Here is my code for localization:
int main(int args, char* argv)
{
    //String filename;
    //filename = "";
    cv::Mat image = cv::imread("C:\\Users\\Sarora\\Downloads\\Images\\frame_1375.jpg", CV_LOAD_IMAGE_COLOR);
    cv::Mat img;
    cv::Mat img_sobel;
    cv::Mat grad_x, grad_y;
    cv::Mat abs_grad_x, abs_grad_y;
    cv::Mat imgContours;
    //vector<Plate> result;

    cv::cvtColor(image, img, CV_BGR2GRAY);
    blur(img, img, cv::Size(5, 5));
    //cv::namedWindow("Img1.jpg", CV_WINDOW_AUTOSIZE);

    // Sobel filter applied on image
    cv::Sobel(img, grad_x, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_x, abs_grad_x);
    cv::Sobel(img, grad_y, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_y, abs_grad_y);
    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, img_sobel);
    cv::imwrite("Img2.jpg", img_sobel);

    // Threshold the image
    cv::Mat Thresh_img;
    threshold(img_sobel, Thresh_img, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
    //imshow("Threshold", Thresh_img);

    // Morphological close operation applied
    cv::Mat element1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::morphologyEx(Thresh_img, Thresh_img, CV_MOP_CLOSE, element1);
    cv::imwrite("Close1.jpg", Thresh_img);
    //cv::waitKey(5000);

    // Find contours of whole image
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(Thresh_img, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    //cv::drawContours(image, contours, -1, cv::Scalar(0, 0, 255), 3);
    cv::imwrite("Contours1.jpg", image);

    std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
    std::vector<cv::RotatedRect> rects;
    //vector<vector<Point> > contours_poly(rects.size());
    //vector<Rect> boundRect(rects.size());

    // Remove patches not inside the limits of aspect ratio and area
    while (itc != contours.end()) {
        cv::RotatedRect mr = cv::minAreaRect(cv::Mat(*itc));
        if (!verifySizes(mr))
        {
            itc = contours.erase(itc);
        } else {
            ++itc;
            rects.push_back(mr);
        }
    }

    cv::Mat drawing;
    vector<vector<cv::Point> > contours_poly(rects.size());
    vector<cv::Rect> boundRect(rects.size());

    // Draw contours
    cv::Mat output;
    image.copyTo(output);
    for (int i = 0; i < rects.size(); i++)
    {
        approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
        boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
    }
    //cv::imwrite("Contours.jpg", output);
    for (int i = 0; i < rects.size(); i++)
    {
        drawContours(output, contours_poly, i, CV_RGB(255, 255, 255), 1, 8, vector<cv::Vec4i>(), 0, cv::Point());
        //rectangle(output, boundRect[i].tl(), boundRect[i].br(), CV_RGB(0, 255, 0), 3, 8, 0);
    }
    cv::imwrite("drawing1.jpg", output);
}

bool verifySizes(cv::RotatedRect mr){
    float error = 0.4;
    // Set a min and max area. All other patches are discarded
    int min = 5;    // minimum area
    int max = 1000; // maximum area
    // Get only patches that match
    float rmin = 1;
    float rmax = 10;
    int area = mr.size.height * mr.size.width;
    float r = (float)mr.size.width / (float)mr.size.height;
    if (r < 1)
        r = (float)mr.size.height / (float)mr.size.width;
    if ((area < min || area > max) || (r < rmin || r > rmax)){
        return false;
    } else {
        return true;
    }
}
I have performed a Sobel filter, thresholding (Otsu + binary), a morphological CLOSE operation, findContours(), removal of contours not inside the limits of area and aspect ratio, and approxPolyDP on the image. This is my input image.
This is the approxPolyDP image.
The problem is that the output image does not have rectangles formed around the license plate. Can anyone tell me what is wrong in the code, and also how I can proceed further to automatically find license plates in a bulk of images? I am confused.
Thank you

OpenCV copy bounded text area to new image

I am new to OpenCV and I am using this code to bound the text areas in an image. After that, I filter the contours and push the bounding rectangles into a vector<Rect> in order to copy these areas to a new image.
Mat large = img1;
Mat rgb;
// downsample and use it for processing
pyrUp(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);

// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(2, 2));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);

// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);

// connect horizontally oriented regions
Mat connected;
//morphKernel = getStructuringElement(MORPH_RECT, Size(7, 1));
//morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);

// find contours
connected = bw;
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
Mat mask2;
Mat mask3;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/*drawContours(mask2, contours, -1, Scalar(255), CV_FILLED);
Mat Crop(img1.rows, img1.cols, CV_8UC3);
Crop.setTo(Scalar(0, 255, 0));
img1.copyTo(Crop, mask2);
normalize(mask2.clone(), mask2, 0.0, 255.0, CV_MINMAX, CV_8UC1);
*/

vector<Rect> rect1;
int i = 0;

// filter contours
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
    Rect rect = boundingRect(contours[idx]);
    Mat maskROI(mask, rect);
    maskROI = Scalar(0, 0, 0);
    // fill the contour
    drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);
    // ratio of non-zero pixels in the filled region
    double r = (double)countNonZero(maskROI) / (rect.width * rect.height);
    if (r > .45 /* assume at least 45% of the area is filled if it contains text */
        &&
        (rect.height > 10 && rect.width > 10 && rect.height < 150 && rect.width < 150) /* constraints on region size */
        /* these two conditions alone are not very robust. better to use something
           like the number of significant peaks in a horizontal projection as a third condition */
        )
    {
        // making rectangles on bounded area
        rectangle(rgb, rect, Scalar(0, 255, 0), 2);
        // pushing bounding rectangles in vector for new mask
        rect1.push_back(rect);
    }
}
The input and the output I am getting after bounding the text areas are:
After that I am using this code to copy only the bounded areas to a new mask:
// copying bounded rectangles area from small to new mask2
for (int i = 0; i < rect1.size(); i++){
    mask2 = rgb(rect1[i]);
}
But by using this I only get the last bounded text area:
How can I get or update mask2's rows or cols to map all of the bounded text areas from rgb to mask2?
That's because mask2 ends up equal to the last rgb(rect1[i]) assigned.
You can easily solve this in two ways (using copyTo):
Create a mask (black initialized, same size as input image), where you draw (white) rectangles. Then you copy the original image to a black initialized image of the same size, using the obtained mask.
Copy each sub-image directly to a black initialized image.
Starting from this image, where the red rectangles will be your detected rectangles:
With the first approach you'll get a mask like:
and, for both approaches, the final result will be:
Code for the first approach:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Your image
    Mat3b img = imread("path_to_image");

    // Your rectangles
    vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};

    // Mask for rectangles (black initialized)
    Mat1b mask(img.rows, img.cols, uchar(0));

    Mat3b dbgRects = img.clone();
    for (int i = 0; i < rects.size(); ++i)
    {
        // Draw white rectangles on mask
        rectangle(mask, rects[i], Scalar(255), CV_FILLED);

        // Show rectangles
        rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
    }

    // Black initialized result
    Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));
    img.copyTo(result, mask);

    imshow("Rectangles", dbgRects);
    imshow("Result", result);
    waitKey();

    return 0;
}
Code for the second approach:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Your image
    Mat3b img = imread("path_to_image");

    // Your rectangles
    vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};

    // Black initialized result
    Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));

    Mat3b dbgRects = img.clone();
    for (int i = 0; i < rects.size(); ++i)
    {
        img(rects[i]).copyTo(result(rects[i]));

        // Show rectangles
        rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
    }

    imshow("Rectangles", dbgRects);
    imshow("Result", result);
    waitKey();

    return 0;
}