I'm trying to match this image
in this image
However, I can't find more than one boss enemy. What do I need to do to find the others?
Image Loading
struct XYposition {
    float X;
    float Y;
};

std::vector<cv::Mat> bossList;
std::string bossStrings[1] = { "sprites\\boss\\bossUp.png" };
for (int i = 0; i < 1; i++) {
    cv::Mat pic = cv::imread(bossStrings[i], CV_LOAD_IMAGE_GRAYSCALE);
    bossList.push_back(pic);
}
multipleTemplateMatch(screenImage, bossList);
Template Matching
std::vector<XYposition> multipleTemplateMatch(cv::Mat &img, std::vector<cv::Mat> tplList){
    std::vector<XYposition> matches;

    cv::Mat convertImg(img.rows, img.cols, CV_8UC3);
    cv::cvtColor(img, convertImg, CV_BGRA2GRAY);

    double threshold = 0.8;
    int imgint = convertImg.type();
    for (cv::Mat tpl : tplList){
        int tplint = tpl.type();
        cv::Mat result(convertImg.rows - tpl.rows + 1, convertImg.cols - tpl.cols + 1,
                       CV_32FC1); // must be this result type
        cv::matchTemplate(convertImg, tpl, result, CV_TM_CCOEFF_NORMED);
        cv::threshold(result, result, threshold, 1., CV_THRESH_TOZERO);

        while (true)
        {
            double minval, maxval;
            cv::Point minloc, maxloc;
            cv::minMaxLoc(result, &minval, &maxval, &minloc, &maxloc);

            if (maxval >= threshold)
            {
                rectangle(result, maxloc, cv::Point(maxloc.x - tpl.cols, maxloc.y - tpl.rows),
                          cv::Scalar(0, 0, 255), 4, 8, 0);
                cv::floodFill(result, maxloc, cv::Scalar(0), 0, cv::Scalar(.1), cv::Scalar(1.));
                XYposition info = {
                    maxloc.x - ceil(tpl.cols / 2), maxloc.y - ceil(tpl.rows / 2)
                };
                matches.push_back(info);
            }
            else
                break;
        }
    }
    return matches;
}
I didn't debug your code, but since it doesn't work (the floodFill is probably messing up your result matrix), here is a simple working sample.
I iterate over the blobs in the result matrix whose values are above a threshold, and find the highest value within each blob (the blob is used as a mask to retrieve the actual values from the result matrix).
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    Mat3b img = imread("path_to_image");
    Mat3b templ = imread("path_to_template");

    Mat1b img_gray;
    Mat1b templ_gray;
    cvtColor(img, img_gray, COLOR_BGR2GRAY);
    cvtColor(templ, templ_gray, COLOR_BGR2GRAY);

    Mat1f result;
    matchTemplate(img_gray, templ_gray, result, TM_CCOEFF_NORMED);

    // Keep only the scores above the threshold. THRESH_TOZERO keeps the actual
    // values, so minMaxLoc below can still find the real peak inside each blob.
    double thresh = 0.7;
    threshold(result, result, thresh, 1., THRESH_TOZERO);

    // Binary 8-bit version of the result, used only to find the blobs as contours.
    Mat1b resb;
    result.convertTo(resb, CV_8U, 255);

    vector<vector<Point>> contours;
    findContours(resb, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

    for (int i = 0; i < contours.size(); ++i)
    {
        // Mask of the current blob.
        Mat1b mask(result.rows, result.cols, uchar(0));
        drawContours(mask, contours, i, Scalar(255), CV_FILLED);

        // Highest score inside the blob = location of this match.
        Point max_point;
        double max_val;
        minMaxLoc(result, NULL, &max_val, NULL, &max_point, mask);

        rectangle(img, Rect(max_point.x, max_point.y, templ.cols, templ.rows), Scalar(0, 255, 0), 2);
    }

    return 0;
}
Result:
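If you would rather keep your original minMaxLoc loop, a simpler alternative to floodFill (just a sketch that reuses result, threshold, tpl, matches and XYposition from your function, not part of the tested sample above) is to zero out a template-sized window around each accepted maximum so it cannot be found again:

while (true)
{
    double minval, maxval;
    cv::Point minloc, maxloc;
    cv::minMaxLoc(result, &minval, &maxval, &minloc, &maxloc);
    if (maxval < threshold)
        break;

    // store the top-left corner of this match
    XYposition info = { (float)maxloc.x, (float)maxloc.y };
    matches.push_back(info);

    // suppress this peak: zero a template-sized window around it,
    // clipped to the borders of the result matrix
    cv::Rect suppress(maxloc.x - tpl.cols / 2, maxloc.y - tpl.rows / 2, tpl.cols, tpl.rows);
    suppress &= cv::Rect(0, 0, result.cols, result.rows);
    result(suppress).setTo(0);
}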
Related
I have this image of the vascular bundle.
My task is to find the centroid of the blood vessels.
I tried image moments, but I get this error message:
My code is here:
int main() {
    cv::Mat img = imread("C:\\Users\\ASUS\\Desktop\\fond1.png ", CV_LOAD_IMAGE_COLOR);
    Mat blue, green, red, step1, otsu, step11, green1, blue1;
    Mat bgr[3];      // destination array
    split(img, bgr); // split source
    red.push_back(bgr[2]);

    Moments mu = moments(red, true);
    Point center;
    center.x = mu.m10 / mu.m00;
    center.y = mu.m01 / mu.m00;
    circle(red, center, 2, Scalar(0, 0, 255));
    imshow("Result", red);

    Mat mask(red.size(), CV_8UC1, Scalar::all(0));

    // Create Polygon from vertices
    vector<Point> ROI_Vertices(3);
    ROI_Vertices.push_back(Point(0, 0));
    ROI_Vertices.push_back(Point(center.x, center.y));
    ROI_Vertices.push_back(Point(0, red.rows - 1));

    vector<Point> ROI_Poly;
    approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, true);

    // Fill polygon white
    fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);

    Mat hide(red.size(), CV_8UC3);
    red.copyTo(hide, mask);
    imshow("mask", hide);

    Mat blackhat, tophat, dst;
    Mat element = getStructuringElement(MORPH_ELLIPSE, Size(6, 6));
    morphologyEx(hide, blackhat, MORPH_BLACKHAT, element);
    imshow("step1", blackhat);
    morphologyEx(blackhat, tophat, MORPH_TOPHAT, element);
    imshow("step2", tophat);

    cv::Mat r1 = cv::Mat::zeros(dst.rows, dst.cols, CV_8UC1);
    tophat.copyTo(r1);
    imshow("vessel", r1);
    threshold(r1, dst, 9, 255, THRESH_BINARY);

    // Find contours
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;

    /// Get the moments
    Mat canny_output;
    // detect edges using canny
    Canny(dst, canny_output, 50, 150, 3);
    // find contours
    findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

    // get the moments
    vector<Moments> mu(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        mu[i] = moments(contours[i], false);
    }

    // get the centroid of figures.
    vector<Point2f> mc(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
    }
I have got a bulk of car images and want to perform automatic number plate recognition, but I am stuck at the localization phase. I want to get each license plate individually as output, so that I can perform recognition on it. Here is my code for localization:
int main(int args, char* argv)
{
    //String filename;
    //filename="";
    cv::Mat image = cv::imread("C:\\Users\\Sarora\\Downloads\\Images\\frame_1375.jpg", CV_LOAD_IMAGE_COLOR);
    cv::Mat img;
    cv::Mat img_sobel;
    cv::Mat grad_x, grad_y;
    cv::Mat abs_grad_x, abs_grad_y;
    cv::Mat imgContours;
    //vector <Plate> result;

    cv::cvtColor(image, img, CV_BGR2GRAY);
    blur(img, img, cv::Size(5, 5));
    //cv::namedWindow("Img1.jpg", CV_WINDOW_AUTOSIZE);

    // Sobel filter applied on image
    cv::Sobel(img, grad_x, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_x, abs_grad_x);
    cv::Sobel(img, grad_y, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_y, abs_grad_y);
    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, img_sobel);
    cv::imwrite("Img2.jpg", img_sobel);

    // Threshold the image
    cv::Mat Thresh_img;
    threshold(img_sobel, Thresh_img, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
    //imshow("Threshold", Thresh_img);

    // Morphological close operation applied
    cv::Mat element1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::morphologyEx(Thresh_img, Thresh_img, CV_MOP_CLOSE, element1);
    cv::imwrite("Close1.jpg", Thresh_img);
    //cv::waitKey(5000);

    // Find contours of whole image
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(Thresh_img, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    //cv::drawContours(image, contours, -1, cv::Scalar(0,0,255), 3);
    cv::imwrite("Contours1.jpg", image);

    std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
    std::vector<cv::RotatedRect> rects;
    //vector<vector<Point> > contours_poly(rects.size());
    //vector<Rect> boundRect(rects.size());

    // Remove patches not inside the limits of aspect ratio and area
    while (itc != contours.end()) {
        cv::RotatedRect mr = cv::minAreaRect(cv::Mat(*itc));
        if (!verifySizes(mr)) {
            itc = contours.erase(itc);
        } else {
            ++itc;
            rects.push_back(mr);
        }
    }

    cv::Mat drawing;
    vector<vector<cv::Point> > contours_poly(rects.size());
    vector<cv::Rect> boundRect(rects.size());

    // Draw contours
    cv::Mat output;
    image.copyTo(output);
    for (int i = 0; i < rects.size(); i++)
    {
        approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
        boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
    }
    //cv::imwrite("Contours.jpg", output);

    for (int i = 0; i < rects.size(); i++)
    {
        drawContours(output, contours_poly, i, CV_RGB(255, 255, 255), 1, 8, vector<cv::Vec4i>(), 0, cv::Point());
        //rectangle(output, boundRect[i].tl(), boundRect[i].br(), CV_RGB(0, 255, 0), 3, 8, 0);
    }
    cv::imwrite("drawing1.jpg", output);
}

bool verifySizes(cv::RotatedRect mr){
    float error = 0.4;
    // Set a min and max area. All other patches are discarded
    int min = 5;    // minimum area
    int max = 1000; // maximum area
    // Get only patches that match
    float rmin = 1;
    float rmax = 10;

    int area = mr.size.height * mr.size.width;
    float r = (float)mr.size.width / (float)mr.size.height;
    if (r < 1)
        r = (float)mr.size.height / (float)mr.size.width;

    if ((area < min || area > max) || (r < rmin || r > rmax)) {
        return false;
    } else {
        return true;
    }
}
I have performed the Sobel filter, thresholding (Otsu + binary), a morphological CLOSE operation, findContours(), removal of contours not inside the limits of area and aspect ratio, and approxPolyDP on the image. This is my input image
This is the approxPolyDP image
The problem is that the output image does not form rectangles around the license plate. Can anyone tell me what is wrong in the code, and how I can proceed further to automatically find license plates in a bulk of images? I am confused.
Thank you
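For the "bulk of images" part of the question, one common approach (just a sketch, not tested on your data; the folder pattern is a placeholder, and cv::glob is available in recent OpenCV versions) is to collect the file names with cv::glob and run the same localization steps on each file:

std::vector<cv::String> files;
cv::glob("C:\\Users\\Sarora\\Downloads\\Images\\*.jpg", files); // placeholder pattern
for (size_t i = 0; i < files.size(); i++)
{
    cv::Mat image = cv::imread(files[i], cv::IMREAD_COLOR);
    if (image.empty())
        continue; // skip unreadable files
    // ... run the Sobel / threshold / contour steps from main() on `image` ...
}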
I'm trying to detect some rectangles (white colored) which are drawn on an image (say, using Paint or some other image editing tool).
As I'm very much a beginner in image processing, I searched the net and the OpenCV sample programs to accomplish the job, but could not get it working perfectly. I'm using the OpenCV C++ library.
Algorithm that I’ve tried
cv::Mat src = cv::imread(argv[1]);
cv::Mat gray;
cv::cvtColor(src, gray, CV_BGR2GRAY);
meanStdDev(gray, mu, sigma);
cv::Mat bw;
cv::Canny(gray, bw, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0]);

std::vector<std::vector<cv::Point> > contours;
cv::findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

std::vector<cv::Point> approx;
for (int i = 0; i < contours.size(); i++){
    cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
    if (approx.size() >= 4 && approx.size() <= 6)
        Rect boundRect = boundingRect( Mat(approx) );
    rectangle( dst, boundRect.tl(), boundRect.br(), Scalar(255,255,255), 1, 8, 0 );
}
Only one rectangle is detected. Can you please guide me, or point me to a link about this?
Input image:
Output image:
I could not compile your code sample because boundRect is declared within the if-block but the rectangle drawing (which tries to access boundRect) is outside of the if-block, so I adjusted your code:
int main(int argc, char* argv[])
{
    cv::Mat src = cv::imread("C:/StackOverflow/Input/rectangles.png");
    cv::Mat dst = src.clone();
    cv::Mat gray;
    cv::cvtColor(src, gray, CV_BGR2GRAY);

    // ADDED: missing declaration of mu and sigma
    cv::Scalar mu, sigma;
    meanStdDev(gray, mu, sigma);

    cv::Mat bw;
    cv::Canny(gray, bw, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0]);

    // ADDED: displaying the canny output
    cv::imshow("canny", bw);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    std::vector<cv::Point> approx;
    for (int i = 0; i < contours.size(); i++){
        cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
        if (approx.size() >= 4 && approx.size() <= 6)
        {
            // ADDED: brackets around both lines belonging to the if-block
            cv::Rect boundRect = cv::boundingRect(cv::Mat(approx));
            cv::rectangle(dst, boundRect.tl(), boundRect.br(), cv::Scalar(255, 255, 255), 3, 8, 0);
        }
    }

    // ADDED: displaying input and results
    cv::imshow("input", src);
    cv::imshow("dst", dst);
    cv::imwrite("C:/StackOverflow/Output/rectangles.png", dst);
    cv::waitKey(0);

    return 0;
}
with your input image I do get this output:
which is probably not what you expected. See the Canny output image (it is always good to have a look at intermediate results for visual debugging!): there are just too many structures in the image and the contours will cover all of these, so some of them will be approximated to polygons with 4 to 6 elements.
Instead you'll have to become a bit smarter. You could try to extract straight lines with cv::HoughLinesP and connect those lines. Or you could try to segment the image first by finding white areas (if your rectangles are always white).
int main(int argc, char* argv[])
{
    cv::Mat src = cv::imread("C:/StackOverflow/Input/rectangles.png");
    cv::Mat dst = src.clone();
    cv::Mat gray;
    cv::cvtColor(src, gray, CV_BGR2GRAY);

    cv::Mat mask;
    // find "white" pixel
    cv::inRange(src, cv::Scalar(230, 230, 230), cv::Scalar(255, 255, 255), mask);
    cv::imshow("mask", mask);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    std::vector<cv::Point> approx;
    for (int i = 0; i < contours.size(); i++){
        cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
        if (approx.size() >= 4 && approx.size() <= 6)
        {
            cv::Rect boundRect = cv::boundingRect(cv::Mat(approx));
            cv::rectangle(dst, boundRect.tl(), boundRect.br(), cv::Scalar(255, 255, 255), 1, 8, 0);
        }
    }

    cv::imshow("input", src);
    cv::imshow("dst", dst);
    cv::imwrite("C:/StackOverflow/Output/rectangles2.png", dst);
    cv::waitKey(0);

    return 0;
}
gives this result:
As you can see, there are other bright regions near white, too. The polygon approximation does not help much either.
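If you want to try the cv::HoughLinesP route mentioned above instead, a minimal sketch (reusing bw and dst from the adjusted code; the numeric parameters are only rough guesses that need tuning) would be:

// extract straight line segments from the Canny edge image
std::vector<cv::Vec4i> lines;
cv::HoughLinesP(bw, lines, 1, CV_PI / 180, 50 /*votes*/, 30 /*min length*/, 10 /*max gap*/);
for (size_t i = 0; i < lines.size(); i++)
{
    cv::Vec4i l = lines[i];
    cv::line(dst, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(0, 0, 255), 2);
}
// grouping these segments into rectangles is the harder part and is left out here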
In general, it's easier to segment a color (even white) in HSV space. With appropriate thresholds:
inRange(hsv, Scalar(0, 0, 220), Scalar(180, 30, 255), mask);
where we don't care about the Hue, and keep only low Saturation and high Value, I get:
Then you can easily find connected components, and discard blobs smaller than a threshold th_blob_size. Resulting rectangles are (in green):
If needed, you can apply other filtering stages to account for more difficult situations, but for this image removing small blobs is enough. Please post other images if you need something more robust in general.
Code:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    Mat3b img = imread("path_to_image");

    int th_blob_size = 100;

    Mat3b hsv;
    cvtColor(img, hsv, COLOR_BGR2HSV);

    Mat1b mask;
    inRange(hsv, Scalar(0, 0, 220), Scalar(180, 30, 255), mask);

    vector<vector<Point>> contours;
    findContours(mask.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);

    Mat3b res = img.clone();
    for (int i = 0; i < contours.size(); ++i)
    {
        // Remove small blobs
        if (contours[i].size() < th_blob_size)
        {
            continue;
        }

        Rect box = boundingRect(contours[i]);
        rectangle(res, box, Scalar(0, 255, 0), 1);
    }

    imshow("Result", res);
    waitKey();

    return 0;
}
Are you sure you are only finding one contour or are you only drawing one contour? It doesn't look like you are looping in the drawing routine so you will only ever draw the first one that is found.
I have a blog, long since dead, that may provide you some good direction on this: http://workingwithcomputervision.blogspot.co.uk/2012/09/game-player-step-2-finding-game-board.html
Should the link die I believe this is the most relevant part of the article which relates to drawing contours:
// Draw contours
for (int i = 0; i < contours.size(); i++) {
    Scalar color = Scalar(0, 255, 0);
    drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
}
I notice you are using bounding rectangles for the drawing. Here is an alternative drawing routine, again from the above link, that does this:
Rect bounds;
Mat drawing = Mat::zeros(purpleOnly.size(), CV_8UC3);
int j = 0;
for (int i = 0; i < contours.size(); i++) {
    if (arcLength(contours[i], true) > 500){
        Rect temp = boundingRect(contours[i]);
        rectangle(drawing, temp, Scalar(255, 0, 0), 2, 8);
        if (j == 0) {
            bounds = temp;
        } else {
            bounds = bounds | temp;
        }
        j++;
    }
}
Note that I also do some checks on the size of the contour to filter out noise.
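A variation on that check (not from the blog post, just a sketch) is to filter on the enclosed area instead of the perimeter:

for (int i = 0; i < contours.size(); i++) {
    // keep only contours enclosing a reasonably large area (250.0 is an arbitrary example value)
    if (contourArea(contours[i]) > 250.0) {
        Rect temp = boundingRect(contours[i]);
        rectangle(drawing, temp, Scalar(255, 0, 0), 2, 8);
    }
}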
I am new to OpenCV, and I am using this code to bound the text areas in an image. After that I am filtering the contours and putting the bounding rectangles into a vector<Rect> in order to copy these areas to a new image.
Mat large = img1;
Mat rgb;
// upsample the image and use it for processing (pyrUp enlarges the image)
pyrUp(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);

// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(2, 2));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);

// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);

// connect horizontally oriented regions
Mat connected;
//morphKernel = getStructuringElement(MORPH_RECT, Size(7, 1));
//morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);

// find contours
connected = bw;
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
Mat mask2;
Mat mask3;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

/*drawContours(mask2, contours, -1, Scalar(255), CV_FILLED);
Mat Crop(img1.rows, img1.cols, CV_8UC3);
Crop.setTo(Scalar(0, 255, 0));
img1.copyTo(Crop, mask2);
normalize(mask2.clone(), mask2, 0.0, 255.0, CV_MINMAX, CV_8UC1);
*/

vector<Rect> rect1;
int i = 0;

// filter contours
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
    Rect rect = boundingRect(contours[idx]);
    Mat maskROI(mask, rect);
    maskROI = Scalar(0, 0, 0);

    // fill the contour
    drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);

    // ratio of non-zero pixels in the filled region
    double r = (double)countNonZero(maskROI) / (rect.width * rect.height);

    if (r > .45 /* assume at least 45% of the area is filled if it contains text */
        &&
        (rect.height > 10 && rect.width > 10 && rect.height < 150 && rect.width < 150) /* constraints on region size */
        /* these two conditions alone are not very robust. better to use something
           like the number of significant peaks in a horizontal projection as a third condition */
        )
    {
        // draw rectangles on the bounded areas
        rectangle(rgb, rect, Scalar(0, 255, 0), 2);
        // push bounding rectangles into the vector for the new mask
        rect1.push_back(rect);
    }
}
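As a side note, the comment in the code above suggests using the number of significant peaks in a horizontal projection as a third condition. A rough sketch of such a check for one candidate rect (using bw and rect as in the loop above; the 0.5 factor is an arbitrary assumption, not part of the original code) could be:

// sum the binarized pixels of each row inside the candidate region
Mat proj;
reduce(bw(rect), proj, 1 /*one value per row*/, CV_REDUCE_SUM, CV_32S);
double minSum, maxSum;
minMaxLoc(proj, &minSum, &maxSum);
// count rows whose sum exceeds half of the strongest row
int significantRows = countNonZero(proj > 0.5 * maxSum);
// `significantRows` could then be required to exceed some minimum as the extra condition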
Input, and the output I am getting after bounding the text areas:
After that, I am using this code to copy only the bounded areas to a new mask:
// copying the bounded rectangle areas from rgb to the new mask2
for (int i = 0; i < rect1.size(); i++){
    mask2 = rgb(rect1[i]);
}
but by using this I only get the last bounded text area:
How can I update the rows/cols of mask2 so that it contains all of the bounded text areas mapped from rgb?
That's because mask2 will be equal to the last rgb(rect1[i]) called.
You can easily solve this in two ways (using copyTo):
Create a mask (black initialized, same size as input image), where you draw (white) rectangles. Then you copy the original image to a black initialized image of the same size, using the obtained mask.
Copy each sub-image directly to a black initialized image.
Starting from this image, where the red rectangles will be your detected rectangles:
With the first approach you'll get a mask like:
and, for both approaches, the final result will be:
Code for first approach:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Your image
    Mat3b img = imread("path_to_image");

    // Your rectangles
    vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};

    // Mask for rectangles (black initialized)
    Mat1b mask(img.rows, img.cols, uchar(0));

    Mat3b dbgRects = img.clone();

    for (int i = 0; i < rects.size(); ++i)
    {
        // Draw white rectangles on mask
        rectangle(mask, rects[i], Scalar(255), CV_FILLED);

        // Show rectangles
        rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
    }

    // Black initialized result
    Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));
    img.copyTo(result, mask);

    imshow("Rectangles", dbgRects);
    imshow("Result", result);
    waitKey();

    return 0;
}
Code for second approach:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Your image
    Mat3b img = imread("path_to_image");

    // Your rectangles
    vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};

    // Black initialized result
    Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));

    Mat3b dbgRects = img.clone();

    for (int i = 0; i < rects.size(); ++i)
    {
        img(rects[i]).copyTo(result(rects[i]));

        // Show rectangles
        rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
    }

    imshow("Rectangles", dbgRects);
    imshow("Result", result);
    waitKey();

    return 0;
}
I'm trying to extract from an image only the contours which have a specific size.
I process it like this:
int offsetX;
int offsetY;

// here: read original image as 8UC3
cv::Mat original = cv::imread("0.png");
Mat imgx = original.clone();
cv::imshow("original", original);
cvtColor(imgx, imgx, CV_BGR2GRAY);

Mat thresh;
vector<Vec4i> hierarchy;
RNG rng(12345);
vector<vector<Point> > contours;

adaptiveThreshold(imgx, thresh, 255, 1, 1, 31, 2);
findContours(thresh, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

Mat drawing = Mat::zeros(thresh.size(), CV_8UC3);
cout << "drawing " << drawing.type() << endl;

cv::Mat image = cv::Mat(original.rows, original.cols, original.type());
image.setTo(cv::Scalar::all(255));

for (size_t i = 0; i < contours.size(); i++)
{
    vector<Point> cnt = contours[i];
    if (contourArea(cnt) > 0)
    {
        Rect rec = boundingRect(cnt);
        if ((rec.height > 20) && (3.5 * rec.height > rec.width) && (rec.width > 15) /*&& (rec.width<40)*/)
        {
            cout << rec.x << " " << rec.y << endl;
            offsetX = rec.x;
            offsetY = rec.y;

            Mat roi = original(rec);
            int width = roi.cols;
            int height = roi.rows;
            cout << "h= " << height << " w= " << width << endl;

            cv::Rect characterLocation;
            if (height > 35)
                characterLocation = cv::Rect(offsetX + 3, offsetY, width, height);
            else
                characterLocation = cv::Rect(offsetX, offsetY, width, height);

            original(characterLocation).copyTo(image(characterLocation));

            imshow("jihedddd", roi);
            imwrite("xxxxxx.png", roi);

            Mat stagedImage;
            Mat img;
            Scalar color = Scalar(255, 255, 255);
            drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
            imshow("Contours", drawing);
            waitKey();

            GaussianBlur(stagedImage, img, Size(5, 5), 2, 2);
            medianBlur(img, stagedImage, 3);

            Mat copy = original.clone();
            rectangle(copy, Point(rec.x, rec.y),
                      Point(rec.x + rec.width, rec.y + rec.height),
                      CV_RGB(0x00, 0x00, 0xff), 3);
            cv::imshow("char copied", image);
        }
    }
}

medianBlur(image, image, 3);
cv::imshow("char copied", image);
cv::imwrite("characterC_result.tiff ", image);
cv::waitKey();
But when I run this code I get an error:
OpenCV Error: Assertion failed
This is an example of an image which gives me the error.
You have to be careful with this:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height);
If width is the image width, you are out of bounds here. You either have to subtract x from the width, or crop the rect to the image borders:
// get the Rect for the original image:
cv::Rect borders(Point(0,0), image.size());
// crop to the legal size:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height) & borders;
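The first option (shrinking the width/height instead of intersecting) would look roughly like this (a sketch using the variables from the question; std::min needs <algorithm>):

// clamp width and height so the shifted ROI cannot leave the original image
int w = std::min(width,  original.cols - (offsetX + 3));
int h = std::min(height, original.rows - offsetY);
characterLocation = cv::Rect(offsetX + 3, offsetY, w, h);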