ROI detection using OpenCV - C++

I'm working on a project where I have to find the region of interest (ROI) and then apply a threshold to the image.
As I am not from a computing background, I'm having some difficulties.
I started trying to find the ROI with the following code:
string filename = "2011-06-11-09%3A12%3A15.387932.bmp";
Mat img = imread(filename);
if (!img.data)
{
    std::cout << "!!! imread could not open image: " << filename << std::endl;
    return -1;
}

cv::Rect roi;
roi.x = 0;
roi.y = 90;
roi.width = 400;
roi.height = 90;

cv::Mat crop = img(roi);
cv::imwrite("crop.bmp", crop); // write to a new file so the input is not overwritten
Thank you very much.

I assume that you are interested in isolating the digits of the image and that you want to specify the ROI manually (given what you wrote).
You can use better coordinates for the ROI and crop the image into a new cv::Mat to get something like the output below:
Executing a threshold on this image only makes sense if you want to isolate the digits to do some recognition later. A good technique for doing that is offered by cv::inRange(), which performs a threshold operation on all the channels (an RGB image has 3 channels).
Note: cv::Mat stores pixels in BGR order; this is important to remember when you specify the threshold values.
As a simple test, you can perform a threshold from B:70 G:90 R:100 to B:140 G:140 R:140 to get the following output:
Not bad! I changed your code a little to get these results:
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat image = cv::imread("input.jpg");
    if (!image.data)
    {
        std::cout << "!!! imread failed to load image" << std::endl;
        return -1;
    }

    cv::Rect roi;
    roi.x = 165;
    roi.y = 50;
    roi.width = 440;
    roi.height = 80;

    /* Crop the original image to the defined ROI */
    cv::Mat crop = image(roi);
    cv::imwrite("colors_roi.png", crop);

    /* Threshold the ROI based on a BGR color range to isolate yellow-ish colors */
    cv::Mat dest;
    cv::inRange(crop, cv::Scalar(70, 90, 100), cv::Scalar(140, 140, 140), dest);
    cv::imwrite("colors_threshold.png", dest);

    cv::imshow("Example", dest);
    cv::waitKey();

    return 0;
}

Related

detect boxes/tables and remove them

How can I remove all the vertical and horizontal lines that form boxes/tables?
I have searched and tried, but can't make it work.
I've been searching for the last couple of days and have found a few examples that don't work. I've tried to put the pieces together:
cv::Mat img = cv::imread(input, CV_LOAD_IMAGE_GRAYSCALE);

cv::Mat grad;
cv::Mat morphKernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3));
cv::morphologyEx(img, grad, cv::MORPH_GRADIENT, morphKernel);

cv::Mat res;
cv::threshold(grad, res, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

// find contours
cv::Mat mask = cv::Mat::zeros(res.size(), CV_8UC1);
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(res, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

for (size_t i = 0; i < contours.size(); i++) {
    cv::Mat approx;
    double peri = cv::arcLength(contours[i], true);
    cv::approxPolyDP(contours[i], approx, 0.04 * peri, true);
    int num_vertices = approx.rows;

    if (num_vertices == 4) {
        cv::Rect rect = cv::boundingRect(contours[i]);
        // this is a rectangle
    }
}
You could try something like this:
- threshold your image
- compute connected components
- remove the components whose pixels touch at least 3 of the 4 sides of their bounding box
This should give you something like this:
Here is the associated source code:
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <limits>

using namespace cv;

struct BBox {
    BBox() :
        _xMin(std::numeric_limits<int>::max()),
        _xMax(std::numeric_limits<int>::min()),
        _yMin(std::numeric_limits<int>::max()),
        _yMax(std::numeric_limits<int>::min())
    {}

    int _xMin;
    int _xMax;
    int _yMin;
    int _yMax;
};

int main()
{
    // read input image
    Mat inputImg = imread("test3_1.tif", IMREAD_GRAYSCALE);

    // create binary image
    Mat binImg;
    threshold(inputImg, binImg, 254, 1, THRESH_BINARY_INV);

    // compute connected components
    Mat labelImg;
    const int nbComponents = connectedComponents(binImg, labelImg, 8, CV_32S);

    // compute the bounding box of each component
    std::vector<BBox> bboxColl(nbComponents);
    for (int y = 0; y < labelImg.rows; ++y) {
        for (int x = 0; x < labelImg.cols; ++x) {
            const int curLabel = labelImg.at<int>(y, x);
            BBox& curBBox = bboxColl[curLabel];
            if (curBBox._xMin > x)
                curBBox._xMin = x;
            if (curBBox._xMax < x)
                curBBox._xMax = x;
            if (curBBox._yMin > y)
                curBBox._yMin = y;
            if (curBBox._yMax < y)
                curBBox._yMax = y;
        }
    }

    // parse all labels: keep a component only if it is narrower than 30% of the image width
    std::vector<bool> lutTable(nbComponents);
    for (int i = 0; i < nbComponents; ++i) {
        const BBox& curBBox = bboxColl[i];
        lutTable[i] = (curBBox._xMax - curBBox._xMin <= labelImg.cols * 0.3);
    }

    // create output image (clone so the original binary image is left untouched)
    Mat resImg = binImg.clone();
    MatConstIterator_<int> iterLab = labelImg.begin<int>();
    MatIterator_<unsigned char> iterRes = resImg.begin<unsigned char>();
    while (iterLab != labelImg.end<int>()) {
        *iterRes = lutTable[*iterLab] ? 1 : 0;
        ++iterLab;
        ++iterRes;
    }

    // write result
    imwrite("resImg3_1.tif", resImg);
}
I simply remove all labels whose width is greater than 30% of the total image width. Your image is quite noisy, so I can't use the bounding-box-touch criterion mentioned above, sorry...
I don't know if this will work with all your images, but you could add some geometrical filters to improve this first version, for example as sketched below.
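For example, a minimal sketch of one such filter (the 30% height threshold is an assumption to tune, mirroring the width test above): drop components that are too tall as well as too wide, so that vertical box edges are removed too.
// Hypothetical extra geometrical filter: reject components that are too
// wide OR too tall (both thresholds are assumptions to tune).
for (int i = 0; i < nbComponents; ++i) {
    const BBox& curBBox = bboxColl[i];
    const bool tooWide = curBBox._xMax - curBBox._xMin > labelImg.cols * 0.3;
    const bool tooTall = curBBox._yMax - curBBox._yMin > labelImg.rows * 0.3;
    lutTable[i] = !(tooWide || tooTall);
}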
Regards,
You can use LineSegmentDetector for this purpose:
import numpy as np
import cv2

image = cv2.imread("image.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# This is the detector, you might have to play with the parameters
lsd = cv2.createLineSegmentDetector(0, _scale=0.6)
lines, widths, _, _ = lsd.detect(gray)

if lines is not None:
    for i in range(0, len(lines)):
        l = lines[i][0]
        # Much slower version of Euclidean distance
        if np.sqrt((l[0]-l[2])**2 + (l[1]-l[3])**2) > 50:
            # You might have to tweak the threshold as well for other images
            cv2.line(image, (l[0], l[1]), (l[2], l[3]), (255, 255, 255), 3,
                     cv2.LINE_AA)

cv2.imwrite("result.png", image)
Output:
As you can see, the lines aren't completely removed in the top image, so I am leaving the tweaking part to you. Hope it helps!
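For C++, a rough equivalent of the snippet above might look like the sketch below. Hedged: cv::createLineSegmentDetector is a real API, but the LSD implementation was removed in some OpenCV 4.x releases for license reasons and only restored later, so check your version; the parameters simply mirror the Python code.
#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>

int main()
{
    cv::Mat image = cv::imread("image.png");
    cv::Mat gray;
    cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);

    // Same refine mode (none) and scale as the Python version; tune as needed.
    cv::Ptr<cv::LineSegmentDetector> lsd = cv::createLineSegmentDetector(cv::LSD_REFINE_NONE, 0.6);
    std::vector<cv::Vec4f> lines;
    lsd->detect(gray, lines);

    for (const cv::Vec4f& l : lines)
    {
        // Keep only segments longer than 50 pixels, as in the Python code
        if (std::hypot(l[0] - l[2], l[1] - l[3]) > 50)
        {
            cv::line(image,
                     cv::Point(cvRound(l[0]), cvRound(l[1])),
                     cv::Point(cvRound(l[2]), cvRound(l[3])),
                     cv::Scalar(255, 255, 255), 3, cv::LINE_AA);
        }
    }
    cv::imwrite("result.png", image);
    return 0;
}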
I'd like to use this answer box to make a few comments.
First off, it's much easier to see progress if you can easily view what the output looks like. With that in mind, here is an update to your code with an emphasis on viewing interim results. I'm using Visual Studio Community 2017 and OpenCV 4.0.1 (64-bit) on Windows 10, for anyone who wants to repeat this exercise. A few of the routines used required updates for OpenCV 4...
#include "pch.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
int main()
{
cv::Mat img = cv::imread("0zx9Q.png", cv::IMREAD_GRAYSCALE ); // --> Contour size = 0x000000e7 hex (231 each)
// cv::Mat img = cv::imread("0zx9Q.png", cv::IMREAD_REDUCED_GRAYSCALE_2); // --> Contour size = 0x00000068 hex (104 each)
// cv::Mat img = cv::imread("0zx9Q.png", cv::IMREAD_REDUCED_GRAYSCALE_4); // --> Contour size = 0x0000001f hex (31 each)
// cv::Mat img = cv::imread("0zx9Q.png", cv::IMREAD_REDUCED_GRAYSCALE_8); // --> Contour size = 0x00000034 hex (52 each)
if (!img.data) // Check for invalid input
{
std::cout << "Could not open or find the image" << std::endl;
return -1;
}
// cv::namedWindow("Display Window - GrayScale Image", cv::WINDOW_NORMAL); // Create a window for display.
// cv::imshow("Display Window - GrayScale Image", img); // Show our image inside it.
// cv::waitKey(0); // Wait for a keystroke in the window
cv::Mat imgOriginal = cv::imread("0zx9Q.png", cv::IMREAD_UNCHANGED);
cv::namedWindow("Display Window of Original Document", cv::WINDOW_NORMAL); // Create a window for display.
cv::Mat grad;
cv::Mat morphKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(25, 25));
// MORPH_ELLIPSE, contourSize: 0x00000005 when 60,60... but way slow...
// MORPH_ELLIPSE, contourSize: 0x00000007 when 30,30...
// MORPH_ELLIPSE, contourSize: 0x00000007 when 20,20...
// MORPH_ELLIPSE, contourSize: 0x0000000a when 15,15...
// MORPH_ELLIPSE, contourSize: 0x0000007a when 5,5...
// MORPH_ELLIPSE, contourSize: 0x000000e7 when 3,3 and IMREAD_GRAYSCALE
// MORPH_CROSS, contourSize: 0x0000008e when 5,5
// MORPH_CROSS, contourSize: 0x00000008 when 25,25
// MORPH_RECT, contourSize: 0x00000007 when 25,25
cv::morphologyEx(img, grad, cv::MORPH_GRADIENT, morphKernel);
cv::Mat res;
cv::threshold(grad, res, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
// find contours
cv::Mat mask = cv::Mat::zeros(res.size(), CV_8UC1);
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(res, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
int contourSize = contours.size();
std::cout << " There are a total of " << contourSize << " contours. \n";
for (int i = 0; i < contourSize; i++) {
cv::Mat approx;
double peri = cv::arcLength(contours[i], true);
cv::approxPolyDP(contours[i], approx, 0.04 * peri, true);
int num_vertices = approx.rows;
std::cout << " Contour # " << i << " has " << num_vertices << " vertices.\n";
if (num_vertices == 4) {
cv::Rect rect = cv::boundingRect(contours[i]);
cv::rectangle(imgOriginal, rect, cv::Scalar(255, 0, 0), 4);
}
}
cv::imshow("Display Window of Original Document", imgOriginal); // Show our image inside it.
cv::waitKey(0); // Wait for a keystroke in the window
}
With that said, the parameters for getStructuringElement() matter hugely. I spent a lot of time trying different choices, with very mixed results. It turns out there are a whole lot of findContours() responses that don't have four vertices, and I suspect the whole findContours() approach is probably flawed: I would often get false rectangles identified around text characters in words and phrases, while the lighter lines surrounding some boxed areas would be ignored.
Instead, I think I'd be looking hard at straight line detection, via the techniques discussed here, if such an answer exists for C++ rather than Python. Perhaps here, or here? I'm hoping line detection techniques would ultimately get better results. And hey, if the selected documents / images always include a white background, it would be pretty easy to paint solid rectangles over them, via cv::FILLED. A rough sketch of that line-detection direction follows.
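As a hedged sketch of that direction (not from the original answer; the Canny/Hough parameters are assumptions that would need tuning per document, and img/imgOriginal are the Mats loaded in the code above):
// Detect long straight segments with probabilistic Hough, then paint them
// out with white; on a white page this effectively erases box borders.
cv::Mat edges;
cv::Canny(img, edges, 50, 150); // assumed thresholds

std::vector<cv::Vec4i> lineSegs;
cv::HoughLinesP(edges, lineSegs, 1, CV_PI / 180, 80, 100 /*minLineLength*/, 10 /*maxLineGap*/);

for (const cv::Vec4i& l : lineSegs) {
    cv::line(imgOriginal, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]),
             cv::Scalar(255, 255, 255), 5, cv::LINE_AA);
}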
The info here is provided not as an answer to the posted question, but as a methodology for visually determining success in the future.

Scaling an OpenCV matrix of type CV_8UC1 gives zeroes and ones

I am attempting to scale a grayscale image of type CV_8UC1 by 1.0f/255, using the following operation:
image.convertTo(image,CV_32F,1.0f/255,0); //convert and scale
After inspecting the output of the above, I find that all values are too close to zero. For instance, at a point where the value should be 0.2784, I'm getting 1.23417e-06.
So I tried to see if I could undo the scaling and get the input back, i.e. multiply the result from above by 255, using
cv::imwrite("undo_scaling.jpg", image*255); //rescale and write to disk
Strangely, the input image can be reconstructed.
Where am I going wrong with the scaling operation?
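For reference, here is a minimal standalone check, using my own test value rather than anything from the image, of what convertTo with alpha = 1.0/255 should produce on an 8-bit input:
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // An 8-bit pixel of 71 should map to 71/255 ≈ 0.2784 after scaling.
    cv::Mat m8(1, 1, CV_8UC1, cv::Scalar(71));
    cv::Mat m32;
    m8.convertTo(m32, CV_32F, 1.0 / 255, 0);
    std::cout << m32.at<float>(0, 0) << std::endl; // prints 0.278431
    return 0;
}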
EDIT
The following is my attempt to preprocess an image. It involves the following steps:
- Apply a mask
- Crop the result
- Finally, scale the pixel values by 1/255
I use the following code:
cv::Mat maskCrop(std::string imageName, std::string maskName)
{
    cv::Mat image, mask, final_image;
    image = cv::imread(imageName, CV_LOAD_IMAGE_GRAYSCALE);
    mask = cv::imread(maskName, CV_LOAD_IMAGE_GRAYSCALE);

    cv::resize(image, image, mask.size()); // make the mask and the image the same size
    cv::bitwise_and(image, mask, final_image); // apply the mask

    // define a rectangular window for cropping
    int offset_x = 1250; // top-left corner of the crop
    int offset_y = 1550;

    cv::Rect roi;
    roi.x = offset_x;
    roi.y = offset_y;
    roi.width = 550;
    roi.height = 650;

    // Crop the masked image to the defined ROI
    cv::Mat crop = final_image(roi);
    crop.convertTo(crop, CV_32F, 1.0f / 255, 0);
    return crop;
}
Below is the input image:
The following is the mask to be applied on it:

OpenCV ROI on a real-time camera

I am trying to set an ROI on a real-time camera feed and copy a picture into the ROI.
However, I have tried many methods from the Internet and it is still unsuccessful.
Part of my code is shown below:
while (!protonect_shutdown)
{
    listener.waitForNewFrame(frames);
    libfreenect2::Frame *ir = frames[libfreenect2::Frame::Ir];
    //! [loop start]
    cv::Mat(ir->height, ir->width, CV_32FC1, ir->data).copyTo(irmat);

    Mat img = imread("button.png");
    cv::Rect r(1, 1, 100, 200);
    cv::Mat dstroi = img(Rect(0, 0, r.width, r.height));
    irmat(r).convertTo(dstroi, dstroi.type(), 1, 0);

    cv::imshow("ir", irmat / 4500.0f);
    int key = cv::waitKey(1);

    protonect_shutdown = protonect_shutdown || (key > 0 && ((key & 0xFF) == 27));
    listener.release(frames);
}
My real-time camera shows the video normally, and the program runs without errors, but the picture is not shown in the ROI.
Does anyone have any ideas?
Any help is appreciated.
I hope I understood your question right and that you want an output something like this:
I have created a rectangle of size 100x200 on the video feed and I am displaying an image in that rectangle.
Here is the code:
int main()
{
    Mat frame, overlayFrame;
    VideoCapture cap("video.avi"); // use 0 for webcam
    overlayFrame = imread("picture.jpg");

    if (!cap.isOpened())
    {
        cout << "Could not capture video";
        return -1;
    }

    Rect roi(1, 1, 100, 200); // create a rectangle of size 100x200 at point (1,1) on the video feed
    namedWindow("CameraFeed");

    while ((cap.get(CV_CAP_PROP_POS_FRAMES) + 1) < cap.get(CV_CAP_PROP_FRAME_COUNT))
    {
        cap.read(frame);
        resize(overlayFrame, overlayFrame, Size(roi.width, roi.height)); // resize the image to fit in the roi
        overlayFrame.copyTo(frame(roi)); // copy the picture into the roi
        imshow("CameraFeed", frame);
        if (waitKey(27) >= 0)
            break;
    }

    destroyAllWindows();
    return 0;
}
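For the asker's libfreenect2 loop, the same resize-and-copyTo idea might look like the hedged sketch below. The IR frame is CV_32FC1 and was displayed as irmat / 4500.0f, so it is converted to a displayable 8-bit BGR image first; the variable names (irmat, img, r) follow the question.
// Sketch only: overlay button.png onto the IR frame before displaying it.
cv::Mat display;
irmat.convertTo(display, CV_8UC1, 255.0 / 4500.0); // map the float IR range to 0..255
cv::cvtColor(display, display, cv::COLOR_GRAY2BGR); // 3 channels so a color image can be copied in

cv::Mat overlay;
cv::resize(img, overlay, cv::Size(r.width, r.height)); // fit the picture to the ROI
overlay.copyTo(display(r));                            // copy it into the ROI

cv::imshow("ir", display);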

How to count white objects in a binary image?

I'm trying to count objects in an image. I use a photo of logs, and I apply several steps to get a binary image.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/features2d.hpp>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    // load image
    Mat img = imread("kayu.jpg", CV_LOAD_IMAGE_COLOR);
    if (img.empty())
        return -1;
    //namedWindow("kayu", CV_WINDOW_AUTOSIZE);
    imshow("kayu", img);

    // convert to b/w
    Mat bw;
    cvtColor(img, bw, CV_BGR2GRAY);
    imshow("bw1", bw);
    threshold(bw, bw, 40, 255, CV_THRESH_BINARY);
    imshow("bw", bw);

    // distance transform & normalization
    Mat dist;
    distanceTransform(bw, dist, CV_DIST_L2, 3);
    normalize(dist, dist, 0, 2., NORM_MINMAX);
    imshow("dist", dist);

    // threshold to draw lines
    threshold(dist, dist, .5, 1., CV_THRESH_BINARY);
    imshow("dist2", dist);
    //dist = bw;

    // dilation
    Mat dilation, erotion, element;
    int dilation_type = MORPH_ELLIPSE;
    int dilation_size = 17;
    element = getStructuringElement(dilation_type, Size(2*dilation_size + 1, 2*dilation_size + 1), Point(dilation_size, dilation_size));
    erode(dist, erotion, element);
    int erotionCount = 0;
    for (int i = 0; i < erotionCount; i++) {
        erode(erotion, erotion, element);
    }
    imshow("erotion", erotion);
    dilate(erotion, dilation, element);
    imshow("dilation", dilation);

    waitKey(0);
    return 0;
}
As you can see, I use erosion and dilation to get better circular log objects. My problem is that I'm stuck at counting the objects. I tried SimpleBlobDetector but got nothing, because when I try to convert the result of the "dilation" step to CV_8U, the white objects disappear. I also got an error when I used findContours(); it said something about the image channels. I can't show the error here, because there were too many steps and I already deleted them from my code.
By the way, at the end I get a 1-channel image.
Can I just use it for counting, or do I have to convert it first, and what is the best method to do that?
Two simple steps:
Find contours for the binarized image.
Get the count of the contours.
Code:
int count_trees(const cv::Mat& bin_image)
{
    cv::Mat img;
    if (bin_image.channels() > 1) {
        cv::cvtColor(bin_image, img, cv::COLOR_BGR2GRAY);
    }
    else {
        img = bin_image.clone();
    }
    if (img.type() != CV_8UC1) {
        img *= 255.f; // This could be stupid, but I do not have an environment to try it
        img.convertTo(img, CV_8UC1);
    }

    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    return contours.size();
}
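A hypothetical call site, assuming the binarized result from the question was saved to a file (the name dilation.png is made up):
// Hypothetical usage: load the binarized result and print the count.
cv::Mat bin = cv::imread("dilation.png", cv::IMREAD_GRAYSCALE);
std::cout << "Number of logs: " << count_trees(bin) << std::endl;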
I have the same problem, here's an idea I'm about to implement.
1) Represent your image as an array of integers; 0 = black, 1 = white.
2) Set N = 2.
3) Scan your image pixel by pixel. Whenever you find a white pixel, start a flood-fill algorithm at the pixel just found and paint the region with the value of N++.
4) Repeat step 3 until you reach the last pixel. (N - 2) is the number of regions found.
This method depends on the shape of the objects; mine are more chaotic than yours (wish me luck..). I'll make use of a recursive flood-fill recipe found somewhere (maybe Rosetta Code).
This solution also makes it easy to compute the size of each region.
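A minimal sketch of that idea, assuming the image is already a 0/1 integer array; an explicit stack is used instead of recursion so large regions don't overflow the call stack:
#include <vector>
#include <stack>
#include <utility>

// Count white regions (0 = black, 1 = white) by flood-filling each
// unvisited white pixel with a fresh label N >= 2. Returns N - 2.
int countRegions(std::vector<std::vector<int>>& img)
{
    const int rows = (int)img.size();
    const int cols = rows ? (int)img[0].size() : 0;
    int N = 2; // 0 and 1 are reserved for black and white

    for (int y = 0; y < rows; ++y) {
        for (int x = 0; x < cols; ++x) {
            if (img[y][x] != 1)
                continue;
            // flood-fill the region starting at (y, x)
            std::stack<std::pair<int, int>> todo;
            todo.push(std::make_pair(y, x));
            while (!todo.empty()) {
                std::pair<int, int> p = todo.top();
                todo.pop();
                const int cy = p.first, cx = p.second;
                if (cy < 0 || cy >= rows || cx < 0 || cx >= cols || img[cy][cx] != 1)
                    continue;
                img[cy][cx] = N; // paint the region
                todo.push(std::make_pair(cy + 1, cx));
                todo.push(std::make_pair(cy - 1, cx));
                todo.push(std::make_pair(cy, cx + 1));
                todo.push(std::make_pair(cy, cx - 1));
            }
            ++N;
        }
    }
    return N - 2;
}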
Try applying this to the image you deleted:
// count
int count = 0;
for (int i = 0; i < contours.size(); i = hierarchy[i][0]) // iterate over each top-level contour
{
    Rect r = boundingRect(contours[i]);
    if (hierarchy[i][2] < 0) { // the contour has no children
        rectangle(canny_output, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(20, 50, 255), 3, 8, 0);
        count++;
    }
}
cout << "Number of contours = " << count << endl;
imshow("src", src);
imshow("contour", dst);
waitKey(0);

How to determine a region of interest and then crop an image using OpenCV

I asked a similar question here but that is focused more on tesseract.
I have a sample image as below. I would like to make the white square my Region of Interest and then crop out that part (square) and create a new image with it. I will be working with different images so the square won't always be at the same location in all images. So I will need to somehow detect the edges of the square.
What are some pre-processing methods I can perform to achieve the result?
Using your test image I was able to remove all the noise with a simple erosion operation.
After that, a simple iteration over the Mat to find the corner pixels is trivial, and I talked about that in this answer. For testing purposes we can draw green lines between those points to display the area we are interested in on the original image:
At the end, I set the ROI in the original image and crop out that part.
The final result is displayed in the image below:
I wrote a sample code that performs this task using the C++ interface of OpenCV. I'm confident in your skills to translate this code to Python. If you can't do it, forget the code and stick with the roadmap I shared in this answer.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    std::cout << "Original image size: " << img.size() << std::endl;

    // Convert RGB Mat to GRAY
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    std::cout << "Gray image size: " << gray.size() << std::endl;

    // Erode image to remove unwanted noise
    int erosion_size = 5;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS,
                          cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1),
                          cv::Point(erosion_size, erosion_size));
    cv::erode(gray, gray, element);

    // Scan the image searching for points and store them in a vector
    std::vector<cv::Point> points;
    cv::Mat_<uchar>::iterator it = gray.begin<uchar>();
    cv::Mat_<uchar>::iterator end = gray.end<uchar>();
    for (; it != end; it++)
    {
        if (*it)
            points.push_back(it.pos());
    }

    // From the points, figure out the size of the ROI
    int left, right, top, bottom;
    for (size_t i = 0; i < points.size(); i++)
    {
        if (i == 0) // initialize corner values
        {
            left = right = points[i].x;
            top = bottom = points[i].y;
        }

        if (points[i].x < left)
            left = points[i].x;
        if (points[i].x > right)
            right = points[i].x;
        if (points[i].y < top)
            top = points[i].y;
        if (points[i].y > bottom)
            bottom = points[i].y;
    }

    std::vector<cv::Point> box_points;
    box_points.push_back(cv::Point(left, top));
    box_points.push_back(cv::Point(left, bottom));
    box_points.push_back(cv::Point(right, bottom));
    box_points.push_back(cv::Point(right, top));

    // Compute minimal bounding box for the ROI
    // Note: for some unknown reason, width/height of the box are switched.
    cv::RotatedRect box = cv::minAreaRect(cv::Mat(box_points));
    std::cout << "box w:" << box.size.width << " h:" << box.size.height << std::endl;

    // Draw bounding box in the original image (debugging purposes)
    //cv::Point2f vertices[4];
    //box.points(vertices);
    //for (int i = 0; i < 4; ++i)
    //{
    //    cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, cv::LINE_AA);
    //}
    //cv::imshow("Original", img);
    //cv::waitKey(0);

    // Set the ROI to the area defined by the box
    // Note: because the width/height of the box are switched,
    // they were switched manually in the code below:
    cv::Rect roi;
    roi.x = box.center.x - (box.size.height / 2);
    roi.y = box.center.y - (box.size.width / 2);
    roi.width = box.size.height;
    roi.height = box.size.width;
    std::cout << "roi # " << roi.x << "," << roi.y << " " << roi.width << "x" << roi.height << std::endl;

    // Crop the original image to the defined ROI
    cv::Mat crop = img(roi);

    // Display cropped ROI
    cv::imshow("Cropped ROI", crop);
    cv::waitKey(0);

    return 0;
}
Seeing that the text is the only large blob, and everything else is barely larger than a pixel, a simple morphological opening should suffice.
You can do this in OpenCV or with ImageMagick.
Afterwards the white rectangle should be the only thing left in the image. You can find it with OpenCV's findContours, with the CvBlobs library for OpenCV, or with ImageMagick's -crop function.
Here is your image with 2 steps of erosion followed by 2 steps of dilation applied:
You can simply plug this image into the OpenCV findContours function, as in the Squares tutorial example, to get the position.
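A minimal OpenCV sketch of that opening, assuming binary already holds your thresholded image (the kernel size is a guess to tune):
// Morphological opening: 2 erosions then 2 dilations with a small kernel.
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
cv::Mat opened;
cv::erode(binary, opened, kernel, cv::Point(-1, -1), 2);
cv::dilate(opened, opened, kernel, cv::Point(-1, -1), 2);
// Equivalently, in one call:
// cv::morphologyEx(binary, opened, cv::MORPH_OPEN, kernel, cv::Point(-1, -1), 2);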
Input:
# Objective:
# 1) compress large images to less than 1000x1000
# 2) identify regions of interest
# 3) save ROIs in top-to-bottom order
import cv2
import os

def get_contour_precedence(contour, cols):
    tolerance_factor = 10
    origin = cv2.boundingRect(contour)
    return ((origin[1] // tolerance_factor) * tolerance_factor) * cols + origin[0]

# Load image, grayscale, Gaussian blur, adaptive threshold
image = cv2.imread('./images/sample_0.jpg')

# Compress the image if it is larger than 1000x1000
height, width, color = image.shape  # unpack the (height, width, channels) tuple returned by image.shape
while width > 1000:
    height = height / 2
    width = width / 2
    print(int(height), int(width))
height = int(height)
width = int(width)
image = cv2.resize(image, (width, height))

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9, 9), 0)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 30)

# Dilate to combine adjacent text contours
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
ret, thresh3 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
dilate = cv2.dilate(thresh, kernel, iterations=4)

# Find contours, highlight text areas, and extract ROIs
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#cnts = cv2.findContours(thresh3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Order contours top to bottom
cnts.sort(key=lambda x: get_contour_precedence(x, image.shape[1]))

# Delete previous ROI images in the roi folder to avoid mixing in stale results
dir = './roi/'
for f in os.listdir(dir):
    os.remove(os.path.join(dir, f))

ROI_number = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area > 10000:
        x, y, w, h = cv2.boundingRect(c)
        #cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 3)
        cv2.rectangle(image, (x, y), (x + w, y + h), (100, 100, 100), 1)
        # use the code below to write the ROI when results are good
        ROI = image[y:y+h, x:x+w]
        cv2.imwrite('roi/ROI_{}.jpg'.format(ROI_number), ROI)
        ROI_number += 1

cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('image', image)
cv2.waitKey()
ROI detection:
Output: