How can I stop my video from lagging? At the start of the program the video plays without delay, but once I add GaussianBlur, the morphology operations, and SimpleBlobDetector, the video gets delayed. Please, can someone help me?
Thanks in advance
#include <opencv2/opencv.hpp>
#include <iostream>
#include <tchar.h>
using namespace cv;
using namespace std;
int _tmain(int argc, _TCHAR* argv[]){
VideoCapture cap(0);
cap.open("file.mp4"); //read the video
if (!cap.isOpened())
{
cout << "Cannot open the video cam" << endl;
return -1;
}
double Width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double Height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
cvNamedWindow("MyVideo", CV_WINDOW_AUTOSIZE);
while (1)
{
Mat frame;
bool bSuccess = cap.read(frame);
SimpleBlobDetector::Params params;
// Change thresholds
params.minThreshold = 0;
params.maxThreshold = 255;
params.filterByColor = true;
params.blobColor = 255;
// Filter by Area.
params.filterByArea = true;
params.minArea = 5 ;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.1;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.87;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.01;
//crop the image to the region of interest
Mat blur, crop;
GaussianBlur(frame, blur, Size(15, 15), 0); //blur the image
Point corners[1][4];
corners[0][0] = Point(550, 30); //top left
corners[0][1] = Point(250, 700); //bottom left
corners[0][2] = Point(1100, 700); //bottom right
corners[0][3] = Point(600, 30); //top right
const Point* corner_list[1] = { corners[0] };
int num_points = 4;
int num_polygons = 1;
int line_type = 8;
Mat mask(720, 1280, CV_8UC3, cv::Scalar(0, 0, 0));
fillPoly(mask, corner_list, &num_points, num_polygons, cv::Scalar(255, 255, 255), line_type);
bitwise_and(blur, mask, crop); //keep only the masked region
Mat gray, changeToBlack;
cvtColor(crop, gray, COLOR_BGR2GRAY); //convert the cropped image to grayscale
inRange(gray, Scalar(0), Scalar(0), changeToBlack);
Mat black_image(gray.size(), CV_8U, Scalar(255));
black_image.copyTo(gray, changeToBlack);
Mat thres, tes;
threshold(gray, tes, 51, 255, THRESH_BINARY_INV); //threshold the image; 51 is the threshold value
Mat erosi, dilasi, open, close, tophat;
Mat kernel = Mat(3, 3, CV_8UC1, Scalar(1));
morphologyEx(tes, erosi, MORPH_ERODE, kernel, Point(-1, -1), 3);
morphologyEx(erosi, dilasi, MORPH_DILATE, kernel, Point(-1, -1), 20);
vector<KeyPoint> keypoints;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
detector->detect(dilasi, keypoints);
Mat im_with_keypoints;
drawKeypoints(dilasi, keypoints, im_with_keypoints, Scalar(205, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("gray", frame);
imshow("MyVideo", im_with_keypoints);
if (waitKey(30) == 27)
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
Oh! All of the setup and memory allocation happens inside the loop. Move it out of the loop and reuse the buffers. Try this:
int _tmain(int argc, _TCHAR* argv[]){
VideoCapture cap(0);
cap.open("file.mp4"); //read the video
if (!cap.isOpened())
{
cout << "Cannot open the video cam" << endl;
return -1;
}
double Width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double Height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
cvNamedWindow("MyVideo", CV_WINDOW_AUTOSIZE);
Mat frame;
Mat blur, crop;
Mat mask(720, 1280, CV_8UC3, cv::Scalar(0, 0, 0));
Mat gray, changeToBlack;
Mat black_image; // allocated after the first cvtColor below, once gray's size is known
Mat thres, tes;
Mat erosi, dilasi, open, close, tophat;
Mat kernel = Mat(3, 3, CV_8UC1, Scalar(1));
Mat im_with_keypoints;
SimpleBlobDetector::Params params;
// Change thresholds
params.minThreshold = 0;
params.maxThreshold = 255;
params.filterByColor = true;
params.blobColor = 255;
// Filter by Area.
params.filterByArea = true;
params.minArea = 5 ;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.1;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.87;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.01;
vector<KeyPoint> keypoints;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
while (cap.read(frame))
{
//crop the image to the region of interest
GaussianBlur(frame, blur, Size(15, 15), 0); //blur the image
Point corners[1][4];
corners[0][0] = Point(550, 30); //top left
corners[0][1] = Point(250, 700); //bottom left
corners[0][2] = Point(1100, 700); //bottom right
corners[0][3] = Point(600, 30); //top right
const Point* corner_list[1] = { corners[0] };
int num_points = 4;
int num_polygons = 1;
int line_type = 8;
fillPoly(mask, corner_list, &num_points, num_polygons, cv::Scalar(255, 255, 255), line_type);
bitwise_and(blur, mask, crop); //keep only the masked region
cvtColor(crop, gray, COLOR_BGR2GRAY); //convert the cropped image to grayscale
inRange(gray, Scalar(0), Scalar(0), changeToBlack);
if (black_image.empty())
black_image = Mat(gray.size(), CV_8U, Scalar(255));
black_image.copyTo(gray, changeToBlack);
threshold(gray, tes, 51, 255, THRESH_BINARY_INV); //threshold the image; 51 is the threshold value
morphologyEx(tes, erosi, MORPH_ERODE, kernel, Point(-1, -1), 3);
morphologyEx(erosi, dilasi, MORPH_DILATE, kernel, Point(-1, -1), 20);
detector->detect(dilasi, keypoints);
drawKeypoints(dilasi, keypoints, im_with_keypoints, Scalar(205, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("gray", frame);
imshow("MyVideo", im_with_keypoints);
if (waitKey(1) == 27)
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
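As a further refinement (my own suggestion, not part of the answer above): the polygon never changes either, so the mask can be filled once before the loop instead of on every frame. A minimal sketch, reusing the variable names from the answer's code and assuming an OpenCV version that has the vector-based fillPoly overload:
// Build the region-of-interest mask once; it is identical for every frame.
Mat mask(720, 1280, CV_8UC3, Scalar(0, 0, 0));
vector<vector<Point>> roi_corners = { { Point(550, 30), Point(250, 700), Point(1100, 700), Point(600, 30) } };
fillPoly(mask, roi_corners, Scalar(255, 255, 255));
while (cap.read(frame))
{
GaussianBlur(frame, blur, Size(15, 15), 0);
bitwise_and(blur, mask, crop); // reuse the precomputed mask
// ... the rest of the per-frame processing is unchanged
}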
I'm struggling with shape detection using OpenCV for C++. Edged figures such as triangles and rectangles are detected trouble-free, but when it comes to a circle it estimates the number of vertices at 6-8. Could somebody help me?
void getContours(Mat video){
Mat grayscale, canny_output;
cvtColor(video, grayscale,COLOR_RGB2GRAY);//converting image to grayscale
GaussianBlur(grayscale, grayscale, Size(9, 9), 2, 2 );
threshold(grayscale, grayscale,60,255,THRESH_BINARY);
vector <vector<Point>> contours, output_contour;
vector <Vec4i> hierarchy;
findContours( grayscale, contours, hierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE );
Mat drawing = Mat::zeros( grayscale.size(), CV_8UC3 );
vector<Point> c;
for (size_t i = 0; i<contours.size(); i++){
c = contours[i];
Rect crect = boundingRect(c);
// compute the center of the contour, then detect the name of the
// shape using only the contour
Moments M = moments(c);
int cX, cY;
cX = static_cast<int>(M.m10/M.m00);
cY = static_cast<int>(M.m01/M.m00);
string shape = detect(Mat(c));
drawContours( drawing, contours, (int)i, Scalar(0, 255, 0), 2);
Point pt(cX,cY);
putText(drawing,shape,pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 2);
imshow("contour", drawing);
}
}
string detect(const Mat &curve){
string shape = "unidentified";
double peri = arcLength(curve, true);
Mat approx;
approxPolyDP(curve, approx, 0.04 * peri, true); // 0.01~0.05
const int num_of_vertices = approx.rows;
if(num_of_vertices == 0){
shape = "circle";
}
if(num_of_vertices==2){
shape = "line";
}
cout<<"\n"<<num_of_vertices;
return to_string(num_of_vertices);
}
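No answer is recorded for this question here. A common workaround (my own sketch, not from the post, assuming the same OpenCV headers and using-declarations as the code above) is to stop relying on the vertex count for round shapes and test circularity instead: 4*pi*area/perimeter^2 is close to 1 for a circle and noticeably lower for polygons.
// Hypothetical replacement for detect(): classify by circularity rather than vertex count.
string detectShape(const vector<Point>& curve)
{
double peri = arcLength(curve, true);
double area = contourArea(curve);
vector<Point> approx;
approxPolyDP(curve, approx, 0.04 * peri, true);
double circularity = 4.0 * CV_PI * area / (peri * peri);
if (circularity > 0.8) // threshold is a guess; tune it on your images
return "circle";
if (approx.size() == 3)
return "triangle";
if (approx.size() == 4)
return "rectangle";
return "unidentified";
}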
I am selecting a color from the first frame using a mouse handler.
I am trying to replace the selected color with the background frame.
This works fine for red, but it does not work for any other color such as green or blue. I chose the HSV ranges from a hue reference chart.
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// structure to be used in mouseHandler function
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 1) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv) {
// Take video frame from camera to select color of material
VideoCapture capt(0);
Mat frames;
capt >> frames;
Mat hsvimg;
// Converting image from BGR to HSV
cvtColor(frames, hsvimg, COLOR_BGR2HSV);
// Set data for mouse event
Mat im_temp = frames.clone();
userdata data;
data.im = im_temp;
cout << "Select the point on image for the color you want to create cloak and than press 'Enter'" << endl;
// Show image and wait for a click.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
//defining the HSV values of the point selected
Vec3b HSV_Color = hsvimg.at<Vec3b>(data.points[0]);
int hue = HSV_Color.val[0];
int saturation = HSV_Color.val[1];
int value = HSV_Color.val[2];
// Create a VideoCapture object and open the input file for demonstrating the cloak working
// If the input is the web camera, pass 0 instead of the video file name
// In first frame only background should be there, i.e., no person present
VideoCapture cap(0);
// Check if camera opened successfully
if (!cap.isOpened()) {
cout << "Error opening video stream or file" << endl;
return -1;
}
Mat background;
for (int i = 0; i < 30; i++)
{
cap >> background;
}
//Laterally invert the image / flip the image.
flip(background, background, 1);
while (1)
{
Mat frame;
// Capture frame-by-frame
cap >> frame;
// Laterally invert the image / flip the image
flip(frame, frame, 1);
//Converting image from BGR to HSV color space.
Mat hsv;
cvtColor(frame, hsv, COLOR_BGR2HSV);
Mat mask1, mask2;
// Creating masks to detect the upper and lower red color.
// Otherwise mask1 and mask2 are same for other colors
// Making different conditions according to hue values
// Take help from this: https://stackoverflow.com/questions/10948589/choosing-the-correct-upper-and-lower-hsv-boundaries-for-color-detection-withcv
if (saturation > 100) {
if (hue <= 10 || hue>165) {
inRange(hsv, Scalar(0, 120, 20), Scalar(10, 255, 255), mask1);
inRange(hsv, Scalar(170, 120, 20), Scalar(180, 255, 255), mask2);
}
else if (10<hue<=25) {
inRange(hsv, Scalar(10, 120, 20), Scalar(25, 255, 255), mask1);
inRange(hsv, Scalar(10, 120, 20), Scalar(25, 255, 255), mask2);
}
else if (25 < hue <= 38) {
inRange(hsv, Scalar(25, 120, 20), Scalar(35, 255, 255), mask1);
inRange(hsv, Scalar(25, 120, 20), Scalar(35, 255, 255), mask2);
}
else if (38 < hue <= 71) {
inRange(hsv, Scalar(38, 100, 20), Scalar(71, 255, 255), mask1);
inRange(hsv, Scalar(38, 100, 20), Scalar(71, 255, 255), mask2);
}
else if (71 < hue <= 100) {
inRange(hsv, Scalar(71, 120, 20), Scalar(95, 255, 255), mask1);
inRange(hsv, Scalar(71, 120, 20), Scalar(95, 255, 255), mask2);
}
else if (100 < hue <= 140) {
inRange(hsv, Scalar(100, 150, 20), Scalar(130, 255, 255), mask1);
inRange(hsv, Scalar(100, 150, 20), Scalar(130, 255, 255), mask2);
}
else if (140 < hue <= 165) {
inRange(hsv, Scalar(140, 120, 20), Scalar(170, 255, 255), mask1);
inRange(hsv, Scalar(140, 120, 20), Scalar(170, 255, 255), mask2);
}
}
else {
cout << "Use colored material." << endl;
break;
}
// Generating the final mask
mask1 = mask1 + mask2;
Mat kernel = Mat::ones(3, 3, CV_32F);
morphologyEx(mask1, mask1, cv::MORPH_OPEN, kernel);
morphologyEx(mask1, mask1, cv::MORPH_DILATE, kernel);
// creating an inverted mask to segment out the cloth from the frame
bitwise_not(mask1, mask2);
Mat res1, res2, final_output;
// Segmenting the cloth out of the frame using bitwise and with the inverted mask
bitwise_and(frame, frame, res1, mask2);
// creating image showing static background frame pixels only for the masked region
bitwise_and(background, background, res2, mask1);
// Generating the final augmented output.
addWeighted(res1, 1, res2, 1, 0, final_output);
imshow("magic", final_output);
waitKey(1);
// Press ESC on keyboard to exit
char c = (char)waitKey(25);
if (c == 27)
break;
// Also release all the Mats created in the code to avoid memory leakage.
frame.release(), hsv.release(), mask1.release(), mask2.release(), res1.release(), res2.release(), final_output.release();
}
// When everything done, release the video capture object
cap.release();
// Closes all the frames
cv::destroyAllWindows();
return 0;
}
It should do the same for all major colors as it is doing for red.
Have you checked the output at these two points when you select another color?
Vec3b HSV_Color = hsvimg.at<Vec3b>(data.points[0]);
int hue = HSV_Color.val[0];
int saturation = HSV_Color.val[1];
int value = HSV_Color.val[2];
and this one
cvtColor(frame, hsv, COLOR_BGR2HSV);
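One more thing worth checking (my own note, not part of the answer above): in C++, a chained comparison such as 10<hue<=25 is not a range test; it evaluates (10 < hue) to 0 or 1 and then compares that result with 25, which is always true, so every hue between 11 and 165 falls into the first else-if branch and gets the 10-25 masks. A sketch of the range checks written out explicitly, reusing the thresholds from the code above:
// Explicit range tests; only the red case genuinely needs two hue bands.
if (hue <= 10 || hue > 165) {
inRange(hsv, Scalar(0, 120, 20), Scalar(10, 255, 255), mask1);
inRange(hsv, Scalar(170, 120, 20), Scalar(180, 255, 255), mask2);
}
else if (hue > 10 && hue <= 25) {
inRange(hsv, Scalar(10, 120, 20), Scalar(25, 255, 255), mask1);
mask2 = Mat::zeros(hsv.size(), CV_8UC1);
}
else if (hue > 25 && hue <= 38) {
inRange(hsv, Scalar(25, 120, 20), Scalar(38, 255, 255), mask1);
mask2 = Mat::zeros(hsv.size(), CV_8UC1);
}
// ... the remaining bands follow the same pattern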
I need to detect the people in a picture. Here I use a Haar cascade to detect bodies. However, the result shows no change: there is no rectangle drawn on the image, so I want to know what happened.
int main(int argc, char** argv)
{
argv[1] = "C:/people.jpg";//test image
Mat image;
Mat img;
Mat gray;
vector<Rect>human;
CascadeClassifier detectorBody;
string cascadeBody = "C:/opencv/opencv/sources/data/haarcascades_cuda/haarcascade_fullbody.xml";
bool loaded = detectorBody.load(cascadeBody);
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
cvtColor(image, gray, CV_BGR2GRAY);//turn to gray
detectorBody.detectMultiScale(gray, human, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(12, 28));
for (int i = 0; i < human.size(); i++) {
rectangle(gray, human[i].tl(), human[i].br(), Scalar(0, 0, 255), 2, 8, 0);
}
namedWindow("Display", CV_WINDOW_NORMAL);
imshow("Display", image);
namedWindow("gray", CV_WINDOW_NORMAL);
imshow("gray", gray);
waitKey(0);
return 0;
}
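No answer is recorded for this question here. Two things worth ruling out (my own suggestions, not from the post): the result of load() is never checked, and the XML files in the haarcascades_cuda folder are old-format cascades that, depending on the OpenCV version, may not load with the CPU CascadeClassifier; the ones in the plain haarcascades folder are the safer choice. A minimal sketch, reusing image from the code above:
// Sketch: fail loudly if the cascade did not load, then detect on the grayscale image.
CascadeClassifier detectorBody;
if (!detectorBody.load("C:/opencv/opencv/sources/data/haarcascades/haarcascade_fullbody.xml"))
{
cout << "Could not load the cascade file" << endl;
return -1;
}
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray); // often helps Haar detection
vector<Rect> human;
detectorBody.detectMultiScale(gray, human, 1.05, 3, 0, Size(40, 96));
for (size_t i = 0; i < human.size(); i++)
rectangle(image, human[i], Scalar(0, 0, 255), 2);
imshow("Display", image);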
I have got a bulk of car images and want to perform automatic number plate recognition, but I am stuck at the localization phase. I want to get each license plate individually as output so that I can run recognition on it. Here is my code for localization:
#include <opencv2/opencv.hpp>
using namespace std;
bool verifySizes(cv::RotatedRect mr); // declared here so main() can call it
int main(int argc, char** argv)
{
//String filename;
//filename="";
cv::Mat image=cv::imread("C:\\Users\\Sarora\\Downloads\\Images\\frame_1375.jpg",CV_LOAD_IMAGE_COLOR);
cv::Mat img;
cv::Mat img_sobel;
cv::Mat grad_x, grad_y;
cv::Mat abs_grad_x, abs_grad_y;
cv::Mat imgContours;
//vector <Plate>result;
cv::cvtColor(image, img, CV_BGR2GRAY);
blur(img, img, cv::Size(5,5));
//cv::namedWindow("Img1.jpg", CV_WINDOW_AUTOSIZE );
//sobel filter applied on image..............................................................................................
cv::Sobel(img, grad_x, CV_16S, 1, 0,3,1,0, cv::BORDER_DEFAULT);
convertScaleAbs( grad_x, abs_grad_x );
cv::Sobel(img, grad_y, CV_16S, 0, 1,3,1,0, cv::BORDER_DEFAULT);
convertScaleAbs( grad_y, abs_grad_y );
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, img_sobel );
cv::imwrite("Img2.jpg",img_sobel);
//Threshold the image...................................................................................................................
cv::Mat Thresh_img;
threshold(img_sobel, Thresh_img, 0, 255, CV_THRESH_OTSU+CV_THRESH_BINARY);
//imshow("Threshold", Thresh_img);
//Morphological close operation applied................................................................................................
cv::Mat element1=cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
cv::morphologyEx(Thresh_img,Thresh_img,CV_MOP_CLOSE,element1);
cv::imwrite("Close1.jpg",Thresh_img);
//cv::waitKey(5000);
//find Contours of whole image......................................................................................................
std::vector <std::vector<cv::Point>> contours;
cv::findContours(Thresh_img, contours,CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
//cv::drawContours(image,contours,-1,cv::Scalar(0,0,255),3);
cv::imwrite("Contours1.jpg",image);
std::vector <std::vector<cv::Point>>::iterator itc= contours.begin();
std::vector <cv::RotatedRect> rects;
//vector<vector<Point> > contours_poly(rects.size());
//vector<Rect> boundRect(rects.size());
//Remove patch not inside the limits of aspect ratio and area..................................................................................
while (itc!=contours.end()) {
cv::RotatedRect mr= cv::minAreaRect(cv::Mat(*itc));
if( !verifySizes(mr))
{ itc= contours.erase(itc);
}else {
++itc;
rects.push_back(mr);
}
}
cv::Mat drawing;
vector<vector<cv::Point> > contours_poly(rects.size());
vector<cv::Rect> boundRect(rects.size());
//Draw contours
cv::Mat output;
image.copyTo(output);
for(int i=0;i<rects.size();i++)
{
approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
}
//cv::imwrite("Contours.jpg", output);
for (int i = 0; i < rects.size(); i++)
{
drawContours(output, contours_poly, i, CV_RGB(255, 255, 255), 1, 8, vector<cv::Vec4i>(), 0, cv::Point());
//rectangle(output, boundRect[i].tl(), boundRect[i].br(), CV_RGB(0, 255, 0), 3, 8, 0);
}
cv::imwrite("drawing1.jpg",output);
}
bool verifySizes(cv::RotatedRect mr){
float error=0.4;
//Set a min and max area. All other patches are discarded
int min= 5; // minimum area
int max=1000; // maximum area
//Get only patches that match
float rmin= 1;
float rmax= 10;
int area= mr.size.height * mr.size.width;
float r= (float)mr.size.width / (float)mr.size.height;
if(r<1)
r= (float)mr.size.height / (float)mr.size.width;
if(( area < min || area > max ) || ( r < rmin || r > rmax )){
return false;
}else{
return true;
}
}
I have applied a Sobel filter, thresholding (Otsu + binary), a morphological CLOSE, findContours(), removal of contours outside the area and aspect-ratio limits, and approxPolyDP to the image. This is my input image.
This is the approxPolyDP image.
The problem is that the output image does not have rectangles drawn around the license plate. Can anyone tell me what is wrong in the code, and how I can proceed further to automatically find license plates in a bulk of images? I am confused.
Thank you
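No answer is recorded for this question here. One way to proceed (a sketch of my own, reusing image and boundRect from the code above): crop each surviving bounding rectangle and save it as a plate candidate for the recognition stage. The area limits of 5 to 1000 px in verifySizes also look very small for full-resolution photos and are probably worth widening.
// Sketch: save every surviving candidate region for the recognition stage.
for (size_t i = 0; i < boundRect.size(); i++)
{
// Clamp to the image so the ROI is always valid.
cv::Rect roi = boundRect[i] & cv::Rect(0, 0, image.cols, image.rows);
if (roi.area() == 0)
continue;
cv::Mat candidate = image(roi).clone();
cv::imwrite("candidate_" + std::to_string(i) + ".jpg", candidate);
}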
I'm trying to extract from an image only the contours that have a specific size.
I proceed like this:
int offsetX ;
int offsetY ;
//here: read original image as 8UC3
cv::Mat original = cv::imread("0.png");
Mat imgx=original.clone();
cv::imshow("original", original);
cvtColor(imgx,imgx,CV_BGR2GRAY);
Mat thresh;
vector<Vec4i> hierarchy;
RNG rng(12345);
vector < vector<Point> > contours;
adaptiveThreshold(imgx, thresh, 255, 1, 1, 31, 2);
findContours(thresh, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
Mat drawing = Mat::zeros(thresh.size(), CV_8UC3);
cout << "drawing "<<drawing.type()<<endl;
cv::Mat image = cv::Mat(original.rows, original.cols, original.type());
image.setTo(cv::Scalar::all(255));
for (size_t i = 0; i < contours.size(); i++)
{
vector < Point > cnt = contours[i];
if (contourArea(cnt) > 0)
{
Rect rec = boundingRect(cnt);
if ((rec.height > 20 ) &&(3.5*rec.height>rec.width)&& (rec.width>15)/*&& (rec.width<40)*/)
{
cout<<rec.x<<" "<<rec.y<<endl;
offsetX=rec.x;
offsetY=rec.y;
Mat roi = original(rec);
int width = roi.cols;
int height = roi.rows;
cout <<"h= "<<height<<" w= "<<width<<endl;
cv::Rect characterLocation;
if(height>35)
characterLocation = cv::Rect(offsetX+3, offsetY, width, height);
else
characterLocation = cv::Rect(offsetX, offsetY, width, height);
original(characterLocation).copyTo(image(characterLocation));
imshow("jihedddd",roi);
imwrite("xxxxxx.png",roi);
Mat stagedImage;
Mat img;
Scalar color = Scalar(255, 255, 255);
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
imshow("Contours", drawing);
waitKey();
GaussianBlur(stagedImage, img, Size(5, 5), 2, 2);
medianBlur(img, stagedImage, 3);
Mat copy = original.clone();
rectangle(copy, Point(rec.x, rec.y),
Point(rec.x + rec.width, rec.y + rec.height),
CV_RGB(0x00,0x00,0xff), 3);
cv::imshow("char copied", image);
}
}
}
medianBlur(image,image,3);
cv::imshow("char copied", image);
cv::imwrite("characterC_result.tiff ", image);
cv::waitKey();
But when I run this code I get an error:
OpenCV Error: Assertion failed
This is an example of an image that gives me the error.
You have to be careful with this:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height);
If width is the image width, you are out of bounds here. You either have to subtract x from the width, or crop the rect to the image borders:
// get the Rect for the original image:
cv::Rect borders(Point(0,0), image.size());
// crop to the legal size:
characterLocation = cv::Rect(offsetX+3, offsetY, width, height) & borders;
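For completeness, a minimal sketch of how the clamped rectangle would then be used in the copy above (same variable names as in the question's code):
// Clamp once, then use the same rectangle for both source and destination.
cv::Rect characterLocation = cv::Rect(offsetX + 3, offsetY, width, height) & borders;
if (characterLocation.area() > 0)
original(characterLocation).copyTo(image(characterLocation));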