I have trained a Haar cascade and now I need to work with the detected objects. How can I crop them from the original image and show each in a new window (or show multiple windows if I find two objects in the image)? Here is my code (OpenCV ver 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the face feature data" << endl;
        return -1;
    }

    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");

    // Detect only in the top half of the image
    Rect AssignRect = Rect(0, 0, src.cols, src.rows / 2);
    Mat srcImage = src(AssignRect);

    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);

    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3, 3));

    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }

    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of every object that was found. You just need each rectangle's left and top coordinates plus its width and height, and you already have them. All that remains is to crop each rectangle into its own Mat and show it in a separate window.
See the OpenCV documentation to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    // Build a Rect from the detection and crop it out of the source image
    Rect crop_found(trafficLights[i].x, trafficLights[i].y,
                    trafficLights[i].width, trafficLights[i].height);
    Mat found(src, crop_found);

    // One window per detection, named "0", "1", ...
    imshow(to_string(i), found);

    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}
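Two caveats about that snippet: Mat found(src, crop_found) shares pixel data with src, so the green rectangles drawn on src in earlier iterations can bleed into later crops when detections overlap; and to_string requires C++11 (add #include <string> on older toolchains). A hedged sketch of the crop, inside the same loop, that avoids the aliasing and also clamps detections to the image bounds:

// Sketch (an assumption, not from the original answer): intersect the
// detection with the image rectangle, then clone so the crop owns its
// pixels and is unaffected by later drawing on src.
Rect bounded = trafficLights[i] & Rect(0, 0, src.cols, src.rows);
Mat found = src(bounded).clone();
imshow(to_string(i), found);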
I'm trying to identify drops on a water-sensitive card. As you can see in the figure below, besides the drops there are also streaks where water has run down the card, which I don't want to count. I'm using OpenCV's findContours function to detect these contours. The question is: can I separate the real drops from the water streaks on the card? Here is an excerpt from my code.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src; Mat src_gray; Mat binary_image, goTo;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
cv::Scalar min_color_scanner = Scalar(0,0,0);
cv::Scalar max_color_scanner = Scalar(255,175,210);
int main(int argc, char** argv){
cv::Mat image, gray, thresh;
// MARK:- Load image, grayscale, Otsu's threshold
image = imread("/Users/user/Documents/Developer/Desktop/OpenCV-Teste3.3.1/normal1.png");
Mat circles_detect;
cvtColor( image, circles_detect, CV_BGR2GRAY );
GaussianBlur( circles_detect, circles_detect, Size(9, 9), 2, 2 );
//END CIRCLES
cvtColor(image, gray, CV_BGR2GRAY);
threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
Mat mask(image.rows, image.cols, CV_8UC3, Scalar(255,255,255));
cv::Mat bgr_image, inRangeImage;
cv::cvtColor(image, bgr_image, CV_RGB2BGR);
cv::inRange(bgr_image, min_color_scanner, max_color_scanner, binary_image);
//Find contours and filter using contour area
vector<vector<Point>> contours;
cv::findContours(thresh, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
// MARK:- data from image
double largest_area=0.0;
int largest_contour_index=0;
double smallest_area=0.0;
int smallest_contour_index=0;
int drop_derive=0;
Rect boundig_rect;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area > largest_area){
largest_area=area;
largest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
}
smallest_area = largest_area;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area < smallest_area){
smallest_area=area;
smallest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
if (area < 4){
drop_derive++;
cv::drawContours(image, contours, i, Scalar(255,0,0));
}
}
//show datas and images..
return(0);
}
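One common way to separate round drops from elongated streaks is to score each contour's circularity, 4*pi*area/perimeter^2, which is close to 1.0 for round drops and much lower for streaks. The following is only a hedged sketch on top of the contour loop above; the 0.7 cutoff is an assumption to be tuned on real cards:

// Sketch: classify contours by circularity = 4*pi*area / perimeter^2.
// Round drops score near 1.0; elongated streaks score much lower.
// The 0.7 threshold is a guess and needs tuning.
for (int i = 0; i < contours.size(); i++){
    double area = contourArea(contours[i]);
    double perimeter = arcLength(contours[i], true);
    if (perimeter <= 0) continue;
    double circularity = 4.0 * CV_PI * area / (perimeter * perimeter);
    Scalar color = (circularity > 0.7) ? Scalar(0, 255, 0)   // drop
                                       : Scalar(255, 0, 0);  // streak
    cv::drawContours(image, contours, i, color);
}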
I have a video file from which I'm capturing frames. I want to crop a triangle from the captured frame and display it, but my program shows just the source frame.
Here is my code:
cv::Mat Detector::cropRegionOfInterest(cv::Mat& frame)
{
    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);

    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.size(), CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    cv::Mat result(frame.size(), CV_8UC3);
    cv::bitwise_and(frame, mask, result);
    return result;
}
Instead of displaying source frame I want it to display cropped triangle.
Since you're using CV_8UC3 as the type of result, I'm assuming (see the Edit at the end of the answer if that's not the case) that the input image frame also has 3 channels. In that case, I'm a bit surprised that you can even see the non-cropped image, as running your code simply throws an exception on my machine at the call to bitwise_and:
OpenCV(3.4.1) Error: Sizes of input arguments do not match
From the documentation, it seems to me that you can't mix inputs of different types: here frame has three channels while mask has only one. A quick and dirty solution is to split the input image into a vector of three channels, call bitwise_and for each of them, and then merge them back. The code below works for me:
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;

cv::Mat cropRegionOfInterest(cv::Mat& frame)
{
    const int frameWidth = frame.cols - 1;
    const int frameHeight = frame.rows - 1;

    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);

    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.rows, frame.cols, CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    std::vector<cv::Mat> src_channels;
    std::vector<cv::Mat> result_channels;
    cv::split(frame, src_channels);
    for(int idx = 0; idx < 3; ++idx)
    {
        result_channels.emplace_back(frame.rows, frame.cols, CV_8UC1);
        cv::bitwise_and(src_channels[idx], mask, result_channels[idx]);
    }

    cv::Mat result;
    cv::merge(result_channels, result);
    return result;
}

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        printf("usage: DisplayImage.out <Image_Path>\n");
        return -1;
    }

    Mat image;
    image = imread(argv[1], 1);
    if (!image.data)
    {
        printf("No image data \n");
        return -1;
    }

    cv::Mat cropped = cropRegionOfInterest(image);
    namedWindow("cropped Image", WINDOW_AUTOSIZE);
    imshow("cropped Image", cropped);

    waitKey(0);
    return 0;
}
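As an aside, and only as an alternative sketch rather than part of the answer above: Mat::copyTo accepts a single-channel mask even when the source has three channels, so the split/merge detour can be avoided entirely:

// Alternative sketch: copyTo applies a single-channel mask to a
// multi-channel source directly, so no split/merge is needed.
cv::Mat result(frame.size(), frame.type(), cv::Scalar::all(0));
frame.copyTo(result, mask);
return result;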
Edit: From your comments it seems that frame is actually grayscale. In that case, never mind all the code above, and just change cv::Mat result(frame.size(), CV_8UC3); to
cv::Mat result(frame.rows, frame.cols, CV_8UC1);
in your original code.
I'm having difficulty implementing an optical flow method to track vehicles using input from a Haar cascade.
So far I can only run optical flow with input that does not come from the Haar cascade.
Can you help me?
This is my code:
using namespace cv;
using namespace std;

int main()
{
    int count = 0; double areax, areay, KoorX, KoorY;
    Mat prev_frame, gray, temp, prev_img;
    Mat frameROI, imgROI, frameLKP;
    Mat ROI, prevROI, ROIOF;

    // Parameters for the Lucas-Kanade optical flow
    int win_size = 24;
    int maxCorners = 24;
    int maxlevel = 8;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.01);
    vector<uchar> found;
    vector<float> error;

    // Parameters for Shi-Tomasi corner detection
    vector<Point2f> prevcorners, corners;
    double qualityLevel = 0.05; //0.4
    double minDistance = 1; //2
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;

    vector<Point2f> frame_corners; // CvPoint array of features
    frame_corners.reserve(maxCorners);
    vector<Point2f> prevframe_corners;
    prevframe_corners.reserve(maxCorners);

    // Open the video
    VideoCapture video("Uji1.avi");

    // Load the .xml cascade
    CascadeClassifier Casmobil;
    String Casmobil_file = "car2500.xml";
    Casmobil.load(Casmobil_file);

    namedWindow("Video", 1);
    namedWindow("Tracking OF", 1);
    namedWindow("Deteksi Haar", 1);

    video >> prev_frame;
    Rect roi = Rect(50, 180, 540, 240);
    prevROI = prev_frame(roi);
    cvtColor(prevROI, gray, CV_BGR2GRAY);
    gray.convertTo(prev_img, CV_8UC1);

    while(true)
    {
        // Read a frame from the video
        video >> frameROI;

        // ROI
        Rect roi = Rect(50, 180, 540, 240);
        Mat ROI = frameROI(roi);
        cvtColor(ROI, gray, CV_BGR2GRAY); // BGR to grayscale
        gray.convertTo(imgROI, CV_8UC1);
        Mat ROIOF = frameROI(roi);

        // Detection
        vector<Rect> mobil;
        Casmobil.detectMultiScale(gray, mobil, 1.1, 3,
                                  CV_HAAR_DO_CANNY_PRUNING | CV_HAAR_SCALE_IMAGE,
                                  Size(0, 0));

        // Draw boxes
        for (size_t i = 0; i < mobil.size(); i++)
        {
            Rect kotak = mobil[i];
            areax = (mobil[i].x + mobil[i].width * 0.5);
            areay = (mobil[i].y + mobil[i].height * 0.5);
            Point center = Point(areax, areay);
            rectangle(ROI, kotak, CV_RGB(0, 255, 0), 2, 8, 0);
            circle(ROI, center, 3, CV_RGB(255, 0, 0), -2);
        }

        // prev_frame
        goodFeaturesToTrack(imgROI, frame_corners, maxCorners,
                            qualityLevel, minDistance, Mat(),
                            blockSize, useHarrisDetector, k);
        cornerSubPix(imgROI, frame_corners, Size(win_size, win_size),
                     Size(-1, -1), termcrit);
        calcOpticalFlowPyrLK(imgROI, prev_img, frame_corners,
                             prevframe_corners, found, error,
                             Size(win_size, win_size), maxlevel, termcrit);

        for (int j = 0; j < frame_corners.size(); j++)
        {
            circle(ROIOF, frame_corners[j], 2, CV_RGB(255, 0, 0), -1);
            circle(ROIOF, prevframe_corners[j], 2, CV_RGB(0, 0, 255), -1);
            //circle(copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0);
            line(ROIOF, prevframe_corners[j], frame_corners[j], CV_RGB(0, 255, 0), 2, 8, 0);
        }

        prev_img = imgROI.clone();

        imshow("Video ", frameROI);
        imshow("Deteksi Haar", ROI);
        imshow("Tracking OF", ROIOF);

        if(waitKey(400) >= 0) break;
    }
    return 0;
}
Thanks.
Do I need to replace the input image to goodFeaturesToTrack by cropping images from the Haar results?
Like this:
Mat Crop = imgROI(mobil[i]);
goodFeaturesToTrack(Crop, frame_corners, maxCorners, qualityLevel, minDistance, Mat(), blockSize, useHarrisDetector, k);
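Cropping works, but note that the corners goodFeaturesToTrack returns for Crop are in the crop's own coordinate frame; you would have to add mobil[i].tl() back before drawing or tracking. A hedged alternative sketch (reusing names from the question's code) uses the mask parameter of goodFeaturesToTrack instead, so coordinates stay in the ROI frame:

// Sketch: restrict feature extraction to the Haar detections with a
// mask; the returned corners stay in imgROI coordinates, so no offset
// bookkeeping is needed.
Mat featureMask = Mat::zeros(imgROI.size(), CV_8UC1);
for (size_t i = 0; i < mobil.size(); ++i)
    featureMask(mobil[i]).setTo(255);
goodFeaturesToTrack(imgROI, frame_corners, maxCorners, qualityLevel,
                    minDistance, featureMask, blockSize,
                    useHarrisDetector, k);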
I am using OpenCV's grabcut.cpp and graphcut.cpp code, which call the function grabCut(). I have to find a way to save the background and foreground models computed from one image and apply them to another image. How do I save 'bgdModel' and 'fgdModel' for future use?
This is the code I have written -
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
// Load an image
Mat img = imread("1.png",1);
const Mat* img_0;
img_0= &img;
// Create the mask
Mat mask;
mask.create( img_0->size(), CV_8UC1);
mask.setTo( GC_BGD );
Mat bgdModel, fgdModel;
Rect rect;
rect.x=0;
rect.y=0;
rect.width=img.cols-1;
rect.height=img.rows-1;
(mask(rect)).setTo( Scalar(GC_BGD) );
grabCut(img, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT);
// Save model to file
FileStorage fs("mymodels.xml", FileStorage::WRITE);
fs << "BgdModel" << bgdModel;
fs << "FgdModel" << fgdModel;
fs.release();
// Load another image
Mat img1 = imread( "abc.png", 1);
const Mat* img_1;
img_1= &img1;
// Load models from file
Mat bgdModel1, fgdModel1;
Mat mask1;
FileStorage fs1("mymodels.xml", FileStorage::READ);
fs1["BgdModel"] >> bgdModel1;
fs1["FgdModel"] >> fgdModel1;
fs1.release();
// Create a mask
mask1.create( img_1->size(), CV_8UC1);
mask1.setTo( GC_PR_FGD );
Rect rect1;
rect1.x=0;
rect1.y=0;
rect1.width=0;//img.cols;
rect1.height=0;//img.rows;
(mask1(rect1)).setTo( Scalar(GC_BGD) );
grabCut(img1, mask1, rect1, bgdModel1, fgdModel1, 1, GC_EVAL);
FileStorage fs2("finalmask.xml", FileStorage::WRITE);
fs2 << "Final_MASK" << mask1;
fs2.release();
for (int i=0;i<img1.rows;i++)
{
for(int j=0;j<img1.cols;j++)
{
if(mask1.at<uchar>(i,j)==0||mask1.at<uchar>(i,j)==2)
{
img1.at<cv::Vec3b>(i,j)[0] = 0;
img1.at<cv::Vec3b>(i,j)[1] = 0;
img1.at<cv::Vec3b>(i,j)[2] = 0;
}
}
}
imshow("Result of Grabcut", img1);
waitKey(0);
return 0;
}
I need to ensure that the bgdModel and fgdModel learnt for img are applied to img1 too.
You can save the background and foreground model Mats using FileStorage, and then load them back to use with the next image.
This example should clarify the process:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    {
        // Load an image
        Mat3b img = imread("path_to_image");

        // Create the mask
        Mat1b mask(img.rows, img.cols, uchar(GC_PR_BGD));
        circle(mask, Point(img.cols / 2, img.rows / 2), 20, Scalar(GC_FGD), -1);

        Mat bgdModel, fgdModel;
        grabCut(img, mask, Rect(), bgdModel, fgdModel, 1);

        imshow("Mask", mask);
        waitKey(1);

        // Save models to file
        {
            FileStorage fs("mymodels.yml", FileStorage::WRITE);
            fs << "BgdModel" << bgdModel;
            fs << "FgdModel" << fgdModel;
        }
    }
    {
        // Load another image
        Mat3b img = imread("path_to_another_image");

        // Load models from file
        Mat bgdModel, fgdModel;
        {
            FileStorage fs("mymodels.yml", FileStorage::READ);
            fs["BgdModel"] >> bgdModel;
            fs["FgdModel"] >> fgdModel;
        }

        // Create a mask
        Mat1b mask(img.rows, img.cols, uchar(GC_PR_BGD));
        circle(mask, Point(img.cols / 2, img.rows / 2), 20, Scalar(GC_FGD), -1);

        grabCut(img, mask, Rect(), bgdModel, fgdModel, 1);

        imshow("Other Mask", mask);
        waitKey(1);
    }
    return 0;
}
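One detail worth checking after reloading, offered as a hedged aside based on how grabcut.cpp builds its GMMs: each model should come back as a 1x65 CV_64FC1 Mat (13 doubles for each of the 5 GMM components), and an empty or differently-typed Mat will make grabCut relearn from scratch or fail:

// Sketch: sanity-check the reloaded models before reusing them.
// The 1x65 CV_64FC1 layout is an assumption taken from grabcut.cpp
// (5 GMM components x 13 doubles each).
CV_Assert(!bgdModel.empty() && bgdModel.type() == CV_64FC1);
CV_Assert(!fgdModel.empty() && fgdModel.type() == CV_64FC1);
CV_Assert(bgdModel.cols == 65 && fgdModel.cols == 65);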
I am looking into the HoughCircles function. There are basically four parameters that I can play with to get the circles I want.
So it came to my mind that I should create trackbars to monitor the state of the image being processed.
So I altered my code like this:
#include <sstream>
#include <string>
#include <iostream>
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdlib.h>
#include <stdio.h>

using namespace std;
using namespace cv;

int main(int argc, char** argv) {

    // Create a window for trackbars
    namedWindow("Trackbar Window", CV_WINDOW_AUTOSIZE);

    // Create trackbar to change brightness
    int iSliderValue1 = 50;
    createTrackbar("Brightness", "Trackbar Window", &iSliderValue1, 100);

    // Create trackbar to change contrast
    int iSliderValue2 = 50;
    createTrackbar("Contrast", "Trackbar Window", &iSliderValue2, 100);

    int param1 = 10;
    createTrackbar("param1", "Trackbar Window", &param1, 300);

    int param2 = 10;
    createTrackbar("param2", "Trackbar Window", &param2, 300);

    Mat src;
    VideoCapture capture;
    capture.open("movingBall.wmv");
    capture.read(src);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);

    if (!src.data) {
        std::cout << "ERROR:\topening image" << std::endl;
        return -1;
    }
    cv::namedWindow("image1", CV_WINDOW_AUTOSIZE);
    cv::namedWindow("image2", CV_WINDOW_AUTOSIZE);

    while (true){
        capture.read(src);

        Mat dst;
        int iBrightness = iSliderValue1 - 50;
        double dContrast = iSliderValue2 / 50.0;
        src.convertTo(src, -1, dContrast, iBrightness);
        cv::imshow("image1", src);

        Mat src_gray2;
        cvtColor(src, src_gray2, CV_BGR2GRAY);
        GaussianBlur(src_gray2, src_gray2, cv::Size(9, 9), 2, 2);

        vector<Vec3f> circles;
        HoughCircles(src_gray2, circles, CV_HOUGH_GRADIENT,
            2,      // accumulator resolution (size of the image / 2)
            5,      // minimum distance between two circles
            param1, // Canny high threshold
            param2, // minimum number of votes
            0, 0);  // min and max radius

        std::cout << circles.size() << std::endl;
        std::cout << "end of test" << std::endl;

        for (size_t i = 0; i < circles.size(); i++)
        {
            Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
            int radius = cvRound(circles[i][2]);
            // circle center
            circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);
            // circle outline
            circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0);
        }

        /*std::vector<cv::Vec3f>::
        const_iterator itc = circles.begin();
        while (itc != circles.end()) {
            cv::circle(src_gray2,
                cv::Point((*itc)[0], (*itc)[1]), // circle centre
                (*itc)[2],                       // circle radius
                cv::Scalar(0,0,0),               // color
                2);                              // thickness
            ++itc;
        }*/

        cv::imshow("image2", src_gray2);
        cvWaitKey(33);
    }
    return 0;
}
As seen in the HoughCircles call there, I used int param1; as the value I wish to change. However, the code has no syntax errors, yet it fails to compile.
I would like to know whether there is something wrong with my trackbar setup.
Thank you
Here I have tried it using Python; you can port from it...
import cv2
import numpy as np

img = cv2.imread('C:/Python34/images/2.jpg', 0)
cv2.namedWindow('image')

def nothing(x):
    pass

cv2.createTrackbar('Param 1', 'image', 0, 100, nothing)
cv2.createTrackbar('Param 2', 'image', 0, 100, nothing)

switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'image', 0, 1, nothing)

while(1):
    cv2.imshow('image', img)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

    # Get parameter values from the trackbars
    para1 = cv2.getTrackbarPos('Param 1', 'image')
    para2 = cv2.getTrackbarPos('Param 2', 'image')
    s = cv2.getTrackbarPos(switch, 'image')

    if s == 0:
        cv2.imshow('image', img)
    else:
        # Find Hough circles according to the trackbar parameters.
        # Note: param1/param2 must be passed as keyword arguments here,
        # since the next positional parameter is the 'circles' output.
        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=para1, param2=para2,
                                   minRadius=0, maxRadius=0)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            # Draw the Hough circles
            for i in circles[0, :]:
                cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
        cv2.imshow('image', img)
        cv2.waitKey(0)
        # Reload the image so each run draws on a fresh copy
        img = cv2.imread('C:/Python34/images/2.jpg', 0)

cv2.destroyAllWindows()
You can use the above code as your reference. First it creates a window and trackbars for the switch and the two Hough circle parameters.
Then, in the while loop, para1 and para2 store the trackbar positions to use as the Canny parameter values.
These are then passed to the cv2.HoughCircles function and the circles are drawn.
The image is reloaded each time so that whenever you change a parameter the output is drawn on a fresh image, to avoid confusion.
Hope this is useful.
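For porting this back to the question's C++ code, a minimal hedged sketch (reusing the question's window name "Trackbar Window" and its loop variables, which are assumptions here) would read the sliders each frame and clamp them, since HoughCircles requires both thresholds to be positive:

// Sketch: a slider at 0 would make HoughCircles throw, so clamp the
// values read from the trackbars before use (needs <algorithm>).
int p1 = std::max(1, getTrackbarPos("param1", "Trackbar Window"));
int p2 = std::max(1, getTrackbarPos("param2", "Trackbar Window"));
HoughCircles(src_gray2, circles, CV_HOUGH_GRADIENT, 2, 5, p1, p2, 0, 0);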