I'm having difficulty implementing the optical flow method to track vehicles using input from a Haar cascade. So far I can only implement optical flow with input that does not come from the Haar cascade.
Can you help me?
This is my code:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
    int count = 0;
    double areax, areay, KoorX, KoorY;
    Mat prev_frame, gray, temp, prev_img;
    Mat frameROI, imgROI, frameLKP;
    Mat ROI, prevROI, ROIOF;

    // Lucas-Kanade optical flow parameters
    int win_size = 24;
    int maxCorners = 24;
    int maxlevel = 8;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.01);
    vector<uchar> found;
    vector<float> error;

    // Shi-Tomasi parameters
    vector<Point2f> prevcorners, corners;
    double qualityLevel = 0.05; // 0.4
    double minDistance = 1;     // 2
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;
    vector<Point2f> frame_corners; // feature points in the current frame
    frame_corners.reserve(maxCorners);
    vector<Point2f> prevframe_corners;
    prevframe_corners.reserve(maxCorners);

    // Open the video
    VideoCapture video("Uji1.avi");

    // Load the cascade .xml
    CascadeClassifier Casmobil;
    String Casmobil_file = "car2500.xml";
    Casmobil.load(Casmobil_file);

    namedWindow("Video", 1);
    namedWindow("Tracking OF", 1);
    namedWindow("Deteksi Haar", 1);

    video >> prev_frame;
    Rect roi = Rect(50, 180, 540, 240);
    prevROI = prev_frame(roi);
    cvtColor(prevROI, gray, CV_BGR2GRAY);
    gray.convertTo(prev_img, CV_8UC1);

    while (true)
    {
        // Read a frame from the video
        video >> frameROI;
        if (frameROI.empty()) break; // stop at end of video

        // ROI
        Rect roi = Rect(50, 180, 540, 240);
        Mat ROI = frameROI(roi);
        cvtColor(ROI, gray, CV_BGR2GRAY); // BGR to grayscale
        gray.convertTo(imgROI, CV_8UC1);
        Mat ROIOF = frameROI(roi);

        // Haar detection
        vector<Rect> mobil;
        Casmobil.detectMultiScale(gray, mobil, 1.1, 3,
                                  CV_HAAR_DO_CANNY_PRUNING | CV_HAAR_SCALE_IMAGE,
                                  Size(0, 0));

        // Draw boxes
        for (size_t i = 0; i < mobil.size(); i++)
        {
            Rect kotak = mobil[i];
            areax = (mobil[i].x + mobil[i].width * 0.5);
            areay = (mobil[i].y + mobil[i].height * 0.5);
            Point center = Point(areax, areay);
            rectangle(ROI, kotak, CV_RGB(0, 255, 0), 2, 8, 0);
            circle(ROI, center, 3, CV_RGB(255, 0, 0), -2);
        }

        // Find corners in the current frame, then run LK from the
        // current frame back to the previous one
        goodFeaturesToTrack(imgROI, frame_corners, maxCorners,
                            qualityLevel, minDistance, Mat(),
                            blockSize, useHarrisDetector, k);
        if (!frame_corners.empty()) // cornerSubPix/LK throw on empty input
        {
            cornerSubPix(imgROI, frame_corners, Size(win_size, win_size),
                         Size(-1, -1), termcrit);
            calcOpticalFlowPyrLK(imgROI, prev_img, frame_corners,
                                 prevframe_corners, found, error,
                                 Size(win_size, win_size), maxlevel, termcrit);
            for (size_t j = 0; j < frame_corners.size(); j++)
            {
                circle(ROIOF, frame_corners[j], 2, CV_RGB(255, 0, 0), -1);
                circle(ROIOF, prevframe_corners[j], 2, CV_RGB(0, 0, 255), -1);
                line(ROIOF, prevframe_corners[j], frame_corners[j],
                     CV_RGB(0, 255, 0), 2, 8, 0);
            }
        }
        prev_img = imgROI.clone();

        imshow("Video ", frameROI);
        imshow("Deteksi Haar", ROI);
        imshow("Tracking OF", ROIOF);
        if (waitKey(400) >= 0) break;
    }
    return 0;
}
Thanks.
Do I need to replace the input image to goodFeaturesToTrack by cropping images from the Haar results?
Like:
Mat Crop = imgROI(mobil[i]);
goodFeaturesToTrack(Crop, frame_corners, maxCorners, qualityLevel, minDistance, Mat(), blockSize, useHarrisDetector, k);
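A hedged sketch of the mask-based alternative (featureMask is my own name, not from the code above, and this is untested on your video): goodFeaturesToTrack accepts a mask argument, so instead of cropping you can mark the Haar boxes in a mask; corners are then picked only inside the detected vehicles, and their coordinates stay in imgROI's coordinate frame, so no offset bookkeeping is needed:

// Sketch: restrict Shi-Tomasi corners to the Haar detections via a mask.
Mat featureMask = Mat::zeros(imgROI.size(), CV_8UC1);
for (size_t i = 0; i < mobil.size(); i++)
    featureMask(mobil[i]).setTo(255); // mark each detected car as trackable
goodFeaturesToTrack(imgROI, frame_corners, maxCorners, qualityLevel,
                    minDistance, featureMask, blockSize,
                    useHarrisDetector, k);

If you crop instead, as in your snippet, remember the corners come back relative to the crop, so offset each point by the box origin (e.g. frame_corners[j] += Point2f(mobil[i].x, mobil[i].y)) before drawing or tracking in the full ROI.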
The problem I'm experiencing is that when the horizontal line doesn't form a perfectly straight 180-degree line, the Hough lines become fragmented and untidy. Please help me detect a horizontal line within a certain angle tolerance, so that once the line falls inside that tolerance it is detected as a single line and not cut in the middle.
This is when the line is straight
This is when the line is not straight
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
// using namespace cv;

int hueminFIELD = 32, huemaxFIELD = 86, satminFIELD = 81, satmaxFIELD = 255, valminFIELD = 0, valmaxFIELD = 255;
int hueminLINE = 0, huemaxLINE = 179, satminLINE = 0, satmaxLINE = 255, valminLINE = 218, valmaxLINE = 255;
cv::Mat hsv, mask;

void getContours(cv::Mat tebel, cv::Mat gambar)
{
    vector<vector<cv::Point>> kontur;
    vector<cv::Vec4i> hierarki;
    findContours(tebel, kontur, hierarki, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    vector<cv::Rect> bataskotak(kontur.size());
    vector<vector<cv::Point>> hull(kontur.size());
    for (int i = 0; i < kontur.size(); i++)
    {
        int area = cv::contourArea(kontur[i]);
        convexHull(cv::Mat(kontur[i]), hull[i]);
        drawContours(gambar, hull, i, cv::Scalar(255, 0, 255), -1);
        float batas = cv::arcLength(kontur[i], true);
        bataskotak[i] = cv::boundingRect(kontur[i]);
        cv::circle(gambar, cv::Point((bataskotak[i].tl() + bataskotak[i].br()) / 2), 6,
                   cv::Scalar(255, 255, 255), cv::FILLED);
    }
}

int main()
{
    cv::VideoCapture vid("lapa.mp4");

    // Trackbars for tuning the white (line) color range
    cv::namedWindow("Trackbar", cv::WINDOW_AUTOSIZE);
    cv::createTrackbar("Hue min", "Trackbar", &hueminLINE, 179);
    cv::createTrackbar("Hue max", "Trackbar", &huemaxLINE, 179);
    cv::createTrackbar("Saturation min", "Trackbar", &satminLINE, 255);
    cv::createTrackbar("Saturation Max", "Trackbar", &satmaxLINE, 255);
    cv::createTrackbar("Value min", "Trackbar", &valminLINE, 255);
    cv::createTrackbar("Value max", "Trackbar", &valmaxLINE, 255);

    while (1)
    {
        cv::Mat video;
        vid.read(video);
        if (video.empty()) break; // stop at end of video
        cv::resize(video, video, cv::Size(640, 480));

        // Field color segmentation
        // cv::GaussianBlur(video, video, cv::Size(3,3), 5, 2);
        cv::cvtColor(video, hsv, cv::COLOR_BGR2HSV);
        cv::Scalar lower(hueminFIELD, satminFIELD, valminFIELD);
        cv::Scalar upper(huemaxFIELD, satmaxFIELD, valmaxFIELD);
        cv::inRange(hsv, lower, upper, mask);

        // Contours: pool every field contour and take one convex hull
        vector<vector<cv::Point>> kontur;
        vector<cv::Vec4i> hierarki;
        findContours(mask, kontur, hierarki, cv::RETR_TREE, cv::CHAIN_APPROX_NONE);
        vector<cv::Point> for_convex;
        cv::Mat drawing = cv::Mat::zeros(mask.size(), CV_8UC3);
        for (int i = 0; i < kontur.size(); i++)
        {
            for_convex.insert(for_convex.end(), kontur[i].begin(), kontur[i].end());
        }
        vector<cv::Point> convexed;
        convexHull(for_convex, convexed);
        vector<vector<cv::Point>> convexbener;
        convexbener.push_back(convexed);
        drawContours(drawing, convexbener, 0, cv::Scalar(255, 255, 255), -1);

        // White line segmentation, restricted to the field hull
        cv::Mat lapangan;
        cv::bitwise_and(video, drawing, lapangan);
        cv::Mat lapanganhsv;
        cv::cvtColor(lapangan, lapanganhsv, cv::COLOR_BGR2HSV);
        cv::Scalar lowerLINE(hueminLINE, satminLINE, valminLINE);
        cv::Scalar upperLINE(huemaxLINE, satmaxLINE, valmaxLINE);
        cv::Mat maskline;
        cv::inRange(lapanganhsv, lowerLINE, upperLINE, maskline);
        cv::Mat strukturgaris = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
        cv::dilate(maskline, maskline, strukturgaris);
        cv::imshow("maskline", maskline);

        // SKELETON: morphological skeleton of the line mask
        cv::Mat skel(maskline.size(), CV_8UC1, cv::Scalar(0));
        cv::Mat temp(maskline.size(), CV_8UC1);
        cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
        bool done;
        do
        {
            cv::morphologyEx(maskline, temp, cv::MORPH_OPEN, element);
            cv::bitwise_not(temp, temp);
            cv::bitwise_and(maskline, temp, temp);
            cv::bitwise_or(skel, temp, skel);
            cv::erode(maskline, maskline, element);
            double max;
            cv::minMaxLoc(maskline, 0, &max);
            done = (max == 0);
        } while (!done);

        // Hough lines from the skeleton
        vector<cv::Vec4i> lines;
        HoughLinesP(skel, lines, 2, CV_PI / 180, 80, 0, 200);
        cvtColor(maskline, maskline, cv::COLOR_GRAY2BGR);
        // Draw the Hough lines
        for (size_t i = 0; i < lines.size(); i++)
        {
            cv::Vec4i l = lines[i];
            line(maskline, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]),
                 cv::Scalar(255, 0, 0), 1, cv::LINE_AA);
        }

        // HORIZONTAL: keep only horizontal strokes with a wide, flat kernel
        cv::Mat bw;
        cv::adaptiveThreshold(skel, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 15, -2);
        cv::Mat horizontal = bw.clone();
        int horizontal_size = horizontal.cols / 40;
        cv::Mat horizontalStructure = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(horizontal_size, 1));
        cv::erode(horizontal, horizontal, horizontalStructure);
        cv::dilate(horizontal, horizontal, horizontalStructure, cv::Point(-1, -1), 10);
        // getContours(horizontal, video);

        // Probabilistic Hough on the horizontal mask, drawn onto the video
        vector<cv::Vec4i> horiz_lines;
        HoughLinesP(horizontal, horiz_lines, 1, CV_PI / 180, 80, 0, 300);
        cvtColor(horizontal, horizontal, cv::COLOR_GRAY2BGR);
        for (size_t i = 0; i < horiz_lines.size(); i++)
        {
            cv::Vec4i l = horiz_lines[i];
            line(video, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]),
                 cv::Scalar(255, 0, 0), 1, cv::LINE_AA);
        }

        cv::Mat eroded;
        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(5, 5));
        cv::erode(horizontal, eroded, kernel, cv::Point(-1, -1), 3);
        cv::imshow("horizontal", horizontal);
        cv::imshow("video", video);

        // VERTICAL (experiment, currently disabled)
        // cv::cvtColor(horizontal, horizontal, cv::COLOR_BGR2GRAY);
        // cv::Mat blur_line;
        // cv::GaussianBlur(horizontal, blur_line, cv::Size(5,5), 0);
        // cv::Mat edges;
        // cv::Canny(blur_line, edges, 50, 150);
        // cv::imshow("edges", edges);

        cv::waitKey(100);
    }
    return 0;
}
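Not a fix of the whole pipeline, but one way to attack the fragmentation, sketched under the assumption that a single dominant horizontal line is in view (maxTiltDeg and the fused-line drawing are my own illustrative additions, not from the original code): after the first HoughLinesP call on the skeleton, filter the segments by angle, pool the endpoints of the survivors, and fit one line through them with cv::fitLine, so a slightly tilted line is still reported as one unbroken line rather than several fragments. If several horizontal lines can appear at once, you would first have to cluster the segments (for example by their y-intercept) and fit each cluster separately.

// Hedged sketch: keep only near-horizontal segments, then fuse them.
// Slots in after HoughLinesP(skel, lines, ...); requires <cmath>.
double maxTiltDeg = 10.0; // assumed tolerance; tune for your field
std::vector<cv::Point> endpoints;
for (size_t i = 0; i < lines.size(); i++)
{
    cv::Vec4i l = lines[i];
    double angle = std::atan2((double)(l[3] - l[1]), (double)(l[2] - l[0])) * 180.0 / CV_PI;
    if (std::fabs(angle) < maxTiltDeg || std::fabs(angle) > 180.0 - maxTiltDeg)
    {
        endpoints.push_back(cv::Point(l[0], l[1]));
        endpoints.push_back(cv::Point(l[2], l[3]));
    }
}
if (endpoints.size() >= 2)
{
    cv::Vec4f fit; // (vx, vy, x0, y0): direction vector and a point on the line
    cv::fitLine(endpoints, fit, cv::DIST_L2, 0, 0.01, 0.01);
    // Extend the fitted line across the frame so it is never cut in the middle
    cv::Point p1(0, cvRound(fit[3] - fit[2] * fit[1] / fit[0]));
    cv::Point p2(video.cols - 1, cvRound(fit[3] + (video.cols - 1 - fit[2]) * fit[1] / fit[0]));
    cv::line(video, p1, p2, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
}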
I have trained a Haar cascade and now I need to work with the detected objects. How can I crop them from the original image and show each in a new window (or show multiple windows if I find two objects in the image)? Here is my code (OpenCV ver 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the cascade data" << endl;
        return -1;
    }
    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");
    Rect AssignRect = Rect(0, 0, src.cols, src.rows / 2); // search only the top half
    Mat srcImage = src(AssignRect);
    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);
    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3, 3));
    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }
    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of each detected object. You just need each rectangle's left and top coordinates, width, and height, and you already have them: crop each rectangle by creating a Mat from it and show it in its own window.
You can check here to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    // trafficLights[i] is already a Rect, so it can also be used directly as the crop region
    Rect crop_found(trafficLights[i].x, trafficLights[i].y, trafficLights[i].width, trafficLights[i].height);
    Mat found(src, crop_found);
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}
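One caveat worth adding (my own note, not part of the original answer): if a detection touches the border of the search region, cropping can throw an out-of-range error, so it is safer to intersect each rectangle with the image bounds first using Rect's intersection operator:

for (int i = 0; i < trafficLights.size(); ++i)
{
    // Clamp the detection to the image before cropping
    Rect safe = trafficLights[i] & Rect(0, 0, src.cols, src.rows);
    if (safe.area() > 0)
        imshow(to_string(i), src(safe));
}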
I have a video file from which I'm capturing frames. I want to crop a triangle from the captured frame and display it, but my program shows just the source frame.
Here is my code:
cv::Mat Detector::cropRegionOfInterest(cv::Mat& frame)
{
    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.size(), CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    cv::Mat result(frame.size(), CV_8UC3);
    cv::bitwise_and(frame, mask, result);
    return result;
}
Instead of displaying the source frame, I want it to display the cropped triangle.
Since you're using CV_8UC3 as the type of result, I'm assuming (see the Edit at the end of the answer if that's not the case) that the input image frame also has 3 channels. In that case, I'm a bit surprised that you can even see the non-cropped image, as running your code simply throws an exception on my machine at the call to bitwise_and:
OpenCV(3.4.1) Error: Sizes of input arguments do not match
From the documentation, it seems to me that you can't mix different input and mask types. A quick and dirty solution is to split the input image into a vector of three channels, call bitwise_and for each of them, and then merge them back. The code below works for me:
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;

cv::Mat cropRegionOfInterest(cv::Mat& frame)
{
    const int frameWidth = frame.cols - 1;
    const int frameHeight = frame.rows - 1;

    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.rows, frame.cols, CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    std::vector<cv::Mat> src_channels;
    std::vector<cv::Mat> result_channels;
    cv::split(frame, src_channels);
    for (int idx = 0; idx < 3; ++idx)
    {
        result_channels.emplace_back(frame.rows, frame.cols, CV_8UC1);
        cv::bitwise_and(src_channels[idx], mask, result_channels[idx]);
    }
    cv::Mat result;
    cv::merge(result_channels, result);
    return result;
}

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        printf("usage: DisplayImage.out <Image_Path>\n");
        return -1;
    }
    Mat image;
    image = imread(argv[1], 1);
    if (!image.data)
    {
        printf("No image data \n");
        return -1;
    }
    cv::Mat cropped = cropRegionOfInterest(image);
    namedWindow("cropped Image", WINDOW_AUTOSIZE);
    imshow("cropped Image", cropped);
    waitKey(0);
    return 0;
}
Edit: From your comments, it seems that frame is actually grayscale. In that case, never mind all the code above, and just change cv::Mat result(frame.size(), CV_8UC3); to
cv::Mat result(frame.rows,frame.cols,CV_8UC1);
in your original code.
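As a side note, the split/merge workaround can also be avoided entirely: Mat::copyTo accepts a single-channel mask even when the source has three channels, so the same crop can be written as the shorter sketch below (using the same mask as above):

cv::Mat result = cv::Mat::zeros(frame.size(), frame.type());
frame.copyTo(result, mask); // mask is CV_8UC1; only masked pixels are copied
return result;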
In the following code I'm trying to detect faces and eyes in a video.
My problem is that I'm trying to set an ROI to detect the eyes, but I think there is an error in the cvSetImageROI function.
This error is displayed:
error C2664: 'cvSetImageROI' : cannot convert parameter 1 from 'cv::Mat' to 'IplImage *'
Thanks for helping me.
#include <stdio.h>
#include <math.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
    CascadeClassifier face_cascade, eye_cascade;
    if (!face_cascade.load("haarcascade_frontalface_alt2.xml")) {
        printf("Error loading cascade file for face");
        return 1;
    }
    if (!eye_cascade.load("haarcascade_eye.xml")) {
        printf("Error loading cascade file for eye");
        return 1;
    }
    VideoCapture capture("w.mp4"); // -1, 0, 1 device id
    if (!capture.isOpened())
    {
        printf("error to initialize camera");
        return 1;
    }
    Mat cap_img, gray_img;
    vector<Rect> faces, eyes;
    while (1)
    {
        capture >> cap_img;
        waitKey(10);
        cvtColor(cap_img, gray_img, CV_BGR2GRAY);
        cv::equalizeHist(gray_img, gray_img);
        face_cascade.detectMultiScale(gray_img, faces, 1.1, 10,
                                      CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING,
                                      cvSize(0, 0), cvSize(300, 300));
        for (int i = 0; i < faces.size(); i++)
        {
            Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
            Point pt2(faces[i].x, faces[i].y);
            Mat faceROI = gray_img(faces[i]);
            // This is the line that fails to compile: cvSetImageROI expects an
            // IplImage*, but faceROI is a cv::Mat
            cvSetImageROI(faceROI, cvRect(faces->x, faces->y + (faces->height) / 5,
                                          faces->width, (faces->height) / 3));
            eye_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
            for (size_t j = 0; j < eyes.size(); j++)
            {
                Point center(faces[i].x + eyes[j].x + eyes[j].width * 0.5,
                             faces[i].y + eyes[j].y + eyes[j].height * 0.5);
                int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
                circle(cap_img, center, radius, Scalar(255, 0, 0), 2, 8, 0);
            }
            rectangle(cap_img, pt1, pt2, cvScalar(0, 255, 0), 2, 8, 0);
        }
        imshow("Result", cap_img);
        char c = waitKey(3);
        if (c == 27)
            break;
    }
    return 0;
}
The problem is that you are trying to use the old OpenCV C API. Your image is in the Mat format, and cvSetImageROI cannot take a Mat image as an argument.
Suggestion:
Rect region_of_interest = Rect(x, y, w, h);
Mat image_roi = image(region_of_interest);
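Applied to the question's code, the eye search band becomes a sub-rectangle of the face ROI; a sketch (the 1/5 offset and 1/3 height are taken from the question's cvSetImageROI call, eyeBand is my own name):

Mat faceROI = gray_img(faces[i]);
Rect eyeBand(0, faces[i].height / 5, faces[i].width, faces[i].height / 3);
Mat eyeROI = faceROI(eyeBand); // a view into gray_img, no pixel copy
eye_cascade.detectMultiScale(eyeROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

Note that the returned eye rectangles are then relative to eyeROI, so add faces[i].x + eyeBand.x and faces[i].y + eyeBand.y when drawing them on cap_img.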
I am using OpenCV to capture and save output images with a fixed size (92 by 112). It captures frames of video and crops them. However, the output images do not have the correct size (sometimes 147 by 147, sometimes 140 by 140, ...). What is the problem in my code? The crop code and the whole code are below. Thanks in advance.
CROP image
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
See the whole code below for the meaning of the parameters.
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay(Mat frame);
// Global variables
// Copy this file from opencv/data/haarscascades to target folder
string face_cascade_name = "haarcascade_frontalface_alt.xml";
CascadeClassifier face_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
// Function main
int main(void)
{
VideoCapture capture(0);
if (!capture.isOpened()) // check if we succeeded
return -1;
// Load the cascade
if (!face_cascade.load(face_cascade_name))
{
printf("--(!)Error loading\n");
return (-1);
};
// Read the video stream
Mat frame;
for (;;)
{
capture >> frame;
// Apply the classifier to the frame
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
printf(" --(!) No captured frame -- Break!");
break;
}
int c = waitKey(10);
if (27 == char(c))
{
break;
}
}
return 0;
}
// Function detectAndDisplay
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element
if (ac > ab)
{
ib = ic;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
}
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
// Form a filename
filename = "";
stringstream ssfn;
ssfn << filenumber << ".pgm";
filename = ssfn.str();
filenumber++;
imwrite(filename, gray);
Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[ic].x + faces[ic].height), (faces[ic].y + faces[ic].width));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
// Show image
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
imshow("original", frame);
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");
}
The problem is in the 'resize image' part. cv::resize is defined as
void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation=INTER_LINEAR)
thus in your code, when you call
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR);
the resized image is stored in 'res', not in 'crop'.
The correct code for the crop part is:
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(res, gray, CV_BGR2GRAY); // Convert the resized cropped image to Grayscale
and 'gray' is now the grayscale version of your resized cropped image.
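If you would rather keep working with 'crop' itself, resize can also write back into the same Mat (OpenCV allocates a fresh buffer for the output, so src and dst may alias); a small variation on the fix above:

crop = frame(roi_b);
resize(crop, crop, Size(92, 112), 0, 0, INTER_LINEAR); // crop itself is now 92x112
cvtColor(crop, gray, CV_BGR2GRAY);
imwrite(filename, gray); // saved file has the fixed 92x112 size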