I'm trying to use SimpleBlobDetector in OpenCV 3 to detect blobs of heat in thermal images, for example people. Any simple code or example would be appreciated.
I tried:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "opencv2\features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
if (argc != 2)
{
cout << " Usage: display_image ImageToLoadAndDisplay" << endl;
return 0;
}
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); // Read the file
if (!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl;
return 0;
}
// Set up the detector with default parameters.
//SimpleBlobDetector detector;
// Setup SimpleBlobDetector parameters.
SimpleBlobDetector::Params params;
// Change thresholds
params.minThreshold = 50;
params.maxThreshold = 200;
// Filter by Area.
params.filterByArea = true;
params.minArea = 1500;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.1;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.87;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.01;
// Detect blobs.
std::vector<KeyPoint> keypoints;
cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
//detector->detect(img, keypoints);
detector->detect(image, keypoints);
//params.detect(image, keypoints);
// Draw detected blobs as red circles.
//DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures the size of the circle corresponds to the size of blob
Mat im_with_keypoints;
drawKeypoints(image, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
// Show blobs
imshow("keypoints", im_with_keypoints);
waitKey(0);
//namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display.
//imshow("Display window", image); // Show our image inside it.
//waitKey(0); // Wait for a keystroke in the window
//return 0;
}
but it just displays the gray image unchanged, with no blobs drawn.
To detect blobs with OpenCV you need to:
Create a SimpleBlobDetector (in OpenCV 3, through cv::SimpleBlobDetector::create())
Declare a vector of type KeyPoint
Call SimpleBlobDetector::detect()
There is a brilliant tutorial online here (where I nicked the code from): https://www.learnopencv.com/blob-detection-using-opencv-python-c/
using namespace cv;
Mat im = imread( "blob.jpg", IMREAD_GRAYSCALE );
// In OpenCV 3 SimpleBlobDetector is abstract, so create it through the factory function
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create();
std::vector<KeyPoint> keypoints;
detector->detect( im, keypoints );
Mat im_with_keypoints;
drawKeypoints( im, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
imshow("keypoints", im_with_keypoints );// Show blobs
waitKey(0);
You can also adjust the parameters to select blobs with specific attributes; it's all listed in the tutorial. I'd suggest having a play to get a feel for how it works.
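One thing worth checking for thermal images specifically: by default SimpleBlobDetector keeps dark blobs (filterByColor is on with blobColor = 0), so bright hot regions get discarded, and a minArea of 1500 plus the strict convexity filter can reject whatever is left. A rough starting point (a sketch only, the numbers are guesses that will need tuning for your images):
SimpleBlobDetector::Params params;
params.filterByColor = true;
params.blobColor = 255;              // keep bright (hot) blobs instead of dark ones
params.filterByArea = true;
params.minArea = 100;                // much lower than 1500 so smaller/distant people still pass
params.maxArea = 100000;
params.filterByCircularity = false;  // people are rarely circular
params.filterByConvexity = false;
params.filterByInertia = false;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
std::vector<KeyPoint> keypoints;
detector->detect(image, keypoints);  // 'image' is your grayscale thermal image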
I am using OpenCV to add a face recognition feature to my C++ program. I have never used it before, and I can't seem to get the cascade feature for facial recognition to work. I am wondering if they have made some changes to the flags in the new version. I can display an image, but when it comes to the cascade it always throws an error. Can anyone tell me what I am missing?
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <opencv2/objdetect.hpp>
using namespace std;
using namespace cv;
String face_cascade_name = "sources/data/haarcascades/haarcascade_frontalface_default.xml";
String eyes_cascade_name = "sources/data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
String smile_cascade_name = "sources/data/haarcascades/haarcascade_smile.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
CascadeClassifier smile_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
int main()
{
//-- 1. Load the cascades
if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading file 1\n"); return -1; };
if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading file 2\n"); return -1; };
if (!smile_cascade.load(smile_cascade_name)) { printf("--(!)Error loading file 3\n"); return -1; };
std::string image_path = samples::findFile("test.jpg");
Mat img = imread(image_path, IMREAD_COLOR);
Mat img_gry;
if (img.empty())
{
std::cout << "Could not read the image: " << image_path << std::endl;
return 1;
}
imshow("Display window", img);
// Detect faces
std::vector<Rect> faces;
cvtColor(img, img_gry, COLOR_BGR2GRAY);
equalizeHist(img, img_gry);
//I GET ERROR HERE
face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, CASCADE_SCALE_IMAGE, Size(30, 30), Size(130, 130)); //I GET ERROR HERE
/*...REST WILL BE PARSING faces...*/
int g_key = waitKey(0); // Wait for a keystroke in the window
if (g_key == 's')
{
imwrite("starry_night.png", img); //save image in same path
}
return 0;
}
I modified the code as:
/*...
OTHER CODE
...*/
std::vector<Rect> faces;
cvtColor(img, img_gry, COLOR_BGR2GRAY);
equalizeHist(img_gry, img_gry);
face_cascade.detectMultiScale(img_gry, faces);
for (size_t i = 0; i < faces.size(); i++)
{
/*...PARSE...*/
}
imshow("Display window", img);
As #SourceCode mentioned, equalizeHist expects a single-channel image, so it has to operate on the grayscale Mat:
equalizeHist(smallImg, smallImg); // my variable is img_gry instead
But I also modified:
face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, CASCADE_SCALE_IMAGE, Size(30, 30), Size(130, 130));
To
face_cascade.detectMultiScale(img_gry, faces);
Otherwise it does not work in my case.
Source if that helps: https://docs.opencv.org/3.4/db/d28/tutorial_cascade_classifier.html
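In case it helps, the /*...PARSE...*/ part can be as simple as drawing a rectangle per detection; a minimal sketch using the same variable names as above:
for (size_t i = 0; i < faces.size(); i++)
{
    // faces[i] is a cv::Rect in the coordinates of img (and img_gry)
    rectangle(img, faces[i], Scalar(255, 0, 255), 2);
    // the eye/smile cascades would then be run on the face region only, e.g. img_gry(faces[i])
}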
Have you tried using something like:
face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
I'm trying to identify drops on a water-sensitive card. As you can see in the figure below, in addition to the drops there are water streaks that I don't want to count. I'm using OpenCV's findContours function to detect the contours; the question is: can I separate the real drops from the water streaks on the card? Here is an excerpt from my code.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src; Mat src_gray; Mat binary_image, goTo;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
cv::Scalar min_color_scanner = Scalar(0,0,0);
cv::Scalar max_color_scanner = Scalar(255,175,210);
int main(int argc, char** argv){
cv::Mat image, gray, thresh;
// MARK:- Load image, grayscale, Otsu's threshold
image = imread("/Users/user/Documents/Developer/Desktop/OpenCV-Teste3.3.1/normal1.png");
Mat circles_detect;
cvtColor( image, circles_detect, CV_BGR2GRAY );
GaussianBlur( circles_detect, circles_detect, Size(9, 9), 2, 2 );
//END CIRCLES
cvtColor(image, gray, CV_BGR2GRAY);
threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
Mat mask(image.rows, image.cols, CV_8UC3, Scalar(255,255,255));
cv::Mat bgr_image, inRangeImage;
cv::cvtColor(image, bgr_image, CV_RGB2BGR);
cv::inRange(bgr_image, min_color_scanner, max_color_scanner, binary_image);
//Find contours and filter using contour area
vector<vector<Point>> contours;
cv::findContours(thresh, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
// MARK:- data from image
double largest_area=0.0;
int largest_contour_index=0;
double smallest_area=0.0;
int smallest_contour_index=0;
int drop_derive=0;
Rect boundig_rect;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area > largest_area){
largest_area=area;
largest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
}
smallest_area = largest_area;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area < smallest_area){
smallest_area=area;
smallest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
if (area < 4){
drop_derive++;
cv::drawContours(image, contours, i, Scalar(255,0,0));
}
}
//show datas and images..
return(0);
}
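As for telling the round drops apart from the elongated streaks, one rough heuristic is contour circularity, 4*pi*area / perimeter^2, which is close to 1 for round drops and much smaller for thin streaks. A sketch of what could go inside the second loop above (the 0.7 cutoff is a guess and needs tuning):
double perimeter = arcLength(contours[i], true);
if (perimeter > 0){
    double circularity = 4.0 * CV_PI * area / (perimeter * perimeter);
    if (circularity > 0.7){
        cv::drawContours(image, contours, i, Scalar(0,255,0)); // roughly round: likely a real drop
    } else {
        cv::drawContours(image, contours, i, Scalar(0,0,255)); // elongated: likely a water streak
    }
}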
I have a problem with the code for the SimpleBlobDetector. I can build and run all the code just fine, but the blobs that the program detects are only about the size of a pixel. I've already tried changing params.minArea and params.maxArea, but it doesn't work, so I'm asking you guys for help. By the way, the image I was using is already in grayscale, so it isn't my threshold command that's the problem. Thanks beforehand!
Martin.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main(){
Mat src;
Mat dst;
src = imread("C:\\Users\\martin\\Desktop\\ThermalImage2.png", CV_LOAD_IMAGE_GRAYSCALE); //Load an image from directory path
if (! src.data){
cout << "Could not open or find the image" << endl ; // Look for invalid input
return -1;
}
else{
double thresh = 130; // Threshold
double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
threshold(src, dst, thresh, maxValue, THRESH_BINARY); // threshold the picture src and call it dst
namedWindow("thresholdedPicture", WINDOW_AUTOSIZE); // Create a window
imshow("thresholdedPicture", dst); // display thresholded picture in the window
}
SimpleBlobDetector::Params params; // Set parameters for the object detection
params.minDistBetweenBlobs = 10; //Minimum distance between blobs
params.filterByColor = true;
params.blobColor = 255;
params.filterByArea = true; // filter by area of the blob
params.minArea = 1 ;// Minimum area of the blob
params.maxArea = 100000; // Maximum area of the blob
vector<KeyPoint> keypoints;
cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs
Mat dst_blob_dect; // New array to store the picture with the blobs detected
drawKeypoints( dst, keypoints, dst_blob_dect, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS ); //Drawing a red line around the detected objects
namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
waitKey(0); // Press any key and the main function returns 0
return 0;}
Try this and use different values for params.minDistBetweenBlobs. (The other change from your code is that the circularity, convexity and inertia filters are explicitly switched off.)
#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(){
Mat src;
Mat dst;
src = imread("C:\\Users\\sanche8x\\Pictures\\gather.png", CV_LOAD_IMAGE_GRAYSCALE); //Load an image from directory path
if (! src.data){
cout << "Could not open or find the image" << endl ; // Look for invalid input
return -1;
}
else{
double thresh = 130; // Threshold
double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
threshold(src, dst, thresh, maxValue, THRESH_BINARY); // threshold the picture src and call it dst
namedWindow("thresholdedPicture", WINDOW_AUTOSIZE); // Create a window
imshow("thresholdedPicture", dst); // display thresholded picture in the window
}
SimpleBlobDetector::Params params; // Set parameters for the object detection
params.minDistBetweenBlobs = 10; //Minimum distance between blobs
params.filterByColor = true;
params.blobColor = 255;
params.filterByCircularity = false;
params.filterByConvexity = false;
params.filterByInertia = false;
params.filterByArea = true; // filter by area of the blob
params.minArea = 1 ;// Minimum area of the blob
params.maxArea = 100000; // Maximum area of the blob
vector<KeyPoint> keypoints;
cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs
Mat dst_blob_dect; // New array to store the picture with the blobs detected
drawKeypoints( dst, keypoints, dst_blob_dect, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS ); //Drawing a red line around the detected objects
namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
waitKey(0); // Press any key and the main function returns 0
return 0;
}
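One more note in case you move to OpenCV 3 or later: SimpleBlobDetector becomes an abstract class there, so the detector has to be created through its factory function and used through a Ptr. The equivalent of the two detector lines would be:
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
detector->detect(dst, keypoints);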
I want to capture a video and display it in one window, and have a second window in which the contours are displayed simultaneously. I am struggling with how to get the processed video displayed in the second window. Please analyze my code and suggest a solution, or indicate where I am going wrong; maybe give me some directions to an online tutorial or sources. Thanks.
#include "iostream"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv/ml.h>
#include <opencv/cxcore.h>
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
Mat frame;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
//Capture Video
VideoCapture capCam(1);
if (!capCam.isOpened()){
cout<<"ERROR: Failed to Initialize Camera"<<endl;
return 1;
}
else{
cout<<"Camera Initialized"<<endl;
}
//Create Window
char* ImputFootage = "Source";
namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
imshow(ImputFootage, frame);
char* OutputFootage = "Processed";
namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
imshow(OutputFootage, frame);
while(1){
capCam>> frame;
imshow("Source", frame);
return(1);
if(capCam.read(ImputFootage)){
//Convert Image to gray & blur it
cvtColor( image,
image_gray,
CV_BGR2GRAY );
blur( image_gray,
image_gray2,
Size(3,3) );
//Threshold Gray&Blur Image
threshold(image_gray2,
threshold_output,
thresh,
max_thresh,
THRESH_BINARY);
//2D Container
vector<vector<Point>> contours;
//Find contour points (input image, storage, retrieval mode, approximation method, offset)
findContours(threshold_output,
contours, // a vector of contours
CV_RETR_EXTERNAL,// retrieve the external contours
CV_CHAIN_APPROX_NONE,
Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(),CV_8U,Scalar(255));
drawContours(result,contours,
-1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
}
}
char CheckForEscKey = waitKey(10);
return 1;
}
You should call imshow("Processed", result); after calling drawContours, so that the processed frame actually gets shown in the second window.
You were trying to show frames even before they were captured from the camera. The compiler did not give you an error because the Mats were declared, but they had no data yet. Moreover, you were trying to display the Mat image, but what you capture from the camera is the Mat frame. Also, you lack an exit (Esc) sequence, and your waitKey call was outside the camera loop.
Anyway, here is your code (rewritten); I hope this is what you wanted.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
using namespace cv;
Mat image;
Mat image_gray;
Mat image_gray2;
Mat threshold_output;
Mat frame;
int thresh = 100, max_thresh = 255;
int main(int argc, char** argv)
{
//Capture Video
VideoCapture capCam(0);
if (!capCam.isOpened())
{
cout << "ERROR: Failed to Initialize Camera" << endl;
return 1;
}
else
{
cout << "Camera Initialized" << endl;
}
//Create Window
char* ImputFootage = "Source";
namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
char* OutputFootage = "Processed";
namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
while (1)
{
capCam >> frame;
imshow(ImputFootage, frame);
if (capCam.read(frame))
{
//Convert Image to gray & blur it
cvtColor(frame, image_gray, CV_BGR2GRAY);
blur(image_gray, image_gray2, Size(3, 3));
//Threshold Gray&Blur Image
threshold(image_gray2, threshold_output, thresh, max_thresh, THRESH_BINARY);
//2D Container
vector<vector<Point> > contours;
//Find contour points (input image, storage, retrieval mode, approximation method, offset)
findContours(threshold_output, contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE, Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(), CV_8U, Scalar(255));
drawContours(result, contours, -1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
imshow(OutputFootage, result);
char CheckForEscKey = waitKey(10);
//If the key pressed by user is Esc(ASCII is 27) then break out of the loop
if (CheckForEscKey == 27)
{
break;
}
}
}
return 0;
}
How can I convert a cv::Mat to grayscale?
I am trying to run the drawKeypoints function from OpenCV, but I keep getting an Assertion Failed error. My guess is that it needs to receive a grayscale image rather than a color image as its parameter.
void SurfDetector(cv::Mat img){
vector<cv::KeyPoint> keypoints;
cv::Mat featureImage;
cv::drawKeypoints(img, keypoints, featureImage, cv::Scalar(255,255,255) ,cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
cv::namedWindow("Picture");
cv::imshow("Picture", featureImage);
}
Using the C++ API, the function name has changed slightly, and it is now written as:
#include <opencv2/imgproc/imgproc.hpp>
cv::Mat greyMat, colorMat;
cv::cvtColor(colorMat, greyMat, CV_BGR2GRAY);
The main difficulties are that the function is in the imgproc module (not in core), and that by default cv::Mat images are stored in Blue-Green-Red (BGR) order instead of the more common RGB.
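A quick way to convince yourself of the channel order (a small sketch, assuming colorMat is a 3-channel 8-bit image such as one loaded with imread):
cv::Vec3b px = colorMat.at<cv::Vec3b>(0, 0);  // top-left pixel
uchar blue  = px[0];                          // channel 0 is Blue, not Red
uchar green = px[1];
uchar red   = px[2];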
OpenCV 3
Starting with OpenCV 3.0, there is yet another convention.
Conversion codes are embedded in the namespace cv:: and are prefixed with COLOR.
So the example then becomes:
#include <opencv2/imgproc/imgproc.hpp>
cv::Mat greyMat, colorMat;
cv::cvtColor(colorMat, greyMat, cv::COLOR_BGR2GRAY);
As far as I have seen, the include path hasn't changed (this is not a typo).
This may be helpful for latecomers.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
if (argc != 2) {
cout << "Usage: display_Image ImageToLoadandDisplay" << endl;
return -1;
}else{
Mat image;
Mat grayImage;
image = imread(argv[1], IMREAD_COLOR);
if (!image.data) {
cout << "Could not open the image file" << endl;
return -1;
}
else {
int height = image.rows;
int width = image.cols;
cvtColor(image, grayImage, CV_BGR2GRAY);
namedWindow("Display window", WINDOW_AUTOSIZE);
imshow("Display window", image);
namedWindow("Gray Image", WINDOW_AUTOSIZE);
imshow("Gray Image", grayImage);
cvWaitKey(0);
image.release();
grayImage.release();
return 0;
}
}
}