I have read the official documentation here about the Map class in OpenCV, and I am trying to use the reg module. This is my test image:
This is my code:
#include <opencv2/opencv.hpp>
#include "opencv2/reg/mapshift.hpp"
#include "opencv2/reg/mappergradshift.hpp"
#include "opencv2/reg/mapperpyramid.hpp"
using namespace cv;
using namespace std;
using namespace cv::reg;
Mat highlight1(const Mat src, const Mat t_mask) {
    Mat srcImg = src.clone(), mask = t_mask.clone();
    threshold(mask, mask, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
    cvtColor(mask, mask, COLOR_GRAY2BGR);
    cvtColor(srcImg, srcImg, COLOR_GRAY2BGR);
    dilate(mask - Scalar(0, 0, 255), mask, Mat(), Point(-1, -1), 1);
    return srcImg - mask;
}
int main() {
    Mat img1 = imread("img.jpg", 0);
    Mat img2;

    // Warp original image
    Vec<double, 2> shift(5., 5.);
    MapShift mapTest(shift);
    mapTest.warp(img1, img2);

    // Register
    Ptr<MapperGradShift> mapper = makePtr<MapperGradShift>();
    MapperPyramid mappPyr(mapper);
    Ptr<Map> mapPtr = mappPyr.calculate(img1, img2);
    MapShift* mapShift = dynamic_cast<MapShift*>(mapPtr.get());

    // Display registration result
    Mat result;
    mapShift->inverseWarp(img2, result);
    Mat registration_before = highlight1(img1, img2);
    Mat registration_after = highlight1(img1, result);
    return 0;
}
But as you can see, registration_after is even worse than registration_before. What have I missed?
This is registration_before:
This is registration_after:
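A debugging step worth trying (a sketch, not a confirmed fix): the reg module's own samples convert the images to double precision before calling calculate, and MapShift exposes the estimated shift via getShift(), so you can compare the estimate against the known (5, 5):

// Sketch: run the mapper on double-precision copies (as the reg samples do)
// and print the estimated shift for comparison with the true shift of (5, 5).
Mat img1d, img2d;
img1.convertTo(img1d, CV_64F);
img2.convertTo(img2d, CV_64F);
Ptr<Map> mapPtr2 = mappPyr.calculate(img1d, img2d);
if (MapShift* ms = dynamic_cast<MapShift*>(mapPtr2.get()))
    cout << "estimated shift: " << ms->getShift()[0] << ", " << ms->getShift()[1] << endl;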
I have trained a Haar cascade, and now I need to work with the found objects. How can I crop each one from the original image and show it in a new window (or show multiple windows if I find two objects in the image)? Here is my code (OpenCV version 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the cascade data" << endl;
        return -1;
    }

    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");
    Rect AssignRect = Rect(0, 0, src.cols, src.rows / 2); // search only the top half
    Mat srcImage = src(AssignRect);

    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);

    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3, 3));
    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }

    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of each found object. You just need the left and top coordinates, width, and height of each rectangle, and you already have them. All you need to do is crop each rectangle by creating a Mat from it and showing it in a separate window.
You can check here to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    Rect crop_found(trafficLights[i].x, trafficLights[i].y, trafficLights[i].width, trafficLights[i].height);
    Mat found(src, crop_found);
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}
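One caveat worth adding (not in the original answer): Mat found(src, crop_found) creates a view into src rather than a copy, so anything drawn on src afterwards also appears in the crop. If you want independent copies, clone the ROI; a minimal sketch:

for (int i = 0; i < trafficLights.size(); ++i)
{
    // trafficLights[i] is already a Rect, so it can index src directly;
    // clone() detaches the crop so the later rectangle() call can't bleed into it
    Mat found = src(trafficLights[i]).clone();
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}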
I'm trying to identify drops on a water-sensitive card, as you can see in the figure below. In addition to the drops, there are water streaks that I don't want to count. I'm using OpenCV's findContours function to detect the contours. The question is: can I separate the real drops from the water streaks on the card? Here is an excerpt from my code.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src; Mat src_gray; Mat binary_image, goTo;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
cv::Scalar min_color_scanner = Scalar(0,0,0);
cv::Scalar max_color_scanner = Scalar(255,175,210);
int main(int argc, char** argv){
cv::Mat image, gray, thresh;
// MARK:- Load image, grayscale, Otsu's threshold
image = imread("/Users/user/Documents/Developer/Desktop/OpenCV-Teste3.3.1/normal1.png");
Mat circles_detect;
cvtColor( image, circles_detect, CV_BGR2GRAY );
GaussianBlur( circles_detect, circles_detect, Size(9, 9), 2, 2 );
//END CIRCLES
cvtColor(image, gray, CV_BGR2GRAY);
threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
Mat mask(image.rows, image.cols, CV_8UC3, Scalar(255,255,255));
cv::Mat bgr_image, inRangeImage;
cv::cvtColor(image, bgr_image, CV_RGB2BGR);
cv::inRange(bgr_image, min_color_scanner, max_color_scanner, binary_image);
//Find contours and filter using contour area
vector<vector<Point>> contours;
cv::findContours(thresh, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
// MARK:- data from image
double largest_area=0.0;
int largest_contour_index=0;
double smallest_area=0.0;
int smallest_contour_index=0;
int drop_derive=0;
Rect boundig_rect;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area > largest_area){
largest_area=area;
largest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
}
smallest_area = largest_area;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area < smallest_area){
smallest_area=area;
smallest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
if (area < 4){
drop_derive++;
cv::drawContours(image, contours, i, Scalar(255,0,0));
}
}
//show datas and images..
return(0);
}
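One common way to separate round drops from elongated streaks (a sketch, with a threshold you would have to tune for your cards) is to filter contours by circularity, 4·π·area / perimeter², which is close to 1 for circular blobs and much lower for thin streaks:

// Sketch: keep only roughly circular contours; the 0.7 cutoff is a placeholder.
for (size_t i = 0; i < contours.size(); i++) {
    double area = contourArea(contours[i]);
    double perimeter = arcLength(contours[i], true);
    if (perimeter <= 0) continue;
    double circularity = 4.0 * CV_PI * area / (perimeter * perimeter);
    if (circularity > 0.7)
        drawContours(image, contours, (int)i, Scalar(0, 255, 0), 2); // likely a real drop
}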
I have a video file from which I'm capturing frames. I want to crop a triangle from each captured frame and display it, but my program shows just the source frame.
Here is my code:
cv::Mat Detector::cropRegionOfInterest(cv::Mat& frame)
{
    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };

    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.size(), CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    cv::Mat result(frame.size(), CV_8UC3);
    cv::bitwise_and(frame, mask, result);
    return result;
}
Instead of displaying the source frame, I want it to display the cropped triangle.
Since you're using CV_8UC3 as the type of result, I'm assuming (see the Edit at the end of the answer if that's not the case) that the input image frame also has 3 channels. In that case, I'm a bit surprised that you can even see the non-cropped image, as running your code simply throws an exception on my machine at the call to bitwise_and:
OpenCV(3.4.1) Error: Sizes of input arguments do not match
From the documentation, it seems to me that you can't mix different input and mask types. A quick and dirty solution is to split the input image into a vector of three channels, call bitwise_and for each of them, and then merge them back. The code below works for me:
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;

cv::Mat cropRegionOfInterest(cv::Mat& frame)
{
    const int frameWidth = frame.cols - 1;
    const int frameHeight = frame.rows - 1;

    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };

    int numPoints = 3;
    int numPolygons = 1;

    cv::Mat mask(frame.rows, frame.cols, CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);

    std::vector<cv::Mat> src_channels;
    std::vector<cv::Mat> result_channels;
    cv::split(frame, src_channels);
    for (int idx = 0; idx < 3; ++idx)
    {
        result_channels.emplace_back(frame.rows, frame.cols, CV_8UC1);
        cv::bitwise_and(src_channels[idx], mask, result_channels[idx]);
    }

    cv::Mat result;
    cv::merge(result_channels, result);
    return result;
}

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        printf("usage: DisplayImage.out <Image_Path>\n");
        return -1;
    }

    Mat image;
    image = imread(argv[1], 1);
    if (!image.data)
    {
        printf("No image data \n");
        return -1;
    }

    cv::Mat cropped = cropRegionOfInterest(image);
    namedWindow("cropped Image", WINDOW_AUTOSIZE);
    imshow("cropped Image", cropped);
    waitKey(0);
    return 0;
}
Edit: From your comments it seems that frame is actually grayscale. In that case, never mind all the code above, and just change cv::Mat result(frame.size(), CV_8UC3); to
cv::Mat result(frame.rows, frame.cols, CV_8UC1);
in your original code.
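If the input really is 3-channel, another option (a sketch, not from the original answer) that avoids the split/merge loop is to replicate the mask into 3 channels and apply a single bitwise_and:

cv::Mat mask3;
cv::cvtColor(mask, mask3, cv::COLOR_GRAY2BGR); // replicate the 1-channel mask into 3 channels
cv::Mat result;
cv::bitwise_and(frame, mask3, result);         // now input and mask types match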
I am trying to write code to track hands. I am using the convexity defects function to find fingers, but for some reason, there always seems to be a problem with the last defect.
Here is a picture of what I'm talking about (sorry, I'm new to the forum, so I cannot post images).
The cyan line is the contours, the yellow line is the hull points, and the red lines are the defect points. As you can see the last defect point detects the defect from the wrong side of the contour.
Here is my code:
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
    VideoCapture cap(0);
    Mat src, gray, background, binary, diff;
    cap >> background;
    cvtColor(background, background, CV_BGR2GRAY);

    vector<vector<Point>> contours;
    vector<vector<int>> hullI = vector<vector<int>>(1);
    vector<vector<Point>> hullP = vector<vector<Point>>(1);
    vector<Vec4i> defects;

    while (waitKey(30) != 'q') {
        cap >> src;
        cvtColor(src, gray, CV_BGR2GRAY);
        blur(gray, gray, Size(3, 3));
        absdiff(gray, background, diff);
        threshold(diff, binary, 15, 255, THRESH_BINARY);
        erode(binary, binary, Mat(Size(5, 5), CV_8U));
        imshow("binary", binary);

        findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        if (!contours.empty()) {
            sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
            drawContours(src, contours, 0, Scalar(255, 255, 0));

            convexHull(contours[0], hullI[0]);
            convexHull(contours[0], hullP[0]);
            drawContours(src, hullP, 0, Scalar(0, 255, 255));

            if (hullI[0].size() > 2) {
                convexityDefects(contours[0], hullI[0], defects);
                for (Vec4i defect : defects) {
                    line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
                    line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
                }
            }
        }

        imshow("src", src);
        char key = waitKey(30);
        if (key == 'q') break;
        else if (key == 'p') waitKey();
        else if (key == 'b') {
            cap >> background;
            cvtColor(background, background, CV_BGR2GRAY);
        }
    }
    return 0;
}
I have confirmed through experiments that it is always the last defect in the defects vector that this happens to. Is this a bug in OpenCV, or am I doing something wrong?
I tested your code (with a small modification) with the image below (OpenCV version is 3.2).
As you can see in the result image, it works as expected. You are probably using an old version of OpenCV and getting a buggy result (I think this bug was fixed recently).
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
    //VideoCapture cap(0);
    Mat src, gray, background, binary, diff;
    //cap >> background;
    //cvtColor(background, background, CV_BGR2GRAY);

    vector<vector<Point>> contours;
    vector<vector<int>> hullI = vector<vector<int>>(1);
    vector<vector<Point>> hullP = vector<vector<Point>>(1);
    vector<Vec4i> defects;

    src = imread("hand.png");
    cvtColor(src, gray, CV_BGR2GRAY);
    blur(gray, gray, Size(3, 3));
    threshold(gray, binary, 150, 255, THRESH_BINARY_INV);
    //erode(binary, binary, Mat(Size(5, 5), CV_8U));
    imshow("binary", binary);

    findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    if (!contours.empty()) {
        sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
        drawContours(src, contours, 0, Scalar(255, 255, 0));

        convexHull(contours[0], hullI[0]);
        convexHull(contours[0], hullP[0]);
        drawContours(src, hullP, 0, Scalar(0, 255, 255));

        if (hullI[0].size() > 2) {
            convexityDefects(contours[0], hullI[0], defects);
            for (Vec4i defect : defects) {
                line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
                line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
            }
        }
    }

    imshow("result", src);
    char key = waitKey(0);
    return 0;
}
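If you are stuck on an older OpenCV and the spurious defect persists, a common workaround (a sketch; the threshold is a guess to tune) is to keep only defects with a significant depth. The fourth element of each Vec4i is the depth in 8.8 fixed point, so divide by 256:

for (Vec4i defect : defects) {
    double depth = defect[3] / 256.0;  // fixpt_depth is stored as depth * 256
    if (depth < 20.0) continue;        // placeholder threshold; tune for your setup
    line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
    line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
}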
I have a solution which involves detecting the skin using OpenCV. I implemented it in Python, which you can easily convert to C++.
I obtained the HSV values of the image you uploaded using:
import cv2
import numpy as np

hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
This is the range for the HSV values of human skin:
l = np.array([0, 48, 80], dtype = "uint8")
u = np.array([20, 255, 255], dtype = "uint8")
skin_img = cv2.inRange(hsv_img, l, u)
cv2.imshow("Hand", skin_img)
I then performed morphological dilation and obtained the following:
You can now apply contour hull and find convexity defects.
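Two details worth keeping in mind: OpenCV stores hue in the range [0, 180], so the upper bound of 20 above corresponds to roughly 40 degrees of hue; and for the dilation step, something like cv2.dilate(skin_img, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=2) is a reasonable starting point (the kernel size and iteration count are guesses to tune, not necessarily the values used for the image above).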
I am using OpenCV's grabcut.cpp and graphcut.cpp code, which calls the function grabCut(). I have to find a way to save the background and foreground models computed from one image and apply them to another image. How do I save the bgdModel and fgdModel for future use?
This is the code I have written:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
// Load an image
Mat img = imread("1.png",1);
const Mat* img_0;
img_0= &img;
// Create the mask
Mat mask;
mask.create( img_0->size(), CV_8UC1);
mask.setTo( GC_BGD );
Mat bgdModel, fgdModel;
Rect rect;
rect.x=0;
rect.y=0;
rect.width=img.cols-1;
rect.height=img.rows-1;
(mask(rect)).setTo( Scalar(GC_BGD) );
grabCut(img, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT);
// Save model to file
FileStorage fs("mymodels.xml", FileStorage::WRITE);
fs << "BgdModel" << bgdModel;
fs << "FgdModel" << fgdModel;
fs.release();
// Load another image
Mat img1 = imread( "abc.png", 1);
const Mat* img_1;
img_1= &img1;
// Load models from file
Mat bgdModel1, fgdModel1;
Mat mask1;
FileStorage fs1("mymodels.xml", FileStorage::READ);
fs1["BgdModel"] >> bgdModel1;
fs1["FgdModel"] >> fgdModel1;
fs1.release();
// Create a mask
mask1.create( img_1->size(), CV_8UC1);
mask1.setTo( GC_PR_FGD );
Rect rect1;
rect1.x=0;
rect1.y=0;
rect1.width=0;//img.cols;
rect1.height=0;//img.rows;
(mask1(rect1)).setTo( Scalar(GC_BGD) );
grabCut(img1, mask1, rect1, bgdModel1, fgdModel1, 1, GC_EVAL);
FileStorage fs2("finalmask.xml", FileStorage::WRITE);
fs2 << "Final_MASK" << mask1;
fs2.release();
for (int i=0;i<img1.rows;i++)
{
for(int j=0;j<img1.cols;j++)
{
if(mask1.at<uchar>(i,j)==0||mask1.at<uchar>(i,j)==2)
{
img1.at<cv::Vec3b>(i,j)[0] = 0;
img1.at<cv::Vec3b>(i,j)[1] = 0;
img1.at<cv::Vec3b>(i,j)[2] = 0;
}
}
}
imshow("Result of Grabcut", img1);
waitKey(0);
return 0;
}
I need to ensure that the bgdModel and fgdModel learnt for the first image are applied to the second image too.
You can save the background and foreground models Mat using FileStorage, and then load them back to use with the next image.
This example should clarify this:
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    {
        // Load an image
        Mat3b img = imread("path_to_image");

        // Create the mask
        Mat1b mask(img.rows, img.cols, uchar(GC_PR_BGD));
        circle(mask, Point(img.cols / 2, img.rows / 2), 20, Scalar(GC_FGD), -1);

        Mat bgdModel, fgdModel;
        grabCut(img, mask, Rect(), bgdModel, fgdModel, 1);

        imshow("Mask", mask);
        waitKey(1);

        // Save models to file
        {
            FileStorage fs("mymodels.yml", FileStorage::WRITE);
            fs << "BgdModel" << bgdModel;
            fs << "FgdModel" << fgdModel;
        }
    }
    {
        // Load another image
        Mat3b img = imread("path_to_another_image");

        // Load models from file
        Mat bgdModel, fgdModel;
        {
            FileStorage fs("mymodels.yml", FileStorage::READ);
            fs["BgdModel"] >> bgdModel;
            fs["FgdModel"] >> fgdModel;
        }

        // Create a mask
        Mat1b mask(img.rows, img.cols, uchar(GC_PR_BGD));
        circle(mask, Point(img.cols / 2, img.rows / 2), 20, Scalar(GC_FGD), -1);

        grabCut(img, mask, Rect(), bgdModel, fgdModel, 1);

        imshow("Other Mask", mask);
        waitKey(1);
    }
    return 0;
}
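One detail worth noting about the calls above: the mode argument of grabCut() defaults to GC_EVAL, which resumes iterating with the models you pass in, so the loaded bgdModel/fgdModel are used but may still be refined on the new image. A sketch of making that explicit (GC_EVAL_FREEZE_MODEL is my assumption for OpenCV 4.x, where it keeps the models completely fixed; verify it exists in your version):

// Explicitly resume with the loaded models (same as the default mode).
grabCut(img, mask, Rect(), bgdModel, fgdModel, 1, GC_EVAL);

// OpenCV 4.x only: label the new image without updating the loaded models.
// grabCut(img, mask, Rect(), bgdModel, fgdModel, 1, GC_EVAL_FREEZE_MODEL);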