What is the output from OpenCV's dense optical flow (Farneback) function? (C++)

First of all, sorry for my bad English.
I'm using dense optical flow, specifically Gunnar Farneback's method. The code works fine.
Here's the code:
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/video/tracking.hpp"
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
// Display the results of the matches
//
int main(int argc, char** argv)
{
cv::Mat res, img1, img2, img2Original, img2OriginalC;
cv::VideoWriter writer;
cv::VideoCapture cap;
//cv::VideoCapture cap("prueba1.mp4");
// cap.open(std::string(argv[1]));
cap.open(0);
cv::namedWindow("cat", cv::WINDOW_AUTOSIZE);
cap >> img1;
cvtColor(img1, img1, COLOR_BGR2GRAY);
double fps = cap.get(cv::CAP_PROP_FPS);
//cv::Size tamano((int)cap.get(cv::CAP_PROP_FRAME_WIDTH), (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT));
Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
writer.open("mouse.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, S);
for (;;) {
cap >> img2;
if (img2.empty()) break;
img2.copyTo(img2OriginalC);
cvtColor(img2, img2, COLOR_BGR2GRAY);
img2.copyTo(img2Original);
cv::calcOpticalFlowFarneback(img1, img2, res, .4, 1, 12, 2, 8, 1.2, 0);
for (int y = 0; y < img2.rows; y += 5) {
for (int x = 0; x < img2.cols; x += 5)
{
// get the flow at position (y, x); scale the vector (e.g. by 3) for better visibility
const Point2f flowatxy = res.at<Point2f>(y, x) * 1;
// draw line at flow direction
line(img2OriginalC, Point(x, y), Point(cvRound(x + flowatxy.x), cvRound(y + flowatxy.y)), Scalar(255, 0, 0));
// draw initial point
circle(img2OriginalC, Point(x, y), 1, Scalar(0, 0, 0), -1);
}
}
img2Original.copyTo(img1);
imshow("cat", img2OriginalC);
writer << img2OriginalC;
cout <<"Fps"<<endl;
cout <<fps<< endl;
for( int i=3; i>=0; i--)
{
//cout<< res << endl;
//return 0;
}
if (cv::waitKey(1) == 27) break;
}
cap.release();
return 0;
}
I think I know what the output matrix is, but I'm not sure at all. Also, I don't know what the values mean. Finally, does anyone know how to use that output data?
Thanks
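For what it's worth, res is a CV_32FC2 matrix of the same size as the input frames: res.at<Point2f>(y, x) holds the estimated per-pixel displacement (dx, dy) from img1 to img2, in pixels, which is exactly what the line-drawing loop above visualises. Below is a minimal, untested sketch of one common way to use that field, converting it to magnitude and direction and then to a colour image; the helper name flowToHsvVis is made up for illustration and is not part of the original code:
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
// Turn a CV_32FC2 flow field into a BGR visualisation: hue encodes direction,
// brightness encodes speed. Pass in the `res` computed by calcOpticalFlowFarneback.
cv::Mat flowToHsvVis(const cv::Mat& flow)
{
std::vector<cv::Mat> xy;
cv::split(flow, xy); // xy[0] = dx, xy[1] = dy
cv::Mat magnitude, angle;
cv::cartToPolar(xy[0], xy[1], magnitude, angle, true); // angle in degrees
cv::normalize(magnitude, magnitude, 0, 255, cv::NORM_MINMAX);
cv::Mat hsvChannels[3], hsv, bgr;
angle.convertTo(hsvChannels[0], CV_8U, 0.5); // 0..360 deg -> 0..180 hue
hsvChannels[1] = cv::Mat(flow.size(), CV_8U, cv::Scalar(255)); // full saturation
magnitude.convertTo(hsvChannels[2], CV_8U); // brightness = speed
cv::merge(hsvChannels, 3, hsv);
cv::cvtColor(hsv, bgr, cv::COLOR_HSV2BGR);
return bgr;
}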

Related

Why is the video performing very slowly with deep learning in OpenCV?

This is hand keypoint detection using a Caffe deep learning model in OpenCV. The problem is that when I run the code it performs very slowly, to the point where the video freezes for a while, but when I try it with a static image it performs normally. I have declared the Net class outside of the while loop too, but it is still the same: the video does not run smoothly. How can I make the video run smoothly?
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
const int POSE_PAIRS[20][2] =
{
{0,1}, {1,2}, {2,3}, {3,4}, // thumb
{0,5}, {5,6}, {6,7}, {7,8}, // index
{0,9}, {9,10}, {10,11}, {11,12}, // middle
{0,13}, {13,14}, {14,15}, {15,16}, // ring
{0,17}, {17,18}, {18,19}, {19,20} // small
};
string protoFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_deploy.prototxt";
string weightsFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_iter_102000.caffemodel";
int nPoints = 22;
int main(int argc, char **argv)
{
float thresh = 0.01;
VideoCapture cap(0);
Mat frame, frameCopy;
int frameWidth = cap.get(CAP_PROP_FRAME_WIDTH);
int frameHeight = cap.get(CAP_PROP_FRAME_HEIGHT);
float aspect_ratio = frameWidth/(float)frameHeight;
int inHeight = 368;
int inWidth = (int(aspect_ratio*inHeight) * 8) / 8;
cout << "inWidth = " << inWidth << " ; inHeight = " << inHeight << endl;
Net net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_TARGET_CPU);
while(1)
{
cap >> frame;
frameCopy = frame.clone();
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
net.setInput(inpBlob);
Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
// find the position of the body parts
vector<Point> points(nPoints);
for (int n=0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0,n));
resize(probMap, probMap, Size(frameWidth, frameHeight));
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
circle(frameCopy, cv::Point((int)maxLoc.x, (int)maxLoc.y), 8, Scalar(0,255,255), -1);
putText(frameCopy, cv::format("%d", n), cv::Point((int)maxLoc.x, (int)maxLoc.y), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
points[n] = maxLoc;
}
int nPairs = sizeof(POSE_PAIRS)/sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x<=0 || partA.y<=0 || partB.x<=0 || partB.y<=0)
continue;
line(frame, partA, partB, Scalar(0,255,255), 8);
circle(frame, partA, 8, Scalar(0,0,255), -1);
circle(frame, partB, 8, Scalar(0,0,255), -1);
}
// imshow("Output-Keypoints", frameCopy);
imshow("Output-Skeleton", frame);
waitKey(1);
}
return 0;
}

How to draw a rectangle on the Aruco marker in opencv?

I have a program to draw and detect the ArUco markers and write the marker id on them. I need a rectangle to appear on each marker instead of the marker id. I could draw a rectangle, but only at a fixed position, not on the marker. Here is the code:
#include <opencv2\highgui.hpp>
#include <opencv2\aruco.hpp>
#include <opencv2\core.hpp>
#include <opencv2\imgcodecs.hpp>
#include <opencv2\imgproc.hpp>
#include <opencv2\calib3d.hpp>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char *argv[]) {
cv::VideoCapture inputVideo;
inputVideo.open(0);
Mat outputMarker;
auto markerDict = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
for (int i = 0; i < 50; i++) {
aruco::drawMarker(markerDict, i, 500, outputMarker, 1);
ostringstream convert;
String imageName = "4x4marker_";
convert << imageName << i << ".jpg";
imwrite(convert.str(), outputMarker);
while (inputVideo.grab()) {
cv::Mat image, imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
std::vector<int> ids;
std::vector<std::vector<cv::Point2f> > corners;
cv::aruco::detectMarkers(image, markerDict, corners, ids);
// if at least one marker detected
if (ids.size() > 0)
cv::aruco::drawDetectedMarkers(imageCopy, corners, ids);
int x = 0;
int y = 3;
rectangle(imageCopy, Point(imageCopy.cols/2, imageCopy.rows/2),
Point(x,y),Scalar::all(255), -1, 8, 0);
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(5);
if (key == 27)
break;
}
}
}
An example marker to make testing the code easier.
When you use the detectMarkers function, it returns the corners of each detection. In your case, you are putting it into the std::vector<std::vector<cv::Point2f> > corners. To draw the rectangles you ask for, you can do something similar to (the following code is an example and has not been tested):
for (size_t i = 0; i < corners.size(); ++i)
{
cv::Point2f p0(image.cols, image.rows);
cv::Point2f p1(0, 0);
for (auto p: corners[i])
{
if (p.x < p0.x)
p0.x = p.x;
if (p.y < p0.y)
p0.y = p.y;
if (p.x > p1.x)
p1.x = p.x;
if (p.y > p1.y)
p1.y = p.y;
}
rectangle(imageCopy, p0, p1,Scalar::all(255), -1, 8, 0);
}
However, you probably want to draw a polygon that better fits the marker, as the marker's projection onto the image will not be a rectangle unless the marker is perfectly perpendicular to the camera's optical axis and there is no distortion. For that, you can use fillPoly, or line if you don't want it filled.
Here is a fully functional example tested with OpenCV 4.3.0-pre and 3.2.0 in Ubuntu and 3.4.9 in Windows:
#include <iostream>
#include <opencv2/aruco.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
int main(int argc, char** argv)
{
// Check OpenCV version
std::cout << "Using OpenCV version: " << CV_VERSION << std::endl;
// Create video input
cv::VideoCapture inputVideo;
int input_source(0);
if (argc > 1)
input_source = std::atoi(argv[1]);
if (!inputVideo.open(input_source))
{
std::cerr << "Error opening input video source: " << input_source << std::endl;
return EXIT_FAILURE;
}
// Create marker dictionary
auto marker_dict = cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
// Get imshow ready
cv::namedWindow("Display window", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
cv::resizeWindow("Display window", 800, 600);
// Grab images until escape is pressed
int key = 0;
while (key != 27 && inputVideo.grab())
{
// Retrieve image
cv::Mat image;
inputVideo.retrieve(image);
// Get image output ready
cv::Size image_size = image.size();
cv::Mat out_image(image_size.height, 3 * image_size.width, CV_8UC3);
cv::Mat left(out_image, cv::Rect(0, 0, image_size.width, image_size.height));
image.copyTo(left);
cv::Mat mid(out_image, cv::Rect(image_size.width, 0, image_size.width, image_size.height));
image.copyTo(mid);
cv::Mat right(out_image, cv::Rect(2 * image_size.width, 0, image_size.width, image_size.height));
image.copyTo(right);
// Add names to images
int corner_offset = 50;
cv::putText(left, "Original image", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX, 1.0,
CV_RGB(0, 0, 0), 2);
cv::putText(mid, "Image with OpenCV drawing", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX, 1.0,
CV_RGB(0, 0, 0), 2);
cv::putText(right, "Image with custom drawing", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX,
1.0, CV_RGB(0, 0, 0), 2);
// Detect markers
std::vector<int> ids;
std::vector<std::vector<cv::Point2f> > corners;
cv::aruco::detectMarkers(image, marker_dict, corners, ids);
// Draw markers using opencv tool
cv::aruco::drawDetectedMarkers(mid, corners, ids);
// Draw markers custom
for (size_t i = 0; i < corners.size(); ++i)
{
// Convert to integer points
int num = static_cast<int>(corners[i].size());
std::vector<cv::Point> points;
for (size_t j = 0; j < corners[i].size(); ++j)
points.push_back(cv::Point(static_cast<int>(corners[i][j].x), static_cast<int>(corners[i][j].y)));
const cv::Point* pts = &(points[0]);
// Draw
cv::fillPoly(right, &pts, &num, 1, cv::Scalar(255, 0, 0));
// Draw contour
for (size_t j = 0; j < corners[i].size(); ++j)
{
size_t next = (j + 1) % corners[i].size();
cv::line(right, corners[i][j], corners[i][next], cv::Scalar(0, 255, 0), 5);
}
}
// Display
cv::imshow("Display window", out_image);
key = cv::waitKey(5);
}
return EXIT_SUCCESS;
}
This is the output you will get with this code:
Here is a marker to test.

How do I take out a plane from image, modify it and insert it back?

I am trying to take out the ground plane, draw grids on it for path mapping, and insert it back into the image. I am using the findHomography and warpPerspective functions to do so. But when I switch the points to insert the modified plane back, everything except the plane becomes black in the image.
I have tried to do it using an intermediate image but the result is the same.
#include "pch.h"
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("imagesindoor.jpg");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, im_src, p, size2);
// Show image
//imshow("Image", im_dst);
imshow("Image2", im_src);
waitKey(0);
return 0;
}
addWeighted can be used to blend the current result with the source image to get the expected result.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("test.png");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat t;
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, t, p, size2);
// Show image
//imshow("Image", im_dst);
std::cout << "t :" <<t.cols << ", " <<t.rows <<std::endl;
Mat final;
addWeighted(im_src, 0.5, t, 0.5, 0, final);
imshow("Image2", final);
waitKey(0);
return 0;
}

Video takes a very long time to load and process (OpenCV)

I'm a novice programmer with very little experience. I have code to detect people using an SVM classifier and a HOG descriptor. The video takes a very long time to load and process. Please help me with this problem.
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <conio.h>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
string filename = "Street.MP4";
VideoCapture capture(filename);
Mat frame;
//namedWindow("w", 1);
while (true)
{
capture >> frame;
if (frame.empty())
break;
Mat img, res;
Mat framecopy = frame.clone();
resize(framecopy, img, Size(2 * framecopy.cols, 2 * framecopy.rows));
int nbins = 9;
Size cellSize(8, 8);
Size blockSize(16, 16);
Size blockStride(8, 8);
Size winSize(64, 128);
Size winStride(4, 4);
HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, nbins);
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
assert(hog.checkDetectorSize());
vector<Rect> locations;
vector<double> weights;
hog.detectMultiScale(img, locations, weights, 0.0, winStride, Size(), 1.05, 2., true);
resize(img, res, Size(framecopy.cols / 2, framecopy.rows / 2));
for (size_t i = 0; i < locations.size(); ++i)
{
Rect detection = locations[i];
detection.x /= 2;
detection.y /= 2;
detection.width /= 2;
detection.height /= 2;
rectangle(res, detection, Scalar(0, 0, 255), 2);
}
imshow("w", res);
waitKey(20); // waits to display frame
}
waitKey();
return 0;
}
Don't create the HOG detector in each iteration.
Try:
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <conio.h>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
string filename = "Street.MP4";
VideoCapture capture(filename);
Mat frame;
//namedWindow("w", 1);
int nbins = 9;
Size cellSize(8, 8);
Size blockSize(16, 16);
Size blockStride(8, 8);
Size winSize(64, 128);
Size winStride(4, 4);
HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, nbins);
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
assert(hog.checkDetectorSize());
while (true)
{
capture >> frame;
if (frame.empty())
break;
Mat img, res;
Mat framecopy = frame.clone();
resize(framecopy, img, Size(2 * framecopy.cols, 2 * framecopy.rows));
vector<Rect> locations;
vector<double> weights;
hog.detectMultiScale(img, locations, weights, 0.0, winStride, Size(), 1.05, 2., true);
resize(img, res, Size(framecopy.cols / 2, framecopy.rows / 2));
for (size_t i = 0; i < locations.size(); ++i)
{
Rect detection = locations[i];
detection.x /= 2;
detection.y /= 2;
detection.width /= 2;
detection.height /= 2;
rectangle(res, detection, Scalar(0, 0, 255), 2);
}
imshow("w", res);
waitKey(1); // waits to display frame
}
waitKey();
return 0;
}
But keep in mind that HOG detection is quite an expensive operation. What's the resolution of your images?
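If the frames are large, one common mitigation is to run the detector on a downscaled copy of each frame and map the detections back to full resolution. The following is a rough, untested sketch under assumptions (the function name and the scale factor are mine, not from the original post):
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>
// Sketch: detect people on a half-resolution copy to cut the HOG cost, then
// rescale the resulting rectangles to the original frame and draw them.
void detectPeopleDownscaled(const cv::HOGDescriptor& hog, cv::Mat& frame, double scale = 0.5)
{
cv::Mat reduced;
cv::resize(frame, reduced, cv::Size(), scale, scale);
std::vector<cv::Rect> found;
hog.detectMultiScale(reduced, found, 0.0, cv::Size(4, 4), cv::Size(), 1.05, 2.0);
for (cv::Rect r : found)
{
// Map the detection back to the full-resolution frame.
r.x = cvRound(r.x / scale);
r.y = cvRound(r.y / scale);
r.width = cvRound(r.width / scale);
r.height = cvRound(r.height / scale);
cv::rectangle(frame, r, cv::Scalar(0, 0, 255), 2);
}
}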

OpenCV Error : Assertion failed when using fitLine

I want to use the fitLine function to come up with a line to draw on my source image src_crop. I load the frame in my main() and call the drawLine().
But the code aborts with the following error :
Code:
#include "stdafx.h"
#include <fstream>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
/// Global variables
Mat src_gray;
Mat src_crop;
Mat dst, detected_edges;
int edgeThresh = 1;
int lowThreshold = 27;
int const max_lowThreshold = 100;
int ratio = 3;
int kernel_size = 3;
char* window_name = "Edge Map";
int i,j;
void drawLine(int, void*)
{
vector<Vec4f> outline;
vector<Point2f> ssline;
int flag2 = 0;
/// Reduce noise with a kernel 3x3
blur(src_gray, detected_edges, Size(3, 3));
/// Canny detector
Canny(detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size);
/// Using Canny's output as a mask, we display our result
dst.create(detected_edges.size(), detected_edges.type());
dst = Scalar::all(0);
src_crop.copyTo(dst, detected_edges);
//namedWindow("Detected Edges", CV_WINDOW_AUTOSIZE);
//imshow("Detected Edges", detected_edges);
cvtColor(dst, dst, CV_BGR2GRAY);
for (j = 0; j < dst.cols; j++)
{
for (i = 0; i < dst.rows; i++)
{
if (Scalar(dst.at<uchar>(i,j)).val[0] >= 90)
{
//cout << "Hi";
flag2 = 1;
break;
}
}
if (flag2 == 1)
break;
}
int k = j;
int l = i;
for (j = k; j < dst.cols; j++)
{
Point2f ss = Point2f(l,j);
ssline.push_back(ss);
}
fitLine(ssline, outline, CV_DIST_L1, 0, 0.01, 0.01);
//imshow("Result", src_crop);
}
int main(int argc, char** argv)
{
/// Load an image
src = imread(s);
if (!src.data)
{
return -1;
}
/// Create a matrix of the same type and size as src (for dst)
//dst.create(src.size(), src.type());
src_crop = src;
/// Convert the image to grayscale
cvtColor(src_crop, src_gray, CV_BGR2GRAY);
/// Create a window
namedWindow(window_name, CV_WINDOW_AUTOSIZE);
/// Create a Trackbar for user to enter threshold
createTrackbar("Min Threshold:", window_name, &lowThreshold, max_lowThreshold, drawLine);
/// Show the image
drawLine(0, 0);
if (waitKey(30) >= 0) break;
return 0;
}
The code stops working at the point where fitLine() is called; I found this by testing the code with printf statements.
Can anyone kindly help me solve the issue?
Aside from the fact that your code won't compile, the issue is that you're passing the parameter outline to fitLine as a vector<Vec4f>, while it should be a Vec4f.
Change the declaration of outline to:
Vec4f outline;
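For reference, here is a small self-contained sketch (my own example, not from the question) showing how fitLine fills that Vec4f for 2D input points: the first two components are a normalised direction vector (vx, vy) and the last two are a point (x0, y0) on the fitted line. cv::DIST_L1 is the newer name for CV_DIST_L1.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <vector>
int main()
{
// A few roughly collinear 2D points.
std::vector<cv::Point2f> pts = { {0.f, 1.f}, {1.f, 3.1f}, {2.f, 4.9f}, {3.f, 7.f} };
cv::Vec4f fitted; // (vx, vy, x0, y0)
cv::fitLine(pts, fitted, cv::DIST_L1, 0, 0.01, 0.01);
float vx = fitted[0], vy = fitted[1], x0 = fitted[2], y0 = fitted[3];
std::cout << "direction = (" << vx << ", " << vy << "), point on line = (" << x0 << ", " << y0 << ")\n";
// To draw the line on an image, pick two points far along the direction:
cv::Point p1(cvRound(x0 - 1000 * vx), cvRound(y0 - 1000 * vy));
cv::Point p2(cvRound(x0 + 1000 * vx), cvRound(y0 + 1000 * vy));
std::cout << "draw from " << p1 << " to " << p2 << "\n";
// e.g. line(img, p1, p2, Scalar(0, 0, 255), 2); on some image `img`
return 0;
}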