I am trying to find the optical flow between two sequences of images. When I run the program, an assertion fails inside the calcOpticalFlowFarneback() function. Can anyone help me with this issue?
#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
using namespace cv;
using namespace std;
void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step, double scale, const Scalar& color)
{
    for (int y = 0; y < cflowmap.rows; y += step)
        for (int x = 0; x < cflowmap.cols; x += step)
        {
            const Point2f& fxy = flow.at<Point2f>(y, x);
            line(cflowmap, Point(x, y), Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), color);
            circle(cflowmap, Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), 1, color, -1);
        }
}
int main()
{
    Mat flow, cflow;
    Mat Previous_Gray = imread("image2.png");
    Mat Current_Gray = imread("image3.png");
    cvtColor(Current_Gray, Current_Gray, CV_BGR2GRAY);
    cvNamedWindow("optical Flow", CV_WINDOW_NORMAL);
    calcOpticalFlowFarneback(Previous_Gray, Current_Gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
    cvtColor(Previous_Gray, cflow, CV_GRAY2BGR);
    drawOptFlowMap(flow, cflow, 32, 50, CV_RGB(0, 255, 0));
    imshow("optical Flow", cflow);
    return 0;
}
The overload Mat imread(const string& filename, int flags=1), which is what you are calling in the line Mat Previous_Gray = imread("image2.png");, returns a 3-channel BGR image. That is why part of the assertion
prev0.channels() == next0.channels() == 1
is false. Did you forget to convert Previous_Gray before calling calcOpticalFlowFarneback?
http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#imread
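A minimal fix along those lines, converting both frames to single-channel before the call (assuming both files load as 3-channel color images):

Mat Previous_Gray = imread("image2.png");
Mat Current_Gray = imread("image3.png");
// calcOpticalFlowFarneback requires two 1-channel images of equal size
cvtColor(Previous_Gray, Previous_Gray, CV_BGR2GRAY);
cvtColor(Current_Gray, Current_Gray, CV_BGR2GRAY);
calcOpticalFlowFarneback(Previous_Gray, Current_Gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

Equivalently, load the images as grayscale in the first place with imread("image2.png", 0) and drop the conversions.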
Related
I was trying to use the findEssentialMat function to produce an essential matrix and kept getting an empty matrix, even with very low probability and high threshold values. I made reproducible code that tries to compute the essential matrix from a still image, and I still get no essential matrix. I'm not sure why this is happening.
Code:
int main(int argc, char** argv) {
    Mat in = imread("test.jpg", IMREAD_GRAYSCALE);
    std::vector<KeyPoint> keypoints;
    std::vector<Point2f> points;
    std::vector<Point2f> prevPoints;
    std::vector<uchar> status;
    points = featureDetection(in, keypoints, 30);
    prevPoints = std::vector<Point2f>(points);
    double focal = 0;
    Point2d opticalCenter(in.rows / 2, in.cols / 2);

    // Track features
    featureTracking(in, in, points, prevPoints, status);

    // FIXME RANSAC algorithm not working. Try LMEDS?
    Mat E, mask;
    E = findEssentialMat(points, prevPoints, focal, opticalCenter, RANSAC, 0.001, 100.0, mask);
    Mat R, t;
    if (E.size().area() == 0) {
        std::cout << mask.size().area() << " points, essential matrix is empty\n";
    } else {
        recoverPose(E, points, prevPoints, R, t, focal, opticalCenter, mask);
    }

    // Draw tracked features (this frame)
    for (int i = 0; i < prevPoints.size(); i++) {
        // Tracking lines
        line(in, points[i], prevPoints[i], Scalar(0, 100, 0), 5, LineTypes::LINE_4);
    }

    // Show output
    imshow("Data", in);
    char c = waitKey(0);
    imwrite("out.jpg", in);
    return 0;
}
std::vector<Point2f> featureDetection(const Mat& imgIn, std::vector<KeyPoint>& pointsOut, int threshold) {
    bool nonmaxSuppression = true;
    FAST(imgIn, pointsOut, threshold, nonmaxSuppression);
    std::vector<Point2f> points(0);
    for (KeyPoint p : pointsOut) {
        points.push_back(p.pt);
    }
    return points;
}
void featureTracking(const Mat& img_1, const Mat& img_2, std::vector<Point2f>& points1, std::vector<Point2f>& points2, std::vector<uchar>& status) {
    // this function automatically gets rid of points for which tracking fails
    std::vector<float> err;
    Size winSize = Size(21, 21);
    TermCriteria termcrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.01);
    cv::calcOpticalFlowPyrLK(img_1, img_2, points1, points2, status, err, winSize, 3, termcrit, 0, 0.001);
    // getting rid of points for which the KLT tracking failed or which have gone outside the frame
    int indexCorrection = 0;
    for (int i = 0; i < status.size(); i++) {
        Point2f pt = points2.at(i - indexCorrection);
        if ((status.at(i) == 0) || (pt.x < 0) || (pt.y < 0)) {
            if ((pt.x < 0) || (pt.y < 0)) {
                status.at(i) = 0;
            }
            points1.erase(points1.begin() + i - indexCorrection);
            points2.erase(points2.begin() + i - indexCorrection);
            indexCorrection++;
        }
    }
}
Input:
Output (Markers denoted by *):
I'm using OpenCV 4.5.4 built for MinGW
It looks like the findEssentialMat function does not work with a focal length of 0; setting it to 1 fixed the issue!
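A minimal sketch of the corrected call against the question's code. The focal value is the actual fix; restoring the documented defaults of prob = 0.999 and threshold = 1.0 (instead of 0.001 and 100.0) and putting cols in the x coordinate of the principal point are assumptions about intent:

// focal = 0 makes the implied camera matrix singular, so the point
// normalization inside findEssentialMat breaks down; use the real focal
// length in pixels, or 1.0 for already-normalized coordinates.
double focal = 1.0;
Point2d opticalCenter(in.cols / 2, in.rows / 2);

Mat E, mask;
E = findEssentialMat(points, prevPoints, focal, opticalCenter, RANSAC, 0.999, 1.0, mask);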
For some background, I am compiling in Visual Studio 2019 and running the code inside LabVIEW 2017. The reason I am using LabVIEW is research to control a robotic gantry. This is the vision system, and it is supposed to detect rectangles (wirebond pads for silicon detectors).
I need it to at least show me a picture or something, but when I run it in LabVIEW, it just says it is not responding and makes me hard-close the program. So frustrating! If there are no huge errors in my C++ code, then I know I have to dig deeper into my LabVIEW code.
The following code is my problem. I am fairly new to C++ and programming in general. I have stepped through each line inside LabVIEW, and it stops responding when it starts to grab nominalHeight, xfov, etc., or just when it goes into the WBPdetection function in general.
Any help is much appreciated, or if someone could just point me in the right direction.
#include "stdafx.h"
#include "utils.h"
#include "WBPdetection.h"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <vector>
using namespace std;
void show3(cv::Mat img)
{
    cv::namedWindow("MyWindow", cv::WINDOW_AUTOSIZE);
    cv::imshow("MyWindow", img);
    cv::waitKey(0);
    cv::destroyWindow("MyWindow");
}
__declspec(dllexport) int __cdecl WBPdetection(
    char* imgPtr,
    int imgLineWidth,
    int imgWidth,
    int imgHeight,
    double percent_size,
    double nominalWidth,
    double nominalHeight,
    double tolerance,
    double xfov,
    double yfov)
{
    cv::Mat img(imgHeight, imgWidth, CV_8U, (void*)imgPtr, imgLineWidth);
    cv::resize(img, img, cv::Size(img.cols * percent_size, img.rows * percent_size), 0, 0);

    //PREPPING IMAGE FOR DETECTION ALGORITHM
    cv::threshold(img, img, 125, 255, cv::THRESH_OTSU);
    cv::GaussianBlur(img, img, cv::Size(5, 5), 0);
    cv::erode(img, img, cv::Mat(), cv::Point(-1, -1), 2, 1, 1);
    cv::dilate(img, img, cv::Mat(), cv::Point(-1, -1), 1, 1, 1);

    //USE FIND CONTOURS ALGORITHM
    vector<vector<cv::Point>> contours;
    vector<cv::Vec4i> hierarchy;
    cv::findContours(img, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    vector<vector<cv::Point>> contours_poly(contours.size());
    vector<cv::Rect> boundRect(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 3, true);
        boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
    }
    vector<vector<double>> dimRects; //ex [ [w1,h1], [w2,h2], [w3,h3], ...]
    vector<cv::Point> centerRects; //ex [ [c1], [c2], [c3], ... ]

    //PUTTING DIMENSIONS OF ALL RECTANGLES IN VECTORS
    for (int i = 0; i < contours.size(); i++)
    {
        cv::Point center = ((boundRect[i].tl().x + boundRect[i].br().x) / 2, (boundRect[i].tl().y + boundRect[i].br().y) / 2); //what about even pixels
        double rectWidth = (boundRect[i].br().x - boundRect[i].tl().x) * (xfov / img.cols); //might not matter tbh
        double rectHeight = (boundRect[i].tl().y - boundRect[i].br().y) * (yfov / img.rows);
        dimRects[i].push_back(rectWidth);
        dimRects[i].push_back(rectHeight);
        centerRects.push_back(center);
    }

    //DEFINING minWidth, etc... FROM tolerance AND nominalWidth
    double minWidth = nominalWidth * (1 - tolerance);
    double maxWidth = nominalWidth * (1 + tolerance);
    double minHeight = nominalHeight * (1 - tolerance);
    double maxHeight = nominalHeight * (1 + tolerance);

    //DRAWING CONTOURS AND BOUNDING RECTANGLE + CENTER
    for (int i = 0; i < dimRects.size(); i++)
    {
        cv::Scalar color = cv::Scalar(255, 255, 255); //creates color
        if ((dimRects[i][0] > minWidth && dimRects[i][0] < maxWidth) && (dimRects[i][1] > minHeight && dimRects[i][1] < maxHeight))
        {
            drawContours(img, contours_poly, i, color, 1, 8, vector<cv::Vec4i>(), 0, cv::Point());
            rectangle(img, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0);
            circle(img, centerRects[i], 1, cv::Scalar(0, 0, 255), 1, cv::LINE_8);
        }
    }
    show3(img);
    return 0;
}
Well, there is one error here:
vector<vector<double>> dimRects;
...
for (int i = 0; i < contours.size(); i++)
{
    ...
    dimRects[i].push_back(rectWidth);
    dimRects[i].push_back(rectHeight);
dimRects has zero size, but your code treats it as if it has the same size as contours.
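A minimal fix is to size the outer vector to match contours before indexing into it:

// one (initially empty) inner vector per contour, so dimRects[i] is valid
vector<vector<double>> dimRects(contours.size());
...
for (int i = 0; i < contours.size(); i++)
{
    ...
    dimRects[i].push_back(rectWidth);
    dimRects[i].push_back(rectHeight);

Alternatively, declare it empty and push_back a fresh two-element vector per contour.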
The overall goal is to be able to read the histogram of a binary image in order to crop the image.
My code works, but for my binary image the histogram is not showing properly (it is blank).
Can anybody tell me what's wrong with my code?
The histogram works for the RGB image as well as the grey image; I would like to be able to get the histogram of the binary image as well.
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace std;
using namespace cv;
void show_histogram(std::string const& name, cv::Mat1b const& image)
{
    // Set histogram bins count
    int bins = 255;
    int histSize[] = { bins };
    // Set ranges for histogram bins
    float lranges[] = { 0, 255 };
    const float* ranges[] = { lranges };
    // create matrix for histogram
    cv::Mat hist;
    int channels[] = { 0 };
    // create matrix for histogram visualization
    int const hist_height = 255;
    cv::Mat1b hist_image = cv::Mat1b::zeros(hist_height, bins);
    cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges, true, false);
    double max_val = 0;
    minMaxLoc(hist, 0, &max_val);
    // visualize each bin
    for (int b = 0; b < bins; b++) {
        float const binVal = hist.at<float>(b);
        int const height = cvRound(binVal * hist_height / max_val);
        cv::line(hist_image,
                 cv::Point(b, hist_height - height), cv::Point(b, hist_height),
                 cv::Scalar::all(255));
    }
    cv::imshow(name, hist_image);
}
int main()
{
    Mat Rgb;
    Mat Grey;
    Mat Binary;
    //Mat Histogram;
    Rgb = imread("license.jpg", IMREAD_COLOR); // note: WINDOW_AUTOSIZE is not an imread flag
    cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
    threshold(Grey, Binary, 150, 250, THRESH_BINARY);
    //namedWindow("RGB");
    //namedWindow("Grey");
    namedWindow("Binary");
    //imshow("RGB", Rgb);
    imshow("Gray", Grey);
    imshow("Binary", Binary);
    show_histogram("Histogram1", Grey);
    show_histogram("Histogram2", Binary);
    waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
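For reference, a minimal full-range sketch using the question's Binary image: calcHist's uniform ranges have an exclusive upper bound, so 256 bins over {0, 256} guarantee the top value is counted (with the maxval of 250 used above, the two populated bins are 0 and 250):

// Histogram of a single-channel 8-bit image over the full [0, 255] range.
cv::Mat hist;
int histSize = 256;
float range[] = { 0, 256 }; // upper bound is exclusive
const float* histRange = { range };
cv::calcHist(&Binary, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, true, false);
// A THRESH_BINARY image concentrates all of its mass in two bins:
std::cout << "bin 0: " << hist.at<float>(0)
          << ", bin 250: " << hist.at<float>(250) << std::endl;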
I am trying to calculate the skew of text in an image so I can correct it for the best OCR results.
Currently this is the function I am using:
double compute_skew(Mat& img)
{
    // Binarize
    cv::threshold(img, img, 225, 255, cv::THRESH_BINARY);
    // Invert colors
    cv::bitwise_not(img, img);
    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 3));
    cv::erode(img, img, element);
    std::vector<cv::Point> points;
    cv::Mat_<uchar>::iterator it = img.begin<uchar>();
    cv::Mat_<uchar>::iterator end = img.end<uchar>();
    for (; it != end; ++it)
        if (*it)
            points.push_back(it.pos());
    cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
    double angle = box.angle;
    if (angle < -45.)
        angle += 90.;
    cv::Point2f vertices[4];
    box.points(vertices);
    for (int i = 0; i < 4; ++i)
        cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0), 1, CV_AA);
    return angle;
}
When I look at the angle in the debugger, I get 0.000000.
However, when I give it this image, I get a proper result: a skew of about 16 degrees:
How can I properly detect the skew in the first image?
There are a few other ways to get the skew angle: 1) by Hough transform, 2) by horizontal projection profile: rotate the image through a range of angle bins, compute the horizontal projection for each, and take the angle with the greatest horizontal histogram value as the deskew angle.
I have provided an implementation of 1) below, and a sketch of 2) after it. I believe 1) is superior to the boxing method you are using, because the boxing method requires you to completely clean the image of any noise, which just isn't possible most of the time.
You should know that the method doesn't work well if there's too much noise. You can reduce noise in different ways depending on what type of "line" you want to treat as the most dominant in the image. I have provided two preprocessing methods for this. Be sure to play with the parameters, thresholds, etc.
Results (all run using preprocess2, all with the same parameter set):
Code:
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
void hough_transform(Mat& im, Mat& orig, double* skew)
{
    double max_r = sqrt(pow(.5 * im.cols, 2) + pow(.5 * im.rows, 2));
    int angleBins = 180;
    Mat acc = Mat::zeros(Size(int(2 * max_r), angleBins), CV_32SC1);
    int cenx = im.cols / 2;
    int ceny = im.rows / 2;
    for (int x = 1; x < im.cols - 1; x++)
    {
        for (int y = 1; y < im.rows - 1; y++)
        {
            if (im.at<uchar>(y, x) == 255)
            {
                for (int t = 0; t < angleBins; t++)
                {
                    double r = (x - cenx) * cos((double)t / angleBins * CV_PI)
                             + (y - ceny) * sin((double)t / angleBins * CV_PI);
                    r += max_r;
                    acc.at<int>(t, int(r))++;
                }
            }
        }
    }
    normalize(acc, acc, 255, 0, NORM_MINMAX);
    convertScaleAbs(acc, acc);
    /* debug
    Mat cmap;
    applyColorMap(acc, cmap, COLORMAP_JET);
    imshow("cmap", cmap);
    imshow("acc", acc); */
    Point maxLoc;
    minMaxLoc(acc, 0, 0, 0, &maxLoc);
    double theta = (double)maxLoc.y / angleBins * CV_PI;
    double rho = maxLoc.x - max_r;
    if (abs(sin(theta)) < 0.000001) // check vertical
    {
        // when vertical, the line equation becomes x = rho
        Point2d p1 = Point2d(rho + im.cols / 2, 0);
        Point2d p2 = Point2d(rho + im.cols / 2, im.rows);
        line(orig, p1, p2, Scalar(0, 0, 255), 1);
        *skew = 90;
        cout << "skew angle 90" << endl;
    }
    else
    {
        // convert normal form back to slope-intercept form: y = mx + b
        double m = -cos(theta) / sin(theta);
        double b = rho / sin(theta) + im.rows / 2. - m * im.cols / 2.;
        Point2d p1 = Point2d(0, b);
        Point2d p2 = Point2d(im.cols, im.cols * m + b);
        line(orig, p1, p2, Scalar(0, 0, 255), 1);
        double skewangle = p1.x - p2.x > 0
            ? (atan2(p1.y - p2.y, p1.x - p2.x) * 180. / CV_PI)
            : (atan2(p2.y - p1.y, p2.x - p1.x) * 180. / CV_PI);
        *skew = skewangle;
        cout << "skew angle " << skewangle << endl;
    }
    imshow("orig", orig);
}
Mat preprocess1(Mat& im)
{
    Mat ret = Mat::zeros(im.size(), CV_32SC1);
    for (int x = 1; x < im.cols - 1; x++)
    {
        for (int y = 1; y < im.rows - 1; y++)
        {
            // Sobel-style gradients, thresholded on squared magnitude
            int gy = (im.at<uchar>(y - 1, x + 1) - im.at<uchar>(y - 1, x - 1))
                   + 2 * (im.at<uchar>(y, x + 1) - im.at<uchar>(y, x - 1))
                   + (im.at<uchar>(y + 1, x + 1) - im.at<uchar>(y + 1, x - 1));
            int gx = (im.at<uchar>(y + 1, x - 1) - im.at<uchar>(y - 1, x - 1))
                   + 2 * (im.at<uchar>(y + 1, x) - im.at<uchar>(y - 1, x))
                   + (im.at<uchar>(y + 1, x + 1) - im.at<uchar>(y - 1, x + 1));
            int g2 = (gy * gy + gx * gx);
            ret.at<int>(y, x) = g2;
        }
    }
    normalize(ret, ret, 255, 0, NORM_MINMAX);
    ret.convertTo(ret, CV_8UC1);
    threshold(ret, ret, 50, 255, THRESH_BINARY);
    return ret;
}
Mat preprocess2(Mat& im)
{
    // 1) assume white on black and do local thresholding
    // 2) only let a pixel vote if its top neighbors are white and its
    //    bottom neighbors are black (the bottom edge of a text line)
    Mat thresh;
    //thresh = 255 - im;
    thresh = im.clone();
    adaptiveThreshold(thresh, thresh, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 15, -2);
    Mat ret = Mat::zeros(im.size(), CV_8UC1);
    for (int x = 1; x < thresh.cols - 1; x++)
    {
        for (int y = 1; y < thresh.rows - 1; y++)
        {
            bool toprowblack = thresh.at<uchar>(y - 1, x) == 0 || thresh.at<uchar>(y - 1, x - 1) == 0 || thresh.at<uchar>(y - 1, x + 1) == 0;
            bool belowrowblack = thresh.at<uchar>(y + 1, x) == 0 || thresh.at<uchar>(y + 1, x - 1) == 0 || thresh.at<uchar>(y + 1, x + 1) == 0;
            uchar pix = thresh.at<uchar>(y, x);
            if (!toprowblack && pix == 255 && belowrowblack)
            {
                ret.at<uchar>(y, x) = 255;
            }
        }
    }
    return ret;
}
Mat rot(Mat& im, double thetaRad)
{
    cv::Mat rotated;
    double nw = abs(sin(thetaRad)) * im.rows + abs(cos(thetaRad)) * im.cols;
    double nh = abs(cos(thetaRad)) * im.rows + abs(sin(thetaRad)) * im.cols;
    cv::Mat rot_mat = cv::getRotationMatrix2D(Point2d(nw * .5, nh * .5), thetaRad * 180 / CV_PI, 1);
    Mat pos = Mat::zeros(Size(1, 3), CV_64FC1);
    pos.at<double>(0) = (nw - im.cols) * .5;
    pos.at<double>(1) = (nh - im.rows) * .5;
    Mat res = rot_mat * pos;
    rot_mat.at<double>(0, 2) += res.at<double>(0);
    rot_mat.at<double>(1, 2) += res.at<double>(1);
    cv::warpAffine(im, rotated, rot_mat, Size(nw, nh), cv::INTER_LANCZOS4);
    return rotated;
}
int main(int argc, char** argv)
{
    string src = "C:/data/skew.png";
    Mat im = imread(src);
    Mat gray;
    cvtColor(im, gray, CV_BGR2GRAY);
    Mat preprocessed = preprocess2(gray);
    imshow("preprocessed2", preprocessed);
    double skew;
    hough_transform(preprocessed, im, &skew);
    Mat rotated = rot(im, skew * CV_PI / 180);
    imshow("corrected", rotated);
    waitKey(0);
    return 0;
}
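As promised, a minimal sketch of 2), the projection-profile method. The name projection_profile_skew, the +/-30 degree search range, and the 0.5 degree step are my own choices to tune; the input is assumed to be the same binarized white-on-black image fed to hough_transform:

double projection_profile_skew(const Mat& bin, double maxAngle = 30, double step = 0.5)
{
    // Rotate through candidate angles; when text lines align with the
    // rows, the white pixels concentrate into few rows, so the sum of
    // squared row sums peaks at the deskew angle.
    double bestAngle = 0, bestScore = -1;
    for (double a = -maxAngle; a <= maxAngle; a += step)
    {
        Mat rot_mat = getRotationMatrix2D(Point2f(bin.cols / 2.f, bin.rows / 2.f), a, 1);
        Mat rotated;
        warpAffine(bin, rotated, rot_mat, bin.size(), INTER_NEAREST);
        Mat rowSums;
        reduce(rotated, rowSums, 1, CV_REDUCE_SUM, CV_64F); // one sum per row
        double score = rowSums.dot(rowSums);
        if (score > bestScore)
        {
            bestScore = score;
            bestAngle = a;
        }
    }
    return bestAngle; // the rotation that best aligns the text with the rows
}

In the main above, double skew; hough_transform(preprocessed, im, &skew); could be swapped for double skew = projection_profile_skew(preprocessed); with the rest of the deskewing left unchanged.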
The approach you posted has its own "ideal binarization" assumption: the threshold value directly affects the process. Use an Otsu threshold, or think about the DFT for a generic solution.
Otsu trial:
int main()
{
    Mat input = imread("your text");
    cvtColor(input, input, CV_BGR2GRAY);
    Mat img;
    cv::threshold(input, img, 100, 255, cv::THRESH_OTSU);
    cv::bitwise_not(img, img);
    imshow("img", img);
    waitKey(0);

    vector<Point> points;
    findNonZero(img, points);
    cv::RotatedRect box = cv::minAreaRect(points);
    double angle = box.angle;
    if (angle < -45.)
        angle += 90.;
    cv::Point2f vertices[4];
    box.points(vertices);
    for (int i = 0; i < 4; ++i)
        cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0));
    imshow("img", img);
    waitKey(0);
    return 0;
}
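On the DFT idea: a minimal sketch, adapted from the standard OpenCV DFT tutorial, that displays the centered log-magnitude spectrum of the grayscale input from the Otsu trial above. Parallel text lines show up as a dominant streak through the spectrum's center, perpendicular to the line direction, which can then be measured (e.g., with the Hough transform from the other answer) without committing to any binarization threshold:

// input is the CV_8UC1 image after the cvtColor above
Mat padded;
int m = getOptimalDFTSize(input.rows);
int n = getOptimalDFTSize(input.cols);
copyMakeBorder(input, padded, 0, m - input.rows, 0, n - input.cols,
               BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI);
split(complexI, planes);
magnitude(planes[0], planes[1], planes[0]);
Mat mag = planes[0];
mag += Scalar::all(1);
log(mag, mag); // log scale for display
// crop to an even size and swap quadrants so the origin is centered
mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
int cx = mag.cols / 2, cy = mag.rows / 2;
Mat q0(mag, Rect(0, 0, cx, cy)), q1(mag, Rect(cx, 0, cx, cy));
Mat q2(mag, Rect(0, cy, cx, cy)), q3(mag, Rect(cx, cy, cx, cy));
Mat tmp;
q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3);
q1.copyTo(tmp); q2.copyTo(q1); tmp.copyTo(q2);
normalize(mag, mag, 0, 1, NORM_MINMAX);
imshow("spectrum", mag);
waitKey(0);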
I am trying to use the calcOpticalFlowSF() function, but when I launch it, the program doesn't respond. Here is the part of the code that uses it:
frame1 = cv::imread("frame10.png");
frame2 = cv::imread("frame11.png");
if (frame1.empty()) {
    cout << "could not read image oldori" << endl;
    return;
}
if (frame2.empty()) {
    cout << "could not read image ori" << endl;
    return;
}
if (frame1.rows != frame2.rows || frame1.cols != frame2.cols) {
    cout << "images should be of equal sizes" << endl;
    return;
}
if (frame1.type() != 16 || frame2.type() != 16) {
    cout << "images should be of equal type CV_8UC3" << endl;
    return;
}
cv::Mat flow;
cv::calcOpticalFlowSF(frame1, frame2, flow, 2, 2, 4);
// calcOpticalFlowSF(frame1, frame1,   // doesn't work either
//                   flow,
//                   3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10);
I know that the error comes from the function calcOpticalFlowSF, because if I comment it out, the program works. I use the same pictures as in the SimpleFlow demo. If you look here: How to get cv::calcOpticalFlowSF to work?, it seems that he had no problem with the function itself...
Do you have an idea why it doesn't work?
thanks,
best regards.
Make sure the image is 3-channel; it's better to check the docs here.
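A minimal guard along those lines, using the question's frame1/frame2:

// If a frame was loaded as grayscale, convert it to 3 channels first:
if (frame1.channels() == 1) cv::cvtColor(frame1, frame1, CV_GRAY2BGR);
if (frame2.channels() == 1) cv::cvtColor(frame2, frame2, CV_GRAY2BGR);
// calcOpticalFlowSF expects 8-bit 3-channel inputs (CV_8UC3 == type 16)
CV_Assert(frame1.type() == CV_8UC3 && frame2.type() == CV_8UC3);

The full example below exercises all three flow functions (Farneback, SimpleFlow, and pyramidal LK), selected by choice: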
#if 1
#define _CRT_SECURE_NO_WARNINGS
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include "opencv2/optflow.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
using namespace cv::optflow;//calcOpticalFlowSF 's namespace
const size_t choice = 2;
// choice:
//   1: calcOpticalFlowFarneback   2: calcOpticalFlowSF   3: calcOpticalFlowPyrLK
void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step, const Scalar& color) {
    // cflowmap is the previous frame, onto which the optical-flow lines are drawn
    // flow is a 2-channel float array storing the (dx, dy) delta per pixel
    // step samples every step-th pixel
    for (int y = 0; y < cflowmap.rows; y += step)
        for (int x = 0; x < cflowmap.cols; x += step)
        {
            const Point2f& fxy = flow.at<Point2f>(y, x);
            line(cflowmap, Point(x, y), Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), color);
            circle(cflowmap, Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), 1, color, -1);
        }
}
void drawOptFlowMap(Mat& cflowmap, int step, const Scalar& color, vector<Point2f>& retPts) {
    // same as above; retPts holds the tracked points in the next frame
    auto it = retPts.begin();
    for (int y = 0; y < cflowmap.rows; y += step)
        for (int x = 0; x < cflowmap.cols; x += step)
        {
            line(cflowmap, Point(x, y), *it, color);
            circle(cflowmap, *it, 1, color, -1);
            it++;
        }
}
int main(int argc, char* argv[])
{
    Mat flow; // flow = aft - pre
    Mat pre = imread("1hf.png", IMREAD_COLOR);
    Mat aft = imread("2hf.png", IMREAD_COLOR); // CV_LOAD_IMAGE_GRAYSCALE for gray; IMREAD_COLOR for color
    if (pre.empty() || aft.empty()) {
        printf("Unable to load the image");
        return 1;
    }
    Mat cflow = pre; Mat cflow2 = aft; // 3-channel mats of the frames; cflow is for drawing the colored lines
    cvtColor(pre, pre, CV_BGR2GRAY);
    cvtColor(aft, aft, CV_BGR2GRAY);
    // below are the parameters of calcOpticalFlowPyrLK
    vector<Point2f> prePts;
    size_t step = 10;
    vector<Point2f> nextPts(pre.rows * pre.cols);
    vector<uchar> status;
    vector<float> err;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    switch (choice)
    {
    case 1: // calcOpticalFlowFarneback
        calcOpticalFlowFarneback(pre, aft, flow, 0.5, 3, 15, 3, 5, 1.2, 0); // result in flow; note that the input mats must be 1-channel
        drawOptFlowMap(flow, cflow, 10, CV_RGB(0, 255, 0));
        break;
    case 2: // calcOpticalFlowSF
        calcOpticalFlowSF(cflow, cflow2, flow,
                          3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10); // result in flow; note that the input mats must be 3-channel
        drawOptFlowMap(flow, cflow, 10, CV_RGB(0, 255, 0));
        break;
    case 3: // calcOpticalFlowPyrLK
        for (int y = 0; y < pre.rows; y += step)
            for (int x = 0; x < pre.cols; x += step)
                prePts.push_back(Point(x, y));
        // above builds a grid of points with the given step
        calcOpticalFlowPyrLK(pre, aft, prePts, nextPts, status, err, Size(31, 31), 3, termcrit, 0, 0.001); // note that the input mats must be 1-channel
        drawOptFlowMap(cflow, step, CV_RGB(0, 255, 0), nextPts);
        break;
    default:
        break;
    }
    imshow("pre", pre);
    imshow("after", aft);
    // cflow is the previous frame with the optical-flow lines
    imshow("pre with OpticalFlow line", cflow);
    waitKey(0);
    return 0;
}
#endif