I'm looking for a correct way to measure OpenCV FPS. I've found several ways to do it, but none of them looks right to me.
The first one I tested uses a time_t start and a time_t end. I think that one is wrong, because it gives me a damped function as the fps-vs-time plot (I really can't imagine how an fps plot could be a damped function).
Here is the image of this plot.
The second one I tested uses t = (double)cvGetTickCount() to measure fps. This way is wrong because it reports 120 fps, but a 30-second video captured at 120 fps should then take no more than a minute to process, which is not what happens. So this is a wrong way to measure FPS.
Does anyone know another way to measure FPS in OpenCV?
P.S. I'm trying to find circles in each frame of the video. The video frame size is 320x240 pixels.
Update 2
The code whose FPS I'm trying to measure:
for(;;)
{
    double start = CLOCK(); // CLOCK() returns milliseconds (see below)
    Mat frame, finalFrame;
    capture >> frame;
    finalFrame = frame;
    cvtColor(frame, frame, CV_BGR2GRAY);
    GaussianBlur(frame, frame, Size(7,7), 1.5, 1.5);
    threshold(frame, frame, 20, 255, CV_THRESH_BINARY);
    dilate(frame, frame, Mat(), Point(-1, -1), 2, 1, 1);
    erode(frame, frame, Mat(), Point(-1, -1), 2, 1, 1);
    Canny(frame, frame, 20, 20*2, 3);
    vector<Vec3f> circles;
    vector<vector<Point> > _contours; // was passed to findContours without being declared
    findContours(frame, _contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    vector<vector<Point> > contours_poly( _contours.size() );
    vector<Rect> boundRect( _contours.size() );
    vector<Point2f> center( _contours.size() );
    vector<float> radius( _contours.size() );
    int temp = 0;
    for( int i = 0; i < (int)_contours.size(); i++ )
    {
        if( _contours[i].size() > 100 )
        {
            approxPolyDP( Mat(_contours[i]), contours_poly[i], 3, true );
            boundRect[i] = boundingRect( Mat(_contours[i]) );
            minEnclosingCircle( (Mat)_contours[i], center[i], radius[i] );
            temp = i;
            break;
        }
    }
    double dur = CLOCK() - start;
    printf("avg time per frame %f ms. fps %f. frameno = %d\n", avgdur(dur), avgfps(), frameno++);
    frameCounter++;
    if(frameCounter == 3600)
        break;
    if(waitKey(1000/120) >= 0) break;
}
Update
Program execution using the Zaw Lin method!
I have posted a way to do that at Getting current FPS of OpenCV. It is necessary to do a bit of averaging, otherwise the fps will be too jumpy.
Edit
I have put a Sleep inside process() and it gives the correct fps and duration (+/- 1 ms).
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv/cv.h>
#include <sys/timeb.h>
using namespace cv;
#if defined(_MSC_VER) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) \
|| defined(WIN64) || defined(_WIN64) || defined(__WIN64__)
#include <windows.h>
bool _qpcInited=false;
double PCFreq = 0.0;
__int64 CounterStart = 0;
void InitCounter()
{
LARGE_INTEGER li;
if(!QueryPerformanceFrequency(&li))
{
std::cout << "QueryPerformanceFrequency failed!\n";
}
PCFreq = double(li.QuadPart)/1000.0f;
_qpcInited=true;
}
double CLOCK()
{
if(!_qpcInited) InitCounter();
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart)/PCFreq;
}
#endif
#if defined(unix) || defined(__unix) || defined(__unix__) \
|| defined(linux) || defined(__linux) || defined(__linux__) \
|| defined(sun) || defined(__sun) \
|| defined(BSD) || defined(__OpenBSD__) || defined(__NetBSD__) \
|| defined(__FreeBSD__) || defined __DragonFly__ \
|| defined(sgi) || defined(__sgi) \
|| defined(__MACOSX__) || defined(__APPLE__) \
|| defined(__CYGWIN__)
double CLOCK()
{
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return (t.tv_sec * 1000)+(t.tv_nsec*1e-6);
}
#endif
double _avgdur=0;
double _fpsstart=0;
double _avgfps=0;
double _fps1sec=0;
double avgdur(double newdur)
{
_avgdur=0.98*_avgdur+0.02*newdur;
return _avgdur;
}
double avgfps()
{
if(CLOCK()-_fpsstart>1000)
{
_fpsstart=CLOCK();
_avgfps=0.7*_avgfps+0.3*_fps1sec;
_fps1sec=0;
}
_fps1sec++;
return _avgfps;
}
void process(Mat& frame)
{
Sleep(3);
}
int main(int argc, char** argv)
{
int frameno=0;
cv::Mat frame;
cv::VideoCapture cap(0);
for(;;)
{
//cap>>frame;
double start=CLOCK();
process(frame);
double dur = CLOCK()-start;
printf("avg time per frame %f ms. fps %f. frameno = %d\n",avgdur(dur),avgfps(),frameno++ );
if(waitKey(1)==27)
exit(0);
}
return 0;
}
You can use the OpenCV helper cv::getTickCount():
#include <iostream>
#include <string>

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;

int main(int ac, char** av) {
    VideoCapture capture(0);
    Mat frame;
    for (;;) {
        int64 start = cv::getTickCount();
        capture >> frame;
        if (frame.empty())
            break;
        /* do some image processing here */
        char key = (char)waitKey(1);
        double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
        std::cout << "FPS : " << fps << std::endl;
    }
    return 0;
}
You can use OpenCV's API to get the original FPS if you are dealing with video files. The following method will not work when capturing from a live stream:
cv::VideoCapture capture("C:\\video.avi");
if (!capture.isOpened())
{
std::cout << "!!! Could not open input video" << std::endl;
return;
}
std::cout << "FPS: " << capture.get(CV_CAP_PROP_FPS) << std::endl;
To get the actual FPS after the processing, you can try Zaw Lin's method.
I would just measure the wall time and simply divide the number of frames by the time elapsed. On Linux:
/*
 * compile with:
 * g++ -ggdb webcam_fps_example2.cpp -o webcam_fps_example2 `pkg-config --cflags --libs opencv`
 */
#include "opencv2/opencv.hpp"
#include <time.h>
#include <sys/time.h>

using namespace cv;
using namespace std;

// Wall-clock time in seconds.
double get_wall_time(){
    struct timeval time;
    if (gettimeofday(&time, NULL)){
        // Handle error
        return 0;
    }
    return (double)time.tv_sec + (double)time.tv_usec * .000001;
}

int main(int argc, char** argv)
{
    VideoCapture cap;
    // open the default camera; use something different from 0 otherwise.
    // Check the VideoCapture documentation.
    if(!cap.open(0))
        return 0;
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);

    double wall0 = get_wall_time();
    int frames = 0; // count the frames actually grabbed, in case the loop exits early
    for(int x = 0; x < 500; x++)
    {
        Mat frame;
        cap >> frame;
        if( frame.empty() ) break; // end of video stream
        frames++;
        //imshow("this is you, smile! :)", frame);
        if( waitKey(10) == 27 ) break; // stop capturing by pressing ESC
    }
    double wall1 = get_wall_time();
    double fps = frames / (wall1 - wall0);
    cout << "Wall Time = " << wall1 - wall0 << endl;
    cout << "FPS = " << fps << endl;
    // the camera will be closed automatically upon exit
    // cap.close();
    return 0;
}
Wall Time = 43.9243
FPS = 11.3832
Related
I am trying to measure Frames Per Second when processing frames from a camera. The calculations are nothing special and can be found in this question: How to write function with parameter which type is deduced with 'auto' word?
My camera is pretty old, and its manufacturer-declared FPS is no more than 30 at 640x480 resolution. However, when I run those calculations they show me 40-50 FPS on a live stream. How can that be?
Update: Code:
#include <chrono>
#include <iostream>
#include <sstream>   // std::stringstream (used in showFPS)
#include <string>
#include <numeric>

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>

using std::cerr;
using std::cout;
using std::endl;
using cv::waitKey;
using cv::Mat;

using time_type = decltype(std::chrono::high_resolution_clock::now());

// Stand-in values; the original code takes these from a constants header.
namespace constants {
    const int font = cv::FONT_HERSHEY_SIMPLEX;
    const double fontScale = 0.6;
    namespace color { const cv::Scalar green(0, 255, 0); }
}

void showFPS(Mat* frame, const time_type &startTime);

int main(int argc, char** argv) {
    cv::VideoCapture capture;
    std::string videoDevicePath = "/dev/video0";
    if (!capture.open(videoDevicePath)) {
        std::cerr << "Unable to open video capture.";
        return 1;
    }

    //TODO normally through cmd or from cameraParameters.xml
    bool result;
    result = capture.set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
    if (result) {
        std::cout << "Camera: PROP_FOURCC: MJPG option set.";
    } else {
        std::cerr << "Camera: PROP_FOURCC: MJPG option was not set.";
    }

    result = capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    if (result) {
        std::cout << "Camera: PROP_FRAME_WIDTH option set.";
    } else {
        std::cerr << "Camera: PROP_FRAME_WIDTH option was not set.";
    }

    result = capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    if (result) {
        std::cout << "Camera: PROP_FRAME_HEIGHT option set.";
    } else {
        std::cerr << "Camera: PROP_FRAME_HEIGHT option was not set.";
    }

    result = capture.set(CV_CAP_PROP_FPS, 30);
    if (result) {
        std::cout << "Camera: PROP_FPS option set.";
    } else {
        std::cerr << "Camera: PROP_FPS option was not set.";
    }

    Mat frame, raw;
    while (cv::waitKey(5) != 'q') {
        auto start = std::chrono::high_resolution_clock::now();
        capture >> raw;
        if (raw.empty()) {
            return 1;
        }
        if (raw.channels() > 1) {
            cv::cvtColor(raw, frame, CV_BGR2GRAY);
        } else {
            frame = raw;
        }
        showFPS(&frame, start); // was showFPS(&raw1, start), which does not compile
    }
    return 0;
}

void showFPS(Mat* frame, const time_type &startTime) {
    typedef std::chrono::duration<float> fsec_t;
    auto stopTime = std::chrono::high_resolution_clock::now();
    fsec_t duration = stopTime - startTime;
    double sec = duration.count();
    double fps = (1.0 / sec);
    std::stringstream s;
    s << "FPS: " << fps;
    cv::putText(*frame, s.str(), cv::Point2f(20, 20), constants::font,
                constants::fontScale, constants::color::green);
}
A camera's FPS is the number of frames the camera can provide per second; at 30 FPS that means the camera delivers a new frame every ~33 ms.
What you are measuring, on the other hand, is not FPS: it is the inverse of the time taken by new-frame retrieval plus color conversion, and that time is 20-25 ms based on your results.
This is not a correct way of measuring FPS, if only because you can't guarantee that these two processes are synchronized.
If you want to measure FPS correctly, measure the time taken to process the last N frames.
Pseudocode:
counter = 0;
start = getTime();
N = 100;
while (true) {
    captureFrame();
    convertColor();
    counter++;
    if (counter == N) {
        fps = N / (getTime() - start);
        printFPS(fps);
        counter = 0;
        start = getTime();
    }
}
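For illustration, a runnable C++ version of that sketch might look like the following. This is my own assembly, not code from the question: it assumes camera index 0, OpenCV 3 constant names, and grayscale conversion as the per-frame work.

#include <chrono>
#include <iostream>
#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture capture(0); // assumption: default camera
    if (!capture.isOpened()) return 1;

    cv::Mat raw, frame;
    const int N = 100; // measure over the last N frames
    int counter = 0;
    auto start = std::chrono::steady_clock::now();

    while (cv::waitKey(5) != 'q') {
        capture >> raw;
        if (raw.empty()) break;
        cv::cvtColor(raw, frame, cv::COLOR_BGR2GRAY); // the per-frame work
        cv::imshow("frame", frame);
        if (++counter == N) {
            auto now = std::chrono::steady_clock::now();
            std::chrono::duration<double> elapsed = now - start;
            std::cout << "FPS: " << N / elapsed.count() << std::endl;
            counter = 0;
            start = now;
        }
    }
    return 0;
}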
Aleksey Petrov's answer is not bad, but while averaging over the last N frames gives smoother values, one can measure the frame rate fairly accurately without averaging. Here is the code from the question, modified to do that:
// see question for earlier code
    Mat frame, raw;
    time_type prevTimePoint; // default-initialized to epoch value
    while (waitKey(1) != 'q') {
        capture >> raw;
        auto timePoint = std::chrono::high_resolution_clock::now();
        if (raw.empty()) {
            return 1;
        }
        if (raw.channels() > 1) {
            cv::cvtColor(raw, frame, CV_BGR2GRAY);
        } else {
            frame = raw;
        }
        showFPS(&frame, prevTimePoint, timePoint);
        cv::imshow("frame", frame);
    }
    return 0;
}

void showFPS(Mat* frame, time_type &prevTimePoint, const time_type &timePoint) {
    if (prevTimePoint.time_since_epoch().count()) {
        std::chrono::duration<float> duration = timePoint - prevTimePoint;
        cv::putText(*frame, "FPS: " + std::to_string(1/duration.count()),
                    cv::Point2f(20, 40), 2, 2, cv::Scalar(0,255,0));
    }
    prevTimePoint = timePoint;
}
Note that this measures the time point right after capture >> raw returns, which (without messing with OpenCV) is the closest one can get to when the camera sent the frame, and that the time is measured only once per loop and compared against the previous measurement, which gives a quite precise current frame rate. Of course, if the processing takes more time than 1/(frame rate), the measurement will be off.
The reason the question's code gave too high a frame rate was actually the code between the two time measurements: the now() in showFPS() and the now() in the while loop. My hunch is this code included cv::imshow(), which is not in the question and which together with cv::waitKey(5) and cv::putText() is likely responsible for most of the "missing time" in the frame rate calculation (causing too high a frame rate).
You have a cvtColor in between, and it affects your time computation because the processing time of cvtColor can vary from loop to loop (probably because of other Windows processes).
Consider this example:
You grab the first frame with capture at moment 0, then do a cvtColor that takes e.g. 10 ms, so you take your stopTime at moment 10 ms. 23 ms later (33 - 10) you capture the second frame. But this time cvtColor takes 5 ms (this can happen), so you take the second stopTime at moment 38 ms (33 + 5). The first tick was at moment 10 and the second tick is at moment 38, so your fps becomes 1000/(38 - 10) = 35.7.
I'm currently trying to use a monochrome camera with the aruco and OpenCV libraries in order to accelerate the computation and get better marker capturing. The problem I am having is that the monochrome feed is tripled on screen when running the aruco_test program, so the resolution is diminished by two thirds and each marker is detected three times instead of once.
I saw threads discussing similar problems with monochrome cameras in OpenCV. Some answers suggested cropping the image (which fixes the tripling problem but not the smaller resolution), but it all seems to be caused by the conversion from either BGR2GRAY or GRAY2BGR.
Any help on what exactly is causing the tripled images, and how to bypass that part in either the aruco source code or the OpenCV source code, would be appreciated.
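For reference, the cropping workaround mentioned above might look like the sketch below. It assumes (my guess, matching what I see on screen) that the three copies sit side by side in the frame:

// Hypothetical crop: keep only the left third of the tripled frame.
cv::Mat cropped = frame(cv::Rect(0, 0, frame.cols / 3, frame.rows)).clone();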
INFO :
Driver Info (not using libv4l2):
Driver name : uvcvideo
Card type : oCam-1MGN-U
Bus info : usb-0000:00:1d.0-1.5
Driver version: 3.13.11
Capabilities : 0x84000001
Video Capture
Streaming
Device Capabilities
Device Caps : 0x04000001
Video Capture
Streaming
Priority: 2
Video input : 0 (Camera 1: ok)
Format Video Capture:
Width/Height : 1280/960
Pixel Format : 'GREY'
Field : None
Bytes per Line: 1280
Size Image : 1228800
Colorspace : Unknown (00000000)
Crop Capability Video Capture:
Bounds : Left 0, Top 0, Width 1280, Height 960
Default : Left 0, Top 0, Width 1280, Height 960
Pixel Aspect: 1/1
Streaming Parameters Video Capture:
Capabilities : timeperframe
Frames per second: 30.000 (30/1)
Read buffers : 0
brightness (int) : min=0 max=127 step=1 default=64 value=64
exposure_absolute (int) : min=1 max=625 step=1 default=39 value=39
Using Aruco 2.0.19 and OpenCV 3.2
Since the pixel format is not YUYV, I cannot simply take the Y channel from the camera feed.
Code executed:
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include "aruco.h"
#include "cvdrawingutils.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace aruco;
MarkerDetector MDetector;
VideoCapture TheVideoCapturer;
vector< Marker > TheMarkers;
Mat TheInputImage, TheInputImageCopy;
CameraParameters TheCameraParameters;
void cvTackBarEvents(int pos, void *);
pair< double, double > AvrgTime(0, 0); // determines the average time required for detection
int iThresParam1, iThresParam2;
int waitTime = 0;
class CmdLineParser {
    int argc;
    char **argv;
public:
    CmdLineParser(int _argc, char **_argv) : argc(_argc), argv(_argv) {}
    bool operator[](string param) {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        return (idx != -1);
    }
    string operator()(string param, string defvalue = "-1") {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        if (idx == -1) return defvalue;
        else return (argv[idx + 1]);
    }
};
cv::Mat resize(const cv::Mat &in, int width){
    if (in.size().width <= width) return in;
    float yf = float(width) / float(in.size().width);
    cv::Mat im2;
    cv::resize(in, im2, cv::Size(width, float(in.size().height) * yf));
    return im2;
}
int main(int argc, char **argv) {
    try {
        CmdLineParser cml(argc, argv);
        if (argc < 2 || cml["-h"]) {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live[:idx_cam=0]) [-c camera_params.yml] [-s marker_size_in_meters] [-d dictionary:ARUCO by default] [-h]" << endl;
            cerr << "\tDictionaries: ";
            for (auto dict : aruco::Dictionary::getDicTypes()) cerr << dict << " ";
            cerr << endl;
            cerr << "\t Instead of these, you can directly indicate the path to a file with your own generated dictionary" << endl;
            return false;
        }

        /////////// PARSE ARGUMENTS
        string TheInputVideo = argv[1];
        // read camera parameters if passed
        if (cml["-c"]) TheCameraParameters.readFromXMLFile(cml("-c"));
        float TheMarkerSize = std::stof(cml("-s", "-1"));
        //aruco::Dictionary::DICT_TYPES TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );

        /////////// OPEN VIDEO
        // read from camera or from file
        if (TheInputVideo.find("live") != string::npos) {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos) {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        }
        else TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened()) throw std::runtime_error("Could not open video");

        ///// CONFIGURE DATA
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;
        if (TheCameraParameters.isValid())
            TheCameraParameters.resize(TheInputImage.size());
        MDetector.setDictionary(cml("-d", "ARUCO")); // sets the dictionary to be employed (ARUCO, APRILTAGS, ARTOOLKIT, etc.)
        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);
        // MDetector.setCornerRefinementMethod(aruco::MarkerDetector::SUBPIX);

        // GUI requirements: the trackbars to change these parameters
        iThresParam1 = MDetector.getParams()._thresParam1;
        iThresParam2 = MDetector.getParams()._thresParam2;
        cv::namedWindow("in");
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);

        // go!
        char key = 0;
        int index = 0;
        // capture until ESC is pressed or until the end of the video
        do {
            TheVideoCapturer.retrieve(TheInputImage);
            // copy image
            double tick = (double)getTickCount(); // for checking the speed
            // Detection of markers in the image passed
            TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean time over all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;
            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                cout << TheMarkers[i] << endl;
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
            }
            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid() && TheMarkerSize > 0)
                for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }
            // DONE! Easy, right?
            // show input with augmented information and the thresholded image
            cv::imshow("in", resize(TheInputImageCopy, 1280));
            cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
            key = cv::waitKey(waitTime); // wait for key to be pressed
            if (key == 's') waitTime = waitTime == 0 ? 10 : 0;
            index++; // number of images captured
        } while (key != 27 && (TheVideoCapturer.grab()));
    } catch (std::exception &ex) {
        cout << "Exception :" << ex.what() << endl;
    }
}
void cvTackBarEvents(int pos, void *) {
    (void)(pos);
    if (iThresParam1 < 3) iThresParam1 = 3;
    if (iThresParam1 % 2 != 1) iThresParam1++;
    if (iThresParam1 < 1) iThresParam1 = 1;
    MDetector.setThresholdParams(iThresParam1, iThresParam2);
    // recompute
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
    // draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
    cv::imshow("in", resize(TheInputImageCopy, 1280));
    cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
}
In the documentation for SuperResolution,
the method required to output the next frame is:
void superres::SuperResolution::nextFrame(OutputArray frame)
The input frame source has to be set with:
void superres::SuperResolution::setInput(const Ptr<FrameSource>& frameSource)
I have code that takes frames from a video:
#include "opencv2/opencv.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main(int, char**)
{
VideoCapture cap ( "video1.mp4" ); // open the default camera
if( ! cap.isOpened () ) // check if we succeeded
return -1;
/* Mat edges; */
namedWindow ( "Video" , 1 );
double frnb ( cap.get ( CV_CAP_PROP_FRAME_COUNT ) );
std::cout << "frame count = " << frnb << endl;
for(;;)
{
Mat frame;
double fIdx;
std::cout << "frame index ? ";
std::cin >> fIdx;
if ( fIdx < 0 || fIdx >= frnb ) break;
cap.set ( CV_CAP_PROP_POS_FRAMES , fIdx );
bool success = cap.read(frame);
if ( ! success )
{
cout << "Cannot read frame " << endl;
break;
}
/* cap >> frame; // get a new frame from camera */
imshow("Video", frame);
if ( waitKey (0) == 27 ) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
Given this, I can use the frame variable as the parameter for the setInput method, but how can I initialize the OutputArray frame needed to receive the output?
I think you cannot use frame as the parameter for setInput, and you don't need to initialize the OutputArray frame.
Check this example out:
FrameSource is created like this:
121. frameSource = createFrameSource_Video(inputVideoName);
And then the OutputArray frame is used like this:
142. Mat result; // no initialization, just declaration
144. MEASURE_TIME(superRes->nextFrame(result));
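Putting those pieces together, a minimal end-to-end sketch might look like this. This is my own assembly, not the official sample: it assumes the superres module is available and uses the BTV-L1 implementation with default settings:

#include <opencv2/opencv.hpp>
#include <opencv2/superres.hpp>

using namespace cv;
using namespace cv::superres;

int main()
{
    // Wrap the video file as a FrameSource and hand it to the algorithm.
    Ptr<FrameSource> frameSource = createFrameSource_Video("video1.mp4");
    Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
    superRes->setInput(frameSource);

    for (;;)
    {
        Mat result; // no initialization, just declaration
        superRes->nextFrame(result);
        if (result.empty())
            break;
        imshow("SuperResolution", result);
        if (waitKey(30) == 27) break; // ESC to quit
    }
    return 0;
}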
I am using OpenCV C++ on Mac OS X 10.10.2 to process video frames and display them. The performance of imshow with waitKey to display the video is extremely slow.
I have the following code which displays HD (1920x1080) grayscale frames correctly, except that it runs about 10 times too slow (i.e. 2 to 3 frames per second instead of 30 frames per second).
cv::Mat framebuf[TEST_COUNT];

//--- Code here to allocate and fill the frame buffer with about 4 seconds of video. This part works correctly.

//--- This loop runs too slow by factor of approximately 10x
for (int f = 0; f < TEST_COUNT; f++)
{
    cv::imshow(windowName, framebuf[f]);
    cv::waitKey(33);
}
Can anyone suggest how to get real-time or near real-time performance from OpenCV imshow()? I have seen many posts stating that they display video in real time or even faster than real time, so I am not sure what I am doing wrong. Any help would be greatly appreciated.
I could be wrong, but for me the problem is not with your code but with your OS/configuration. I've written a small test:
import cv2
import numpy as np
from random import randrange

img = np.zeros((1080, 1920), dtype=np.uint8)  # rows x cols, i.e. a 1920x1080 frame
counter = 0
while counter < 1000:
    cv2.line(img, (randrange(0, 1920), randrange(0, 1080)),
             (randrange(0, 1920), randrange(0, 1080)), (randrange(0, 255)))
    cv2.imshow('test', img)
    temp = cv2.waitKey(1)
    counter += 1
print(counter)
On my machine (Core 2 Duo 2.6 GHz x64, 8 GB RAM, SSD) it took about 30 seconds for this test to complete. Run it, and if you get a significantly bigger time then something is definitely wrong with your laptop/OpenCV configuration/etc. I've used OpenCV 2.4.x on Mac OS X (it was 10.9, I think) and it was running fine. Reinstalling OpenCV is the most obvious solution that comes to my mind. When you remove OpenCV, use brew to install it again - brew install opencv --with-tbb --with-python --with-ffmpeg (or something similar - check using brew options opencv) should be fine. The first option tells brew to build OpenCV with TBB (Thread Building Blocks, a multithreading library that can sometimes significantly improve speed), the second installs the Python wrappers, and the last one installs ffmpeg (which handles codecs etc.).
You would have to reduce the input to the waitKey function. Try using a lower number in the range of 2-5. It also depends on the other processes you have running simultaneously; try shutting down other processes and see if it improves.
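Applied to the loop from the question, that change would look like this (a sketch; windowName, framebuf, and TEST_COUNT come from the question):

for (int f = 0; f < TEST_COUNT; f++)
{
    cv::imshow(windowName, framebuf[f]);
    cv::waitKey(2); // was 33; a 2-5 ms delay still lets HighGUI process events
}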
You can create your own window to show the image. Add the MyWindow.m and MyWindow.h files to the project.
MyWindow.h
#ifndef MY_WINDOW_H
#define MY_WINDOW_H
#ifdef __cplusplus
extern "C" {
#endif
void* createNSWindow(int x, int y, int w, int h);
void renderNSWindow(void* inwindow, void* data, int w, int h, int c);
void processNSEvent();
#ifdef __cplusplus
}
#endif
#endif
Usage, in main.cpp (do not forget waitKey):
#include "MyWindow.h"
// need create a cv window and do nothing
cv::namedWindow("xxx", 1);
// create window
void* w = createNSWindow(0, 0, 0, 0);
// frame image
cv::Mat frameImage;
// render loop
renderNSWindow(w, frameImage.data, frameImage.cols, frameImage.rows, frameImage.channels());
// need waitKey to display window
processNSEvent();
Implementation, in MyWindow.m (delete the auto-generated #import "MyWindow.h" if Xcode added one):
#import <Cocoa/Cocoa.h>
@interface MyWindow : NSWindow
@property(nonatomic, strong) NSImageView *imgv;
@end

@implementation MyWindow
@end
static NSImage* _createNSImage(void* data, int w, int h, int c);
void* createNSWindow(int x, int y, int w, int h) {
NSRect screenFrame = [[NSScreen mainScreen] frame];
NSRect frame = NSMakeRect(x, y, w, h);
if (w == 0 || h == 0) {
frame = screenFrame;
}
MyWindow* window = [[MyWindow alloc] initWithContentRect:frame
styleMask:NSWindowStyleMaskBorderless
backing:NSBackingStoreBuffered
defer:NO] ;
//_initApp(window);
[window makeKeyAndOrderFront:NSApp];
window.titleVisibility = TRUE;
window.styleMask = NSWindowStyleMaskResizable | NSWindowStyleMaskTitled |NSWindowStyleMaskFullSizeContentView;
window.imgv = [[NSImageView alloc] initWithFrame:NSMakeRect(0, 0, frame.size.width, frame.size.height)];
[window.contentView addSubview:window.imgv];
return (void*)CFBridgingRetain(window);
}
static NSImage* _createNSImage(void* data, int w, int h, int c) {
size_t bufferLength = w * h * c;
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, data, bufferLength, NULL);
size_t bitsPerComponent = 8;
size_t bitsPerPixel = c * bitsPerComponent;
size_t bytesPerRow = c * w;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast;
if (c < 4) {
bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaNone;
unsigned char* buf = data;
for(int i = 0; i < w*h; i++) {
unsigned char temp = buf[i*c];
buf[i*c] = buf[i*c+c-1];
buf[i*c+c-1] = temp;
}
}
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef iref = CGImageCreate(w,
h,
bitsPerComponent,
bitsPerPixel,
bytesPerRow,
colorSpaceRef,
bitmapInfo,
provider, // data provider
NULL, // decode
YES, // should interpolate
renderingIntent);
NSImage* image = [[NSImage alloc] initWithCGImage:iref size:NSMakeSize(w, h)];
return image;
}
void renderNSWindow(void* inwindow, void* data, int w, int h, int c) {
MyWindow* window = (__bridge MyWindow*)inwindow;
window.imgv.image = _createNSImage(data, w, h, c);
}
void processNSEvent() {
for (;;)
{
NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
untilDate:[NSDate distantPast]
inMode:NSDefaultRunLoopMode
dequeue:YES];
if (event == nil)
break;
[NSApp sendEvent:event];
}
}
One more note: waitKey now takes about 20 ms, so you can run OpenCV in a background thread and show the window in the main thread. Also, using processNSEvent instead of waitKey takes only about 10 ms.
Full source code:
#include <iostream>
#include <chrono>   // timing around renderNSWindow
#include <unistd.h> // usleep
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <dispatch/dispatch.h>
#include "MyWindow.h"
using namespace std;
using namespace cv;
int opencvfunc(int argc, const char *argv[]);
// Shared between the capture thread and the UI thread. This is unsynchronized;
// std::atomic<bool> (and a mutex around back_frame) would be safer.
bool newFrame = false;
cv::Mat back_frame;
int main(int argc, const char * argv[]) {
    cv::namedWindow("render", 1);
    void* w = createNSWindow(0, 0, 0, 0);
    dispatch_queue_t opencvq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
    dispatch_async(opencvq, ^{
        opencvfunc(argc, argv);
    });
    while(true) {
        usleep(3*1000);
        if(newFrame) {
            std::chrono::system_clock::time_point starttime = std::chrono::system_clock::now();
            renderNSWindow(w, back_frame.data, back_frame.cols, back_frame.rows, back_frame.channels());
            newFrame = false;
            //auto key = cv::waitKey(1);
            //if (key == 'q') {
            //    break;
            //}
            processNSEvent();
            std::chrono::system_clock::time_point endtime = std::chrono::system_clock::now();
            std::cout << "imshow:" << std::chrono::duration_cast<std::chrono::duration<double>>(endtime-starttime).count()*1000 << std::endl;
        }
    }
    return 0;
}
int opencvfunc(int argc, const char *argv[]) {
    cv::VideoCapture cap;
    cap.open(0);
    if (!cap.isOpened()) {
        std::cout << "Couldn't open camera 0." << endl;
        return EXIT_FAILURE;
    }
    Mat frame, unmodified_frame;
    for (;;) {
        cap >> frame; // get a new frame from camera
        if (frame.empty()) { // stop if we're at the end of the video
            break;
        }
        //unmodified_frame = frame.clone();
        // ...
        back_frame = frame.clone();
        newFrame = true;
    }
    return EXIT_SUCCESS;
}
OpenCV 4 has resolved this issue; please update to the new version.
One more thing: process the video and show the video in two threads.
#include <stdio.h>
#include <iostream>
#include <unistd.h> // usleep
#include <opencv2/opencv.hpp>
#include <dispatch/dispatch.h>
using namespace cv;
using namespace std;
bool newFrame = false; // shared flag, unsynchronized as in the example above
Mat back_frame;
int opencvmain(int argc, char** argv) {
    // open camera
    cv::VideoCapture cap;
    cap.open(0);
    if (!cap.isOpened()) {
        std::cout << "Couldn't open camera 0." << std::endl;
        return EXIT_FAILURE;
    }
    // define frame images
    cv::Mat frame;
    // frame loop
    for (;;) {
        // get video frame
        cap >> frame;
        if (frame.empty()) {
            break;
        }
        // hand the frame to the UI thread
        back_frame = frame.clone();
        newFrame = true;
    }
    return 0;
}
int main(int argc, char** argv) {
    namedWindow("video", WINDOW_AUTOSIZE);
    dispatch_queue_t opencvq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0);
    dispatch_async(opencvq, ^{
        opencvmain(argc, argv);
    });
    while(true) {
        usleep(3*1000);
        if(newFrame) {
            imshow("video", back_frame);
            auto key = cv::waitKey(1);
            if (key == ' ') {
                break;
            }
            newFrame = false;
        }
    }
    return 0;
}
I am using HOG+SVM in OpenCV for detecting cars in an AVI video file. I am using the car.xml model file.
I am not getting good results when I use LatentSvmDetector for detecting cars:
A large number of false detections per frame.
It is very slow: it takes about 5 seconds to detect objects in a frame.
Please suggest how I can improve the object detection time.
My code is below:
#include <iostream>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#if defined(WIN32) || defined(_WIN32)
#include <io.h>
#else
#include <dirent.h>
#endif
#ifdef HAVE_CVCONFIG_H
#include <cvconfig.h>
#endif
#ifdef HAVE_TBB
#include "tbb/task_scheduler_init.h"
#endif
using namespace std;
using namespace cv;
static void detectAndDrawObjects( Mat& frame, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
{
    vector<LatentSvmDetector::ObjectDetection> detections;
    TickMeter tm;
    tm.start();
    detector.detect( frame, detections, overlapThreshold, numThreads );
    tm.stop();
    cout << "Detection time = " << tm.getTimeSec() << " sec" << endl;
    const vector<string> classNames = detector.getClassNames();
    CV_Assert( colors.size() == classNames.size() );
    for( size_t i = 0; i < detections.size(); i++ )
    {
        const LatentSvmDetector::ObjectDetection& od = detections[i];
        rectangle( frame, od.rect, colors[od.classID], 3 );
        putText( frame, classNames[od.classID], Point(od.rect.x+4, od.rect.y+13), FONT_HERSHEY_SIMPLEX, 0.55, colors[od.classID], 2 );
    }
}
static void readDirectory( const string& directoryName, vector<string>& filenames, bool addDirectoryName=true )
{
    filenames.clear();
#if defined(WIN32) || defined(_WIN32)
    struct _finddata_t s_file;
    string str = directoryName + "\\*.*";
    intptr_t h_file = _findfirst( str.c_str(), &s_file );
    if( h_file != static_cast<intptr_t>(-1.0) )
    {
        do
        {
            if( addDirectoryName )
                filenames.push_back(directoryName + "\\" + s_file.name);
            else
                filenames.push_back((string)s_file.name);
        }
        while( _findnext( h_file, &s_file ) == 0 );
    }
    _findclose( h_file );
#else
    DIR* dir = opendir( directoryName.c_str() );
    if( dir != NULL )
    {
        struct dirent* dent;
        while( (dent = readdir(dir)) != NULL )
        {
            if( addDirectoryName )
                filenames.push_back( directoryName + "/" + string(dent->d_name) );
            else
                filenames.push_back( string(dent->d_name) );
        }
        closedir( dir );
    }
#endif
    sort( filenames.begin(), filenames.end() );
}
int main()
{
    string frames_folder, models_folder;
    float overlapThreshold = 0.2f;
    int numThreads = -1;
    models_folder = "D:\\Downloads\\models_VOC2007";
    vector<string> models_filenames;                  // was used below but never declared
    readDirectory( models_folder, models_filenames ); // collect the model files

    VideoCapture cap("D:\\images\\videos\\vid2.AVI"); // open the video file for reading
    namedWindow("MyVideo", CV_WINDOW_AUTOSIZE);
    if ( !cap.isOpened() ) // if not success, exit program
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }
    LatentSvmDetector detector( models_filenames );
    if( detector.empty() )
    {
        cout << "Models can't be loaded" << endl;
        exit(-1);
    }
    vector<Scalar> colors;
    generateColors( colors, detector.getClassNames().size() );
    Mat frame;
    while(1)
    {
        bool bSuccess = cap.read(frame);
        if (!bSuccess) // if not success, break loop
        {
            cout << "Cannot read the frame from video file" << endl;
            break;
        }
        detectAndDrawObjects( frame, detector, colors, overlapThreshold, numThreads );
        imshow( "MyVideo", frame ); // show the frame in the "MyVideo" window
        if(waitKey(30) == 27) // wait 30 ms for the 'esc' key; if pressed, break loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
I suggest you:
resize the frame so that 10x5 pixels is the smallest possible car size in the frame;
do a blur first; otherwise you can get lots of false positives from noise that generates edges similar to a car's;
I suppose the detector is for side views of cars (I have not tested it), so it will not detect cars rotated by more than 60 degrees, and it was trained on a database that may not be similar to your environment; it may therefore be better to train your own detector (car.xml).
HOG is based on edges, and edges are very sensitive to light and shadows. Try to preprocess the frame (contrast enhancement) before detecting the cars; a sketch of such preprocessing follows.
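A sketch of that kind of preprocessing (the resize factor, blur kernel, and CLAHE parameters here are guesses to tune, not values from the question):

// Hypothetical preprocessing before detector.detect(frame, ...).
cv::Mat small, gray, enhanced;
cv::resize(frame, small, cv::Size(), 0.5, 0.5);    // shrink so a ~10x5 px car is the smallest target
cv::GaussianBlur(small, small, cv::Size(3, 3), 0); // light blur to suppress noise edges
cv::cvtColor(small, gray, cv::COLOR_BGR2GRAY);
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
clahe->apply(gray, enhanced);                      // local contrast enhancement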