**Requirements:**
(1) Build OpenCV with CUDA and compile in C++
(2) Version: latest OpenCV
(3) Guide used to build and compile OpenCV: https://techawarey.com/programming/install-opencv-c-c-in-ubuntu-18-04-lts-step-by-step-guide/#Summary
(4) The samples_utility library is here: https://github.com/opencv/opencv_contrib/blob/master/modules/tracking/samples/samples_utility.hpp
(5) Compile command: g++ test.cpp -o testoutput -std=c++11 `pkg-config --cflags --libs opencv`
(6) Run command: ./testoutput
The code runs fine but is not accurate.
Step 1:
Read a frame from the camera.
Select the ROI (region of interest).
Start the KCF tracker with a Sobel feature extractor.
Track the selected object.
Step 2:
On tracking failure, call the template matching function MatchingMethod().
Run template matching.
Get the x, y values from template matching.
Reinitialize the KCF tracker with the Sobel feature extractor.
This code works fine for a still object, but when the object is moving the tracker produces false detections. I want to improve accuracy and reduce false detections.
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include "sample_utility.hpp"
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert to string
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
struct Array {
int arr[2];
};
Mat img;
Mat templ;
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
int main(int argc, char **argv)
{
TrackerKCF::Params param;
param.compress_feature = true;
param.compressed_size = 2;
param.desc_npca = 0;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.detect_thresh = 0.8;
// create a tracker object
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
VideoCapture cap(0);
// Exit if video is not opened
if(!cap.isOpened())
{
//cout << "Could not read video file" << endl;
return 1;
}
// Read first frame
Mat frame;
bool ok = cap.read(frame);
// Define initial bounding box
//Rect bbox(x, y, w, h);
// Uncomment the line below to select a different bounding box
Rect bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////
int H, W, cW, cH;
H = frame.rows; // frame height
W = frame.cols; // frame width
// Center point of the screen
cW = int(W / 2);
cH = int(H / 2);
Point p1(cW, cH);
// get bounding box
Mat imCrop = frame(bbox);
imwrite("1.png", imCrop);
//quit if ROI was not selected
if(bbox.width==0 || bbox.height==0)
return 0;
//////////////////////////
//imshow("Tracking", frame);
tracker->init(frame, bbox);
while(true)
{
Mat frame;
cap >> frame;
circle(frame, p1, 3, Scalar(0,255,0), -1);
// Start timer
if(bbox.width!=0 || bbox.height!=0){
double timer = (double)getTickCount();
// Update the tracking result
/////////////////////////////////////
bool ok = tracker->update(frame, bbox);
//////////////////////////////////////
//ok, bbox = tracker->update(frame);
// Calculate Frames per second (FPS)
fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////////////////////////////
int xxx, yyy, height, width;
xxx = bbox.x;
yyy = bbox.y;
height = bbox.height;
width = bbox.width;
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xxx + (xxx + width)) / 2);
cyROI = int((yyy + (yyy + height)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
}
else
{
// Tracking failure detected.
putText(frame, "Tracking failure detected", Point(100,110), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
templ = imread( "1.png", 1 );
img=frame.clone();
struct Array a = MatchingMethod( 0, 0 );
cout<<"X: "<<a.arr[0]<<"\n";
cout<<"Y: "<<a.arr[1]<<"\n";
cout<<"Width: "<<w<<"\n";
cout<<"Height: "<<h<<"\n";
int xx, yy, ww, hh;
xx = a.arr[0];
yy = a.arr[1];
ww = w;
hh = h;
Rect bbox(xx, yy, ww, hh);
tracker.release();
tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
tracker->init(frame, bbox);
//roi.x = MatchingMethod.
//waitKey(30);
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
////////////////////////////////////////////////////////////////////////
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xx + (xx + ww)) / 2);
cyROI = int((yy + (yy + hh)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
///////////////////////////////////////////////////////////////////////////
}
}
else{
}
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if(k == 27)
{
break;
}
}
return 0;
}
///////////////////////
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a;
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 0.8; // TM_CCOEFF_NORMED: a higher score means a better match
if( max_value >= THRESHOLD) {
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Template match found"<<endl;
}
}
return a;
}
Okay, here is my answer to your question.
First of all, you are making a mistake by applying template matching when the tracker misses. Template matching only succeeds when the candidate patch is essentially identical to the reference feature, so shadows, lighting changes, and similar variations in the environment will keep you from ever getting reliable results.
Secondly, if you delete the template matching scope, the tracker will continue to search for the target in the image effectively. The changes I made to your code are listed below. With these changes, I got better results:
Delete the template matching scope.
Decrease the detection threshold (param.detect_thresh) to 0.5.
Create more tracker objects to catch the target: this change is the most important part. My suggestion is to create more and more tracker objects (in my case 4, but you can increase the number). Each tracker should be given an input rectangle similar to the ROI the user chose, but not with the same coordinates. For example, if the user chooses cv::Rect(200,200,400,400), then the other trackers should get targets such as cv::Rect(180,190,400,400), cv::Rect(220,180,400,400), and so on. The reason is that the tracker algorithm is feature based, so it always tries to match features similar to its reference; by initializing several slightly offset trackers you increase the number of feature references.
And here is my code to guide you:
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert to string
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
struct Array {
int arr[2];
};
Mat img;
Mat templ;
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
int main(int argc, char **argv)
{
TrackerKCF::Params param;
param.compress_feature = true;
param.compressed_size = 2;
param.desc_npca = 0;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.detect_thresh = 0.5;
// create a tracker object
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker2 = TrackerKCF::create(param);
tracker2->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker3 = TrackerKCF::create(param);
tracker3->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker4 = TrackerKCF::create(param);
tracker4->setFeatureExtractor(sobelExtractor);
VideoCapture cap(0);
// Exit if video is not opened
if(!cap.isOpened())
{
//cout << "Could not read video file" << endl;
return 1;
}
cv::imshow("Tracking",0);
// Read first frame
Mat frame;
bool ok = cap.read(frame);
// Define initial bounding box
//Rect bbox(x, y, w, h);
// Uncomment the line below to select a different bounding box
Rect2d bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////
int H, W, cW, cH;
// print(f"hight {H} , Width {W}")
H = frame.rows;
W = frame.cols;
// Center point of the screen
cW = int(W / 2);
cH = int(H / 2);
Point p1(cW, cH);
//quit if ROI was not selected
if(bbox.width==0 || bbox.height==0)
return 0;
//////////////////////////
//imshow("Tracking", frame);
tracker->init(frame, bbox);
tracker2->init(frame, cv::Rect2d(bbox.x-10,bbox.y-10, bbox.width,bbox.height));
tracker3->init(frame, cv::Rect2d(bbox.x+10,bbox.y+10, bbox.width,bbox.height));
tracker4->init(frame, cv::Rect2d(bbox.x+20,bbox.y+20, bbox.width,bbox.height));
while(true)
{
Mat frame;
cap >> frame;
circle(frame, p1, 3, Scalar(0,255,0), -1);
// Start timer
if(bbox.width!=0 || bbox.height!=0){
double timer = (double)getTickCount();
// Update the tracking result
/////////////////////////////////////
bool ok = tracker->update(frame, bbox);
bool ok2 = tracker2->update(frame, bbox);
bool ok3 = tracker3->update(frame, bbox);
bool ok4 = tracker4->update(frame, bbox);
//////////////////////////////////////
//ok, bbox = tracker->update(frame);
// Calculate Frames per second (FPS)
fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok || ok2 || ok3 || ok4)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////////////////////////////
int xxx, yyy, height, width;
xxx = bbox.x;
yyy = bbox.y;
height = bbox.height;
width = bbox.width;
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xxx + (xxx + width)) / 2);
cyROI = int((yyy + (yyy + height)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
}
}
else{
}
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if(k == 27)
{
break;
}
}
return 0;
}
///////////////////////
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a;
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 0.8; // TM_CCOEFF_NORMED: a higher score means a better match
if( max_value >= THRESHOLD) {
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Template match found"<<endl;
}
}
return a;
}
I have written a simple program to perform Canny edge detection on a live stream. The code is shown below:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int lowThreshold=0;
int const max_lowThreshold = 100;
int kernel_size = 3;
int ratio = 3;
Mat img;
Mat display;
void CannyThreshold()
{
cvtColor(img, display, COLOR_RGB2GRAY);
// GaussianBlur(display,display,Size(7,7),3,3);
GaussianBlur(display, display, Size(1, 1), 1,1);
printf("%d\n",lowThreshold);
Canny(display,display,lowThreshold,3);
imshow("Canny",display);
}
int main()
{
VideoCapture cap(0);
namedWindow("Canny");
createTrackbar("Min Threshold: ","Canny",&lowThreshold,max_lowThreshold);
while(1)
{
cap.read(img);
int ret = waitKey(1);
CannyThreshold();
if(ret == 'q')
break;
}
cap.release();
return 0;
}
I get the following run-time error when I run the code. (I'm using OpenCV 4)
error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'
Any suggestions on how I can solve this error?
The issue is that GaussianBlur can't accept a kernel size of 1. Correct it to 3x3 or 5x5, as in the sketch below; a complete capture-loop example follows it.
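As a minimal sketch of the fix applied to the asker's CannyThreshold() (keeping their globals img, display, lowThreshold, ratio, and kernel_size; the upper Canny threshold is set to the conventional lowThreshold*ratio in place of the hard-coded 3):
void CannyThreshold()
{
    cvtColor(img, display, COLOR_RGB2GRAY);
    // Kernel size must be an odd value >= 3; Size(1, 1) triggered the assertion
    GaussianBlur(display, display, Size(3, 3), 1, 1);
    Canny(display, display, lowThreshold, lowThreshold * ratio, kernel_size);
    imshow("Canny", display);
}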
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, const char** argv)
{
VideoCapture cap;
Mat frame;
Mat image; // from cap to image
Mat src_gray;
Mat dst;
Mat detected_edges;
const String window_name = "Canny Edge Detector - VideoCapture";
int lowThreshold = 10;
const int max_lowThreshold = 100;
const int ratio = 3;
const int kernel_size = 3;
int FPS;
int frame_width, frame_height;
int camNum = 0;
cap.open(camNum);
if (!cap.isOpened())
{
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
return -1;
}
FPS = cap.get(CAP_PROP_FPS);
if (FPS <= 0) FPS = 30; // some cameras report 0; avoid dividing by zero in waitKey below
frame_width = cap.get(CAP_PROP_FRAME_WIDTH);
frame_height = cap.get(CAP_PROP_FRAME_HEIGHT);
dst.create(frame_height, frame_width, CV_8UC3); // Mat::create takes (rows, cols, type)
//cout << CV_8UC3;
while (true)
{
cap >> frame;
if (frame.empty())
break;
frame.copyTo(image);
// Convert the image to grayscale
cvtColor(image, src_gray, COLOR_BGR2GRAY);
//![reduce_noise]
/// Reduce noise with a kernel 3x3
blur(src_gray, detected_edges, Size(3, 3));
//![reduce_noise]
//![canny]
/// Canny detector
Canny(detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size);
//![canny]
/// Using Canny's output as a mask, we display our result
//![fill]
dst = Scalar::all(0);
//![fill]
//![copyto]
image.copyTo(dst, detected_edges);
//![copyto]
//![display]
imshow(window_name, dst);
if (waitKey(1000 / FPS) >= 0)
break;
}
return 0;
}
I have written a program in OpenCV (C++) to manipulate camera properties. I am trying to blur my camera display using a track bar. The code works, but only under a certain condition: it works when I change the position of the track bar with a mouse click, but if I try to slide the track bar it gives me the error mentioned below.
Here is my code
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
Mat image, image_blurred;
int slider=5;
float sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
void on_Trackbar(int,void *)
{
int k_size = max(1,slider);
//k_size = k_size%2 == 0 ? k_size+1 : k_size;
setTrackbarPos("kernel","Blur window",3);
sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
GaussianBlur(image,image_blurred,Size(3,3),sigma);
}
int main()
{
Mat img;
VideoCapture cap(0);
if(!cap.isOpened())
{
cout<<"Camera is not successfully opened"<<endl;
return -1;
}
namedWindow("original image",CV_WINDOW_AUTOSIZE);
namedWindow("Blur Image",CV_WINDOW_AUTOSIZE);
while(!char(waitKey(30)=='q') && cap.isOpened())
{
cap>>img;
GaussianBlur(img,image_blurred,Size(slider,slider),sigma);
createTrackbar("kernel","Blur Image",&slider,21,on_Trackbar);
imshow("Blur Image",image_blurred);
imshow("original image",img);
}
destroyAllWindows();
return 0;
}
Please share your valuable views. Thanks in advance!
In the while loop, you're passing an invalid value to GaussianBlur, since slider can also be an even number.
You can correct this by introducing a new variable int kernel_size = 2*slider+1. slider is now the radius of the kernel, and kernel_size is guaranteed to be odd (for example, slider = 2 gives kernel_size = 5).
Also, you don't need to call GaussianBlur in the callback function, since it's already called in the main loop. The only goal of the callback is to update the values of kernel_size and sigma.
This code will work as expected:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
Mat image, image_blurred;
int slider = 0;
int kernel_size = 3;
float sigma = 0.3 *((kernel_size - 1)*0.5 - 1) + 0.8;
void on_Trackbar(int, void *)
{
kernel_size = 2 * slider + 1;
sigma = 0.3 *((kernel_size - 1)*0.5 - 1) + 0.8;
}
int main()
{
Mat img;
VideoCapture cap(0);
if (!cap.isOpened())
{
cout << "Camera is not successfully opened" << endl;
return -1;
}
namedWindow("original image", CV_WINDOW_AUTOSIZE);
namedWindow("Blur Image", CV_WINDOW_AUTOSIZE);
createTrackbar("kernel", "Blur Image", &slider, 21, on_Trackbar);
while (!char(waitKey(30) == 'q') && cap.isOpened())
{
cap >> img;
GaussianBlur(img, image_blurred, Size(kernel_size, kernel_size), sigma);
imshow("Blur Image", image_blurred);
imshow("original image", img);
}
destroyAllWindows();
return 0;
}
I am trying to display a video in a separate function. To do this I push_back each frame into a vector and then pass this vector to the function, but my function displays a single frame repeatedly. My code is below. Please tell me what I am doing wrong.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void hsv_histogram(Mat Frame)
{
cvtColor( Frame, hsv_base, CV_BGR2HSV );
int h_bins = 50;
int s_bins = 32;
int histSize[] = { h_bins, s_bins };
float h_ranges[] = { 0, 256 };
float s_ranges[] = { 0, 180 };
const float* ranges[] = { h_ranges, s_ranges };
int channels[] = { 0, 1 };
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
std::vector<Mat>::iterator it;
it = videoframes.begin();
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
obj.hsv_histogram(frame);
videoframes.push_back(frame);
}
displayvideo(videoframes);
// waitKey(0); // key press to close window
}
void writer()
{
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
VideoWriter oVideoWriter ("D:/MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program
{
cout << "ERROR: Failed to write the video" << endl;
exit(0);
}
}
void displayvideo(vector<Mat> videoframe)
{
Mat tempframe;
while(!videoframe.empty()) //Show the image captured in the window and repeat
{
tempframe = videoframe.back();
imshow("video", tempframe);
videoframe.pop_back();
waitKey(20); // waits to display frame
}
// waitKey(0);
}
void displayframe(Mat frame)
{
imshow("video", frame);
waitKey(20); // waits to display frame
}
};
int _tmain(int argc, _TCHAR* argv[])
{
video obj("video.avi");
//obj.readvideo();
}
You need to copy or clone your frame to another Mat then push to your vector, change your code like
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
Mat tmp=frame.clone();
obj.hsv_histogram(tmp);
videoframes.push_back(tmp);
}
In your code you're passing the same pointer allocated for frame to your vector every time, so you end up with a vector of Mat objects all pointing to the same memory location. To learn more about OpenCV Mat and memory allocation, see the documentation.
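As a minimal standalone sketch of that shared-data behavior (core OpenCV only): assignment shares the pixel buffer, while clone() copies it, which is why the un-cloned frames all ended up identical.
#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;
int main()
{
    Mat a(2, 2, CV_8UC1, Scalar(0));
    Mat b = a;               // shallow copy: b shares a's pixel buffer
    b.at<uchar>(0, 0) = 255;
    std::cout << (int)a.at<uchar>(0, 0) << std::endl; // prints 255: a changed too
    Mat c = a.clone();       // deep copy: c owns its own buffer
    c.at<uchar>(0, 0) = 7;
    std::cout << (int)a.at<uchar>(0, 0) << std::endl; // still 255: a untouched
    return 0;
}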
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <highgui.h>
using namespace cv;
using namespace std;
Mat image;
char window_name[20]="Get coordinates";
static void onMouse( int event, int x, int y, int f, void* ){
cout << x << " " << y << endl;
//putText(image, "point", Point(x,y), CV_FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0));
}
int main() {
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
image = imread("image.png");
imshow( window_name, image );
setMouseCallback( window_name, onMouse, 0 );
waitKey(0);
return 0;
}
This is code that prints the coordinates of the mouse pointer to the console while hovering over the image. If I wish to print the coordinates on the image itself in the same way, how am I supposed to do it?
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
using namespace cv;
Mat img;
void onMouse(int event, int x, int y, int flags, void* param)
{
char text[100];
Mat img2, img3;
img2 = img.clone();
if (event == CV_EVENT_LBUTTONDOWN)
{
Vec3b p = img2.at<Vec3b>(y,x);
sprintf(text, "R=%d, G=%d, B=%d", p[2], p[1], p[0]);
}
else if (event == CV_EVENT_RBUTTONDOWN)
{
cvtColor(img, img3, CV_BGR2HSV);
Vec3b p = img3.at<Vec3b>(y,x);
sprintf(text, "H=%d, S=%d, V=%d", p[0], p[1], p[2]);
}
else
sprintf(text, "x=%d, y=%d", x, y);
putText(img2, text, Point(5,15), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0));
imshow("image", img2);
}
int main(int argc, char** argv)
{
img = imread(argc == 2 ? argv[1] : "lena.jpg");
if (img.empty())
return -1;
namedWindow("image");
setMouseCallback("image", onMouse, 0);
imshow("image", img);
waitKey(0);
return 0;
}
This should do the work.
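Note that this example draws the text at a fixed corner, Point(5,15); if you want the coordinates to follow the pointer itself, pass Point(x, y) to putText instead.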