I tried to change the code from IplImage to Mat, but failed. I am using OpenCV 4.1.2; this sample uses OpenCV 2.4.13:
https://jadeshin.tistory.com/entry/cvAcc에-의한-배경-영상-계산
I can't use IplImage, so I changed the code to the following:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture cap("ball.avi");
    if (!cap.isOpened())
    {
        cout << "file not found." << endl;
        return 0;
    }
    Mat image;
    Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
    Mat grayImage(size, CV_8UC1);
    Mat sumImage(size, CV_32FC1);
    sumImage.setTo(Scalar::all(0));
    int nFrameCount = 0;
    for (;;)
    {
        cap.read(image);
        if (image.empty())
        {
            cout << "couldn't capture" << endl;
            break;
        }
        cvtColor(image, grayImage, COLOR_BGR2GRAY);
        accumulate(grayImage, sumImage, NULL); // here is the error
        imshow("grayImage", grayImage);
        char chKey = waitKey(50);
        if (chKey == 27)
            break;
        nFrameCount++;
    }
    convertScaleAbs(sumImage, sumImage, 1.0 / nFrameCount);
    imwrite("ballBkg.jpg", sumImage);
    destroyAllWindows();
    return 0;
}
It compiles without errors but fails at runtime. I also tried try/catch, but that failed too.
What's wrong with accumulate?
The C++ version of accumulate is:
void accumulate(InputArray src, InputOutputArray dst, InputArray mask = noArray())
You are passing NULL instead of noArray(), so just do:
accumulate(grayImage, sumImage);
cv::noArray() is an empty Mat, not NULL.
Edit:
Also change
Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
to
Size size = Size((int)cap.get(CAP_PROP_FRAME_WIDTH), (int)cap.get(CAP_PROP_FRAME_HEIGHT));
so that the size comes from the capture itself, not from the values of the property enums. (In OpenCV 4 the CV_ prefix is gone, so it is CAP_PROP_FRAME_WIDTH rather than CV_CAP_PROP_FRAME_WIDTH.)
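Putting both fixes together, a minimal sketch of the corrected loop (same file names and output as in your code, assuming OpenCV 4.x) would be:

VideoCapture cap("ball.avi");
if (!cap.isOpened()) return 0;
// take the frame size from the capture, not from the property enum values
Size size((int)cap.get(CAP_PROP_FRAME_WIDTH), (int)cap.get(CAP_PROP_FRAME_HEIGHT));
Mat image, grayImage, background;
Mat sumImage = Mat::zeros(size, CV_32FC1);
int nFrameCount = 0;
while (cap.read(image) && !image.empty())
{
    cvtColor(image, grayImage, COLOR_BGR2GRAY);
    accumulate(grayImage, sumImage);   // no mask argument, defaults to noArray()
    imshow("grayImage", grayImage);
    if (waitKey(50) == 27)
        break;
    nFrameCount++;
}
if (nFrameCount > 0)
{
    convertScaleAbs(sumImage, background, 1.0 / nFrameCount);
    imwrite("ballBkg.jpg", background);
}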
When I run this program and adjust the slider bar multiple times, the image appears different even though the slider is at the same position. If you try this code, move the slider from the minimum to the maximum position back and forth several times and you can see a slight alteration in the image each time.
I have traced the point at which this happens to the line calling the add function in my onProgram6Trackbar1 function. Removing it removes the variations between slider movements. Why is this happening?
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
#include <cmath>
class ProgramData {
public:
    ProgramData() {
        k1=0;
        k2=0;
        k3=0;
        k4=0;
        k5=0;
    }
    int k1;
    int k2;
    int k3;
    int k4;
    int k5;
    Mat * source_U8C3;
    Mat * temp1_U8C3;
    Mat * temp2_U8C3;
    Mat * temp3_U8C1;
    Mat * temp4_U8C1;
    Mat * temp5_U8C1;
    Mat * temp6_U8C1;
    Mat * temp7_U8C1;
    vector<Mat> tempv1_U8C1;
    vector<Mat> tempv2_U8C1;
    Mat * output_U8C1;
    Mat * output_U8C3;
    Mat * dim1by1;
};
static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData * pd = (ProgramData *) vp;
    *(pd->temp3_U8C1) = pd->tempv1_U8C1[2].clone();
    inRange(*(pd->temp3_U8C1), pd->k1, 255, *(pd->temp4_U8C1));
    bitwise_not(*(pd->temp4_U8C1), *(pd->temp5_U8C1));
    bitwise_and(*(pd->temp5_U8C1), *(pd->temp3_U8C1), *(pd->temp6_U8C1));
    bitwise_or(*(pd->temp6_U8C1), Scalar(pd->k1), *(pd->temp7_U8C1), *(pd->temp4_U8C1));
    imshow( "Glare Reduction 4", *(pd->temp7_U8C1));
}
void program6(char * argv) {
    ProgramData pd;
    pd.k1 = 0;
    Mat source = imread(argv, IMREAD_COLOR); // Read the file
    pd.source_U8C3 = &source;
    Size s( pd.source_U8C3->size().width / 1.3, pd.source_U8C3->size().height / 1.3 );
    resize( *(pd.source_U8C3), *(pd.source_U8C3), s, 0, 0, CV_INTER_AREA );
    pd.output_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.output_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    //pd.temp1_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.temp2_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.temp3_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp4_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp5_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp6_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp7_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.dim1by1 = new Mat(100, 800, CV_8UC1);
    cout << "source type = " << pd.source_U8C3->type() << endl;
    if(! pd.source_U8C3->data ) { cout << "Could not open image" << std::endl; return; }
    cvtColor(*(pd.source_U8C3), *(pd.temp2_U8C3), CV_BGR2HSV); // original to hsv
    split(*(pd.temp2_U8C3), pd.tempv1_U8C1);
    namedWindow( "Glare Reduction - Controls", WINDOW_AUTOSIZE ); // Create a window for display.
    onProgram6Trackbar1(0, &pd);
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    imshow( "Glare Reduction - Controls", *(pd.dim1by1) ); // Show our image inside it.
    waitKey(0); // Wait for a keystroke in the window
}

int main( int argc, char** argv )
{
    program6("Blocks1.jpg");
}
Update 1:
New code posted below. I tried changing the code so that it does not use any Mat pointers. It still does exactly the same thing.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
#include <cmath>
class ProgramData {
public:
    ProgramData() {
        k1=0;
    }
    int k1;
    Mat source_U8C3;
    Mat temp1_U8C3;
    Mat temp2_U8C3;
    Mat temp3_U8C1;
    Mat temp4_U8C1;
    Mat temp5_U8C1;
    Mat temp6_U8C1;
    Mat temp7_U8C1;
    vector<Mat> tempv1_U8C1;
    vector<Mat> tempv2_U8C1;
    Mat output_U8C1;
    Mat output_U8C3;
    Mat dim1by1;
};

static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData * pd = (ProgramData *) vp;
    pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
    inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
    bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1); // Note for monday, here does not work below works. Why?
    bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
    bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
    imshow( "Glare Reduction 4", pd->temp7_U8C1);
}

int main( int argc, char** argv ) {
    ProgramData pd;
    pd.k1 = 0;
    pd.source_U8C3 = imread("Photo Examples/Blocks1.jpg", IMREAD_COLOR); // Read the file
    Size s( pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3 );
    resize( pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA );
    pd.dim1by1.create(100, 800, CV_8UC1);
    cout << "source type = " << pd.source_U8C3.type() << endl;
    if(! pd.source_U8C3.data ) { cout << "Could not open image" << std::endl; return 0; }
    cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV); // original to hsv
    split(pd.temp2_U8C3, pd.tempv1_U8C1);
    namedWindow( "Glare Reduction - Controls", WINDOW_AUTOSIZE ); // Create a window for display.
    onProgram6Trackbar1(0, &pd);
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    imshow( "Glare Reduction - Controls", pd.dim1by1 ); // Show our image inside it.
    waitKey(0); // Wait for a keystroke in the window
    return 0;
}
Update 2:
I think I may have found the source of the problem. When I add this line
static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData * pd = (ProgramData *) vp;
    pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
    inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
    bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1);
    bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
    pd->temp7_U8C1 = pd->tempv1_U8C1[2].clone(); // <----
    bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
    imshow( "Glare Reduction 4", pd->temp7_U8C1);
}
to onProgram6Trackbar1, it suddenly works as expected. I thought that since OpenCV 2 does its own memory allocation, I didn't have to initialize pd->temp7_U8C1, which serves as the output matrix in the call to bitwise_or. It's almost as if the underlying memory in pd->temp7_U8C1 was pointing to memory that belonged to one of the buffers used as output of the image processing done in main (pd.tempv1_U8C1 or pd.source_U8C3). Or perhaps the line I added did something else that I have not thought of.
So my new question is: why did this line fix it, and what is going on underneath? Is the behavior of using an uninitialized Mat as an output defined somewhere in the documentation? It was my understanding that you don't have to initialize the size or type of a matrix that you are using as an output Mat.
Maybe a bit late, but anyway: first check the slightly cleaned-up code below. I removed everything that's redundant and moved the actual work of the trackbar callback into a member function of your class. This way, you can operate directly on the members.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
class ProgramData
{
public:
    ProgramData()
    {
        k1 = 0;
    }

    int k1;
    Mat source_U8C3,
        temp2_U8C3, temp4_U8C1,
        temp5_U8C1, temp6_U8C1,
        temp7_U8C1;
    vector<Mat> tempv1_U8C1;

    void reduce_glare(void)
    {
        // sets elements in temp4 to 255 if within range
        inRange(tempv1_U8C1[2], Scalar(k1), Scalar(255), temp4_U8C1);
        // bitwise_not(InputArray src, OutputArray dst)
        bitwise_not(temp4_U8C1, temp5_U8C1);
        // bitwise_and(InputArray src1, InputArray src2, OutputArray dst)
        bitwise_and(temp5_U8C1, tempv1_U8C1[2], temp6_U8C1);
        // watch out here:
        temp7_U8C1 = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1);
        Mat x = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1) * k1;
        // bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask)
        bitwise_or(temp6_U8C1, x, temp7_U8C1, temp4_U8C1);
        cout << "source type = " << temp7_U8C1.type() << endl;
        cout << "source channels = " << temp7_U8C1.channels() << endl;
        cout << "source depth = " << temp7_U8C1.depth() << endl;
    }
};

void onProgram6Trackbar1(int v, void *vp)
{
    ProgramData *pd = static_cast<ProgramData *>(vp);
    (*pd).reduce_glare();
    imshow("Glare Reduction 4", pd->temp7_U8C1);
}

int main(int argc, char **argv)
{
    ProgramData pd;
    pd.source_U8C3 = imread("CutDat.jpeg", IMREAD_COLOR);
    Size s(pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3);
    resize(pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA);
    cout << "source type = " << pd.source_U8C3.type() << endl;
    cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV);
    split(pd.temp2_U8C3, pd.tempv1_U8C1);
    namedWindow("Glare Reduction - Controls", WINDOW_AUTOSIZE);
    imshow("Glare Reduction - Controls", Mat(100, 800, CV_8UC1));
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    waitKey(0);
    return 0;
}
The important part is the line where temp7_U8C1 is initialized, but not with the original data. The result you get is still not what you want, but it highlights that the issue lies within the call to bitwise_or. Your question regarding the Scalar bug doesn't apply here, as I've shown in the code.
The code was tested on Windows with 2.4.10 and on Ubuntu with 2.4.8, both giving the same results. Running the code under valgrind reports no problems.
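To make the underlying behavior concrete, here is a minimal sketch (my own illustration, not taken from your code) of why an uninitialized output Mat misbehaves with a masked operation: bitwise_or with a mask only writes the pixels where the mask is non-zero, so whatever the destination already contained (stale data from a previous callback invocation, or uninitialized memory on the first call) survives everywhere the mask is zero.

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat a = Mat::ones(2, 2, CV_8UC1) * 200;
    Mat b = Mat::ones(2, 2, CV_8UC1) * 10;
    Mat mask = (Mat_<uchar>(2, 2) << 255, 0, 255, 0); // only the first column is written

    Mat dst1; // never initialized: the unmasked pixels hold whatever was in the buffer
    bitwise_or(a, b, dst1, mask);

    Mat dst2 = Mat::zeros(2, 2, CV_8UC1); // explicitly initialized: unmasked pixels stay 0
    bitwise_or(a, b, dst2, mask);

    std::cout << "dst1 =\n" << dst1 << "\ndst2 =\n" << dst2 << std::endl;
    return 0;
}

In your callback the destination was allocated once and then reused on every slider move, so the unmasked pixels carried over results from earlier invocations, which is exactly the slow drift you observed.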
I'm using the OpenCV master branch (3.0.0-dev) with CUDA on Ubuntu 12.04, and trying to compile the following OpenCV GPU code:
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
using namespace cv;
int main (int argc, char* argv[])
{
    try
    {
        cv::Mat src_host = cv::imread("file.png", CV_LOAD_IMAGE_GRAYSCALE);
        cv::gpu::GpuMat dst, src;
        src.upload(src_host);
        cv::gpu::threshold(src, dst, 128.0, 255.0, CV_THRESH_BINARY);
        cv::Mat result_host = dst;
        cv::imshow("Result", result_host);
        cv::waitKey();
    }
    catch(const cv::Exception& ex)
    {
        std::cout << "Error: " << ex.what() << std::endl;
    }
    return 0;
}
The compile command is:
g++ testgpu.cpp -o test `pkg-config --cflags --libs opencv` -lopencv_gpu
It gives the following compile errors:
testgpu.cpp: In function ‘int main(int, char**)’:
testgpu.cpp:13:51: error: ‘CV_LOAD_IMAGE_GRAYSCALE’ was not declared in this scope
cv::Mat src_host = cv::imread("file.png", CV_LOAD_IMAGE_GRAYSCALE);
^
testgpu.cpp:17:52: error: ‘CV_THRESH_BINARY’ was not declared in this scope
cv::gpu::threshold(src, dst, 128.0, 255.0, CV_THRESH_BINARY);
^
testgpu.cpp:19:31: error: conversion from ‘cv::gpu::GpuMat’ to non-scalar type ‘cv::Mat’ requested
cv::Mat result_host = dst;
^
Is something wrong with my OpenCV installation, or has the API changed in OpenCV 3.0.0?
The gpu module was redesigned in OpenCV 3.0. It was split into several modules, the module was renamed to cuda, and the gpu:: namespace was renamed to cuda::. The correct code for OpenCV 3.0:
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/cudaarithm.hpp"
using namespace cv;
int main (int argc, char* argv[])
{
    try
    {
        cv::Mat src_host = cv::imread("file.png", cv::IMREAD_GRAYSCALE);
        cv::cuda::GpuMat dst, src;
        src.upload(src_host);
        cv::cuda::threshold(src, dst, 128.0, 255.0, cv::THRESH_BINARY);
        cv::Mat result_host(dst);
        cv::imshow("Result", result_host);
        cv::waitKey();
    }
    catch(const cv::Exception& ex)
    {
        std::cout << "Error: " << ex.what() << std::endl;
    }
    return 0;
}
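Note that the compile command from the question most likely needs updating as well, since there is no single opencv_gpu library in 3.0 anymore; assuming your CUDA build produced the cudaarithm module, linking would look something like:
g++ testgpu.cpp -o test `pkg-config --cflags --libs opencv` -lopencv_cudaarithm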
Ah, they've been playing with the constants in master. Expect the CV_* prefix to be removed almost everywhere (except the types; CV_8U and such are still alive).
So it's cv::THRESH_BINARY, cv::IMREAD_GRAYSCALE, but ... cv::COLOR_BGR2GRAY (you didn't use it here, but I'll spare you the searching ;))
Sorry, I've no experience with the GPU stuff, so I can't solve the last riddle there.
I'm using the OpenCV 3.0.0-dev version with CMake. I installed OpenCV following this tutorial:
http://docs.opencv.org/trunk/doc/tutorials/introduction/linux_install/linux_install.html#linux-installation
After that, I use the following CMake file to compile a simple face detector:
cmake_minimum_required(VERSION 2.8)
project( face-detect )
find_package( OpenCV 3.0.0 EXACT REQUIRED )
add_executable( face-detect face-detect.cpp )
target_link_libraries( face-detect ${OpenCV_LIBS} )
The Makefile is generated successfully, but when I compile the code there are missing classes under the cv namespace:
face-detect.cpp:15: error: 'CommandLineParser' is not a member of 'cv'
cv::CommandLineParser parser(argc, argv, keys);
^
face-detect.cpp:23: error: invalid initialization of reference of type 'cv::InputArray {aka const cv::_InputArray&}' from expression of type 'const char*'
std::cout << cv::format("Error: cannot load cascade file!\n");
However, it does find cv::CascadeClassifier. How can I fix this problem?
This is the code for face-detect; I took it from the internet:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
const char* keys =
{
    "{i|input| |The source image}"
    "{o|outdir| |The output directory}"
};

int main(int argc, const char** argv)
{
    cv::CommandLineParser parser(argc, argv, keys);
    std::string infile = parser.get<std::string>("input");
    std::string outdir = parser.get<std::string>("outdir");
    std::string cascade_file = "haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if (cascade_file.empty() || !cascade.load(cascade_file))
    {
        std::cout << cv::format("Error: cannot load cascade file!\n");
        return -1;
    }
    cv::Mat src = cv::imread(infile);
    if (src.empty())
    {
        std::cout << cv::format("Error: cannot load source image!\n");
        return -1;
    }
    cv::Mat gray;
    cv::cvtColor(src, gray, CV_BGR2GRAY);
    cv::equalizeHist(gray, gray);
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(gray, faces, 1.2, 3);
    std::cout << cv::format("0, %s (%dx%d)\n", infile.c_str(), src.cols, src.rows);
    cv::Mat src_copy = src.clone();
    for (int i = 0; i < faces.size(); i++)
    {
        std::string outfile(cv::format("%s/face-%d.jpg", outdir.c_str(), i+1));
        cv::Rect r = faces[i];
        cv::rectangle(src, r, CV_RGB(0,255,0), 2);
        cv::imwrite(outfile, src_copy(r));
        cv::imwrite(infile, src);
        std::cout << cv::format("%d, %s (%dx%d)\n", i+1, outfile.c_str(), r.width, r.height);
    }
    return 0;
}
Hi, I am trying to play a video using the following code:
//#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
//#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
    string filename = "anime.avi";
    VideoCapture capture(filename);
    Mat frame;

    if( !capture.isOpened() )
        throw "Error when reading steam_avi";

    namedWindow( "w", 1);
    for( ; ; )
    {
        capture >> frame;
        if(!frame)
            break;
        imshow("w", frame);
        waitKey(20); // waits to display frame
    }
    waitKey(0); // key press to close window
    // releases and window destroy are automatic in C++ interface
}
When I build it, though, I get the following errors:
project.cpp: In function ‘int main(int, char**)’:
project.cpp:23:13: error: no match for ‘operator!’ in ‘!frame’
project.cpp:23:13: note: candidates are:
project.cpp:23:13: note: operator!(bool) <built-in>
project.cpp:23:13: note: no known conversion for argument 1 from ‘cv::Mat’ to ‘bool’
/usr/local/include/opencv2/core/operations.hpp:2220:20: note: bool cv::operator!(const cv::Range&)
/usr/local/include/opencv2/core/operations.hpp:2220:20: note: no known conversion for argument 1 from ‘cv::Mat’ to ‘const cv::Range&’
Could you possibly help? I've been on this for hours without success :(
That's because there is no operator! overloaded for class cv::Mat. The documentation does not clearly state what should happen to the image if acquisition fails. Here is the implementation of cv::VideoCapture::operator>> from cap.cpp:
VideoCapture& VideoCapture::operator >> (Mat& image)
{
    if(!grab())
        image.release();
    else
        retrieve(image);
    return *this;
}
Now go to the documentation for cv::Mat::release, and let's double-check its implementation in mat.hpp:
inline void Mat::release()
{
    if( refcount && CV_XADD(refcount, -1) == 1 )
        deallocate();
    data = datastart = dataend = datalimit = 0;
    size.p[0] = 0;
    refcount = 0;
}
Hence, you can check the data pointer to find out whether the grab was successful:
if (!frame.data) break;
However, I recommend using the function-style call cv::VideoCapture::read in this case, since it explicitly returns whether it was successful or not:
if (!capture.read(frame)) break;
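Put together, a minimal sketch of the playback loop using that check might look like this (same anime.avi file as in your code; checking frame.empty() would be an equivalent alternative to frame.data):

VideoCapture capture("anime.avi");
if (!capture.isOpened())
    throw "Error when reading steam_avi";
namedWindow("w", 1);
Mat frame;
for (;;)
{
    if (!capture.read(frame))   // read() returns false when no more frames can be grabbed
        break;
    imshow("w", frame);
    waitKey(20);                // waits to display frame
}
waitKey(0);                     // key press to close window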
HTH