OpenCV - getting pixel data from a camera device - C++

I am using OpenCV 2.4.6. I found an example on the Internet of grabbing frames from a camera. It works well (it displays my ugly face on the screen). However, I cannot get at the pixel data of the frames. I found a topic here: http://answers.opencv.org/question/1934/reading-pixel-values-from-a-frame-of-a-video/ but its approach doesn't work for me.
Here is the code; the comments point out where it goes wrong.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int main() {
    int c;
    IplImage* img;
    CvCapture* capture = cvCaptureFromCAM(1);
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    while(1) {
        img = cvQueryFrame(capture);
        uchar* data = (uchar*)img->imageData; // access violation
        // this does not work either
        //Mat m(img);
        //uchar a = m.data[0]; // access violation
        cvShowImage("mainWin", img);
        c = cvWaitKey(10);
        if(c == 27)
            break;
    }
}
Could you give me some suggestions, please?

I suggest using the newer Mat class instead of IplImage, since your question is tagged C++. For your task you can use the data member of Mat - it points to the Mat's internal pixel buffer. For example: Mat img; uchar* data = img.data;. Here's a full example:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
int main() {
    int c;
    Mat img;
    VideoCapture capture(0);
    namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    bool readOk = true;
    while(capture.isOpened()) {
        readOk = capture.read(img);
        // make sure we grabbed the frame successfully
        if (!readOk) {
            std::cout << "No frame" << std::endl;
            break;
        }
        uchar* data = img.data; // this should work
        imshow("mainWin", img);
        c = waitKey(10);
        if(c == 27)
            break;
    }
}
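If you need individual pixel values rather than the raw pointer, Mat::at is usually the safer route because it accounts for the row stride (img.step); here is a minimal sketch, assuming the camera delivers the usual 8-bit BGR frames (CV_8UC3):
// read the pixel at row 0, column 0 of an 8-bit BGR frame
Vec3b pixel = img.at<Vec3b>(0, 0);
uchar blue  = pixel[0];   // channel order is B, G, R
uchar green = pixel[1];
uchar red   = pixel[2];
// equivalent raw access: index rows through img.step, not img.cols * 3
uchar* row0 = img.ptr<uchar>(0);
uchar blueAgain = row0[0];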

Related

Trying to locate ArUco fiducials with a USB camera on Ubuntu 18.04

I have some code that I think should be working, using OpenCV to detect a set of ArUco fiducials. For some reason, I can't get my code to run. It gives the error "Unable to stop the stream: Invalid argument".
#include "opencv2/opencv.hpp"
using namespace cv;
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/aruco.hpp>
int main(int argc, char** argv)
{
Mat markerImage;
VideoCapture cap;
// open the default camera, use something different from 0 otherwise;
// Check VideoCapture documentation.
if(!cap.open(1))
return 0;
for(;;)
{
Mat frame;
cap >> frame;
if( frame.empty() ) break; // end of video stream
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f>> markerCorners, rejectedCandidates;
cv::Ptr<cv::aruco::DetectorParameters> parameters = cv::aruco::DetectorParameters::create();
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_50);
cv::aruco::detectMarkers(frame, dictionary, markerCorners, markerIds, parameters, rejectedCandidates);
cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
imshow("Camera)", frame);
if( waitKey(10) == 27 ) break; // stop capturing by pressing ESC
}
// the camera will be closed automatically upon exit
// cap.close();
return 0;
}
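One thing worth checking, purely as a debugging sketch rather than a confirmed fix: on Ubuntu this GStreamer message often appears when the capture device cannot actually be opened with the requested index or backend. Assuming OpenCV 3.4 or newer (where open() accepts an API preference), you could force the V4L2 backend and fall back to index 0:
VideoCapture cap;
// try camera index 1 first, then fall back to 0, forcing the V4L2 backend
if (!cap.open(1, cv::CAP_V4L2) && !cap.open(0, cv::CAP_V4L2))
    return 0; // no camera could be opened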

OpenCV C++: changing IplImage to Mat with accumulate

I tried to convert this code from IplImage to Mat, but failed.
I am using OpenCV 4.1.2; the sample uses OpenCV 2.4.13:
https://jadeshin.tistory.com/entry/cvAcc에-의한-배경-영상-계산 (background image calculation with cvAcc)
I can't use IplImage any more, so I changed the code to:
#include <opencv2\opencv.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\core\mat.hpp>
#include <opencv2\imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture cap("ball.avi");
    if (!cap.isOpened())
    {
        cout << "file not found." << endl;
        return 0;
    }
    Mat image;
    Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
    Mat grayImage(size, CV_8UC1);
    Mat sumImage(size, CV_32FC1);
    sumImage.setTo(Scalar::all(0));
    int nFrameCount = 0;
    for (;;)
    {
        cap.read(image);
        if (image.empty())
        {
            cout << "couldn't capture" << endl;
            break;
        }
        cvtColor(image, grayImage, COLOR_BGR2GRAY);
        accumulate(grayImage, sumImage, NULL); // here is the error
        imshow("grayImage", grayImage);
        char chKey = waitKey(50);
        if (chKey == 27)
            break;
        nFrameCount++;
    }
    convertScaleAbs(sumImage, sumImage, 1.0 / nFrameCount);
    imwrite("ballBkg.jpg", sumImage);
    destroyAllWindows();
    return 0;
}
It compiles with no errors but fails when executed.
I also tried try/catch, but that did not help.
What's wrong with accumulate?
The C++ signature of accumulate is: void accumulate(InputArray src, InputOutputArray dst, InputArray mask = noArray())
You are passing NULL instead of noArray(), so just do:
accumulate(grayImage, sumImage);
cv::noArray() is an empty Mat, not NULL.
Edit :
Also change
Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
to
Size size = Size((int)cap.get(CAP_PROP_FRAME_WIDTH), (int)cap.get(CAP_PROP_FRAME_HEIGHT));
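Putting both changes together, the relevant lines become (a sketch against OpenCV 4.x, where the CV_-prefixed constants are no longer available by default):
// query the real frame size from the capture instead of casting the property IDs
Size size((int)cap.get(CAP_PROP_FRAME_WIDTH), (int)cap.get(CAP_PROP_FRAME_HEIGHT));
Mat sumImage(size, CV_32FC1, Scalar::all(0));
// inside the capture loop:
cvtColor(image, grayImage, COLOR_BGR2GRAY);
accumulate(grayImage, sumImage); // omit the mask; the default is noArray()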

Converting webcam program to process one image

I am currently trying to modify a program that takes a webcam stream as input. The problem is that when I alter the program to use a single image instead, it doesn't display the output I am expecting. The original video-stream version is below:
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
    VideoCapture videoCapture(0);
    videoCapture.set(CV_CAP_PROP_SETTINGS, 1);
    if (!videoCapture.isOpened()) {
        cout << "Can't find camera!" << endl;
        return -1;
    }
    Mat frame, frameOut, handMask, foreground, fingerCountDebug;
    BackgroundRemover backgroundRemover;
    SkinDetector skinDetector;
    FaceDetector faceDetector;
    FingerCount fingerCount;
    for (int i = 0; i < 2; i++)
    {
        videoCapture >> frame;
        frameOut = frame.clone();
        skinDetector.drawSkinColorSampler(frameOut);
        foreground = backgroundRemover.getForeground(frame);
        faceDetector.removeFaces(frame, foreground);
        handMask = skinDetector.getSkinMask(foreground);
        fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
        imshow("output", frameOut);
        imshow("foreground", foreground);
        imshow("handMask", handMask);
        imshow("handDetection", fingerCountDebug);
        if (i == 0)
        {
            backgroundRemover.calibrate(frame);
            skinDetector.calibrate(frame);
        }
    }
    waitKey(0);
}
With the video stream, the output shows a detection; when I modify the code so that frame is not read from the video stream, the output shows nothing at all. Can anybody help me fix this? EDIT: To clear up some confusion, the modified code that reads in a single image is below:
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
    string imageName("C:/Users/whoever/Desktop/hand_test.jpg"); // by default
    Mat image;
    image = imread(imageName.c_str(), IMREAD_COLOR); // Read the file
    Mat frame, frameOut, handMask, foreground, fingerCountDebug;
    BackgroundRemover backgroundRemover;
    SkinDetector skinDetector;
    FaceDetector faceDetector;
    FingerCount fingerCount;
    for (int i = 0; i < 2; i++)
    {
        frame = image;
        frameOut = frame.clone();
        skinDetector.drawSkinColorSampler(frameOut);
        foreground = backgroundRemover.getForeground(frame);
        faceDetector.removeFaces(frame, foreground);
        handMask = skinDetector.getSkinMask(foreground);
        fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
        imshow("output", frameOut);
        imshow("foreground", foreground);
        imshow("handMask", handMask);
        imshow("handDetection", fingerCountDebug);
        if (i == 0)
        {
            cout << "Calibrating...";
            backgroundRemover.calibrate(frame);
            skinDetector.calibrate(frame);
        }
    }
    waitKey(0);
}
The original code processes a different image captured from the camera each time it goes round the loop and outputs the differences. Since you are now using the same image every time round, there are never any differences, hence the output is completely blank. (Note that it will still be playing the output as a video, just a constantly blank one.)
The first line in the for loop is where it grabs a new image from the camera:
videoCapture >> frame;
As you can see in your updated code you are removing this and just using the same image again:
frame = image;
Try saving 2 different images instead and have the program load in a different one each time round the loop.
Here is a fairly brute force way to do it which you could improve to load a different file each time it loops, use arrays and so on:
string imageName1("C:/Users/whoever/Desktop/hand_test_1.jpg"); // by default
string imageName2("C:/Users/whoever/Desktop/hand_test_2.jpg"); // by default
Mat image1;
Mat image2;
image1 = imread(imageName1.c_str(), IMREAD_COLOR); // Read the file
image2 = imread(imageName2.c_str(), IMREAD_COLOR); // Read the file
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
for (int i = 0; i < 2; i++)
{
    if (i == 0) { frame = image1; } else { frame = image2; }
    ...
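As a slightly cleaner variant of the same idea, you could keep the file names in a vector and index it with the loop counter (a sketch; the paths are just placeholders):
vector<string> imageNames = {
    "C:/Users/whoever/Desktop/hand_test_1.jpg",
    "C:/Users/whoever/Desktop/hand_test_2.jpg"
};
for (int i = 0; i < (int)imageNames.size(); i++)
{
    frame = imread(imageNames[i], IMREAD_COLOR);
    if (frame.empty())
        break; // stop if a file could not be read
    // ... rest of the processing loop stays the same
}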

Object of abstract class type "cv::BackgroundSubtractorMOG2" is not allowed. all the methods are pure virtual

I am developing in VS2015 + OpenCV 3.0 on Windows 7 64-bit. This is demo code that I wanted to try, and with every demo I have tried I run into the same problem:
object of abstract class type "cv::BackgroundSubtractorMOG2" is not allowed. all the methods are pure virtual function.
The demo code is:
using namespace cv;
using namespace std;
int main() {
    VideoCapture video("1.avi");
    Mat frame, mask, thresholdImage, output;
    //video>>frame;
    Ptr<BackgroundSubtractor> pMOG2;
    pMOG2 = new BackgroundSubtractorMOG2();
    BackgroundSubtractorMOG2 bgSubtractor(20, 16, true);
    while (true) {
        video >> frame;
        ++frameNum;
        bgSubtractor(frame, mask, 0.001);
        cout << frameNum << endl;
        //imshow("mask",mask);
        //waitKey(10);
    }
    return 0;
}
I include a lot of header files but I still cannot use the class BackgroundSubtractorMOG2, and what is worse, the class BackgroundSubtractorMOG shows up as undeclared.
The API changed after the OpenCV 2.4.x series. This will work in OpenCV 3.0.0:
#include <opencv2\opencv.hpp>
using namespace cv;
using namespace std;
int main() {
    VideoCapture video("1.avi");
    Mat frame, mask, thresholdImage, output;
    int frameNum = 0;
    Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2(20, 16, true);
    while (true) {
        video >> frame;
        if (frame.empty()) // stop when the video ends
            break;
        ++frameNum;
        pMOG2->apply(frame, mask, 0.001);
        cout << frameNum << endl;
        imshow("mask", mask);
        waitKey(10);
    }
    return 0;
}
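For reference, the three arguments to createBackgroundSubtractorMOG2 are (history, varThreshold, detectShadows), so the call above can be written more explicitly:
Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2(
    20,    // history: number of recent frames used to build the background model
    16,    // varThreshold: squared Mahalanobis distance threshold for deciding foreground
    true); // detectShadows: shadows are marked in the mask as gray (value 127)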

Change resolution of extracted frame in Opencv

I have an H.264 video stream and I need to extract frames from it. However, when I extract the frames the quality is really poor, which is a problem because I need to perform color segmentation. I want to know how I can extract a frame and convert it to BGR so as to have a better quality picture.
Here is the code I have so far:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(){
    VideoCapture capture("1.dv4");
    if(!capture.isOpened())
        return 1;
    double rate = capture.get(CV_CAP_PROP_FPS);
    bool stop(false);
    Mat frame;
    namedWindow("Extracted Frame", CV_WINDOW_NORMAL);
    cout << "Rate is=" << rate;
    int delay = 1000 / rate;
    while(!stop){
        if(!capture.read(frame))
            break;
        imshow("Extracted Frame", frame);
        imwrite("C:/Users/DELL/Documents/Visual Studio 2010/Projects/VideoFrameCapture/VideoFrameCapture/frame.jpg", frame);
        if(waitKey(delay) >= 0)
            stop = true;
    }
    capture.release();
    waitKey(0);
    return 1;
}
Once you have opened the VideoCapture, add this:
capture.set(CV_CAP_PROP_CONVERT_RGB, 1);
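If the poor quality comes from the saved JPEG rather than from the decoded frame itself, you can also pass compression parameters to imwrite, or write a lossless PNG instead; a small sketch using the OpenCV 2.4-style constants your code already uses:
// save the frame as a maximum-quality JPEG ...
vector<int> jpegParams;
jpegParams.push_back(CV_IMWRITE_JPEG_QUALITY);
jpegParams.push_back(100);
imwrite("frame.jpg", frame, jpegParams);
// ... or losslessly as a PNG
imwrite("frame.png", frame);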