I have an H.264 video stream and I need to extract frames from it. However, when I extract the frames the quality is really poor, which is a problem because I need to perform color segmentation. I want to know how I can extract each frame and convert it to BGR so that I get a better quality picture.
Here is the code I have so far:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(){
    VideoCapture capture("1.dv4");
    if(!capture.isOpened())
        return 1;
    double rate=capture.get(CV_CAP_PROP_FPS);
    bool stop(false);
    Mat frame;
    namedWindow("Extracted Frame",CV_WINDOW_NORMAL);
    cout <<"Rate is="<<rate;
    int delay=1000/rate;
    while(!stop){
        if(!capture.read(frame))
            break;
        imshow("Extracted Frame",frame);
        imwrite("C:/Users/DELL/Documents/Visual Studio 2010/Projects/VideoFrameCapture/VideoFrameCapture/frame.jpg",frame);
        if(waitKey(delay)>=0)
            stop=true;
    }
    capture.release();
    waitKey(0);
    return 1;
}
Once you have opened the VideoCapture, add this:
capture.set(CV_CAP_PROP_CONVERT_RGB, 1);
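For reference, here is a minimal sketch of how that fits into the capture loop. It assumes your backend honours the CV_CAP_PROP_CONVERT_RGB property for this stream; saving the frames as PNG instead of JPEG is a suggestion to avoid additional lossy compression before segmentation, and the output file names are only examples.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
using namespace cv;

int main(){
    VideoCapture capture("1.dv4");
    if(!capture.isOpened())
        return 1;
    // Ask the backend to deliver frames already converted to BGR.
    capture.set(CV_CAP_PROP_CONVERT_RGB, 1);

    Mat frame;
    int index = 0;
    while(capture.read(frame)){
        // PNG is lossless, so the saved frame keeps the colours you will
        // later segment; "frame_N.png" is only an example name.
        std::ostringstream name;
        name << "frame_" << index++ << ".png";
        imwrite(name.str(), frame);
    }
    return 0;
}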
I'm using Visual Studio 2019 with OpenCV 4.4.0.
Everything was working until I tried to start face detection: the CascadeClassifier doesn't load the Haar cascade.
You should also know that I installed OpenCV on the C: drive. Here is a simple code example:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2\opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <Windows.h>
#include <vector>
#include <stdio.h>
using namespace std;
using namespace cv;
int main()
{
    VideoCapture cam(0);
    Mat img;
    CascadeClassifier detector;
    vector<Rect> faces;
    Point p[2];
    bool cap = false;
    if (!detector.load("c:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"))
    {
        cout << "Image Detector Doesn't work\n";
        return EXIT_FAILURE;
    }
    if (!cam.isOpened())
    {
        cout << "Can't Open Camera\n";
        return EXIT_FAILURE;
    }
    while (!cap)
    {
        cam.read(img);
        imshow("Cam", img);
        waitKey(0);
        if (GetAsyncKeyState(VK_ESCAPE))
            cap = true;
    }
    destroyWindow("Cam");
    cout << "Detecting Face...\n";
    detector.detectMultiScale(img, faces);
    for (int i = 0; i < faces.size(); i++)
    {
        p[0] = Point(faces[i].x, faces[i].y);
        // bottom-right corner is x + width, y + height (width and height were swapped)
        p[1] = Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
        rectangle(img, p[0], p[1], Scalar(0, 0, 255), 3);
    }
    imwrite("Result.jpg", img);
    return EXIT_SUCCESS;
}
This code doesn't load the Haar cascade, and it prints the "can't load" failure message in the console window.
I really need help with this. Thanks in advance.
\ is used as an escape character in C++ string literals.
Therefore, you should use \\ to put a single \ character in them.
if (!detector.load("c:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"))
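As an alternative that sidesteps escaping entirely, Windows also accepts forward slashes in paths, so a sketch like the following (same cascade file as in the question) should load equally well, assuming the file really exists at that location:

// Forward slashes avoid the backslash-escaping issue altogether.
if (!detector.load("c:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml"))
{
    cout << "Could not load the cascade file\n";
    return EXIT_FAILURE;
}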
I am currently trying to modify a program that takes a webcam stream as input. The problem is that when I alter the program to use a single image instead, it doesn't display the output I expect. The original version that works with the video stream is below:
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
    VideoCapture videoCapture(0);
    videoCapture.set(CV_CAP_PROP_SETTINGS, 1);
    if (!videoCapture.isOpened()) {
        cout << "Can't find camera!" << endl;
        return -1;
    }
    Mat frame, frameOut, handMask, foreground, fingerCountDebug;
    BackgroundRemover backgroundRemover;
    SkinDetector skinDetector;
    FaceDetector faceDetector;
    FingerCount fingerCount;
    for (int i = 0; i < 2; i++)
    {
        videoCapture >> frame;
        frameOut = frame.clone();
        skinDetector.drawSkinColorSampler(frameOut);
        foreground = backgroundRemover.getForeground(frame);
        faceDetector.removeFaces(frame, foreground);
        handMask = skinDetector.getSkinMask(foreground);
        fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
        imshow("output", frameOut);
        imshow("foreground", foreground);
        imshow("handMask", handMask);
        imshow("handDetection", fingerCountDebug);
        if (i == 0)
        {
            backgroundRemover.calibrate(frame);
            skinDetector.calibrate(frame);
        }
    }
    waitKey(0);
}
The output shows a detection. However, if I modify the code so that frame isn't read from the video stream, the output shows nothing at all. Can anybody help me fix this?
EDIT: To clear up some confusion, the modified code that reads in a single image is below:
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
    string imageName("C:/Users/whoever/Desktop/hand_test.jpg"); // by default
    Mat image;
    image = imread(imageName.c_str(), IMREAD_COLOR); // Read the file
    Mat frame, frameOut, handMask, foreground, fingerCountDebug;
    BackgroundRemover backgroundRemover;
    SkinDetector skinDetector;
    FaceDetector faceDetector;
    FingerCount fingerCount;
    for (int i = 0; i < 2; i++)
    {
        frame = image;
        frameOut = frame.clone();
        skinDetector.drawSkinColorSampler(frameOut);
        foreground = backgroundRemover.getForeground(frame);
        faceDetector.removeFaces(frame, foreground);
        handMask = skinDetector.getSkinMask(foreground);
        fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
        imshow("output", frameOut);
        imshow("foreground", foreground);
        imshow("handMask", handMask);
        imshow("handDetection", fingerCountDebug);
        if (i == 0)
        {
            cout << "Calibrating...";
            backgroundRemover.calibrate(frame);
            skinDetector.calibrate(frame);
        }
    }
    waitKey(0);
}
The original code processes a different image captured from the camera each time it goes round the loop and outputs the differences. Since you are now using the same image on every iteration, there are never any differences, hence the output is completely blank. (Note that it will still be playing the output as a video, just a constantly blank one.)
The first line in the for loop is where it grabs a new image from the camera:
videoCapture >> frame;
As you can see in your updated code you are removing this and just using the same image again:
frame = image;
Try saving 2 different images instead and have the program load in a different one each time round the loop.
Here is a fairly brute-force way to do it, which you could improve to load a different file each time it loops, use arrays, and so on (a tidier sketch along those lines follows after this snippet):
string imageName1("C:/Users/whoever/Desktop/hand_test_1.jpg"); // by default
string imageName2("C:/Users/whoever/Desktop/hand_test_2.jpg"); // by default
Mat image1;
Mat image2;
image1 = imread(imageName1.c_str(), IMREAD_COLOR); // Read the file
image2 = imread(imageName2.c_str(), IMREAD_COLOR); // Read the file
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
for (int i = 0; i < 2; i++)
{
    if (i == 0) { frame = image1; } else { frame = image2; }
...
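If you'd rather not hard-code each image, a slightly tidier sketch (file names are placeholders) keeps the paths in a vector and indexes it on each pass through the loop; the rest of the processing body stays exactly as in your code:

// Sketch: load the test images up front, then feed a different one per iteration.
vector<string> imageNames;
imageNames.push_back("C:/Users/whoever/Desktop/hand_test_1.jpg"); // placeholder paths
imageNames.push_back("C:/Users/whoever/Desktop/hand_test_2.jpg");

vector<Mat> images;
for (size_t n = 0; n < imageNames.size(); n++)
    images.push_back(imread(imageNames[n], IMREAD_COLOR));

Mat frame, frameOut, handMask, foreground, fingerCountDebug;
for (size_t i = 0; i < images.size(); i++)
{
    frame = images[i];
    // ... same per-frame processing and imshow calls as before ...
}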
I used a for loop to read 300 frames and accumulate them. I put an imshow call inside the loop to display the frames continuously, but nothing is shown while the for loop is running; only a single image appears at the end.
Here's my code:
#include<iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include<stdlib.h>
#include<stdio.h>
using namespace cv;
using namespace std;
int main()
{
    char k;
    int learningframes=300;
    VideoCapture cap(0);
    if(cap.isOpened()==0)
    {
        cout<<"ERROR";
        return -1;
    }
    //while(1)
    //{
    Mat frame;
    cap>>frame;
    Mat frameaccF,frameacc,framethr32,framehsv,framethr;
    frameaccF=Mat::zeros(frame.size(),CV_32FC1);
    for(int i=0;i<=learningframes;i++)
    {
        cap>>frame;
        imshow("nn",frame);
        cvtColor(frame,framehsv,CV_BGR2HSV);
        inRange(framehsv,Scalar(0,30,0),Scalar(50,150,255),framethr);
        framethr.convertTo(framethr,CV_32F);
        accumulate(framethr,frameaccF);
    }
    frameaccF=frameaccF/300;
    frameaccF.convertTo(frameaccF,CV_8U);
    imshow("frame",frame);
    imshow("frameacc",frameaccF);
    waitKey(0);
    //if(k=='q')
    //break;
    //}
    return 0;
}
You need to put the waitKey() call inside the for loop:
for(int i=0;i<=learningframes;i++)
{
    cap>>frame;
    imshow("nn",frame);
    cvtColor(frame,framehsv,CV_BGR2HSV);
    inRange(framehsv,Scalar(0,30,0),Scalar(50,150,255),framethr);
    framethr.convertTo(framethr,CV_32F);
    accumulate(framethr,frameaccF);
    waitKey(0);
}
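Note that imshow only repaints its window when the HighGUI event loop gets a chance to run, which is what waitKey does. With waitKey(0) the loop pauses for a keypress on every frame; if you want the 300 frames to play through on their own, a small delay is usually enough, for example:

// A few milliseconds per frame lets the window refresh
// without blocking for a keypress each time.
waitKey(30);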
See the ticked answer below :)
Error 1 error C2065: 'capture' : undeclared identifier
Using VS2013 Express with OpenCV
Older code examples have worked, but I can't get this one to work:
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
using namespace cv;
int main()
{
    Mat frame = cvQueryFrame(capture);
    imshow("Video", frame);
}
I had to change "opencv2/core/core.hpp" to #include <opencv2\core\core.hpp>, and that part compiles now.
But even with highgui included, I can't get "capture" to work.
Any ideas?
x64 on Debug, and using x64 libs...
That capture part is a leftover from the old C API.
Try this instead:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
using namespace cv;
int main()
{
    VideoCapture cap(0);
    while( cap.isOpened() )
    {
        Mat frame;
        if ( ! cap.read(frame) )
            break;
        imshow("lalala",frame);
        int k = waitKey(10);
        if ( k==27 )
            break;
    }
    return 0;
}
Of course it won't work: you haven't declared the variable capture anywhere. You probably want to do something like this:
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
using namespace cv;
int main()
{
    CvCapture* capture = cvCreateFileCapture("path to video file");
    Mat frame = cvQueryFrame(capture);
    imshow("Video", frame);
    waitKey();
    cvReleaseCapture(&capture);
}
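For completeness, the same idea with the C++ API (a sketch; the video path is a placeholder) avoids the legacy CvCapture type altogether:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    VideoCapture capture("path to video file"); // placeholder path
    if (!capture.isOpened())
        return -1;
    Mat frame;
    if (!capture.read(frame))   // grab the first frame
        return -1;
    imshow("Video", frame);
    waitKey();
    return 0;                   // VideoCapture releases itself in its destructor
}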
I am using OpenCV 2.4.6. I found an example on the Internet of getting a frame from a camera. It works well (it displays my ugly face on the screen). However, I absolutely cannot get pixel data from the frames. I found a topic here: http://answers.opencv.org/question/1934/reading-pixel-values-from-a-frame-of-a-video/ but it doesn't work for me.
Here is the code - in the commented parts I pointed out what is wrong.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int main() {
    int c;
    IplImage* img;
    CvCapture* capture = cvCaptureFromCAM(1);
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    while(1) {
        img = cvQueryFrame(capture);
        uchar* data = (uchar*)img->imageData; // access violation
        // this does not work either
        //Mat m(img);
        //uchar a = m.data[0]; // access violation
        cvShowImage("mainWin", img);
        c = cvWaitKey(10);
        if(c == 27)
            break;
    }
}
Could you give me some suggestions, please?
I suggest using the newer Mat structure instead of IplImage, since your question is tagged C++. For your task you can use the data member of Mat, which points to Mat's internal pixel storage. For example: Mat img; uchar* data = img.data;. Here's a full example:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>   // for std::cout
using namespace cv;
int main() {
    int c;
    Mat img;
    VideoCapture capture(0);
    namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    bool readOk = true;
    while(capture.isOpened()) {
        readOk = capture.read(img);
        // make sure we grabbed the frame successfully
        if (!readOk) {
            std::cout << "No frame" << std::endl;
            break;
        }
        uchar* data = img.data; // this should work
        imshow("mainWin", img);
        c = waitKey(10);
        if(c == 27)
            break;
    }
}
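If you need individual pixel values rather than the raw pointer, Mat::at is usually more convenient. Here is a small sketch (it assumes the camera delivers the usual 8-bit, 3-channel BGR frames):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;

int main() {
    VideoCapture capture(0);
    Mat img;
    if (!capture.isOpened() || !capture.read(img))
        return -1;
    // at<Vec3b>(row, col) gives the three channel values of one pixel;
    // here we read the centre pixel of the frame.
    Vec3b centre = img.at<Vec3b>(img.rows / 2, img.cols / 2);
    std::cout << "B=" << (int)centre[0]
              << " G=" << (int)centre[1]
              << " R=" << (int)centre[2] << std::endl;
    return 0;
}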