I'm trying to get frames from a Logitech C920 camera and display them.
The camera seems to be working, but nothing is shown in the display window.
I tried configuring all the camera settings, but that didn't help.
What am I missing?
#include "pch.h"
#include <iostream>
#include "opencv2\imgcodecs.hpp"
#include "opencv2\core.hpp"
#include "opencv2\highgui.hpp"
#include "opencv2\videoio.hpp"
using namespace std;
using namespace cv;
int main()
{
VideoCapture camera(CAP_ANY);
Mat frame;
namedWindow("x", WINDOW_AUTOSIZE);
camera.set(CAP_PROP_FOURCC, VideoWriter::fourcc('M', 'J', 'P', 'G'));
camera.set(CAP_PROP_FRAME_WIDTH, 1920);
camera.set(CAP_PROP_FRAME_HEIGHT, 1080);
while (1)
{
camera.read(frame);
imshow("x", frame);
}
waitKey(0);
return 0;
}
You have to call waitKey(int delay) after showing each frame: imshow only hands the image to the window, and it is waitKey that runs the GUI event processing that actually draws it. Your while loop should look like this:
while (1)
{
    camera.read(frame);
    imshow("x", frame);
    waitKey(1);
}
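For completeness, here is a minimal sketch of the same loop with two extra guards added on my side (the isOpened and empty checks are my additions, not part of the original code):

#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace cv;

int main()
{
    VideoCapture camera(CAP_ANY);
    if (!camera.isOpened())       // bail out early if the device cannot be opened
        return -1;

    Mat frame;
    while (true)
    {
        camera.read(frame);
        if (frame.empty())        // a failed grab would otherwise make imshow throw
            break;
        imshow("x", frame);
        if (waitKey(1) == 27)     // waitKey both polls the keyboard and lets the window repaint
            break;
    }
    return 0;
}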
I'm trying to run the following code to convert an RGB image to the YCbCr color model, but building this code segment gives the above error. I have attached a screenshot; can you take a look and suggest a solution?
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace cv2;
using namespace std;
int main()
{
Mat src1;
src1 = imread("face.jpg", CV_LOAD_IMAGE_COLOR);
namedWindow("Original image", CV_WINDOW_AUTOSIZE);
imshow("Original image", src1);
Mat gray, edge, draw;
//cvtColor(src1, gray, CV_BGR2GRAY);
Mat imgYCC = cv2.cvtColor(src1, cv2.COLOR_BGR2YCR_CB);
//equalizeHist(gray, draw);
//Canny(gray, edge, 50, 255, 3);
edge.convertTo(draw, CV_8U);
namedWindow("image", CV_WINDOW_AUTOSIZE);
imshow("image", imgYCC);
waitKey(0);
return 0;
}
The namespace cv2 doesn't exist in C++; cv2 is the name of the Python wrapper.
Just remove the line:
using namespace cv2;
and don't use the cv2. prefix in your code. Also note that in C++ cvtColor writes into an output Mat instead of returning one, and the constant is spelled COLOR_BGR2YCrCb, e.g.:
Mat imgYCC;
cvtColor(src1, imgYCC, COLOR_BGR2YCrCb);
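Putting it together, a minimal sketch of the whole corrected program, assuming OpenCV 3+ naming (IMREAD_COLOR instead of CV_LOAD_IMAGE_COLOR):

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat src1 = imread("face.jpg", IMREAD_COLOR);  // "face.jpg" is taken from the question
    if (src1.empty())
    {
        cout << "Could not load face.jpg" << endl;
        return -1;
    }

    Mat imgYCC;
    cvtColor(src1, imgYCC, COLOR_BGR2YCrCb);      // BGR -> YCrCb, result written into imgYCC

    imshow("Original image", src1);
    imshow("image", imgYCC);
    waitKey(0);
    return 0;
}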
I'm trying to make a simple program that draws a rectangle over a video stream coming from my webcam. The following code compiles and runs, but the rectangle isn't visible. I've tried various line thicknesses, colors, and positions, and I've also tried drawing rectangles on still images rather than a video stream.
After looking through examples and tutorials as well as the OpenCV docs, I still can't seem to figure it out. If anyone could assist me in making the rectangle visible, it would be greatly appreciated.
#include <opencv2/video.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;

VideoCapture vid(0);
Mat frame;

int main()
{
    while (true)
    {
        vid.read(frame);
        imshow("Webcam", frame);
        rectangle(frame, Point(100, 100), Point(300, 300), Scalar(255), 10, 8, 0);
        if (waitKey(30) == 27)
            break;
    }
}
Simply draw the rectangle before you show the image:
#include <opencv2/opencv.hpp> // It's just easier to #include only this

using namespace cv;

int main() {
    // Don't use global variables if they are not needed!
    VideoCapture vid(0);
    Mat frame;

    while (true)
    {
        // Read frame
        vid.read(frame);

        // Draw rectangle
        rectangle(frame, Point(100, 100), Point(300, 300), Scalar(255, 0, 0) /*blue*/, 10, 8, 0);

        // Show image
        imshow("Webcam", frame);

        if ((waitKey(30) & 0xFF) == 27) { // for portability
            break;
        }
    }
}
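As a side note (my addition, not part of the original answer): rectangle also has an overload that takes a cv::Rect, which is convenient when the box comes from a detector. A drop-in alternative for the drawing line above, assuming OpenCV 3+ constants:

// Same 200x200 box, expressed as a Rect (x, y, width, height)
Rect box(100, 100, 200, 200);
rectangle(frame, box, Scalar(0, 0, 255), 10, LINE_8); // red in BGR order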
I'm developing a project with Visual Studio 2010 and OpenCV. Here is my problem: I acquire video from a webcam, analyze it, do some operations on it, and then show the result in another window (object tracking). The code compiles with no errors, but as soon as I start the program the console window closes immediately, and I cannot see either the original or the modified video. If I debug the code I can see that the webcam works and acquires images, but obviously I need this to run in real time. Any suggestions?
Can you post your code?
Have you written and compiled a basic video player program like this one?
#include "opencv2/opencv.hpp"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
//Video Capture cap(path_to_video); // open the video file
if(!cap.isOpened()) // check if we succeeded
return -1;
namedWindow("Video",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
imshow("Video", frame);
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
Try this:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main() {
    VideoCapture cap(0);

    while (true)
    {
        Mat imgOriginal;
        Mat imgHSV;

        bool bSuccess = cap.read(imgOriginal);
        if (!bSuccess) // stop if no frame could be read
            break;

        cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); // Convert the captured frame from BGR to HSV

        imshow("Thresholded Image", imgHSV);
        imshow("Original", imgOriginal);

        waitKey(33);
    }
    return 0;
}
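A small follow-up: the window is labelled "Thresholded Image" but the code above only converts to HSV. If the goal is the usual color-based tracking, inRange would be the next step; here is a sketch with placeholder bounds (the HSV ranges below are made up and depend on the object you track):

// Hypothetical thresholding step inside the same loop, after cvtColor:
Mat imgThresholded;
inRange(imgHSV, Scalar(0, 100, 100), Scalar(10, 255, 255), imgThresholded); // keep only pixels in this HSV range
imshow("Thresholded Image", imgThresholded);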
I'm trying to learn OpenCV and object detection. I used objectdetection.cpp from the OpenCV samples, and when I run it I get this error.
The cascade loads perfectly fine, and so does the camera; the only problem is detectMultiScale, because whenever I comment it out the program doesn't crash.
Here is the code of objectdetection2.cpp:
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay(Mat frame);
/** Global variables */
String face_cascade_name = "..\\Debug\\haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "..\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/**
* #function main
*/
int main(void)
{
VideoCapture capture;
Mat frame;
//-- 1. Load the cascade
if (!face_cascade.load(face_cascade_name)){ printf("--(!)Error loading face cascade\n"); return -1; };
if (!eyes_cascade.load(eyes_cascade_name)){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open(0);
if (!capture.isOpened()) { printf("--(!)Error opening video capture\n"); return -1; }
while (capture.read(frame))
{
if (frame.empty())
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay(frame);
//-- bail out if escape was pressed
int c = waitKey(10);
if ((char)c == 27) { break; }
}
return 0;
}
/**
* #function detectAndDisplay
*/
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0, Size(80, 80));
imshow(window_name, frame);
}
You are likely experiencing an OpenCV bug, described here: http://code.opencv.org/issues/3710
The code you posted looks OK to me, otherwise.
The code you posted is right, but I suspect your OpenCV configuration is not. If you work on Windows, please check your .dll and .lib files (for example, make sure a Debug build links against the debug versions of the OpenCV libraries).
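As a quick sanity check (my own suggestion, not part of the answers above), you can print the library version and build information at runtime to confirm which OpenCV binaries your program actually linked against:

#include "opencv2/core.hpp"
#include <iostream>

int main()
{
    // CV_VERSION and cv::getBuildInformation() both come from the core module;
    // they help spot version or debug/release mismatches on Windows.
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}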
I am trying to capture a video and store it in a file and then read the same video file. I am able to write it but not able to read the same file. On pressing escape, the program is supposed to quit the webcam and play the recorded video but displays the following error instead:
[mpeg1video @ 0x2a16f40] ac-tex damaged at 14 28
[mpeg1video @ 0x2a16f40] Warning MVs not available
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type) in cvGetMat, file /home/ujjwal/Downloads/OpenCV-2.4.0/modules/core/src/array.cpp, line 2482
terminate called after throwing an instance of 'cv::Exception'
what(): /home/ujjwal/Downloads/OpenCV-2.4.0/modules/core/src/array.cpp:2482: error: (-206) Unrecognized or unsupported array type in function cvGetMat
The code is:
#include <sstream>
#include <string>
#include <iostream>
#include <opencv/highgui.h>
#include <opencv/cv.h>

using namespace cv;

int main(int argc, char* argv[])
{
    Mat inputVideo;
    Mat frame;
    Mat HSV;
    Mat tracking;
    char checkKey;

    VideoCapture capture;
    capture.open(0);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    VideoWriter writer("OutputFile.mpeg", CV_FOURCC('P','I','M','1'), 50, Size(640, 480));

    while (1) {
        capture.read(inputVideo);
        imshow("Original Video", inputVideo);
        writer.write(inputVideo);

        checkKey = cvWaitKey(20);
        if (checkKey == 27)
            break;
    }

    capture.open("OutputFile.mpeg");
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    while (1) {
        capture.read(inputVideo);
        imshow("Tracking Video", inputVideo);
    }
    return 0;
}
Can someone please help me? Thanks!
You need to correct several things to make it work:
You have to create the window before showing images in the window.
You have to release the writer to finish writing before opening the file again later.
You need to add cvWaitKey(20) after showing the second image (check out here for why this is essential).
The whole fixed code is as follows:
#include <sstream>
#include <string>
#include <iostream>
#include <opencv/highgui.h>
#include <opencv/cv.h>

using namespace cv;

int main(int argc, char* argv[])
{
    Mat inputVideo;
    Mat frame;
    Mat HSV;
    Mat tracking;
    char checkKey;

    VideoCapture capture;
    capture.open(0);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    VideoWriter writer("OutputFile.mpeg", CV_FOURCC('P','I','M','1'), 50, Size(640, 480));

    namedWindow("Original Video", WINDOW_AUTOSIZE);
    while (1) {
        capture.read(inputVideo);
        imshow("Original Video", inputVideo);
        writer.write(inputVideo);

        checkKey = cvWaitKey(20);
        if (checkKey == 27)
            break;
    }
    writer.release();

    capture.open("OutputFile.mpeg");
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    namedWindow("Tracking Video", WINDOW_AUTOSIZE);
    while (1) {
        capture.read(inputVideo);
        if (!inputVideo.empty())
        {
            imshow("Tracking Video", inputVideo);
            checkKey = cvWaitKey(20);
        }
        else
            break;
    }
    return 0;
}
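One more note on the codec, which is my own suggestion rather than part of the answer above: the MPEG-1 'PIM1' codec can be awkward to read back on some builds, and writing Motion-JPEG frames into an .avi container is often a more forgiving combination. A drop-in alternative for the VideoWriter line:

// Hypothetical alternative: MJPG frames in an .avi container, 25 fps
VideoWriter writer("OutputFile.avi", CV_FOURCC('M', 'J', 'P', 'G'), 25, Size(640, 480));

If you switch to this, remember to open "OutputFile.avi" in the playback section as well.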