I am facing a weird problem.
I am able to load and show an image, and I am able to capture frames from the camera, but I cannot see the captured frames in the display window. The camera connects fine and capturing works fine, yet nothing shows up.
My system is Windows 10 64-bit with OpenCV 3.3.0.
The code is below.
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) {
        std::cerr << "camera didn't connect." << std::endl;
        return 0;
    }

    int nFrame = 0;
    cv::Mat image = cv::imread("orgin102.jpg");
    cv::imshow("image", image);
    cv::waitKey(0);

    while (true) {
        cv::Mat origin;
        cap >> origin;
        // flip origin horizontally (mirror)
        flip(origin, origin, 1);
        nFrame++;
        cv::imshow("image", origin);
        //if (cv::waitKey(27) >= 0) break;
        cv::waitKey(0);
    }
    return 0;
}
I fixed this problem by changing the parameter of cap. Here it is:
cv::VideoCapture cap(1);
On some systems index 0 refers to the first camera, while on others index 1 does.
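If you're not sure which index your system uses, a small probe loop can pick the first index that actually delivers a frame. This is just a rough sketch of mine, not part of the original fix, and the upper bound of 4 is arbitrary:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap;
    int chosen = -1;
    for (int index = 0; index < 4; ++index) {      // 4 is an arbitrary upper bound
        cap.open(index);
        cv::Mat test;
        if (cap.isOpened() && cap.read(test) && !test.empty()) {
            chosen = index;                        // first index that returns a real frame
            break;
        }
        cap.release();
    }
    if (chosen < 0) {
        std::cerr << "no working camera found" << std::endl;
        return 0;
    }
    std::cout << "using camera index " << chosen << std::endl;
    // ... continue with the capture loop from the original code ...
    return 0;
}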
Hope this helps.
Cheers!
I would like to put an image on a video feed, and I'm wondering whether that's possible in OpenCV without multithreading.
I would like to avoid threads because the project runs on a Raspberry Pi Zero W (that's why I don't want multithreading).
I can't find anything about it on the internet. I have some basic code in C++, and I'm new to OpenCV.
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "error" << endl;
        return -1;
    }
    Mat edges;
    namedWindow("edges", 1);
    Mat img = imread("logo.png");
    for (;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        imshow("edges", img);
        imshow("edges", frame);
        if (waitKey(30) >= 0) break;
    }
}
In OpenCV, showing two things in the same window overwrites the previous one, which I think is what is happening in your case.
You can use OpenCV's addWeighted() function or the bitwise operations.
OpenCV has good documentation on this. You can find it here.
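To make that concrete, here is a minimal single-threaded sketch (my own, not your code) that blends the logo into a fixed region of each frame with addWeighted(); logo.png comes from your question, the blend weights are just example values:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) {
        std::cout << "error" << std::endl;
        return -1;
    }

    cv::Mat logo = cv::imread("logo.png");
    if (logo.empty()) return -1;

    for (;;) {
        cv::Mat frame;
        cap >> frame;
        if (frame.empty()) break;

        // The logo must fit inside the frame for the ROI below to be valid.
        if (logo.cols > frame.cols || logo.rows > frame.rows) break;

        // Top-left region of the frame, the same size as the logo.
        cv::Mat roi = frame(cv::Rect(0, 0, logo.cols, logo.rows));

        // Blend 70% camera pixels with 30% logo pixels, writing back into the ROI,
        // so the overlay and the video appear together in a single window.
        cv::addWeighted(roi, 0.7, logo, 0.3, 0.0, roi);

        cv::imshow("edges", frame);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}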
I am trying to set an ROI on a real-time camera feed and copy a picture into that ROI.
I have tried many approaches from the Internet, but none of them have worked so far.
Part of my code is shown below:
while (!protonect_shutdown)
{
    listener.waitForNewFrame(frames);
    libfreenect2::Frame *ir = frames[libfreenect2::Frame::Ir];
    //! [loop start]
    cv::Mat(ir->height, ir->width, CV_32FC1, ir->data).copyTo(irmat);

    Mat img = imread("button.png");
    cv::Rect r(1, 1, 100, 200);
    cv::Mat dstroi = img(Rect(0, 0, r.width, r.height));
    irmat(r).convertTo(dstroi, dstroi.type(), 1, 0);

    cv::imshow("ir", irmat / 4500.0f);
    int key = cv::waitKey(1);
    protonect_shutdown = protonect_shutdown || (key > 0 && ((key & 0xFF) == 27));
    listener.release(frames);
}
My real-time camera shows the video normally and the program runs without errors, but the picture does not appear in the ROI.
Does anyone have any ideas?
Any help is appreciated.
I hope I understood your question right and you want an output something like this:
I have created a rectangle of size 100x200 on the video feed and I am displaying an image inside that rectangle.
Here is the code:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat frame, overlayFrame;
    VideoCapture cap("video.avi"); // use 0 for webcam
    overlayFrame = imread("picture.jpg");
    if (!cap.isOpened())
    {
        cout << "Could not capture video";
        return -1;
    }
    Rect roi(1, 1, 100, 200); // a 100x200 rectangle at point (1,1) on the video feed
    namedWindow("CameraFeed");
    while ((cap.get(CV_CAP_PROP_POS_FRAMES) + 1) < cap.get(CV_CAP_PROP_FRAME_COUNT))
    {
        cap.read(frame);
        resize(overlayFrame, overlayFrame, Size(roi.width, roi.height)); // resize the image to fit in the roi
        overlayFrame.copyTo(frame(roi)); // copy the picture into the roi
        imshow("CameraFeed", frame);
        if (waitKey(27) >= 0)
            break;
    }
    destroyAllWindows();
    return 0;
}
I have an OpenCV program that is supposed to use a webcam (in this case the built-in laptop camera). The program compiles without errors, but when I run it, it seems to hang on this line:
img_scene = cvCaptureFromCAM(0);
I have a different program that accesses the camera in the same way, so I don't understand why this one has trouble.
Full capture code from the non-working program:
Mat captureThread() {
    if (captureOpen == false) {
        img_scene = cvCaptureFromCAM(0);
        cvSetCaptureProperty(img_scene, CV_CAP_PROP_FRAME_WIDTH, 640);
        cvSetCaptureProperty(img_scene, CV_CAP_PROP_FRAME_HEIGHT, 480);
    }
    while (1) {
        image = cvQueryFrame(img_scene);
        if (image.empty()) {
            continue;
        }
        cvtColor(image, gray, CV_BGR2GRAY);
        return gray;
    }
}
This is the code used in a different program that works as expected:
img_scene = cvCaptureFromCAM(0);
cvSetCaptureProperty(img_scene, CV_CAP_PROP_FRAME_WIDTH, 640);
cvSetCaptureProperty(img_scene, CV_CAP_PROP_FRAME_HEIGHT, 480);
while (1) {
    imageFrame = cvQueryFrame(img_scene);
    if (imageFrame.empty()) {
        cout << "image frame is empty" << endl;
        continue;
    }
    cvtColor(imageFrame, gray, CV_BGR2GRAY);
What is causing the non-working program to have trouble accessing the camera? I have also tried plugging in a USB camera and using cvCaptureFromCAM(-1), but that does not work either.
EDIT:
The non-working program uses multiple threads whereas the working program does not.
I'm trying to use the OpenCV FAST algorithm to detect corners in a video feed. The method call and setup seem pretty straightforward, yet I'm running into a few problems. When I try to use this code
while (run)
{
    clock_t begin, end;
    img = cvQueryFrame(capture);
    key = cvWaitKey(10);
    cvShowImage("stream", img);
    // cv::FAST variables
    int threshold = 9;
    vector<KeyPoint> keypoints;
    if (key == 'a') {
        //begin = clock();
        Mat mat(tempImg);
        FAST(mat, keypoints, threshold, true);
        //end = clock();
        //cout << "\n TIME FOR CALCULATION: " << double(diffClock(begin,end)) << "\n";
    }
I get this error:
OpenCV Error: Assertion failed (image.data && image.type() == CV_8U) in unknown function, file ........\ocv\opencv\src\cvaux\cvfast.cpp, line 6039
So I figured it's a problem with the depth of the image, so I added this:
IplImage* tempImg = cvCreateImage(Size(img->width,img->height),8,1);
cvCvtColor(img,tempImg,CV_8U);
I get:
OpenCV Error: Bad number of channels (Incorrect number of channels for this conversion code) in unknown function, file ........\ocv\opencv\src\cv\cvcolor.cpp, line 2238
I've tried using a Mat instead of an IplImage for the capture, but I keep getting the same kinds of errors.
Any suggestions or help?
Thanks in advance.
The entire file just to make it easier for anyone:
#include "cv.h"
#include "cvaux.hpp"
#include "highgui.h"
#include <time.h>
#include <iostream>
double diffClock(clock_t begin, clock_t end);
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
//Create Mat img for camera capture
IplImage* img;
bool run = true;
CvCapture* capture= 0;
capture = cvCaptureFromCAM(-1);
int key =0;
cvNamedWindow("stream", 1);
while(run)
{
clock_t begin,end;
img = cvQueryFrame(capture);
key = cvWaitKey(10);
cvShowImage("stream",img);
//Cv::FAST variables
int threshold=9;
vector<KeyPoint> keypoints;
if(key=='a'){
//begin = clock();
IplImage* tempImg = cvCreateImage(Size(img->width,img->height),8,1);
cvCvtColor(img,tempImg,CV_8U);
Mat mat(img);
FAST(mat,keypoints,threshold,true);
//end = clock();
//cout << "\n TIME FOR CALCULATION: " << double(diffClock(begin,end)) << "\n" ;
}
else if(key=='x'){
run= false;
}
}
cvDestroyWindow( "stream" );
return 0;
}
Whenever you have a problem using the OpenCV API, go check the tests/examples available in the source code: fast.cpp
This practice is extremely useful and educational. Now, if you take a look at that code, you will notice that the image gets converted to grayscale before calling cv::FAST() on it:
Mat mat(tempImg);
Mat gray;
cvtColor(mat, gray, CV_BGR2GRAY);
FAST(gray,keypoints,threshold,true);
Seems pretty straightforward, indeed.
You need to change this:
cvCvtColor(img,tempImg,CV_8U);
to this:
cvCvtColor(img,tempImg,CV_BGR2GRAY);
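For reference, here is a rough sketch (mine, not tested against your exact setup) of how the corrected conversion could slot into the capture loop from your question, keeping the same OpenCV C API; cv::FAST() expects a single-channel 8-bit image:

// Sketch only: convert the captured BGR frame to 8-bit grayscale before FAST.
IplImage* tempImg = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
cvCvtColor(img, tempImg, CV_BGR2GRAY);   // a conversion code, not a depth flag

std::vector<cv::KeyPoint> keypoints;
cv::Mat gray(tempImg);                   // wraps the IplImage data without copying
cv::FAST(gray, keypoints, 9, true);      // threshold 9, as in your code

cvReleaseImage(&tempImg);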
You can read this
Good Luck
I started getting the same message with code that had worked previously, and I was certain my Mat was 8-bit grayscale. It turned out that one of the images I was trying to process was no longer there, so in my case it was a misleading error message.
Take a look at this sample code. The code you are using looks like quite outdated OpenCV; in this sample you will find how feature detectors should be used now.
The sample is generic over several feature detectors (including FAST), which is why it looks a bit more complicated.
http://code.opencv.org/projects/opencv/repository/entry/branches/2.4/opencv/samples/cpp/matching_to_many_images.cpp
You will also find more samples in the parent directory.
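As a rough illustration of the 2.4-era detector interface that sample is built around (this is my own minimal sketch, not code taken from the sample; "scene.jpg" is just a placeholder file name):

#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

int main()
{
    cv::Mat image = cv::imread("scene.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (image.empty()) return -1;

    // Create the FAST detector through the generic 2.4 FeatureDetector factory.
    cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("FAST");
    std::vector<cv::KeyPoint> keypoints;
    detector->detect(image, keypoints);

    // Visualize the detected keypoints.
    cv::Mat output;
    cv::drawKeypoints(image, keypoints, output);
    cv::imshow("keypoints", output);
    cv::waitKey(0);
    return 0;
}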
Please follow the code below to get the desired result. For the sake of the example I am using a single image, but you can apply the same idea to video frames.
Mat img = imread("IMG.jpg", IMREAD_UNCHANGED);
if( img.empty())
{
cout << "File not available for reading"<<endl;
return -1;
}
Mat grayImage;
if(img.channels() >2){
cvtColor( img, grayImage, CV_BGR2GRAY ); // converting color to gray image
}
else{
grayImage = img;
}
double sigma = 1;
GaussianBlur(grayImage, grayImage, Size(), sigma, sigma); // applying gaussian blur to remove some noise,if present
int thresholdCorner = 40;
vector<KeyPoint> keypointsCorners;
FAST(grayImage,keypointsCorners,thresholdCorner,true); // applying FAST key point detector
if(keypointsCorners.size() > 0){
cout << keypointsCorners.size() << endl;
}
// Drawing a circle around corners
for( int i = 0; i < keypointsCorners.size(); i++ )
{
circle( grayImage, keypointsCorners.at(i).pt, 5, Scalar(0), 2, 8, 0 );
}
cv::namedWindow("Display Image");
cv::imshow("Display Image", grayImage);
cvWaitKey(0);
cvDestroyWindow( "Display Image" );
I am using a Mac OS X 10.6 machine. I have OpenCV 2.1 x64 compiled from source using Xcode and its GCC compiler.
I am having trouble using the C++ video reading features of OpenCV. Here is the simple test code I am using (came straight from OpenCV documentation):
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat edges;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
cvtColor(frame, edges, CV_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(200) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
The program compiles fine, but when I try to run it, I see the green light on my webcam come on for a few seconds, then the program exits with the error message:
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type) in cvGetMat, file /Users/mark/Downloads/OpenCV-2.1.0/src/cxcore/cxarray.cpp, line 2476
terminate called after throwing an instance of 'cv::Exception'
what(): /Users/mark/Downloads/OpenCV-2.1.0/src/cxcore/cxarray.cpp:2476: error: (-206) Unrecognized or unsupported array type in function cvGetMat
Under debug mode, the matrix still seems to be empty after the cap >> frame line.
I get similar behavior when I try to capture from a video file or an image, so it's not the camera. What is wrong, do you think? Anything I can do to make this work?
EDIT: I'd like to add that if I use the C features, everything works fine. But I would like to stick with C++ if I can.
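To clarify, by the C features I mean something along these lines (an illustrative sketch of the 2.1-era C API, not my exact code):

#include "cv.h"
#include "highgui.h"

int main()
{
    CvCapture* capture = cvCaptureFromCAM(0);     // open the default camera
    if (!capture) return -1;

    cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);
    for (;;) {
        IplImage* frame = cvQueryFrame(capture);  // the frame is owned by the capture, do not release it
        if (!frame) break;
        cvShowImage("frame", frame);
        if (cvWaitKey(200) >= 0) break;
    }
    cvReleaseCapture(&capture);
    cvDestroyWindow("frame");
    return 0;
}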
Thanks
I've seen the same problem. When I use the C features, a similar issue sometimes comes up as well. Judging from the error message of the C code, I think it happens because the camera returns a NULL frame. So I think it can be solved this way:
do
{
    capture >> frame;
} while (frame.empty());
That way it works on my machine.
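For context, this is roughly where that check would sit in your loop (my arrangement of the idea, using the rest of your original code unchanged):

for (;;)
{
    Mat frame;
    do
    {
        cap >> frame;          // keep grabbing until the camera delivers data
    } while (frame.empty());

    cvtColor(frame, edges, CV_BGR2GRAY);
    GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
    Canny(edges, edges, 0, 30, 3);
    imshow("edges", edges);
    if (waitKey(200) >= 0) break;
}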
I encountered the same problem. It seems that the first couple of attempts to grab a frame don't return any data, so if you try to use them you'll get an error. Here is how I got around it: I simply added a counter and checked the size of the captured frame.
int cameraNumber = 0;
if (argc > 1)
    cameraNumber = atoi(argv[1]);

cv::VideoCapture camera;
camera.open(cameraNumber);
if (!camera.isOpened()) {
    cerr << "ERROR: Could not access the camera or video!" << endl;
    exit(1);
}

// Give the camera 40 attempts to deliver a frame; until then display an
// 'Accessing camera' note, and give up if it still fails after that.
int CAMERA_CHECK_ITERATIONS = 40;
while (true) {
    Mat cameraFrame;
    camera >> cameraFrame;
    if (cameraFrame.total() > 0) {
        Mat displayFrame(cameraFrame.size(), CV_8UC3);
        doSomething(cameraFrame, displayFrame); // user-defined processing
        imshow("Image", displayFrame);
    } else {
        cout << "::: Accessing camera :::" << endl;
        CAMERA_CHECK_ITERATIONS--;
        if (CAMERA_CHECK_ITERATIONS <= 0) break;
    }
    int key = waitKey(200);
    if (key == 27) break;
}
Try simplifying the program so that you can identify the exact location of the problem, e.g. change your loop so that it looks like this:
for (;;)
{
    Mat frame;
    cap >> frame; // get a new frame from camera
    // cvtColor(frame, edges, CV_BGR2GRAY);
    // GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
    // Canny(edges, edges, 0, 30, 3);
    // imshow("edges", edges);
    imshow("edges", frame);
    if (waitKey(200) >= 0) break;
}
If that works OK, then try adding the processing calls back in, one at a time, e.g.
for (;;)
{
    Mat frame;
    cap >> frame; // get a new frame from camera
    cvtColor(frame, edges, CV_BGR2GRAY);
    // GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
    // Canny(edges, edges, 0, 30, 3);
    imshow("edges", edges);
    if (waitKey(200) >= 0) break;
}
and so on...
Once you've identified the problematic line you can then focus on that and investigate further.
Go to Project -> Project Properties -> Configuration Properties -> Linker -> Input.
Under Additional Dependencies, add cv210.lib cvaux210.lib cxcore210.lib highgui210.lib.
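Alternatively, if you prefer keeping the dependency list in source rather than in the project settings, MSVC also accepts #pragma comment directives (this is just another way to do the same thing, not part of the original instructions above):

// MSVC-only alternative to listing the libraries under Additional Dependencies.
#pragma comment(lib, "cv210.lib")
#pragma comment(lib, "cvaux210.lib")
#pragma comment(lib, "cxcore210.lib")
#pragma comment(lib, "highgui210.lib")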
Hi, I got the solution for you :)
Mat san, edges;
int waitKeyValue = 10; // delay in ms (the value was not shown in the original snippet)

VideoCapture san_cap(0);
if (san_cap.isOpened()) {
    while (1) {
        san_cap.read(san);
        imshow("Video", san);

        Mat frame;
        san_cap.read(frame); // get a new frame from camera
        cvtColor(frame, edges, CV_BGR2GRAY);
        imshow("Video2", edges);

        int key = cv::waitKey(waitKeyValue);
        if (key == 27) {
            break;
        }
    }
}