My development hardware environment is a Raspberry Pi 2 with a VX-1000 camera.
My development software environment is Arch Linux, C++, and OpenCV.
I could stream on webpage by using mjpg-streamer
with this command mjpg_streamer -i "./input_uvc.so -d /dev/video0 -n -f 30 -r 320x240" -o "./output_http.so -n -w ./www"
This run is good. (turns on camera (means turns on led on camera) )
But when I execute the program made by opencv code, it doesn't turn on camera (not turn on led) and the function cvCaptureFromCAM() returns NULL.
My source code is
#include <iostream>
#include <time.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>
using namespace cv;
using namespace std;
/**
 * Opens the first available camera with the legacy C API, then loops
 * forever grabbing frames and saving the latest one to "save.jpg".
 *
 * Returns 0 on success, -1 when no camera could be opened.
 */
int main(int argc, char** argv)
{
    IplImage* frame;
    CvCapture* capture;

    capture = cvCaptureFromCAM(-1);  // -1 = first camera the driver finds
    if (capture == NULL)
    {
        // Bail out immediately: every cvXxx call below dereferences
        // `capture`, so falling through with NULL would crash.
        cout << "Strange!!" << endl;
        return -1;
    }

    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH , 320);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT , 240);

    bool bLoop = true;
    cout << "Start streaming" << endl;
    while (bLoop)
    {
        cvGrabFrame(capture);                 // grab a frame from the camera
        frame = cvRetrieveFrame(capture, 0);  // decode the grabbed frame
        if (frame != NULL)                    // retrieval can fail transiently
            cvSaveImage("save.jpg", frame);
        cvWaitKey(33);                        // pace the loop (~30 fps)
    }

    cvReleaseCapture(&capture);
    cvDestroyAllWindows();
    return 0;
}
Let's change this line:
capture = cvCaptureFromCAM(-1);
To:
capture = cvCaptureFromCAM(0);
This will use the correct index that your video camera is on!
Try to pass 0 as the argument of cvCaptureFromCAM() or just simply listen to some connected camera by a loop:
CvCapture* capture = NULL;
for(int i = 0; i < 100 && capture == NULL; i++)
capture = cvCaptureFromCAM(i);
if(capture == NULL)
return;
Last but not least you should avoid the old C API, because it is deprecated and supported poorly, try to use cv::VideoCapture class instead.
Related
I am trying to open a window of my webcam using opencv in c++ but it seems the webcam does not want to open. I tested before in other apps like cheese and it works.
#include <opencv2/highgui.hpp>
#include <iostream>
/**
 * Shows a live preview window for the default camera (device 0).
 *
 * Returns 0 on a key press, -1 when the camera cannot be opened.
 */
int main() {
    const int PORT = 0;  // index of the default camera device
    cv::Mat image;

    cv::VideoCapture cap(PORT);
    // Verify the device opened BEFORE configuring it: calling set()
    // on a closed capture silently does nothing, so the original
    // check-after-set order hid open failures.
    if (!cap.isOpened()) {
        std::cout << "Could not open the camera" << std::endl;
        return -1;
    }
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

    cv::namedWindow("Webcam window", cv::WINDOW_AUTOSIZE);
    while (true) {
        cap >> image;        // grab and decode the next frame
        if (!image.empty())  // skip frames that failed to decode
            cv::imshow("Webcam window", image);
        if (cv::waitKey(10) >= 0) {  // any key exits
            break;
        }
    }
    return 0;
}
I am using ubuntu budgie 21.10.
Well, this might sound silly, but I don't see a call to the VideoCapture::open() function. You mentioned that when you call it, it simply returns false, but I don't see it in the code you provided.
You should also try to explicitly open default camera device.
Have you tried official examples from OpenCV? (such as https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html)
Do they work?
Also, you should turn on logging on the most verbose level, it often provide additional info about what's going on: How to enable logging for OpenCV
I am setting up a ros system to publish images with ros, c++ and opencv-2 for my drone. The code, below, is publishing raw images. While publishing, I want to write gray-scale images frame-by-frame with resolution of 1280 x 720 to record a video. I have found a readily available video writing code with opencv. However, I could not incorporate this code into image_publisher code. Here is the image_publisher code:
#include <ros/ros.h>
#include <camera_info_manager/camera_info_manager.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <opencv2/opencv.hpp>
// ROS node that reads frames from a video source (file path or device
// URL in `img_path`) and republishes them as sensor_msgs/Image on
// `camera_topic`, plus CameraInfo when a calibration is loaded.
int main(int argc, char **argv)
{
ROS_INFO("Starting image_pub ROS node...\n");
ros::init(argc, argv, "image_pub");
ros::NodeHandle nh("~");
// Node parameters, all read from the private namespace (~).
std::string camera_topic;
std::string camera_info_topic;
std::string camera_info_url;
std::string img_path;
std::string frame_id;
float pub_rate;
int start_sec;
bool repeat;
nh.param<std::string>("camera_topic", camera_topic, "/camera/image_raw");
nh.param<std::string>("camera_info_topic", camera_info_topic, "/camera/camera_info");
nh.param<std::string>("camera_info_url", camera_info_url, "");
nh.param<std::string>("img_path", img_path, "");
nh.param<std::string>("frame_id", frame_id, "");
nh.param("pub_rate", pub_rate, 30.0f);
nh.param("start_sec", start_sec, 0);
nh.param("repeat", repeat, false);
// Echo the effective configuration at startup for debuggability.
ROS_INFO("CTopic : %s", camera_topic.c_str());
ROS_INFO("ITopic : %s", camera_info_topic.c_str());
ROS_INFO("CI URL : %s", camera_info_url.c_str());
ROS_INFO("Source : %s", img_path.c_str());
ROS_INFO("Rate   : %.1f", pub_rate);
ROS_INFO("Start  : %d", start_sec);
ROS_INFO("Repeat : %s", repeat ? "yes" : "no");
ROS_INFO("FrameID: %s", frame_id.c_str());
// Calibration is optional: only loaded when a valid URL is given.
camera_info_manager::CameraInfoManager camera_info_manager(nh);
if (camera_info_manager.validateURL(camera_info_url))
camera_info_manager.loadCameraInfo(camera_info_url);
// Queue size 1: always publish the freshest frame, drop stale ones.
ros::Publisher img_pub = nh.advertise<sensor_msgs::Image>(camera_topic, 1);
ros::Publisher info_pub = nh.advertise<sensor_msgs::CameraInfo>(camera_info_topic, 1);
cv::VideoCapture vid_cap(img_path.c_str());
// Optionally seek into the source before the first frame.
if (start_sec > 0)
vid_cap.set(CV_CAP_PROP_POS_MSEC, 1000.0 * start_sec);
ros::Rate rate(pub_rate);
while (ros::ok())
{
cv::Mat img;
if (!vid_cap.read(img))
{
// End of stream: either loop back to the start position
// (repeat mode) or shut the node down.
if (repeat)
{
vid_cap.open(img_path.c_str());
if (start_sec > 0)
vid_cap.set(CV_CAP_PROP_POS_MSEC, 1000.0 * start_sec);
continue;
}
ROS_ERROR("Failed to capture frame.");
ros::shutdown();
}
else
{
//ROS_DEBUG("Image: %dx%dx%d, %zu, %d", img.rows, img.cols, img.channels(), img.elemSize(), img.type() == CV_8UC3);
// Force 8-bit 3-channel data so the "rgb8" encoding below is valid.
if (img.type() != CV_8UC3)
img.convertTo(img, CV_8UC3);
// Convert image from BGR format used by OpenCV to RGB.
cv::cvtColor(img, img, CV_BGR2RGB);
auto img_msg = boost::make_shared<sensor_msgs::Image>();
img_msg->header.stamp = ros::Time::now();
img_msg->header.frame_id = frame_id;
img_msg->encoding = "rgb8";
img_msg->width = img.cols;
img_msg->height = img.rows;
// Row stride in bytes; assumes `img` is continuous after cvtColor
// (true here since cvtColor allocates a fresh continuous Mat).
img_msg->step = img_msg->width * img.channels();
// Copy the raw pixel buffer into the message payload.
auto ptr = img.ptr<unsigned char>(0);
img_msg->data = std::vector<unsigned char>(ptr, ptr + img_msg->step * img_msg->height);
img_pub.publish(img_msg);
// Publish CameraInfo with a matching header only when calibrated.
if (camera_info_manager.isCalibrated())
{
auto info = boost::make_shared<sensor_msgs::CameraInfo>(camera_info_manager.getCameraInfo());
info->header = img_msg->header;
info_pub.publish(info);
}
}
ros::spinOnce();
rate.sleep();
}
return 0;
}
The video writing code (referred to this link):
#include "opencv2/opencv.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(){
// Create a VideoCapture object and use camera to capture the video
VideoCapture cap(0);
// Check if camera opened successfully
if(!cap.isOpened())
{
cout << "Error opening video stream" << endl;
return -1;
}
// Default resolution of the frame is obtained.The default resolution is system dependent.
int frame_width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
int frame_height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
// Define the codec and create VideoWriter object.The output is stored in 'outcpp.avi' file.
VideoWriter video("outcpp.avi",CV_FOURCC('M','J','P','G'),10,
Size(frame_width,frame_height));
while(1)
{
Mat frame;
// Capture frame-by-frame
cap >> frame;
// If the frame is empty, break immediately
if (frame.empty())
break;
// Write the frame into the file 'outcpp.avi'
video.write(frame);
// Display the resulting frame
imshow( "Frame", frame );
// Press ESC on keyboard to exit
char c = (char)waitKey(1);
if( c == 27 )
break;
}
// When everything done, release the video capture and write object
cap.release();
video.release();
// Closes all the windows
destroyAllWindows();
return 0;
}
Firstly, I tried incorporating the video writing code (without grayscale) into the publisher code, but it failed to run. All in all, the image_publisher code should produce a video after completing its task. What is the right way of doing this?
You cannot send an OpenCV Mat through the ROS topic system directly, since the Image topic uses a different encoding.
You need to use cv_bridge to convert images that you got from Opencv to ROS-image Format
there are some details and examples Here
I'm trying to make a live stereovision setup using OpenCV in C++ and two webcams. It is possible to seperately get frames from the two webcams. However, when I try to access them simultaneously in threads, I get a runtime error:
VIDIOC_STREAMON: Cannot allocate memory
OpenCV Error: Assertion failed (size.width>0 && size.height>0) in imshow, file /home/lorre851/Downloads/opencv-3.1.0/modules/highgui/src/window.cpp, line 281
terminate called after throwing an instance of 'cv::Exception'
what(): /home/lorre851/Downloads/opencv-3.1.0/modules/highgui/src/window.cpp:281: error: (-215) size.width>0 && size.height>0 in function imshow
My code is as follows:
#include <iostream>
#include <thread>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
// Continuously displays frames from camera index `camera` in its own
// window; returns when a key is pressed or the device cannot be opened.
void stream(int camera) {
    VideoCapture cap(camera); // open the requested camera
    if(cap.isOpened()) { // check if we succeeded
        while(true) {
            Mat frame;
            cap >> frame; // get a new frame from camera
            // A capture can transiently return an empty Mat (e.g. USB
            // bandwidth contention with the other camera). imshow()
            // asserts on empty images — exactly the
            // "size.width>0 && size.height>0" error — so skip them.
            if (!frame.empty())
                imshow("Stream " + to_string(camera), frame);
            if (waitKey(30) >= 0) break;
        }
    }
}
int main() {
thread cam1 (stream, 1);
thread cam2 (stream, 2);
cam1.join();
cam2.join();
return 0;
}
Anybody got any idea what could be causing this? I'm using CLion on Ubuntu 15.10 LTE.
UPDATE 1: I'm using index 1 and 2 for the camera's because I have a built-in camera in my laptop (0) and two USB camera's (1 and 2) plugged in. The USB camera's are the target hardware here.
UPDATE 2: Putting both camera feeds in one thread (see code below) works just fine (assuming your USB ports use separate busses, otherwise you'll get a 'NO SPACE LEFT ON DEVICE' error), but the delay between the two frames is noticeable, which is not ideal for a stereovision setup.
cv::VideoCapture camera0(0);
cv::VideoCapture camera1(1);
if( !camera0.isOpened() ) return 1;
if( !camera1.isOpened() ) return 1;
cv::Mat3b frame0;
cv::Mat3b frame1;
while(true) {
camera0 >> frame0;
camera1 >> frame1;
if(mat_is_empty(frame0)) cout << "SKIPPED FRAME IN 0";
else cv::imshow("Stream 0", frame0);
if(mat_is_empty(frame1)) cout << "SKIPPED FRAME IN 1";
else cv::imshow("Stream 1", frame1);
int c = cvWaitKey(40);
//exit the loop if user press "Esc" key (ASCII value of "Esc" is 27)
if(27 == char(c)) break;
}
This may sound like a silly answer, but your code works fine.
Count your cameras from zero.
thread cam1 (stream, 0); THIS works
// thread cam2 (stream, 1); I dont have second camera
First of all, I am on a Windows machine. My cameras are counted from 0, 1, 2.
I do not have a second web camera.
In the picture i just use your code and paste cam(0) my web camera
and for the second camera i use testing rtsp stream.
#include <iostream>
#include <thread>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
// Shows a live feed in its own window: index 0 opens the local
// webcam, index 1 opens a public RTSP test stream.
void stream(int camera) {
    VideoCapture cap;
    switch (camera) {
    case 0:
        cap.open(0); // local webcam
        break;
    case 1:
        cap.open("rtsp://mpv.cdn3.bigCDN.com:554/bigCDN/definst/mp4:bigbuckbunnyiphone_400.mp4"); // RTSP test stream
        break;
    }
    if (!cap.isOpened())
        return; // nothing to show
    for (;;) {
        Mat frame;
        cap >> frame; // pull the next frame from the source
        imshow("Stream " + to_string(camera), frame);
        if (waitKey(30) >= 0)
            break; // any key closes this feed
    }
}
int main() {
thread cam1(stream, 0);
thread cam2(stream, 1);
cam1.join();
cam2.join();
return 0;
}
I am using OpenCV 3.0.0-rc1 on Ubuntu 14.04 LTS Guest in VirtualBox with Windows 8 Host. I have an extremely simple program to read in frames from a webcam (Logitech C170) (from the OpenCV documentation). Unfortunately, it doesn't work (I have tried 3 different webcams). It throws an error "select timeout" every couple of seconds and reads a frame, but the frame is black. Any ideas?
The code is the following:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
// Main
// Main: streams the default webcam, converting each frame to
// grayscale and displaying it until a key is pressed or the stream
// ends. Returns 0 normally, -1 when the device cannot be opened.
int main(int argc, char **argv) {
    /* webcam setup */
    VideoCapture stream;
    stream.open(0);
    // check if video device has been initialized
    if (!stream.isOpened()) {
        fprintf(stderr, "Could not open Webcam device");
        return -1;
    }
    Mat colorImage, currentImage;
    /* video stream loop: stop as soon as a read fails, BEFORE using
       the frame — the original ran cvtColor on the (empty) frame of a
       failed read, which triggers an assertion. */
    while (stream.read(colorImage)) {
        if (colorImage.empty())  // extra guard: cvtColor asserts on empty input
            continue;
        cvtColor(colorImage, currentImage, CV_BGR2GRAY); // color to gray for current image
        imshow("Matches", currentImage);
        if (waitKey(30) >= 0) break;  // any key exits
    }
    return 0;
}
I found the problem: When using a webcam, make sure to connect it to the Virtual Machine using Devices->Webcams and NOT Devices->USB. Even though the webcam is detected as video0 when attaching it via Devices->USB, for some reasons it does not work.
I'm new at this but have been doing my share of reading and trying different setups to help narrow down the problem! Any help to get me past this roadblock would be much appreciated.
Currently I'm running: Win 7 Ultimate, Visual C++ 2010 Express, OpenCV 2.2.0, and a Microsoft - LifeCam Studio Webcam - Silver 1080p HD.
I'm getting no Build errors and when I run the program my camera comes on (blue light indicating it being on) and the screen pops up that i thought should show my camera feed but instead its just a grey box with nothing inside. The below code I thought would help narrow down the problem but I'm at a loss.
int main()
{
CvCapture *webcam = NULL;
webcam = cvCreateCameraCapture(-1);
if(webcam!=NULL)
{
IplImage *frame = cvQueryFrame(webcam);
cvShowImage("WEBCAM_TEST",frame);
cvWaitKey(0);
return 0;
}
else
{
std::cout<<"CAMERA NOT DETECTED"<<std::endl;
return 0;
}
}
Your code is sometimes showing a black image and sometimes showing a correct image on my system (Windows 7 64-bit, VS2010, OpenCV 2.4.3). However, when I put it in a loop for non-stop streaming, the image is OK — so just modify your code slightly and try:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
int main()
{
CvCapture *webcam = NULL;
webcam = cvCreateCameraCapture(-1);
if(webcam!=NULL)
{
while(true)
{
IplImage *frame = cvQueryFrame(webcam);
cvShowImage("WEBCAM_TEST",frame);
cvWaitKey(20);
}
}
else
{
std::cout<<"CAMERA NOT DETECTED"<<std::endl;
return 0;
}
return 0;
}
In OpenCV if you get frame just after creating camera capture usually it's grey. All you have to do is just get next frame or wait before getting first frame. This code:
// Demonstrates the "first frame is grey" quirk: reading a frame
// immediately after opening the capture usually yields a grey image.
// Uncommenting option 1 (read an extra frame first) or option 2
// (wait before reading) makes the displayed frame valid.
int _tmain(int argc, _TCHAR* argv[])
{
    VideoCapture cap(0);
    if (!cap.isOpened())
        return -1;

    namedWindow("01", 1);
    Mat frame;
    //cap >> frame; //option 1
    //waitKey(5000); //option 2
    cap >> frame;
    imshow("01", frame);
    waitKey(30);
    return 0;
}
will show grey frame, but if you uncomment option 1 or option 2 - it will work fine.