I am trying to use a .caffemodel alongside OpenCV on iOS devices. I found this GitHub repository, but it can only be built with Xcode 6. I am working with Xcode 7, but I also downloaded Xcode 6 and still had no success building it.
How can I use a caffemodel with OpenCV on iOS 9?
PS: The alternative would be this, but it's written in Swift & Metal and I need to be able to use it with OpenCV.
You can use the OpenCV DNN contrib module.
First you need to build OpenCV with the contrib modules; you can find the steps here.
Then you can import and use the .caffemodel by following this tutorial.
Here is an updated version of the tutorial, since it's not working as is:
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;

#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;

/* Find the best class for the blob (i.e. the class with maximal probability) */
void getMaxClass(dnn::Blob &probBlob, int *classId, double *classProb)
{
    Mat probMat = probBlob.matRefConst().reshape(1, 1); // reshape the blob to a 1x1000 matrix
    Point classNumber;
    minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
    *classId = classNumber.x;
}

std::vector<String> readClassNames(const char *filename = "synset_words.txt")
{
    std::vector<String> classNames;
    std::ifstream fp(filename);
    if (!fp.is_open())
    {
        std::cerr << "File with classes labels not found: " << filename << std::endl;
        exit(-1);
    }
    std::string name;
    while (!fp.eof())
    {
        std::getline(fp, name);
        if (name.length())
            classNames.push_back(name.substr(name.find(' ') + 1));
    }
    fp.close();
    return classNames;
}

int main(int argc, char **argv)
{
    cv::dnn::initModule();

    String modelTxt = "bvlc_googlenet.prototxt";
    String modelBin = "bvlc_googlenet.caffemodel";
    String imageFile = (argc > 1) ? argv[1] : "space_shuttle.jpg";

    Ptr<dnn::Importer> importer;
    try // Try to import the Caffe GoogLeNet model
    {
        importer = dnn::createCaffeImporter(modelTxt, modelBin);
    }
    catch (const cv::Exception &err) // Importer can throw errors; we will catch them
    {
        std::cerr << err.msg << std::endl;
    }
    if (!importer)
    {
        std::cerr << "Can't load network by using the following files: " << std::endl;
        std::cerr << "prototxt: " << modelTxt << std::endl;
        std::cerr << "caffemodel: " << modelBin << std::endl;
        std::cerr << "bvlc_googlenet.caffemodel can be downloaded here:" << std::endl;
        std::cerr << "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel" << std::endl;
        exit(-1);
    }

    dnn::Net net;
    importer->populateNet(net);
    importer.release(); // We don't need the importer anymore

    Mat img = imread(imageFile);
    if (img.empty())
    {
        std::cerr << "Can't read image from the file: " << imageFile << std::endl;
        exit(-1);
    }

    resize(img, img, Size(224, 224)); // GoogLeNet accepts only 224x224 RGB images
    dnn::Blob inputBlob = dnn::Blob(img); // Convert Mat to dnn::Blob batch of images

    net.setBlob(".data", inputBlob); // set the network input
    net.forward(); // compute output

    dnn::Blob prob = net.getBlob("prob"); // gather output of "prob" layer

    int classId;
    double classProb;
    getMaxClass(prob, &classId, &classProb); // find the best class

    std::vector<String> classNames = readClassNames();
    std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
    std::cout << "Probability: " << classProb * 100 << "%" << std::endl;

    return 0;
} // main
I will post another answer because there are some differences with recent versions.
First of all, dnn is now part of the standard OpenCV library, so you do not have to build it from contrib_modules.
The function to load the network is readNetFromCaffe.
For example, the following code loads the NN:
std::string modelName = "path/to/mymodel.caffemodel";
std::string protoName = "path/to/deploy.prototxt";

cv::dnn::Net net;
try
{
    net = cv::dnn::readNetFromCaffe(protoName, modelName);
}
catch (cv::Exception& e)
{
    std::cerr << "Exception: " << e.what() << std::endl;
    if (net.empty())
    {
        std::cerr << "Can't load network by using the following files: " << std::endl;
        std::cerr << "prototxt: " << protoName << std::endl;
        std::cerr << "caffemodel: " << modelName << std::endl;
        std::cerr << "bvlc_googlenet.caffemodel can be downloaded here:" << std::endl;
        std::cerr << "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel" << std::endl;
        exit(-1);
    }
}
Then you can run the NN:
cv::Mat res_mat;
float res;
cv::Mat inputBlob = cv::dnn::blobFromImage(roi, 1.0f, cv::Size(227, 227),
                                           cv::Scalar(0, 0, 0), false);
net.setInput(inputBlob);
// During the forward pass the output of each network layer is computed,
// but in this example we need the output of the "score" layer only.
res_mat = net.forward("score");
std::cout << res_mat << std::endl;
res_mat = res_mat.reshape(1, 1); // reshape the blob to a 1x2 matrix
return res_mat.at<float>(0);
The function cv::dnn::blobFromImage resizes the image to the input size the network expects, given by the third argument (in my case cv::Size(227, 227)). The cv::Scalar(0, 0, 0) argument holds the means to subtract from the three BGR channels.
score is the name of the output layer in the NN I used. You can find this information in the prototxt file.
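For reference, here is a commented sketch of that blobFromImage call with each parameter spelled out, plus a way to list the layer names programmatically instead of digging through the prototxt. This is a sketch only; net and img stand in for the objects from the snippet above:
#include <opencv2/dnn.hpp>
#include <iostream>

// Sketch only: "net" is assumed to be loaded as above, "img" is any BGR image.
void inspect(cv::dnn::Net& net, const cv::Mat& img)
{
    // blobFromImage(image, scalefactor, size, mean, swapRB)
    cv::Mat inputBlob = cv::dnn::blobFromImage(
        img,                 // input image (BGR, as loaded by cv::imread)
        1.0f,                // scale factor, applied after mean subtraction
        cv::Size(227, 227),  // spatial size the network expects
        cv::Scalar(0, 0, 0), // per-channel means to subtract (none here)
        false);              // swapRB: keep BGR channel order for Caffe models

    // List the layer names, e.g. to find the output layer
    // ("score" in my network) without opening the prototxt:
    for (const cv::String& name : net.getLayerNames())
        std::cout << name << std::endl;
}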
I can use the VideoReader class of OpenCV to decode an IP camera stream or any video file by using its path. This decoding process uses the GPU as expected; no problems so far. Here is a simple example that works fine and uses the GPU for decoding:
int main()
{
    const std::string fname("rtsp://user:pwd@192.168.1.108");
    // const std::string fname("/path/to/video/file.mp4"); // this also works

    cv::cuda::GpuMat d_frame;
    cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);
    cv::Mat frame;

    for (;;)
    {
        if (!d_reader->nextFrame(d_frame))
            break;
        cv::Mat myMat(d_frame); // downloads the decoded GPU frame to host memory
        cv::imshow("GPU", myMat);
        if (cv::waitKey(3) > 0)
            break;
    }

    return 0;
}
I want to use the GPU to capture streams from my webcam, like VideoCapture(0). I know, as @berak mentioned here: There is no way to do that with VideoCapture.
My questions are:
1 - Is it possible to stream using the GPU with the VideoReader class? It seems VideoReader only accepts strings, not device indexes.
2 - What are the other ways to stream using the GPU?
1) Yes, it seems so! I found the following code in the OpenCV GPU samples here. You could give it a try. You need to have OpenCV built with OpenGL though... currently that's where I'm stuck.
2) I'm not sure about other options, but here is the code from GitHub.
#include <iostream>
#include "opencv2/opencv_modules.hpp"

#if defined(HAVE_OPENCV_CUDACODEC)

#include <string>
#include <vector>
#include <algorithm>
#include <numeric>

#include <opencv2/core.hpp>
#include <opencv2/core/opengl.hpp>
#include <opencv2/cudacodec.hpp>
#include <opencv2/highgui.hpp>

int main(int argc, const char* argv[])
{
    std::cout << "Starting,...\n";
    const std::string fname = "0";

    cv::namedWindow("CPU", cv::WINDOW_NORMAL);
    cv::namedWindow("GPU", cv::WINDOW_OPENGL);
    cv::cuda::setGlDevice();

    cv::Mat frame;
    cv::VideoCapture reader(fname);

    cv::cuda::GpuMat d_frame;
    cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);

    cv::TickMeter tm;
    std::vector<double> cpu_times;
    std::vector<double> gpu_times;
    int gpu_frame_count = 0, cpu_frame_count = 0;

    for (;;)
    {
        tm.reset(); tm.start();
        if (!reader.read(frame))
            break;
        tm.stop();
        cpu_times.push_back(tm.getTimeMilli());
        cpu_frame_count++;

        cv::imshow("CPU", frame);
        if (cv::waitKey(3) > 0)
            break;
    }

    for (;;)
    {
        tm.reset(); tm.start();
        if (!d_reader->nextFrame(d_frame))
            break;
        tm.stop();
        gpu_times.push_back(tm.getTimeMilli());
        gpu_frame_count++;

        cv::imshow("GPU", d_frame);
        if (cv::waitKey(3) > 0)
            break;
    }

    if (!cpu_times.empty() && !gpu_times.empty())
    {
        std::cout << std::endl << "Results:" << std::endl;

        std::sort(cpu_times.begin(), cpu_times.end());
        std::sort(gpu_times.begin(), gpu_times.end());

        double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
        double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();

        std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << " Frames " << cpu_frame_count << std::endl;
        std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << " Frames " << gpu_frame_count << std::endl;
    }

    return 0;
}

#else

int main()
{
    std::cout << "OpenCV was built without CUDA Video decoding support\n" << std::endl;
    return 0;
}

#endif
I'm attempting to write a simple program to extract some data from a bunch of Avro files. The schema for each file may be different, so I would like to read the files generically (i.e. without having to pregenerate and then compile in the schema for each) using the C++ interface.
I have been attempting to follow the generic.cc example, but it assumes a separate schema, whereas I would like to read the schema from each Avro file.
Here is my code:
#include <fstream>
#include <iostream>

#include "Compiler.hh"
#include "DataFile.hh"
#include "Decoder.hh"
#include "Generic.hh"
#include "Stream.hh"

const std::string BOLD("\033[1m");
const std::string ENDC("\033[0m");
const std::string RED("\033[31m");
const std::string YELLOW("\033[33m");

int main(int argc, char** argv)
{
    std::cout << "AVRO Test\n" << std::endl;

    if (argc < 2)
    {
        std::cerr << BOLD << RED << "ERROR: " << ENDC << "please provide an "
                  << "input file\n" << std::endl;
        return -1;
    }

    avro::DataFileReaderBase dataFile(argv[1]);
    auto dataSchema = dataFile.dataSchema();

    // Write out data schema in JSON for grins
    std::ofstream output("data_schema.json");
    dataSchema.toJson(output);
    output.close();

    avro::DecoderPtr decoder = avro::binaryDecoder();
    auto inStream = avro::fileInputStream(argv[1]);
    decoder->init(*inStream);

    avro::GenericDatum datum(dataSchema);
    avro::decode(*decoder, datum);
    std::cout << "Type: " << datum.type() << std::endl;

    return 0;
}
Every time I run the code, no matter what file I use, I get this:
$ ./avrotest twitter.avro
AVRO Test

terminate called after throwing an instance of 'avro::Exception'
  what():  Cannot have negative length: -40
Aborted
In addition to my own data files, I have tried using the data files located here: https://github.com/miguno/avro-cli-examples, with the same result.
I tried using the avrocat utility on all of the same files and it works fine. What am I doing wrong?
(NOTE: outputting the data schema for each file in JSON works correctly as expected)
After a bunch more fooling around, I figured it out. You're supposed to use DataFileReader templated with GenericDatum, with the end result being something like this:
#include <fstream>
#include <iostream>

#include "Compiler.hh"
#include "DataFile.hh"
#include "Decoder.hh"
#include "Generic.hh"
#include "Stream.hh"

const std::string BOLD("\033[1m");
const std::string ENDC("\033[0m");
const std::string RED("\033[31m");
const std::string YELLOW("\033[33m");

int main(int argc, char** argv)
{
    std::cout << "AVRO Test\n" << std::endl;

    if (argc < 2)
    {
        std::cerr << BOLD << RED << "ERROR: " << ENDC << "please provide an "
                  << "input file\n" << std::endl;
        return -1;
    }

    avro::DataFileReader<avro::GenericDatum> reader(argv[1]);
    auto dataSchema = reader.dataSchema();

    // Write out data schema in JSON for grins
    std::ofstream output("data_schema.json");
    dataSchema.toJson(output);
    output.close();

    avro::GenericDatum datum(dataSchema);
    while (reader.read(datum))
    {
        std::cout << "Type: " << datum.type() << std::endl;
        if (datum.type() == avro::AVRO_RECORD)
        {
            const avro::GenericRecord& r = datum.value<avro::GenericRecord>();
            std::cout << "Field-count: " << r.fieldCount() << std::endl;
            // TODO: pull out each field
        }
    }
    return 0;
}
Perhaps an example like this should be included with libavro...
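For the TODO above, here is a minimal sketch of pulling out each field through the generic API, assuming the record's schema is reachable via r.schema() (that is how I read the avrocpp headers; the string case is just one example type):
#include <iostream>
#include "DataFile.hh"
#include "Generic.hh"

// Sketch for the TODO above: walk the fields of a generic record.
// Assumes "datum" holds an AVRO_RECORD, as in the loop above.
void printFields(const avro::GenericDatum& datum)
{
    const avro::GenericRecord& r = datum.value<avro::GenericRecord>();
    const avro::NodePtr& schema = r.schema(); // the record schema carries the field names
    for (size_t i = 0; i < r.fieldCount(); ++i)
    {
        const avro::GenericDatum& field = r.fieldAt(i);
        std::cout << schema->nameAt(i) << " (type " << field.type() << ")";
        if (field.type() == avro::AVRO_STRING) // extract one concrete type as an example
            std::cout << " = " << field.value<std::string>();
        std::cout << std::endl;
    }
}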
I am trying to use the MobileNet SSD + deep neural network (dnn) module in OpenCV for object detection. I loaded and used the model successfully. As the output of net.forward I obtain a Mat object containing information about the detected objects. Unfortunately, I struggle with "the easy part of the work": reading what exactly was detected.
Here is what I know about the output Mat object:
It has 4 dimensions.
The size is 1 x 1 x number_of_objects_detected x 7.
The seven pieces of information about each object are: the 1st is the class ID, the 2nd is the confidence, and the 3rd-7th are the bounding box values.
I can't find any C++ example, but I found many Python examples. They read the data like this:
for i in np.arange(0, detections.shape[2]):
    confidence = detections[0, 0, i, 2]
What is the easiest way to do this in C++? I.e., I need to read the data at specific coordinates in a high-dimensional Mat.
Thank you for your kind help. I am quite new to C++ and sometimes find it overwhelming...
I am using OpenCV 3.3.0. The GitHub repository with the MobileNet SSD I am using: https://github.com/chuanqi305/MobileNet-SSD.
The code of my program:
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include <fstream>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

// function to create vector of class names
std::vector<String> createClassNames() {
    std::vector<String> classNames;
    classNames.push_back("background");
    classNames.push_back("aeroplane");
    classNames.push_back("bicycle");
    classNames.push_back("bird");
    classNames.push_back("boat");
    classNames.push_back("bottle");
    classNames.push_back("bus");
    classNames.push_back("car");
    classNames.push_back("cat");
    classNames.push_back("chair");
    classNames.push_back("cow");
    classNames.push_back("diningtable");
    classNames.push_back("dog");
    classNames.push_back("horse");
    classNames.push_back("motorbike");
    classNames.push_back("person");
    classNames.push_back("pottedplant");
    classNames.push_back("sheep");
    classNames.push_back("sofa");
    classNames.push_back("train");
    classNames.push_back("tvmonitor");
    return classNames;
}

// main function
int main(int argc, char **argv)
{
    // set inputs
    String modelTxt = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/python/object-detection-deep-learning/MobileNetSSD_deploy.prototxt";
    String modelBin = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/python/object-detection-deep-learning/MobileNetSSD_deploy.caffemodel";
    String imageFile = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/puppies.jpg";
    std::vector<String> classNames = createClassNames();

    // read the Caffe model
    Net net;
    try {
        net = dnn::readNetFromCaffe(modelTxt, modelBin);
    }
    catch (cv::Exception& e) {
        std::cerr << "Exception: " << e.what() << std::endl;
        if (net.empty())
        {
            std::cerr << "Can't load network." << std::endl;
            exit(-1);
        }
    }

    // read image
    Mat img = imread(imageFile);

    // create input blob
    resize(img, img, Size(300, 300));
    Mat inputBlob = blobFromImage(img, 0.007843, Size(300, 300), Scalar(127.5)); // convert Mat to a 4D blob (image batch)

    // apply the blob on the input layer
    net.setInput(inputBlob); // set the network input

    // classify the image by applying the blob on the net
    Mat detections = net.forward("detection_out"); // compute output

    // print some information about the detections
    std::cout << "dims: " << detections.dims << endl;
    std::cout << "size: " << detections.size << endl;

    // show image
    String winName("image");
    imshow(winName, img);

    // Wait for keypress
    waitKey();
}
Check out the official OpenCV tutorial on how to scan images.
The normal way you'd access a 3-channel (i.e. color) Mat would be using the Mat::at() method of the Mat class, which is heavily overloaded for all sorts of accessor options. Specifically, you can pass in an array of indices or a vector of indices.
Here's a basic example creating a 4D Mat and accessing a specific element:
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    int size[4] = { 2, 2, 5, 7 };
    cv::Mat M(4, size, CV_32FC1, cv::Scalar(1));
    int indx[4] = { 0, 0, 2, 3 };
    std::cout << "M[0, 0, 2, 3] = " << M.at<float>(indx) << std::endl;
}
M[0, 0, 2, 3] = 1
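The same element can also be read through the cv::Vec overload of at() mentioned above, which saves declaring a raw index array:
// Equivalent access with a cv::Vec4i instead of a raw int array:
std::cout << "M[0, 0, 2, 3] = "
          << M.at<float>(cv::Vec4i(0, 0, 2, 3)) << std::endl;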
Someone might find this question in the context of using the MobileNet SSD + deep neural network (dnn) module in OpenCV for object detection, so here I post the now-working object detection code. Alexander Reynolds, thank you for your help.
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include <fstream>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

// function to create vector of class names
std::vector<String> createClassNames() {
    std::vector<String> classNames;
    classNames.push_back("background");
    classNames.push_back("aeroplane");
    classNames.push_back("bicycle");
    classNames.push_back("bird");
    classNames.push_back("boat");
    classNames.push_back("bottle");
    classNames.push_back("bus");
    classNames.push_back("car");
    classNames.push_back("cat");
    classNames.push_back("chair");
    classNames.push_back("cow");
    classNames.push_back("diningtable");
    classNames.push_back("dog");
    classNames.push_back("horse");
    classNames.push_back("motorbike");
    classNames.push_back("person");
    classNames.push_back("pottedplant");
    classNames.push_back("sheep");
    classNames.push_back("sofa");
    classNames.push_back("train");
    classNames.push_back("tvmonitor");
    return classNames;
}

// main function
int main(int argc, char **argv)
{
    // set inputs
    String modelTxt = "Path to MobileNetSSD_deploy.prototxt";
    String modelBin = "Path to MobileNetSSD_deploy.caffemodel";
    String imageFile = "Path to test image";
    std::vector<String> classNames = createClassNames();

    // read the Caffe model
    Net net;
    try {
        net = dnn::readNetFromCaffe(modelTxt, modelBin);
    }
    catch (cv::Exception& e) {
        std::cerr << "Exception: " << e.what() << std::endl;
        if (net.empty())
        {
            std::cerr << "Can't load network." << std::endl;
            exit(-1);
        }
    }

    // read image
    Mat img = imread(imageFile);
    Size imgSize = img.size();

    // create input blob
    Mat img300;
    resize(img, img300, Size(300, 300));
    Mat inputBlob = blobFromImage(img300, 0.007843, Size(300, 300), Scalar(127.5)); // convert Mat to a 4D blob (image batch)

    // apply the blob on the input layer
    net.setInput(inputBlob); // set the network input

    // classify the image by applying the blob on the net
    Mat detections = net.forward("detection_out"); // compute output

    // look at what the detector found
    for (int i = 0; i < detections.size[2]; i++) {

        // print information into console
        cout << "-----------------" << endl;
        cout << "Object nr. " << i + 1 << endl;

        // detected class
        int indxCls[4] = { 0, 0, i, 1 };
        int cls = detections.at<float>(indxCls);
        std::cout << "class: " << classNames[cls] << endl;

        // confidence
        int indxCnf[4] = { 0, 0, i, 2 };
        float cnf = detections.at<float>(indxCnf);
        std::cout << "confidence: " << cnf * 100 << "%" << endl;

        // bounding box
        int indxBx[4] = { 0, 0, i, 3 };
        int indxBy[4] = { 0, 0, i, 4 };
        int indxBw[4] = { 0, 0, i, 5 };
        int indxBh[4] = { 0, 0, i, 6 };
        int Bx = detections.at<float>(indxBx) * imgSize.width;
        int By = detections.at<float>(indxBy) * imgSize.height;
        int Bw = detections.at<float>(indxBw) * imgSize.width - Bx;
        int Bh = detections.at<float>(indxBh) * imgSize.height - By;
        std::cout << "bounding box [x, y, w, h]: " << Bx << ", " << By << ", " << Bw << ", " << Bh << endl;

        // draw bounding box to image
        Rect bbox(Bx, By, Bw, Bh);
        rectangle(img, bbox, Scalar(255, 0, 255), 1, 8, 0);
    }

    // show image
    String winName("image");
    imshow(winName, img);

    // Wait for keypress
    waitKey();
}
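One note on the loop above: it prints and draws every returned row, including near-zero detections. In practice you would filter by confidence first; a minimal sketch of the loop head (the 0.5 threshold is my own arbitrary choice):
const float confThreshold = 0.5f; // assumption: tune per application
for (int i = 0; i < detections.size[2]; i++) {
    int indxCnf[4] = { 0, 0, i, 2 };
    float cnf = detections.at<float>(indxCnf);
    if (cnf < confThreshold)
        continue; // skip weak detections before printing/drawing
    // ... same per-detection code as above ...
}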
I am trying to integrate code already written in ROS with some basic ViSP lines so as to display a camera feed using ViSP functions. I am a beginner in ViSP, hence I am trying something basic. I am attaching the relevant code lines here:
// Lots of lines of code above and below this code block
cv::Mat src_gray;
cv::cvtColor(imageLeft, src_gray, CV_RGB2GRAY); // imageLeft is a colour image from the camera via another node

vpImage<unsigned char> I;
vpImageConvert::convert(src_gray, I);

vpDisplayOpenCV display;
if (this->lt == false) // this check prevents an endless stream of new windows
{
    display.init(I, 100, 100, "Line tracking");
    this->lt = true;
}
vpDisplay::display(I);
vpDisplay::flush(I);
Let me assure you that this piece of code is in a callback, so it is equivalent to an infinite while loop unless the process is stopped.
I am not able to get the camera output in the window. When I run the node, the window opens but shows no image. Any ideas?
The ViSP-ROS interface has been changing recently. While ViSP Bridge provides a low-level interface between ROS and ViSP, Visp ROS is a better, higher-level interface. From there you can reach this tutorial, where a regular ViSP code example is modified to use ROS.
The ViSP code similar to yours:
#include <visp/vp1394TwoGrabber.h>
#include <visp/vpDisplayX.h>
#include <visp/vpImage.h>

int main()
{
#ifdef VISP_HAVE_DC1394_2
    try {
        vpImage<unsigned char> I; // Create a gray level image container
        bool reset = true; // Enable bus reset during construction (default)
        vp1394TwoGrabber g(reset); // Create a grabber based on libdc1394-2.x third party lib
        g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
        g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
        g.open(I);

        std::cout << "Image size: " << I.getWidth() << " " << I.getHeight() << std::endl;

#ifdef VISP_HAVE_X11
        vpDisplayX d(I);
#else
        std::cout << "No image viewer is available..." << std::endl;
#endif

        while (1) {
            g.acquire(I);
            vpDisplay::display(I);
            vpDisplay::flush(I);
            if (vpDisplay::getClick(I, false))
                break;
        }
    }
    catch (const vpException &e) {
        std::cout << "Catch an exception: " << e << std::endl;
    }
#endif
}
And the ROS-enabled code:
#include <visp/vpDisplayX.h>
#include <visp/vpImage.h>
#include <visp_ros/vpROSGrabber.h>

int main()
{
    try {
        //vpImage<unsigned char> I; // Create a gray level image container
        vpImage<vpRGBa> I; // Create a color image container
        vpROSGrabber g; // Create a grabber based on ROS
        g.setCameraInfoTopic("/camera/camera_info");
        g.setImageTopic("/camera/image_raw");
        g.setRectify(true);
        g.open(I);

        std::cout << "Image size: " << I.getWidth() << " " << I.getHeight() << std::endl;

#ifdef VISP_HAVE_X11
        vpDisplayX d(I);
#else
        std::cout << "No image viewer is available..." << std::endl;
#endif

        while (1) {
            g.acquire(I);
            vpDisplay::display(I);
            vpDisplay::flush(I);
            if (vpDisplay::getClick(I, false))
                break;
        }
    }
    catch (const vpException &e) {
        std::cout << "Catch an exception: " << e << std::endl;
    }
}
Hope this helps!
I am developing a small webcam application with a Logitech C920 and OpenCV.
I can actually get images from the camera at the given resolution without any problem. But apart from frame width and height, I can't access any setting in the camera. I have the following code; as you can guess, it produced all-zero lines except for the first two.
Am I doing something wrong, or is this another proprietary BS from Logitech? If this is a limitation of OpenCV, is there any other option for developing an application for this webcam on Windows?
m_cam.open(0);
if (!m_cam.isOpened()) // check if we succeeded
{
    std::cerr << "ERROR: Could not open cameras." << std::endl;
    return;
}

int ex = static_cast<int>(m_cam.get(CV_CAP_PROP_FOURCC));
char EXT[] = {(char)(ex & 0XFF), (char)((ex & 0XFF00) >> 8), (char)((ex & 0XFF0000) >> 16), (char)((ex & 0XFF000000) >> 24), 0};

m_cam.set(CV_CAP_PROP_FOURCC, CV_FOURCC('H','2','6','4'));
m_cam.set(CV_CAP_PROP_FRAME_WIDTH, 1280);  // 2304 // 1829 // 1200 // 800
m_cam.set(CV_CAP_PROP_FRAME_HEIGHT, 720);  // 1536 // 1080 // 800 // 600
m_cam.set(CV_CAP_PROP_FPS, 30);
//m_cam.set(CV_CAP_PROP_EXPOSURE, 0);

std::cout << m_cam.get(CV_CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_FPS) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_EXPOSURE) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_FORMAT) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_CONTRAST) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_BRIGHTNESS) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_SATURATION) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_HUE) << std::endl;
std::cout << m_cam.get(CV_CAP_PROP_POS_FRAMES) << std::endl;
I fixed the problem by rebuilding OpenCV after installing DirectShow and ffmpeg. I can even set some of the values, such as frame rate, now; but the camera working as specified seems to be a separate matter. In my case, after setting the resolution without setting the frame rate, the camera resolution falls back to 640 x 480. Although my computer has an H264 decoder installed, 1920 x 1080 produces 5-7 fps with OpenCV.
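For what it's worth, as far as I know the C920 only delivers 1080p at 30 fps in MJPEG over USB 2.0, while raw capture tops out at a few fps, which would explain the 5-7 fps above. A sketch of requesting MJPG before the resolution (whether the driver honors it depends on the backend OpenCV was built with):
cv::VideoCapture cam(0);
// Request MJPG *before* setting the resolution; many UVC drivers
// only offer 1080p at 30 fps in MJPEG, not in raw YUY2.
cam.set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
cam.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
cam.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
cam.set(CV_CAP_PROP_FPS, 30);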
This post contains example code for how you can access your camera at 30 fps in full HD.
Edit:
To elaborate a bit:
I also have the Logitech C920, OpenCV 2.4.3, Windows 7 64-bit. These are the values I can read with the code below (width and height default to 640x480, I think):
CV_CAP_PROP_FRAME_WIDTH 1920
CV_CAP_PROP_FRAME_HEIGHT 1080
CV_CAP_PROP_FPS 0
CV_CAP_PROP_EXPOSURE -5
CV_CAP_PROP_FORMAT -1
CV_CAP_PROP_CONTRAST 128
CV_CAP_PROP_BRIGHTNESS 128
CV_CAP_PROP_SATURATION 128
CV_CAP_PROP_HUE -8.58993e+008
CV_CAP_PROP_POS_FRAMES -1
CV_CAP_PROP_FOURCC -4.66163e+008
Input codec type: }Ù6õ //Obviously wrong
The code I used:
#include <iostream>  // for standard I/O
#include <string>    // for strings

#include <opencv2/core/core.hpp>       // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
#include "opencv2/opencv.hpp"

using namespace cv;
using namespace std;

void getCameraInfo(VideoCapture m_cam){
    std::cout << "CV_CAP_PROP_FRAME_WIDTH " << m_cam.get(CV_CAP_PROP_FRAME_WIDTH) << std::endl;
    std::cout << "CV_CAP_PROP_FRAME_HEIGHT " << m_cam.get(CV_CAP_PROP_FRAME_HEIGHT) << std::endl;
    std::cout << "CV_CAP_PROP_FPS " << m_cam.get(CV_CAP_PROP_FPS) << std::endl;
    std::cout << "CV_CAP_PROP_EXPOSURE " << m_cam.get(CV_CAP_PROP_EXPOSURE) << std::endl;
    std::cout << "CV_CAP_PROP_FORMAT " << m_cam.get(CV_CAP_PROP_FORMAT) << std::endl; // default CV_8UC3?!
    std::cout << "CV_CAP_PROP_CONTRAST " << m_cam.get(CV_CAP_PROP_CONTRAST) << std::endl;
    std::cout << "CV_CAP_PROP_BRIGHTNESS " << m_cam.get(CV_CAP_PROP_BRIGHTNESS) << std::endl;
    std::cout << "CV_CAP_PROP_SATURATION " << m_cam.get(CV_CAP_PROP_SATURATION) << std::endl;
    std::cout << "CV_CAP_PROP_HUE " << m_cam.get(CV_CAP_PROP_HUE) << std::endl;
    std::cout << "CV_CAP_PROP_POS_FRAMES " << m_cam.get(CV_CAP_PROP_POS_FRAMES) << std::endl;
    std::cout << "CV_CAP_PROP_FOURCC " << m_cam.get(CV_CAP_PROP_FOURCC) << std::endl;

    int ex = static_cast<int>(m_cam.get(CV_CAP_PROP_FOURCC)); // Get codec type - int form
    char EXT[] = {(char)(ex & 255), (char)((ex & 0XFF00) >> 8), (char)((ex & 0XFF0000) >> 16), (char)((ex & 0XFF000000) >> 24), 0};
    cout << "Input codec type: " << EXT << endl;
}

int main(int, char**){
    string resVideoPath = "C:\\yourPath\\video.avi";
    VideoCapture vidSource;
    double fps = 10;

    vidSource = VideoCapture(0); // open the default camera
    vidSource.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
    vidSource.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
    if (!vidSource.isOpened()){
        cout << "Could not open the input video to read" << endl;
        return -1;
    }
    getCameraInfo(vidSource);

    namedWindow("Capture", CV_WINDOW_AUTOSIZE);
    while (true){
        Mat frame;
        vidSource >> frame;
        if (!frame.data){
            cerr << "Could not retrieve frame.";
            return -1;
        }
        imshow("Capture", frame);
        if (waitKey(1) == 27)
            break;
    }
    return 0;
}