OpenCV Trackbar callback function and unexpected state changes - c++

When I run this program and adjust the slider multiple times, the image appears different even though the slider is at the same position. If you try this code, move the slider back and forth between the minimum and maximum positions several times and you will see a slight alteration to the image each time.
I have traced the point at which this happens to the line calling the add function in my onProgram6Trackbar1 function; removing it removes the variation between slider movements. Why is this happening?
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
#include <cmath>
class ProgramData {
public:
ProgramData() {
k1=0;
k2=0;
k3=0;
k4=0;
k5=0;
}
int k1;
int k2;
int k3;
int k4;
int k5;
Mat * source_U8C3;
Mat * temp1_U8C3;
Mat * temp2_U8C3;
Mat * temp3_U8C1;
Mat * temp4_U8C1;
Mat * temp5_U8C1;
Mat * temp6_U8C1;
Mat * temp7_U8C1;
vector<Mat> tempv1_U8C1;
vector<Mat> tempv2_U8C1;
Mat * output_U8C1;
Mat * output_U8C3;
Mat * dim1by1;
};
static void onProgram6Trackbar1(int v, void* vp) {
ProgramData * pd = (ProgramData *) vp;
*(pd->temp3_U8C1) = pd->tempv1_U8C1[2].clone();
inRange(*(pd->temp3_U8C1), pd->k1, 255, *(pd->temp4_U8C1));
bitwise_not(*(pd->temp4_U8C1), *(pd->temp5_U8C1));
bitwise_and(*(pd->temp5_U8C1), *(pd->temp3_U8C1), *(pd->temp6_U8C1));
bitwise_or(*(pd->temp6_U8C1), Scalar(pd->k1), *(pd->temp7_U8C1), *(pd->temp4_U8C1));
imshow( "Glare Reduction 4", *(pd->temp7_U8C1));
}
void program6(char * argv) {
ProgramData pd;
pd.k1 = 0;
Mat source = imread(argv, IMREAD_COLOR); // Read the file
pd.source_U8C3 = &source;
Size s( pd.source_U8C3->size().width / 1.3, pd.source_U8C3->size().height / 1.3 );
resize( *(pd.source_U8C3), *(pd.source_U8C3), s, 0, 0, CV_INTER_AREA );
pd.output_U8C3 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,pd.source_U8C3->type());
pd.output_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
//pd.temp1_U8C3 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,pd.source_U8C3->type());
pd.temp2_U8C3 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,pd.source_U8C3->type());
pd.temp3_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
pd.temp4_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
pd.temp5_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
pd.temp6_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
pd.temp7_U8C1 = new Mat(pd.source_U8C3->rows,pd.source_U8C3->cols,CV_8UC1);
pd.dim1by1 = new Mat(100,800,CV_8UC1);
cout << "source type = " << pd.source_U8C3->type() << endl;
if(! pd.source_U8C3->data ) { cout << "Could not open image" << std::endl; return;}
cvtColor(*(pd.source_U8C3), *(pd.temp2_U8C3), CV_BGR2HSV); // original to hsv
split(*(pd.temp2_U8C3), pd.tempv1_U8C1);
namedWindow( "Glare Reduction - Controls", WINDOW_AUTOSIZE ); // Create a window for display.
onProgram6Trackbar1(0,&pd);
createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
imshow( "Glare Reduction - Controls", *(pd.dim1by1) ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
}
int main( int argc, char** argv )
{
program6("Blocks1.jpg");
}
Update 1:
New code is posted below. I tried changing the code so that it does not use any Mat pointers, but it still does exactly the same thing.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
#include <cmath>
class ProgramData {
public:
ProgramData() {
k1=0;
}
int k1;
Mat source_U8C3;
Mat temp1_U8C3;
Mat temp2_U8C3;
Mat temp3_U8C1;
Mat temp4_U8C1;
Mat temp5_U8C1;
Mat temp6_U8C1;
Mat temp7_U8C1;
vector<Mat> tempv1_U8C1;
vector<Mat> tempv2_U8C1;
Mat output_U8C1;
Mat output_U8C3;
Mat dim1by1;
};
static void onProgram6Trackbar1(int v, void* vp) {
ProgramData * pd = (ProgramData *) vp;
pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1); // Note for monday, here does not work below works. Why?
bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
imshow( "Glare Reduction 4", pd->temp7_U8C1);
}
int main( int argc, char** argv ) {
ProgramData pd;
pd.k1 = 0;
pd.source_U8C3 = imread("Photo Examples/Blocks1.jpg", IMREAD_COLOR); // Read the file
Size s( pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3 );
resize( pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA );
pd.dim1by1.create(100,800,CV_8UC1);
cout << "source type = " << pd.source_U8C3.type() << endl;
if(! pd.source_U8C3.data ) { cout << "Could not open image" << std::endl; return 0;}
cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV); // original to hsv
split(pd.temp2_U8C3, pd.tempv1_U8C1);
namedWindow( "Glare Reduction - Controls", WINDOW_AUTOSIZE ); // Create a window for display.
onProgram6Trackbar1(0,&pd);
createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
imshow( "Glare Reduction - Controls", pd.dim1by1 ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
return 0;
}
Update 2:
I think I may have found the source of the problem. When I add the marked line
static void onProgram6Trackbar1(int v, void* vp) {
ProgramData * pd = (ProgramData *) vp;
pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1);
bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
pd->temp7_U8C1 = pd->tempv1_U8C1[2].clone(); // <----
bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
imshow( "Glare Reduction 4", pd->temp7_U8C1);
}
to onProgram6Trackbar1, it suddenly works as expected. I thought that since OpenCV 2 does its own memory allocation, I didn't have to initialize pd->temp7_U8C1, which serves as the output matrix in the call to bitwise_or. It is almost as if the underlying memory of pd->temp7_U8C1 pointed into one of the buffers that was used as output by the image processing done in main (pd.tempv1_U8C1 or pd.source_U8C3). Or the line I added did something else that I have not thought of.
So my new question is: why did this line fix it, and what is going on underneath? Is the behavior of using an uninitialized Mat as an output defined somewhere in the documentation? It was my understanding that you don't have to initialize the size or type of a matrix that you use as an output Mat.

Maybe a bit late, but anyway: first check the slightly cleaned-up code below. I removed everything that is redundant and moved the actual work of the trackbar callback into a member function of your class. This way, you can operate directly on the members.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;

class ProgramData
{
public:
    ProgramData()
    {
        k1 = 0;
    }
    int k1;
    Mat source_U8C3,
        temp2_U8C3, temp4_U8C1,
        temp5_U8C1, temp6_U8C1,
        temp7_U8C1;
    vector<Mat> tempv1_U8C1;

    void reduce_glare(void)
    {
        // sets elements in temp4 to 255 if within range
        inRange(tempv1_U8C1[2], Scalar(k1), Scalar(255), temp4_U8C1);
        // bitwise_not(InputArray src, OutputArray dst)
        bitwise_not(temp4_U8C1, temp5_U8C1);
        // bitwise_and(InputArray src1, InputArray src2, OutputArray dst)
        bitwise_and(temp5_U8C1, tempv1_U8C1[2], temp6_U8C1);
        // watch out here:
        temp7_U8C1 = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1);
        Mat x = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1) * k1;
        // bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask)
        bitwise_or(temp6_U8C1, x, temp7_U8C1, temp4_U8C1);
        cout << "source type = " << temp7_U8C1.type() << endl;
        cout << "source channels = " << temp7_U8C1.channels() << endl;
        cout << "source depth = " << temp7_U8C1.depth() << endl;
    }
};

void onProgram6Trackbar1(int v, void *vp)
{
    ProgramData *pd = static_cast<ProgramData *>(vp);
    (*pd).reduce_glare();
    imshow("Glare Reduction 4", pd->temp7_U8C1);
}

int main(int argc, char **argv)
{
    ProgramData pd;
    pd.source_U8C3 = imread("CutDat.jpeg", IMREAD_COLOR);
    Size s(pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3);
    resize(pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA);
    cout << "source type = " << pd.source_U8C3.type() << endl;
    cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV);
    split(pd.temp2_U8C3, pd.tempv1_U8C1);
    namedWindow("Glare Reduction - Controls", WINDOW_AUTOSIZE);
    imshow("Glare Reduction - Controls", Mat(100, 800, CV_8UC1));
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    waitKey(0);
    return 0;
}
The important part is the line where temp7_U8C1 is initialized, but not with the original data. The result you get is still not what you want, but it highlights that the issue lies within the call to bitwise_or. Your question regarding the Scalar bug doesn't apply here, as shown in the code.
The code was tested on Windows with OpenCV 2.4.10 and on Ubuntu with 2.4.8, both giving the same results. Running the code under valgrind reports no problems.
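To illustrate why initializing the output matters, here is a minimal sketch (not part of the original answer) of the masked-output behavior: bitwise_or with a mask only writes the pixels where the mask is non-zero, so every other pixel of the destination keeps whatever data it already held (stale memory, in the case of a freshly allocated, uninitialized Mat). This is consistent with the variation disappearing once temp7_U8C1 is explicitly initialized.

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat src1 = Mat::zeros(2, 2, CV_8UC1);
    Mat src2 = (Mat_<uchar>(2, 2) << 10, 20, 30, 40);
    Mat mask = (Mat_<uchar>(2, 2) << 255, 0, 255, 0); // only the first column is written

    Mat dst1(2, 2, CV_8UC1, Scalar(7)); // explicitly initialized destination
    Mat dst2;                           // empty: allocated by the call, contents undefined

    bitwise_or(src1, src2, dst1, mask);
    bitwise_or(src1, src2, dst2, mask);

    std::cout << dst1 << std::endl; // unmasked pixels still hold 7
    std::cout << dst2 << std::endl; // unmasked pixels hold whatever was in the new buffer
    return 0;
}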

Related

opencv c++ change ipl to mat accumulate

I tried to change the code from IplImage to Mat, but failed.
I am using OpenCV 4.1.2; this sample uses OpenCV 2.4.13:
https://jadeshin.tistory.com/entry/cvAcc에-의한-배경-영상-계산
I can't use IplImage, so I changed the code as follows:
#include <opencv2\opencv.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\core\mat.hpp>
#include <opencv2\imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
VideoCapture cap("ball.avi");
if (!cap.isOpened())
{
cout << "file not found." << endl;
return 0;
}
Mat image;
Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
Mat grayImage(size, CV_8UC1);
Mat sumImage(size, CV_32FC1);
sumImage.setTo(Scalar::all(0));
int nFrameCount = 0;
for (;;)
{
cap.read(image);
if (image.empty())
{
cout << "could'nt capture" << endl;
break;
}
cvtColor(image, grayImage, COLOR_BGR2GRAY);
accumulate(grayImage, sumImage, NULL); //here is error
imshow("grayImage", grayImage);
char chKey = waitKey(50);
if (chKey == 27)
break;
nFrameCount++;
}
convertScaleAbs(sumImage, sumImage, 1.0 / nFrameCount);
imwrite("ballBkg.jpg", sumImage);
destroyAllWindows();
return 0;
}
It compiles without errors but fails when executed. I also tried try/catch, but that failed as well. What's wrong with accumulate?
The C++ version of accumulate is void accumulate(InputArray src, InputOutputArray dst, InputArray mask = noArray()).
You are passing NULL instead of noArray(), so just do:
accumulate(grayImage, sumImage);
cv::noArray() is an empty Mat, not NULL.
Edit :
Also change
Size size = Size((int)CAP_PROP_FRAME_WIDTH, (int)CAP_PROP_FRAME_HEIGHT);
to
Size size = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH), (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT));
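Putting both fixes together, here is a minimal sketch of the corrected loop. It is only an illustration, assuming the OpenCV 4.x constant names (CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT without the CV_ prefix, since the question uses OpenCV 4.1.2) and the "ball.avi" file from the question:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    VideoCapture cap("ball.avi");
    if (!cap.isOpened())
        return 0;

    // Query the real frame size from the capture instead of casting the
    // property IDs themselves to int.
    Size size((int)cap.get(CAP_PROP_FRAME_WIDTH),
              (int)cap.get(CAP_PROP_FRAME_HEIGHT));
    Mat sumImage = Mat::zeros(size, CV_32FC1);

    Mat image, grayImage;
    int nFrameCount = 0;
    while (cap.read(image) && !image.empty())
    {
        cvtColor(image, grayImage, COLOR_BGR2GRAY);
        accumulate(grayImage, sumImage); // no mask argument: defaults to noArray()
        nFrameCount++;
    }

    if (nFrameCount > 0) // avoid dividing by zero if no frames were read
        convertScaleAbs(sumImage, sumImage, 1.0 / nFrameCount);
    imwrite("ballBkg.jpg", sumImage);
    return 0;
}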

How to get the Chain Code xml file in OpenCV 3.0 c++

I'm using the Freeman chain code for feature extraction from an image.
I'm not able to read the image, and I need to obtain a chain code XML file.
How can I retrieve the chain code XML file and save it?
Below is my C++ code for OpenCV 3.0.
Can someone help?
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <fstream>
#include<string.h>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("test.jpg");
imshow("Test", img);
vector<vector<Point>> contours; // Vector for storing contour
vector<Vec4i> hierarchy;
cv::findContours(img, contours, RETR_EXTERNAL,CV_CHAIN_CODE);
cout << Mat(contours[0]) << endl;
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
cout << "CHAIN_APPROX_SIMPLE" << endl;
cout << Mat(contours[0]) << endl;
CvChain* chain = 0;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage();
cvFindContours(&IplImage(img), storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_TREE, CV_CHAIN_CODE);
int total = chain->total;
cv::Mat hist(1, 8, CV_32F, Scalar(0));
int totalCount = 0;
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
int numChain = 0;
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
cout<<"--------------------chain\n";
for (i = 0; i<total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
int Fchain = (int)code;
hist.at<float>(0, Fchain)++;
totalCount++;
cout<<"%d"<<code;
}
}
Mat prob = hist / totalCount;
cout << prob << endl;
waitKey(0);
return 0;
}
Whenever the code is run, I get the error below. Have I used a wrong format? Can anyone please help?
OpenCV Error: Unsupported format or combination of formats ([Start]FindContours supports only CV_8UC1 images when mode != CV_RETR_FLOODFILL otherwise supports CV_32SC1 images only) in cvStartFindContours, file C:\buildslave64\win64_amdocl\master_PackSlave-win64-vc14-shared\opencv\modules\imgproc\src\contours.cpp, line 198
I have updated my code. I'm able to save the XML file, but I'm getting the data in only one row.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <fstream>
#include<string.h>
using namespace std;
using namespace cv;
vector<String> files;
int main() {
double totalCount = 0;
cv::glob("C:/Users//Videos/Database/Frames/*.jpg", files);
for (size_t i = 0; i < files.size(); i++) {
Mat image = imread(files[i]);
//Mat image = imread("Outline.jpg");
Canny(image, image, 100, 100 * 2, 3, false);
CvChain* chain;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage();
cvFindContours(&IplImage(image), storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
int total = chain->total;
// 1 row, 8 cols, filled with zeros, (float type, because we want to normalize later):
cv::Mat hist(1, 8, CV_32F, Scalar(0));
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
for (i = 0; i < total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
int Fchain = (int)code;
// increase the counter for the respective bin:
hist.at<float>(0, Fchain)++;
totalCount++;
}
}
// print the raw histogram:
cout << "Histo: " << hist << endl;
cout << "Total: " << totalCount << endl;
// normalize it:
Mat prob = hist / totalCount;
cout << "Proba: " << prob << endl;
FileStorage fs("freeman.xml", FileStorage::WRITE);
fs << "chain" << prob;
waitKey(0);
return 0;
}
}
As shown below, my chain code XML looks like this. Why am I getting this? Can anyone help me please?
<?xml version="1.0"?>
<opencv_storage>
<chain type_id="opencv-matrix">
<rows>1</rows>
<cols>8</cols>
<dt>f</dt>
<data>
5.00000000e-01 0. 0. 0. 5.00000000e-01 0. 0. 0.</data></chain>
</opencv_storage>
The error message says exactly and unambiguously what is wrong; it's enough to read it. cv::findContours() accepts only images of the CV_8UC1 pixel type (or only CV_32SC1 if you use the CV_RETR_FLOODFILL mode). In your particular case, you need to convert your img object to CV_8UC1 after loading; you are probably loading an RGB image.
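For illustration, a minimal sketch (not from the original answer) of preparing the image before calling findContours with the OpenCV 3.x C++ API; the threshold value of 127 is just an arbitrary choice:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat img = imread("test.jpg");                     // loaded as 3-channel BGR by default
    Mat gray, binary;
    cvtColor(img, gray, COLOR_BGR2GRAY);              // reduce to a single channel (CV_8UC1)
    threshold(gray, binary, 127, 255, THRESH_BINARY); // binarize before contour extraction
    std::vector<std::vector<Point> > contours;
    findContours(binary, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    return 0;
}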

OpenCV reading image pixel value

I'm trying a very simple thing with OpenCV but I'm getting an error.
I'm just trying to read a 16-bit PNG image and access a specific pixel value. I tried many ways but couldn't manage to get the value. I'm using OpenCV 3.0 on Windows 8 64-bit.
NOTE: reading the image with CV_LOAD_IMAGE_GRAYSCALE works fine, but CV_LOAD_IMAGE_ANYDEPTH raises an error. However, when I use CV_LOAD_IMAGE_GRAYSCALE my highest pixel value is 9, when it should be around 2000.
I uploaded an example image.
My example code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
int _tmain(int argc, _TCHAR* argv[])
{
cv::Mat frame = cv::imread("filepath", CV_LOAD_IMAGE_ANYDEPTH );//using CV_LOAD_IMAGE_GRAYSCALE is fine, but CV_LOAD_IMAGE_ANYDEPTH rising error
frame.convertTo(frame, CV_16U);// to be sure... i omitted this part also and same error
double min, max;
cv::Point mloc, mxloc;
cv::minMaxLoc(frame, &min, &max, &mloc, &mxloc);
//i can access min and max values but not the specific pixel value
float zmx = frame.at<unsigned char>(118, 38);//rise error
float zm = frame.at<float>(30,40);//rise error
return 0;
}
Error message:
Unhandled exception at 0x00007FF8EB288A5C in OpenCVTest.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000A47F40F230.
But I think this is a misleading error: I checked that my image is 320x240, so I'm sure there is a pixel at that location.
I also tried with Scalar, but I get the same error.
Your biggest problem is that you're trying to access a 16 bpp image, i.e. a Mat of type CV_16U, with the wrong data type. You should use frame.at<ushort>(...) for a single-channel 16 bpp image (which I suppose is the case here), or frame.at<Vec3w>(...) for a 3-channel image.
Also, make sure that you're loading the image properly. Using imread with the IMREAD_GRAYSCALE flag converts your image to 8 bpp, which is not what you want. You should use IMREAD_ANYDEPTH or IMREAD_UNCHANGED.
Take a look at this code:
#include <opencv2/opencv.hpp>

int main()
{
    // Read the image as original bpp
    cv::Mat frame = cv::imread("path_to_image", cv::IMREAD_ANYDEPTH);

    // Be sure that the image is loaded
    if (frame.empty())
    {
        // No image loaded
        return -1;
    }

    // Be sure that the image is 16bpp and single channel
    if (frame.type() != CV_16U || frame.channels() != 1)
    {
        // Wrong image depth or channels
        return -1;
    }

    double min_val, max_val;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(frame, &min_val, &max_val, &min_loc, &max_loc);

    // Access values with correct data type
    ushort zmx = frame.at<ushort>(max_loc);

    return 0;
}
A couple of things you should notice.
First, you are defining frame twice.
Second, the line float zmx = frame.at<unsigned char>(118, 38); has a couple of issues: you are assigning an unsigned char to a float, and the index order is reversed. To access the pixel at (x, y) you call frame.at<unsigned char>(y, x). It is better to assign to a Scalar instead, like this:
Scalar fmx = frame.at<uchar>(118, 38);
or use Point to avoid confusion
Scalar fmx = frame.at<uchar>(Point(38,118));
Lastly, make sure you loaded the image properly and that frame actually contains image data.
UPDATE
I just tested your code and it worked fine (see below). The only thing I can think of is that the image is not being found at the path provided.
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
int main(int argc, char** argv)
{
cv::Mat frame = cv::imread("0FD0X.png", CV_LOAD_IMAGE_GRAYSCALE);
frame.convertTo(frame, CV_16U);// to be sure... i omitted this part also and same error
double min, max;
cv::Point mloc, mxloc;
cv::minMaxLoc(frame, &min, &max, &mloc, &mxloc);
//i can access min and max values but not the specific pixel value
float zmx = frame.at<unsigned char>(118, 38);// no error
float zm = frame.at<float>(30, 40);// no error
std::cout << zmx << std::endl; // out 0
std::cout << min << std::endl; // out 0
std::cout << max << std::endl; // out 9
std::cout << mloc << std::endl; // out [0,0]
std::cout << mxloc << std::endl; // out [125,30]
return 0;
}
Update #2: to access a multichannel image you need to use the Vec3b data type. Also notice the order of the point coordinates. Check the following code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
int main(int argc, char** argv)
{
cv::Mat frame = cv::imread("0FD0X.png", CV_LOAD_IMAGE_ANYDEPTH);
frame.convertTo(frame, CV_16U);// to be sure... i omitted this part also and same error
double min, max;
cv::Point mloc, mxloc;
cv::minMaxLoc(frame, &min, &max, &mloc, &mxloc);
//i can access min and max values but not the specific pixel value
ushort pValShort = frame.at<ushort>(38, 118);// no error
Vec3b pValVec = frame.at<Vec3b>(38, 118);// no error
Vec3b pValVecPoint = frame.at<Vec3b>(Point(118,38));// no error
std::cout << pValShort << std::endl; // out 2423
std::cout << pValVec << std::endl; // out [166,8,165]
std::cout << pValVecPoint << std::endl; // out [166,8,165]
std::cout << min << std::endl; // out 0
std::cout << max << std::endl; // out 2423
std::cout << mloc << std::endl; // out [0,0]
std::cout << mxloc << std::endl; // out [118,38]
return 0;
}

Read an image from a qrc using imread() of OpenCV

I want to read an image from a qrc using imread() of OpenCV in this way:
Mat img = imread(":/TempIcons/logo.png");
but the final img size is [0x0]. I have also tried:
Mat img = imread("qrc://TempIcons/logo.png");
but the size I get is the same. I don't want to load the image into a QImage and then convert it to a cv::Mat. Is there an easy way to do this? If so, how can I do it?
Thank you
As #TheDarkKnight pointed out, imread is not aware of Qt resources. You can, however, write your own loader that uses QFile to retrieve the binary data from the resource and imdecode (as done internally by imread) to decode the image:
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
    //double tic = double(getTickCount());

    QFile file(qrc);
    Mat m;
    if (file.open(QIODevice::ReadOnly))
    {
        qint64 sz = file.size();
        std::vector<uchar> buf(sz);
        file.read((char*)buf.data(), sz);
        m = imdecode(buf, flag);
    }

    //double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
    //qDebug() << "OpenCV loading time: " << toc;

    return m;
}
You can call it like:
Mat m = loadFromQrc("qrc_path");
or specifying a flag:
Mat m = loadFromQrc("qrc_path", IMREAD_GRAYSCALE);
Performance
I tried loading the image with loadFromQrc, and loading a QImage and converting it to Mat using this code, both with and without cloning. loadFromQrc turns out to be about 10 times faster than loading a QImage and converting it to a Mat.
Results in ms:
Load Mat : 4.85965
QImage to Mat (no clone): 49.3999
QImage to Mat (clone) : 49.8497
Test code:
#include <vector>
#include <iostream>
#include <QDebug>
#include <QtWidgets>
#include <opencv2/opencv.hpp>
using namespace cv;
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
QFile file(qrc);
Mat m;
if(file.open(QIODevice::ReadOnly))
{
qint64 sz = file.size();
std::vector<uchar> buf(sz);
file.read((char*)buf.data(), sz);
m = imdecode(buf, flag);
}
return m;
}
cv::Mat QImageToCvMat( const QImage &inImage, bool inCloneImageData = true )
{
switch ( inImage.format() )
{
// 8-bit, 4 channel
case QImage::Format_RGB32:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC4, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
// 8-bit, 3 channel
case QImage::Format_RGB888:
{
if ( !inCloneImageData )
qWarning() << "ASM::QImageToCvMat() - Conversion requires cloning since we use a temporary QImage";
QImage swapped = inImage.rgbSwapped();
return cv::Mat( swapped.height(), swapped.width(), CV_8UC3, const_cast<uchar*>(swapped.bits()), swapped.bytesPerLine() ).clone();
}
// 8-bit, 1 channel
case QImage::Format_Indexed8:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC1, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
default:
qWarning() << "ASM::QImageToCvMat() - QImage format not handled in switch:" << inImage.format();
break;
}
return cv::Mat();
}
int main(int argc, char *argv[])
{
QString url = "...";
{
double tic = double(getTickCount());
Mat m1 = loadFromQrc(url);
double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
qDebug() << "Load Mat: " << toc;
if(m1.data != NULL)
{
imshow("m1", m1);
waitKey(1);
}
}
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m2 = QImageToCvMat(img, false);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (no clone): " << toc;
// if(m2.data != NULL)
// {
// imshow("m2", m2);
// waitKey(1);
// }
// }
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m3 = QImageToCvMat(img, true);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (clone): " << toc;
// if(m3.data != NULL)
// {
// imshow("m3", m3);
// waitKey(1);
// }
// }
waitKey();
return 0;
}
The problem here is that imread() loads an image from a file.
In contrast, Qt's resource system compiles the data from images directly into the program's executable. Qt's QFile operations know that when they are provided a path starting with ":/", it refers to the embedded resources, rather than on disk.
Therefore, I don't think you will be able to use imread() to directly access a file that has been placed in Qt's resources.
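If some API really does require a plain file path, one possible workaround (not from either answer, just a hedged sketch assuming Qt 5 and the ":/TempIcons/logo.png" resource from the question) is to copy the resource to a temporary file first and pass that path to imread():

#include <QFile>
#include <QTemporaryFile>
#include <opencv2/opencv.hpp>

// Hypothetical helper: dump the embedded resource to a temporary file,
// then let imread() decode it from disk.
cv::Mat imreadFromQrcViaTempFile(const QString &qrcPath)
{
    QFile resource(qrcPath);
    QTemporaryFile temp; // removed automatically when it goes out of scope
    cv::Mat img;
    if (resource.open(QIODevice::ReadOnly) && temp.open())
    {
        temp.write(resource.readAll()); // copy the embedded bytes to disk
        temp.flush();
        img = cv::imread(temp.fileName().toStdString());
    }
    return img; // the decoded pixels live in memory, so the temp file can go away
}

That said, the imdecode-based loader shown above avoids the extra disk round trip and is the cleaner option.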

debug assertion failed vc\include\vector vector iterator + offset out of range in chamferMatching Opencv

I am stuck on a problem while implementing a chamfer matching program in OpenCV:
https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/chamfer.cpp?rev=4194
The following code reads a template image and a test image. I am using VS 2008 and OpenCV 2.4.6.
#include "stdafx.h"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
IplImage *src;
src = cvLoadImage("C:\\Users\\JOSHI\\Desktop\\Images\\logo_in_clutter.png",1);
Mat img=cvarrToMat(src);
imshow("Mat",img);
src = cvLoadImage("C:\\Users\\JOSHI\\Desktop\\Images\\logo.png",1);
Mat tpl=cvarrToMat(src);
imshow("Mat",tpl);
Mat cimg;
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
Canny(img, img, 5, 50, 3);
Canny(tpl, tpl, 5, 50, 3);
vector<vector<Point> > results;
vector<float> costs;
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
cout << "not found;\n";
return 0;
}
size_t i, n = results[best].size();
for( i = 0; i < n; i++ )
{
Point pt = results[best][i];
if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
}
imshow("result", cimg);
waitKey();
return 0;
}
This is the error image.
Can you suggest why I am getting this error? I am new to OpenCV and image processing.
I had the same problem. Solution: http://code.opencv.org/issues/3603
You need to download the OpenCV source, open chamfermatching.cpp, and comment out the delete line:
~Matching()
{
    for (size_t i = 0; i < templates.size(); i++) {
        //delete templates[i];
    }
}
Then you need to rebuild OpenCV. After this it should work.