Different results with cvDFT and DFT in OpenCV 2.4.8 - C++

I'm having problems with the dft() function in OpenCV 2.4.8 for C++.
I used an image of a sine curve with 10 periods to compare the old cvDFT() with the newer C++ function dft() (one-dimensional DFT, row-wise).
The old version gives me sensible results: very high peaks at pixels 0 and 10, with the rest being almost 0.
The new version gives me strange results with peaks all over the spectrum.
Here is my code:
#include "stdafx.h"
#include <opencv2\core\core_c.h>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc_c.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui_c.h>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\legacy\compat.hpp>
using namespace cv;
void OldMakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
IplImage* fftBlock = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
IplImage* imgReal = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgImag = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgDFT = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 2);
Rect roi(0, 0, width, 1);
Mat image_roi = original(roi);
fftBlock->imageData = (char*)image_roi.data;
//cvSaveImage("C:/fftBlock1.png", fftBlock);
cvConvert(fftBlock, imgReal);
cvMerge(imgReal, imgImag, NULL, NULL, imgDFT);
cvDFT(imgDFT, imgDFT, (CV_DXT_FORWARD | CV_DXT_ROWS));
cvSplit(imgDFT, imgReal, imgImag, NULL, NULL);
double re,imag;
for (int i = 0; i < width; i++)
{
re = ((float*)imgReal->imageData)[i];
imag = ((float*)imgImag->imageData)[i];
result[i] = re * re + imag * imag;
}
cvReleaseImage(&imgReal);
cvReleaseImage(&imgImag);
cvReleaseImage(&imgDFT);
cvReleaseImage(&fftBlock);
}
void MakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
Mat fftBlock(1,width, CV_8UC1);
Rect roi(0, 0, width, height);
Mat image_roi = original(roi);
image_roi.copyTo(fftBlock);
//imwrite("C:/fftBlock2.png", fftBlock);
Mat planes[] = {Mat_<float>(fftBlock), Mat::zeros(fftBlock.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI, DFT_ROWS); //also tried with DFT_COMPLEX_OUTPUT | DFT_ROWS
split(complexI, planes);
double re, imag;
for (int i = 0; i < width; i++)
{
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
result[i] = re * re + imag * imag;
}
}
bool SinusFFTTest()
{
const int size = 1024;
Mat sinTest(size,size,CV_8UC1, Scalar(0));
const int n_sin_curves = 10;
double deg_step = (double)n_sin_curves*360/size;
for (int j = 0; j < size; j++)
{
for (int i = 0; i <size; i++)
{
sinTest.data[j*size+i] = 127.5 * sin(i*deg_step*CV_PI/180) + 127.5;
}
}
double* result1 = new double[size];
double* result2 = new double[size];
OldMakeDFT(sinTest,result1);
MakeDFT(sinTest,result2);
bool identical = true;
for (int i = 0; i < size; i++)
{
if (abs(result1[i] - result2[i]) > 1000)
{
identical = false;
break;
}
}
delete[] result1;
delete[] result2;
return identical;
}
int _tmain(int argc, _TCHAR* argv[])
{
if (SinusFFTTest())
{
printf("identical");
}
else
{
printf("different");
}
getchar();
return 0;
}
Could someone explain the difference?

Side note: cvCreateImage does not zero-initialize pixel data, so imgImag in OldMakeDFT() is not filled with zeroes by default.

The bug is in the MakeDFT() function:
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
data[i] is of type uchar, so converting it to float does not read the underlying float values correctly.
The fix:
re = planes[0].at<float>(0,i);
imag = planes[1].at<float>(0,i);
After this change, the old and the new DFT versions give the same results. Alternatively, you can use cv::magnitude() instead of calculating the sum of squares of re and imag:
Mat magn;
magnitude(planes[0], planes[1], magn);
for (int i = 0; i < width; i++)
result[i] = pow(magn.at<float>(0,i),2);
This also gives the same result as the old cvDFT.
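For reference, a minimal corrected MakeDFT() with the typed access applied might look like this (a sketch, keeping the row-wise, single-channel layout from the question):
void MakeDFT(Mat original, double* result)
{
    const int width = original.cols;
    // take only the first row, as in the original code
    Mat fftBlock;
    original(Rect(0, 0, width, 1)).copyTo(fftBlock);
    Mat planes[] = {Mat_<float>(fftBlock), Mat::zeros(fftBlock.size(), CV_32F)};
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI, DFT_ROWS);
    split(complexI, planes);
    // typed access instead of reading raw uchar bytes
    for (int i = 0; i < width; i++)
    {
        float re = planes[0].at<float>(0, i);
        float imag = planes[1].at<float>(0, i);
        result[i] = (double)re * re + (double)imag * imag;
    }
}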

Related

OpenCV assertion error when using the cv::sfm::computeOrientation function with OpenCV version 4.5.5

Here is my full code; I am trying to use OpenCV 4.5.5 for feature detection and relative pose calculation.
I tried using vector<vector> as the first and second input types, but it did not work.
I think this error is related to the input parameter types, but I do not know how to fix it. Huge thanks for any help!
Detection and descriptor calculation work fine.
#include <iostream>
#include <opencv2/features2d.hpp>
#include <opencv2/sfm/fundamental.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <chrono>
#include <limits>     // for std::numeric_limits
#include <algorithm>  // for std::max
using std::vector;
static cv::Ptr<cv::ORB> mOrbTracker = nullptr;
static cv::Ptr<cv::DescriptorMatcher> mMatcher = nullptr;
void init(const unsigned int &nFeatures, const float &scaleFactor,
const unsigned int &nPyramid, const unsigned int &edgeThreshold,
const int &sourceImgToPyramidLevel, const int &wta_k,
const cv::ORB::ScoreType &sType, const unsigned int &fastThreshold,
const cv::DescriptorMatcher::MatcherType &matcherType) {
mOrbTracker = cv::ORB::create(nFeatures, scaleFactor, nPyramid,
edgeThreshold, sourceImgToPyramidLevel, wta_k,
sType, edgeThreshold, fastThreshold);
mMatcher = cv::DescriptorMatcher::create(matcherType);
}
int main() {
cv::Mat img1, img2;
img1 = cv::imread("/home/wgf/docs/1.jpg", cv::IMREAD_COLOR);
img2 = cv::imread("/home/wgf/docs/2.jpg", cv::IMREAD_COLOR);
init(600, 1.2f, 8, 31, 0, 2,
cv::ORB::HARRIS_SCORE, 20, cv::DescriptorMatcher::BRUTEFORCE_HAMMING);
cv::Mat firstDescriptors = cv::Mat();
cv::Mat secondDescriptors = cv::Mat();
vector<cv::KeyPoint> mTempFirst, mTempSecond;
vector<cv::DMatch> mTempMatches, mTempGoodMatches;
std::chrono::time_point<std::chrono::system_clock> start = std::chrono::system_clock::now();
detectAndComputeDescriptors(img1, mTempFirst, firstDescriptors);
detectAndComputeDescriptors(img2, mTempSecond, secondDescriptors);
auto dur_feature_extraction = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - start);
mMatcher->match(firstDescriptors, secondDescriptors, mTempMatches);
float maxDist = std::numeric_limits<float>::min();
for (int i = 0; i < mTempMatches.size(); ++i) {
maxDist = std::max(maxDist, mTempMatches[i].distance);
}
float mMatchesDistanceThreshold = 0.6f;
for (int j = 0; j < mTempMatches.size(); ++j) {
if (mTempMatches[j].distance < mMatchesDistanceThreshold * maxDist) {
mTempGoodMatches.emplace_back(mTempMatches[j]);
}
}
cv::Mat firstKeyPoints = cv::Mat(2, mTempGoodMatches.size(), CV_32F);
cv::Mat secondKeyPoints = cv::Mat(2, mTempGoodMatches.size(), CV_32F);
for (int k = 0; k < mTempGoodMatches.size(); k++) {
firstKeyPoints.at<float>(0, k) = mTempFirst[mTempGoodMatches[k].queryIdx].pt.x;
firstKeyPoints.at<float>(1, k) = mTempFirst[mTempGoodMatches[k].queryIdx].pt.y;
secondKeyPoints.at<float>(0, k) = mTempSecond[mTempGoodMatches[k].trainIdx].pt.x;
secondKeyPoints.at<float>(1, k) = mTempSecond[mTempGoodMatches[k].trainIdx].pt.y;
}
cv::Mat R = cv::Mat(3, 3, CV_32F);
cv::Mat t = cv::Mat(3, 1, CV_32F);
float scale = 0.0f;
cv::Mat currentPose;
cv::sfm::computeOrientation(firstKeyPoints, secondKeyPoints, R, t, scale);
currentPose = cv::Mat::eye(4, 4, CV_32F);
currentPose.at<float>(0, 0) = R.at<float>(0, 0);
currentPose.at<float>(0, 1) = R.at<float>(0, 1);
currentPose.at<float>(0, 2) = R.at<float>(0, 2);
currentPose.at<float>(1, 0) = R.at<float>(1, 0);
currentPose.at<float>(1, 1) = R.at<float>(1, 1);
currentPose.at<float>(1, 2) = R.at<float>(1, 2);
currentPose.at<float>(2, 0) = R.at<float>(2, 0);
currentPose.at<float>(2, 1) = R.at<float>(2, 1);
currentPose.at<float>(2, 2) = R.at<float>(2, 2);
currentPose.at<float>(0, 3) = t.at<float>(0, 0);
currentPose.at<float>(1, 3) = t.at<float>(1, 0);
currentPose.at<float>(2, 3) = t.at<float>(2, 0);
cv::Mat re;
cv::drawMatches(img1, mTempFirst, img2, mTempSecond, mTempGoodMatches, re);
cv::imshow("match", re);
cv::waitKey();
return 0;
}
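For what it's worth, relative pose from matched 2D keypoints can also be computed with the calib3d functions findEssentialMat() and recoverPose() instead of the sfm module. The sketch below only illustrates that route; the camera matrix K is an assumed placeholder and the helper name is hypothetical, not part of the code above:
#include <opencv2/calib3d.hpp>
#include <vector>

// Sketch: relative pose (R, t up to scale) from 2D matches via calib3d.
// K is a placeholder camera matrix and must be replaced by a real calibration.
static void relativePoseFromMatches(const std::vector<cv::KeyPoint>& kp1,
                                    const std::vector<cv::KeyPoint>& kp2,
                                    const std::vector<cv::DMatch>& matches,
                                    const cv::Mat& K,
                                    cv::Mat& R, cv::Mat& t)
{
    std::vector<cv::Point2f> pts1, pts2;
    for (const cv::DMatch& m : matches)
    {
        pts1.push_back(kp1[m.queryIdx].pt);
        pts2.push_back(kp2[m.trainIdx].pt);
    }
    cv::Mat mask;
    cv::Mat E = cv::findEssentialMat(pts1, pts2, K, cv::RANSAC, 0.999, 1.0, mask);
    cv::recoverPose(E, pts1, pts2, K, R, t, mask);
}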

(OpenCV Stitching) How to get better performance using OpenCV Stitcher class?

I have some problems when using the Stitcher class.
First, I use the ORB feature finder because it's faster than SURF,
but it's still slow.
Second, the Stitcher class accuracy is too low.
Third, how can I get better performance out of the Stitcher class?
Additionally, how can I catch directions between two images?
This is my code.
Thank you.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching.hpp"
#include "opencv2/features2d.hpp"
using namespace cv;
using namespace std;
void overlayImage(const cv::Mat &background, const cv::Mat &foreground, cv::Mat &output, cv::Point2i location);
int main(int argc, char* argv[])
{
Mat first;
Mat second;
Mat m_first;
Mat m_second;
vector<Mat> images;
// vector<Mat> re_images;
Mat panorama;
Mat result;
unsigned long t;
t = getTickCount();
first = imread(argv[1], CV_LOAD_IMAGE_COLOR);
second = imread(argv[2], CV_LOAD_IMAGE_COLOR);
//Mat m_first = Mat::zeros( first.size(), first.type() );
//Mat m_second = Mat::zeros( second.size(), second.type() );
/*
for( int y = 0; y < first.rows; y++ ) {
for( int x = 0; x < first.cols; x++ ) {
for( int c = 0; c < 3; c++ ) {
m_first.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( 1.2*( first.at<Vec3b>(y,x)[c] ) + 20 );
}
}
}
for( int y = 0; y < second.rows; y++ ){
for( int x = 0; x < second.cols; x++ ) {
for( int c = 0; c < 3; c++ ) {
m_second.at<Vec3b>(y,x)[c] =
saturate_cast<uchar>( 1.2*( second.at<Vec3b>(y,x)[c] ) + 20 );
}
}
}
*/
//imwrite("first.png", m_first);
//imwrite("second.png", m_second);
resize(first, m_first, Size(640, 480));
resize(second, m_second, Size(640, 480));
images.push_back(m_first);
images.push_back(m_second);
Stitcher stitcher = Stitcher::createDefault(false);
//Stitcher::Status status = stitcher.stitch(imgs, pano);
//stitcher.setWarper(new PlaneWarper());
stitcher.setWarper(new SphericalWarper());
// stitcher.setWarper(new CylindricalWarper());
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder(Size(3,1),1500));
// stitcher.setRegistrationResol(0.6);
// stitcher.setSeamEstimationResol(0.1);
// stitcher.setCompositingResol(0.5);
//stitcher.setPanoConfidenceThresh(1);
stitcher.setWaveCorrection(true);
stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
stitcher.setFeaturesMatcher(new detail::BestOf2NearestMatcher(false,0.3));
stitcher.setBundleAdjuster(new detail::BundleAdjusterRay());
stitcher.setBlender(new detail::MultiBandBlender());
stitcher.stitch(images, panorama);
printf("%.2lf sec \n", (getTickCount() - t) / getTickFrequency() );
Rect rect(panorama.cols / 2 - 320, panorama.rows / 2 - 240, 640, 480);
Mat subimage = panorama(rect);
Mat car = imread("car.png");
overlayImage(subimage, car, result, cv::Point(320 - (car.cols / 2), 240 - (car.rows / 2 )));
imshow("panorama", result);
// resize(panorama, result, Size(640, 480));
imwrite("result.jpg", result);
waitKey(0);
return 0;
}
void overlayImage(const cv::Mat &background, const cv::Mat &foreground, cv::Mat &output, cv::Point2i location)
{
background.copyTo(output);
// start at the row indicated by location, or at row 0 if location.y is negative.
for(int y = std::max(location.y , 0); y < background.rows; ++y)
{
int fY = y - location.y; // because of the translation
// we are done if we have processed all rows of the foreground image.
if(fY >= foreground.rows)
break;
// start at the column indicated by location,
// or at column 0 if location.x is negative.
for(int x = std::max(location.x, 0); x < background.cols; ++x)
{
int fX = x - location.x; // because of the translation.
// we are done with this row if the column is outside of the foreground image.
if(fX >= foreground.cols)
break;
// determine the opacity of the foreground pixel, using its fourth (alpha) channel.
double opacity =
((double)foreground.data[fY * foreground.step + fX * foreground.channels() + 3])
/ 255.;
// and now combine the background and foreground pixel, using the opacity,
// but only if opacity > 0.
for(int c = 0; opacity > 0 && c < output.channels(); ++c)
{
unsigned char foregroundPx =
foreground.data[fY * foreground.step + fX * foreground.channels() + c];
unsigned char backgroundPx =
background.data[y * background.step + x * background.channels() + c];
output.data[y*output.step + output.channels()*x + c] =
backgroundPx * (1.-opacity) + foregroundPx * opacity;
}
}
}
}
The FAST feature detector is faster than SURF and ORB.
Moreover, finding 1500 features in a 640*480 picture takes too much time; 300 features is enough. So you can use this instead:
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder(Size(3,1),300));
The Stitcher class is quite slow. I suggest you implement the stitching yourself: detect features, compute descriptors, match them, then find a homography, build a mask, and finally warp (a rough sketch follows below).
I don't understand your third question, "How can I catch directions between two images?". What do you mean exactly?
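If you do implement it yourself, a rough two-image sketch of the detect/describe/match/homography/warp pipeline could look like the following. It uses the OpenCV 3.x/4.x feature API as an assumption (on 2.4 the ORB class and CV_RANSAC constant are the equivalents), and all parameter values are placeholders:
#include <opencv2/opencv.hpp>
using namespace cv;

// Sketch only: stitch img2 onto img1 via ORB matches and a RANSAC homography.
Mat stitchPair(const Mat& img1, const Mat& img2)
{
    Ptr<ORB> orb = ORB::create(300); // 300 features, as suggested above
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    orb->detectAndCompute(img1, noArray(), kp1, desc1);
    orb->detectAndCompute(img2, noArray(), kp2, desc2);

    BFMatcher matcher(NORM_HAMMING, true); // cross-check matching
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);

    std::vector<Point2f> pts1, pts2;
    for (const DMatch& m : matches)
    {
        pts1.push_back(kp1[m.queryIdx].pt);
        pts2.push_back(kp2[m.trainIdx].pt);
    }

    // H maps points of img2 into img1's coordinate frame.
    Mat H = findHomography(pts2, pts1, RANSAC, 3.0);

    Mat pano;
    warpPerspective(img2, pano, H, Size(img1.cols * 2, img1.rows));
    img1.copyTo(pano(Rect(0, 0, img1.cols, img1.rows)));
    return pano;
}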

Max entropy thresholding using OpenCV [closed]

I'm trying to convert this Matlab code for maximum entropy thresholding into OpenCV:
%**************************************************************************
%**************************************************************************
%
% maxentropie is a function for thresholding using Maximum Entropy
%
%
% input = I ==> Image in gray level
% output =
% I1 ==> binary image
% threshold ==> the threshold choosen by maxentropie
%
% F.Gargouri
%
%
%**************************************************************************
%**************************************************************************
function [threshold I1]=maxentropie(I)
[n,m]=size(I);
h=imhist(I);
%normalize the histogram ==> hn(k)=h(k)/(n*m) ==> k in [1 256]
hn=h/(n*m);
%Cumulative distribution function
c(1) = hn(1);
for l=2:256
c(l)=c(l-1)+hn(l);
end
hl = zeros(1,256);
hh = zeros(1,256);
for t=1:256
%low range entropy
cl=double(c(t));
if cl>0
for i=1:t
if hn(i)>0
hl(t) = hl(t)- (hn(i)/cl)*log(hn(i)/cl);
end
end
end
%high range entropy
ch=double(1.0-cl); %constraint cl+ch=1
if ch>0
for i=t+1:256
if hn(i)>0
hh(t) = hh(t)- (hn(i)/ch)*log(hn(i)/ch);
end
end
end
end
% choose best threshold
h_max =hl(1)+hh(1)
threshold = 0;
entropie(1)=h_max;
for t=2:256
entropie(t)=hl(t)+hh(t);
if entropie(t)>h_max
h_max=entropie(t);
threshold=t-1;
end
end
% Display
I1 = zeros(size(I));
I1(I<threshold) = 0;
I1(I>threshold) = 255;
%imshow(I1)
end
The problem is that I'm getting a floating point exception error in the code, and I cannot understand why.
This is my implementation:
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
using namespace cv;
using namespace std;
int main(){
cout.setf(std::ios_base::fixed, std::ios_base::floatfield);
cout.precision(9);
Mat old_image=imread("2.png",CV_LOAD_IMAGE_GRAYSCALE);
double minval, maxval;
minMaxLoc(old_image,&minval, &maxval);
cout<<minval<<" "<<maxval<<endl;
Mat image;
old_image.convertTo(image, CV_8UC1, 255.0/(maxval-minval), -minval*255.0/(maxval-minval));
minMaxLoc(image,&minval, &maxval);
cout<<minval<<" "<<maxval;
int k=0;
imshow("im",image);
waitKey(0);
for(int y=0; y<image.rows;y++){
for(int x=0; x<image.cols;x++){
if((int) image.at<uchar>(y,x)==0){
k++;
}
}
}
cout<<k<<endl<<endl;
int i, l, j, t;
int histSize = 256;
float range[] = { 0, 255 };
const float *ranges[] = { range };
Mat hist, histogram, c, ctmp, hl, hh, hhtmp, entropy;
calcHist( &image, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false );
for( int h = 1; h < histSize; h++){
histogram.push_back(hist.at<float>(h,0));
cout<<histogram.rows<<endl;
cout<<histogram.row(h-1)<<endl;
cout<<hist.row(h)<<endl;
}
histogram=histogram/(image.rows*image.cols-hist.at<float>(0,0));
//cumulative distribution function
float cl,ch;
ctmp.push_back(histogram.row(0));
c.push_back(histogram.row(0));
cout<<c.row(0)<<endl;
for(l=1;l<255;l++){
c.push_back(ctmp.at<float>(0)+histogram.at<float>(l));
ctmp.push_back(c.row(l));
cout<<c.at<float>(l)<<endl;
//c.row(l)=c.row(l-1)+histogram.row(l);
}
Mat hltmp= Mat::zeros(1,256,CV_8U);
// THE PROBLEM IS IN THIS TWO FOR CYCLES
for(t=0;t<255;t++){
//low range entropy
cl=c.at<float>(t);
if(cl>0){
for(i=0;i<=t;i++){
if(histogram.at<float>(t)>0){
printf("here\n");
hl.push_back(hltmp.at<float>(0)-(histogram.at<float> (i)/cl)*log(histogram.at<float>(i)/cl));
printf("here\n");
cout<<hl.at<float>(i);
printf("here\n");
hltmp.push_back(hl.row(t));
printf("here\n");
}
}
}
printf("here\n");
//high range entropy
ch=1.0-cl;
if(ch>0){
for(i=t+1;i<255;i++){
if(histogram.at<float>(i)>0){
hh.push_back(hh.at<float>(t)-(histogram.at<float> (i)/ch)*log(histogram.at<float>(i)/ch));
}
}
}
}
//choose the best threshold
float h_max=hl.at<float>(0,0)+hh.at<float>(0,0);
float threshold=0;
entropy.at<float>(0,0)=h_max;
for(t=1;t<255;t++){
entropy.at<float>(t)=hl.at<float>(t)+hh.at<float>(t);
if(entropy.at<float>(t)>h_max){
h_max=entropy.at<float>(t);
threshold=t-1;
}
cout<<threshold<<endl;
}
//display
Mat I1= Mat::zeros(image.rows,image.cols,CV_8UC1);
for(int y=0; y<image.rows;y++){
for(int x=0; x<image.cols;x++){
if((int) image.at<uchar>(y,x)<threshold){
I1.at<uchar>(y,x)=0;
}
else{
I1.at<uchar>(y,x)=255;
}
}
}
imshow("image",I1);
waitKey(0);
return 0;
}
Your problem is that you're reading float elements from a CV_8U (aka uchar) Mat.
Mat hltmp = Mat::zeros(1, 256, CV_8U);
...
hltmp.at<float>(0)
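If you prefer to keep your own version, the minimal fix for that line is to make the element type and the access type agree, for example (just a sketch):
// Allocate as float so that at<float>() reads what was actually stored.
cv::Mat hltmp = cv::Mat::zeros(1, 256, CV_32F);
hltmp.at<float>(0, 5) = 0.25f;
float v = hltmp.at<float>(0, 5); // v == 0.25f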
You should learn how to use a debugger; you'll find these problems very quickly.
Since the implementation over-complicates things, contains some errors, and is cluttered with debug prints, I propose the code below rather than correcting your (not many, but mainly conceptual) errors one by one. You can see that, written properly, there is almost a 1:1 conversion from Matlab to OpenCV.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
uchar maxentropie(const Mat1b& src, Mat1b& dst)
{
// Histogram
Mat1d hist(1, 256, 0.0);
for (int r=0; r<src.rows; ++r)
for (int c=0; c<src.cols; ++c)
hist(src(r,c))++;
// Normalize
hist /= double(src.rows * src.cols);
// Cumulative histogram
Mat1d cumhist(1, 256, 0.0);
float sum = 0;
for (int i = 0; i < 256; ++i)
{
sum += hist(i);
cumhist(i) = sum;
}
Mat1d hl(1, 256, 0.0);
Mat1d hh(1, 256, 0.0);
for (int t = 0; t < 256; ++t)
{
// low range entropy
double cl = cumhist(t);
if (cl > 0)
{
for (int i = 0; i <= t; ++i)
{
if (hist(i) > 0)
{
hl(t) = hl(t) - (hist(i) / cl) * log(hist(i) / cl);
}
}
}
// high range entropy
double ch = 1.0 - cl; // constraint cl + ch = 1
if (ch > 0)
{
for (int i = t+1; i < 256; ++i)
{
if (hist(i) > 0)
{
hh(t) = hh(t) - (hist(i) / ch) * log(hist(i) / ch);
}
}
}
}
// choose best threshold
Mat1d entropie(1, 256, 0.0);
double h_max = hl(0) + hh(0);
uchar threshold = 0;
entropie(0) = h_max;
for (int t = 1; t < 256; ++t)
{
entropie(t) = hl(t) + hh(t);
if (entropie(t) > h_max)
{
h_max = entropie(t);
threshold = uchar(t);
}
}
// Create output image
dst = src > threshold;
return threshold;
}
int main()
{
Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
Mat1b res;
uchar th = maxentropie(img, res);
imshow("Original", img);
imshow("Result", res);
waitKey();
return 0;
}

From float array to Mat, concatenating blocks of an image

I have an 800x800 image which is broken down into 16 blocks of 200x200.
(You can see the previous post here.)
These blocks are stored in: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now, I want to be able to get the same images/blocks back again, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
imshow( "Display window", mf );
waitKey(0);
}
}
So, the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800,800,CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
Rect roi(j,i,200,200);
mf.copyTo( big(roi) );
}
}
imwrite( "testing" , big );
This gives me:
OpenCV Error: Assertion failed (!fixedSize()) in release
in mf.copyTo( big(roi) );.
First, you need to know where your subimages are located within the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can either use pointers (keep in mind that the subimages are not continuous), or simply copyTo the correct place.
Have a look:
#include <opencv2\opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
Mat3b img = imread("path_to_image");
resize(img, img, Size(800, 800));
Mat grayImg;
cvtColor(img, grayImg, COLOR_BGR2GRAY);
grayImg.convertTo(grayImg, CV_32F);
int N = 4;
if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
{
// Error
return -1;
}
Size graySize = grayImg.size();
Size smallSize(grayImg.cols / N, grayImg.rows / N);
vector<Mat> smallImages;
vector<Rect> smallImageRois;
for (int i = 0; i < graySize.height; i += smallSize.height)
{
for (int j = 0; j < graySize.width; j += smallSize.width)
{
Rect rect = Rect(j, i, smallSize.width, smallSize.height);
smallImages.push_back(grayImg(rect));
smallImageRois.push_back(rect);
}
}
// Option 1. Using pointer to subimage data.
Mat big1(800, 800, CV_32F);
int big1step = big1.step1();
float* pbig1 = big1.ptr<float>(0);
for (int idx = 0; idx < smallImages.size(); ++idx)
{
float* pdata = (float*)smallImages[idx].data;
int step = smallImages[idx].step1();
Rect roi = smallImageRois[idx];
for (int i = 0; i < smallSize.height; ++i)
{
for (int j = 0; j < smallSize.width; ++j)
{
pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
}
}
}
// Option 2. Using copyTo
Mat big2(800, 800, CV_32F);
for (int idx = 0; idx < smallImages.size(); ++idx)
{
smallImages[idx].copyTo(big2(smallImageRois[idx]));
}
return 0;
}
For concatenating the sub-images into a single square image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
assert(patches.size() > 0);
// make it square
const int patch_width = patches[0].cols;
const int patch_height = patches[0].rows;
const int patch_stride = ceil(sqrt(patches.size()));
Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());
for (size_t i = 0, iend = patches.size(); i < iend; i++) {
Mat &patch = patches[i];
const int offset_x = (i % patch_stride) * patch_width;
const int offset_y = (i / patch_stride) * patch_height;
// copy the patch to the output image
patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
}
return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = {Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255)};
// fill vector with circles of different colours
for(int i = 0; i < 16; i++) {
Mat patch = Mat::zeros(100,100, CV_32FC3);
circle(patch, Point(50,50), 40, colours[i % 3], -1);
patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce a 4x4 grid of coloured circles.
Print the values of i and j before creating Mat mf, and I believe you will soon find the error.
Hint 1: i and j will be 0 the first time.
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi))
Edit:
Hint 3: Try not to do such pointer fiddling; you will get into trouble, especially if you ignore the step (as you seem to do).
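If you do keep the pointer route, note that the Mat constructor taking an external data pointer also accepts a step in bytes, so a block can be wrapped without copying. A minimal sketch (the 200x200 size is taken from the question; the buffer is a stand-in):
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    // A continuous 200x200 float buffer (stand-in for one subimage's data).
    std::vector<float> buffer(200 * 200, 0.5f);

    // Wrap it without copying: rows, cols, type, data pointer, step in bytes.
    // The buffer must outlive 'block'.
    cv::Mat block(200, 200, CV_32F, buffer.data(), 200 * sizeof(float));

    cv::imshow("block", block); // imshow displays CV_32F scaled to [0,1]
    cv::waitKey(0);
    return 0;
}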

Trying to match contours, trouble with cv::Mat and IplImage

I am doing a contour matching test.
Here I use an image called "refshape.bmp"
(link: https://www.dropbox.com/s/06hrjji49uyid4w/refshape.bmp?dl=0)
and an image called "2.bmp"
(link: https://www.dropbox.com/s/5t73mvbdfbtqvs1/2.BMP?dl=0)
to do this test.
The code follows.
There are two parts to this code:
Part 1: rotate the "refshape.bmp" image.
Part 2: match contours, marking the match with a red line.
(The separate parts work successfully!)
But I have a problem converting between cv::Mat and IplImage.
There is an overflow warning: link: www.dropbox.com/s/mne4u3va94svx8y/%E6%93%B7%E5%8F%96.JPG?dl=0
In the first part there is a cv::Mat (image) "dst", which I then convert to an IplImage with:
IplImage* reference = &copy;
#include <stdlib.h>
#include<iostream>
#include "time.h"
#include "highgui.h"
#include "cv.h"
using namespace std;
int comp(const void *p,const void *q)
{
return (*(int *)p - *(int *)q);
}
int main()
{ int i =0;
cv::Mat src = cv::imread("refshape.bmp", CV_LOAD_IMAGE_UNCHANGED);
int angle = -i;
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
IplImage copy = dst;
IplImage* input = NULL;
IplImage* input_canny = NULL;
IplImage* input_final = NULL;
//IplImage* reference = NULL;
IplImage* input_gray = NULL;
IplImage* reference_gray = NULL;
IplImage* find_contour = NULL;
IplImage* reference = &copy;
// width (x) bounds of the image
int x_min = 229;
int x_max = 0;
// height (y) bounds of the image
int y_min = 111;
int y_max = 0;
int n = 0;
//reference = cvLoadImage("refshape.bmp",1);// read the image file
input = cvLoadImage("2.bmp",1);// read the image file
input_canny=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);// grayscale for Canny
input_final=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,3);//canny RGB
cvCvtColor(input, input_canny, CV_BGR2GRAY);// convert to grayscale
cvCanny(input_canny,input_canny,80,150,3);// canny edge
cvCvtColor(input_canny, input_final, CV_GRAY2BGR);// convert Canny grayscale back to RGB
reference_gray = cvCreateImage(cvSize(reference->width, reference->height), IPL_DEPTH_8U,1);
input_gray = cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *contour = 0;
//cvFindContours only works on grayscale images, so the image must be converted to grayscale first
cvCvtColor(reference, reference_gray, CV_BGR2GRAY);
cvFindContours(reference_gray, storage, &contour, sizeof(CvContour), CV_RETR_LIST , CV_CHAIN_APPROX_NONE, cvPoint(0,0));
// array used to store the point positions
CvPoint* PointArray[50000]={0};
// store the coordinates of every point of every contour into PointArray, and find the min/max x and y over all sample points
for( CvSeq* c = contour; c != NULL; c=c->h_next )
{
for( int i = 0; i<c->total; i++ )
{
PointArray[n] = CV_GET_SEQ_ELEM( CvPoint, c, i );
if(PointArray[n]->x < x_min)
{
x_min = PointArray[n]->x;
}
if(PointArray[n]->y < y_min)
{
y_min = PointArray[n]->y;
}
if(PointArray[n]->x > x_max)
{
x_max = PointArray[n]->x;
}
if(PointArray[n]->y > y_max)
{
y_max = PointArray[n]->y;
}
n+=1;
}
}
CvScalar s,t;
int match_x;
int match_y;
// Contour matching
int x;
int y;
int matchcount=0;
int maxcount=0;
for(int i=0;i<780;i++)
{
for(int j=0;j<630;j++)
{
matchcount=0;
for(int a = 0; a < n; a++)
{
s = cvGet2D(input_final, PointArray[a]->y -y_min+j, PointArray[a]->x -x_min+i);
t = cvGet2D(reference,PointArray[a]->y,PointArray[a]->x);
if(s.val[0]==255 && t.val[0]==255)
matchcount++;
}
if(matchcount>maxcount)
{
maxcount=matchcount;
match_x =i ;
match_y =j ;
}
}
}
system("pause");
// once the position with the most matches is found, set the colour to draw and mark these points with it
for(int a = 0; a < n; a++)
{
t.val[0] = 0;
t.val[1] = 0;
t.val[2] = 255;
// mark with the colour
cvSet2D(input_final, PointArray[a]->y-y_min+match_y, PointArray[a]->x-x_min+match_x, t);
}
system("pause");
cvNamedWindow("reference_gray",1);
cvNamedWindow("reference",1);
cvNamedWindow("input",1);
cvShowImage("reference_gray",reference_gray);
cvShowImage("reference",reference);
cvShowImage("input",input_final);
cvSaveImage("result.bmp",input_final);
system("pause");
cvWaitKey(0);
return 0;
}
Here is the separate code:
Rotation:
#include "opencv2/opencv.hpp"
#include <sstream>
int main()
{ for (int i=0;i<361;i++)
{
cv::Mat src = cv::imread("refshape.bmp", CV_LOAD_IMAGE_UNCHANGED);
int angle = -i;
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
std::ostringstream name;
name << "rotated_im_" << i << ".png";
cv::imwrite(name.str(), dst);
}
return 0;
}
Here is the contour matching code:
#include <stdlib.h>
#include<iostream>
#include "time.h"
#include "highgui.h"
#include "cv.h"
using namespace std;
int comp(const void *p,const void *q)
{
return (*(int *)p - *(int *)q);
}
int main()
{
IplImage* input = NULL;
IplImage* input_canny = NULL;
IplImage* input_final = NULL;
IplImage* reference = NULL;
IplImage* input_gray = NULL;
IplImage* reference_gray = NULL;
IplImage* find_contour = NULL;
// width (x) bounds of the image
int x_min = 229;
int x_max = 0;
// height (y) bounds of the image
int y_min = 111;
int y_max = 0;
int n = 0;
reference = cvLoadImage("refshape.bmp",1);// read the image file
input = cvLoadImage("2.bmp",1);// read the image file
input_canny=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);// grayscale for Canny
input_final=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,3);//canny RGB
cvCvtColor(input, input_canny, CV_BGR2GRAY);// convert to grayscale
cvCanny(input_canny,input_canny,80,150,3);// canny edge
cvCvtColor(input_canny, input_final, CV_GRAY2BGR);// convert Canny grayscale back to RGB
reference_gray = cvCreateImage(cvSize(reference->width, reference->height), IPL_DEPTH_8U,1);
input_gray = cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *contour = 0;
//cvFindContours only works on grayscale images, so the image must be converted to grayscale first
cvCvtColor(reference, reference_gray, CV_BGR2GRAY);
cvFindContours(reference_gray, storage, &contour, sizeof(CvContour), CV_RETR_LIST , CV_CHAIN_APPROX_NONE, cvPoint(0,0));
// array used to store the point positions
CvPoint* PointArray[5000]={0};
// store the coordinates of every point of every contour into PointArray, and find the min/max x and y over all sample points
for( CvSeq* c = contour; c != NULL; c=c->h_next )
{
for( int i = 0; i<c->total; i++ )
{
PointArray[n] = CV_GET_SEQ_ELEM( CvPoint, c, i );
if(PointArray[n]->x < x_min)
{
x_min = PointArray[n]->x;
}
if(PointArray[n]->y < y_min)
{
y_min = PointArray[n]->y;
}
if(PointArray[n]->x > x_max)
{
x_max = PointArray[n]->x;
}
if(PointArray[n]->y > y_max)
{
y_max = PointArray[n]->y;
}
n+=1;
}
}
CvScalar s,t;
int match_x;
int match_y;
// Contour matching
int x;
int y;
int matchcount=0;
int maxcount=0;
for(int i=0;i<780;i++)
{
for(int j=0;j<630;j++)
{
matchcount=0;
for(int a = 0; a < n; a++)
{
s = cvGet2D(input_final, PointArray[a]->y -y_min+j, PointArray[a]->x -x_min+i);
t = cvGet2D(reference,PointArray[a]->y,PointArray[a]->x);
if(s.val[0]==255 && t.val[0]==255)
matchcount++;
}
if(matchcount>maxcount)
{
maxcount=matchcount;
match_x =i ;
match_y =j ;
}
}
}
system("pause");
// once the position with the most matches is found, set the colour to draw and mark these points with it
for(int a = 0; a < n; a++)
{
t.val[0] = 0;
t.val[1] = 0;
t.val[2] = 255;
// mark with the colour
cvSet2D(input_final, PointArray[a]->y-y_min+match_y, PointArray[a]->x-x_min+match_x, t);
}
system("pause");
cvNamedWindow("reference_gray",1);
cvNamedWindow("reference",1);
cvNamedWindow("input",1);
cvShowImage("reference_gray",reference_gray);
cvShowImage("reference",reference);
cvShowImage("input",input_final);
cvSaveImage("result.bmp",input_final);
system("pause");
cvWaitKey(0);
}