Create an RGB image from pixel labels - C++

Given a CV_32SC1 cv::Mat image that contains a label for each pixel (where a label is just an index in 0..N-1), what is the cleanest code in OpenCV to generate a CV_8UC3 image that shows each connected component with a different arbitrary color? The less I have to specify the colors manually (as with cv::floodFill), the better.

If the maximum number of labels is 256, you can use applyColorMap after converting the image to CV_8U:
Mat1i img = ...
// Convert to CV_8U
Mat1b img2;
img.convertTo(img2, CV_8U);
// Apply color map
Mat3b out;
applyColorMap(img2, out, COLORMAP_JET);
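One caveat (my note, not part of the original answer): with only a few distinct labels the JET colors will all cluster at the low end of the map, because applyColorMap spans the full 0..255 input range. The scale factor of convertTo can spread them out; assuming N labels:
// Spread N labels evenly over 0..255 before applying the colormap
img.convertTo(img2, CV_8U, 255.0 / (N - 1));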
If the number of labels is higher than 256, you need to do it yourself. Below is an example that generates a JET colormap (based on the Matlab implementation of the jet function) and then applies the colormap to each element of your matrix.
Please note that if you want a different colormap, or random colors, you just need to modify the // Create JET colormap part (a random-color variant is sketched after the example):
#include <opencv2/opencv.hpp>
#include <algorithm>
using namespace std;
using namespace cv;

void applyCustomColormap(const Mat1i& src, Mat3b& dst)
{
    // Create JET colormap
    double m;
    minMaxLoc(src, nullptr, &m);
    m++; // number of labels

    int n = int(ceil(m / 4));
    Mat1d u(n * 3 - 1, 1, 1.0);
    for (int i = 1; i <= n; ++i)
    {
        u(i - 1) = double(i) / n;
        u((n * 3 - 1) - i) = double(i) / n;
    }

    vector<double> g(n * 3 - 1, 1);
    vector<double> r(n * 3 - 1, 1);
    vector<double> b(n * 3 - 1, 1);
    for (size_t i = 0; i < g.size(); ++i)
    {
        g[i] = ceil(double(n) / 2) - (int(m) % 4 == 1 ? 1 : 0) + i + 1;
        r[i] = g[i] + n;
        b[i] = g[i] - n;
    }

    g.erase(remove_if(g.begin(), g.end(), [m](double v){ return v > m; }), g.end());
    r.erase(remove_if(r.begin(), r.end(), [m](double v){ return v > m; }), r.end());
    b.erase(remove_if(b.begin(), b.end(), [](double v){ return v < 1.0; }), b.end());

    Mat1d cmap(int(m), 3, 0.0);
    for (size_t i = 0; i < r.size(); ++i) { cmap(int(r[i]) - 1, 2) = u(int(i)); }
    for (size_t i = 0; i < g.size(); ++i) { cmap(int(g[i]) - 1, 1) = u(int(i)); }
    for (size_t i = 0; i < b.size(); ++i) { cmap(int(b[i]) - 1, 0) = u(int(u.rows - b.size() + i)); }

    Mat3d cmap3 = cmap.reshape(3);
    Mat3b colormap;
    cmap3.convertTo(colormap, CV_8U, 255.0);

    // Apply color mapping
    dst = Mat3b(src.rows, src.cols, Vec3b(0, 0, 0));
    for (int r = 0; r < src.rows; ++r)
    {
        for (int c = 0; c < src.cols; ++c)
        {
            dst(r, c) = colormap(src(r, c));
        }
    }
}

int main()
{
    Mat1i img(1000, 1000);
    randu(img, Scalar(0), Scalar(10));

    Mat3b out;
    applyCustomColormap(img, out);

    imshow("Result", out);
    waitKey();

    return 0;
}
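And since the question asks for arbitrary colors that need not be specified manually, here is a minimal sketch of the same scheme with a random LUT in place of the JET construction (the applyRandomColormap name is mine; it assumes labels in 0..N-1):
void applyRandomColormap(const Mat1i& src, Mat3b& dst)
{
    // Build one random BGR color per label
    double maxLabel;
    minMaxLoc(src, nullptr, &maxLabel);
    RNG rng(12345); // fixed seed: same colors on every run
    vector<Vec3b> lut(int(maxLabel) + 1);
    for (auto& color : lut)
        color = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));

    // Look up each pixel's label in the LUT
    dst = Mat3b(src.rows, src.cols);
    for (int r = 0; r < src.rows; ++r)
        for (int c = 0; c < src.cols; ++c)
            dst(r, c) = lut[src(r, c)];
}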

Related

How to implement convolution Gaussian blur without padding using OpenCV in C++

I am implementing Gaussian blur by convolution in OpenCV without using OpenCV's built-in functions, but my code relies on padding the image first.
Now I want to remove the padding, so that the convolution runs directly on the unpadded matrix. How can I modify the code below to do so?
class Filteration {
private:
    // member function to pad the image before convolution
    Mat padding(Mat img, int k_width, int k_height, string type)
    {
        Mat scr;
        img.convertTo(scr, CV_64FC1);
        int pad_rows = (k_height - 1) / 2;
        int pad_cols = (k_width - 1) / 2;
        Mat pad_image(Size(scr.cols + 2 * pad_cols, scr.rows + 2 * pad_rows), CV_64FC1, Scalar(0));
        scr.copyTo(pad_image(Rect(pad_cols, pad_rows, scr.cols, scr.rows)));

        // mirror padding
        if (type == "mirror")
        {
            for (int i = 0; i < pad_rows; i++)
            {
                scr(Rect(0, pad_rows - i, scr.cols, 1)).copyTo(pad_image(Rect(pad_cols, i, scr.cols, 1)));
                scr(Rect(0, (scr.rows - 1) - pad_rows + i, scr.cols, 1)).copyTo(pad_image(Rect(pad_cols, (pad_image.rows - 1) - i, scr.cols, 1)));
            }
            for (int j = 0; j < pad_cols; j++)
            {
                pad_image(Rect(2 * pad_cols - j, 0, 1, pad_image.rows)).copyTo(pad_image(Rect(j, 0, 1, pad_image.rows)));
                pad_image(Rect((pad_image.cols - 1) - 2 * pad_cols + j, 0, 1, pad_image.rows)).copyTo(pad_image(Rect((pad_image.cols - 1) - j, 0, 1, pad_image.rows)));
            }
            return pad_image;
        }
        // replicate padding
        else if (type == "replicate")
        {
            for (int i = 0; i < pad_rows; i++)
            {
                scr(Rect(0, 0, scr.cols, 1)).copyTo(pad_image(Rect(pad_cols, i, scr.cols, 1)));
                scr(Rect(0, (scr.rows - 1), scr.cols, 1)).copyTo(pad_image(Rect(pad_cols, (pad_image.rows - 1) - i, scr.cols, 1)));
            }
            for (int j = 0; j < pad_cols; j++)
            {
                pad_image(Rect(pad_cols, 0, 1, pad_image.rows)).copyTo(pad_image(Rect(j, 0, 1, pad_image.rows)));
                pad_image(Rect((pad_image.cols - 1) - pad_cols, 0, 1, pad_image.rows)).copyTo(pad_image(Rect((pad_image.cols - 1) - j, 0, 1, pad_image.rows)));
            }
            return pad_image;
        }
        // zero padding (default)
        else
        {
            return pad_image;
        }
    }

    // member function to define kernels for convolution
    Mat define_kernel(int k_width, int k_height, string type)
    {
        // box kernel
        if (type == "box")
        {
            Mat kernel(k_height, k_width, CV_64FC1, Scalar(1.0 / (k_width * k_height)));
            return kernel;
        }
        // gaussian kernel
        else if (type == "gaussian")
        {
            // I will assume k = 1 and sigma = 1
            int pad_rows = (k_height - 1) / 2;
            int pad_cols = (k_width - 1) / 2;
            Mat kernel(k_height, k_width, CV_64FC1);
            for (int i = -pad_rows; i <= pad_rows; i++)
            {
                for (int j = -pad_cols; j <= pad_cols; j++)
                {
                    kernel.at<double>(i + pad_rows, j + pad_cols) = exp(-(i * i + j * j) / 2.0);
                }
            }
            kernel = kernel / sum(kernel);
            return kernel;
        }
        // fallback for unknown kernel types
        return Mat();
    }

public:
    // member function to implement convolution
    void convolve(Mat scr, Mat& dst, int k_w, int k_h, string paddingType, string filterType)
    {
        Mat pad_img = padding(scr, k_w, k_h, paddingType);
        Mat kernel = define_kernel(k_w, k_h, filterType);

        Mat output = Mat::zeros(scr.size(), CV_64FC1);
        for (int i = 0; i < scr.rows; i++)
        {
            for (int j = 0; j < scr.cols; j++)
            {
                output.at<double>(i, j) = sum(kernel.mul(pad_img(Rect(j, i, k_w, k_h)))).val[0];
            }
        }
        output.convertTo(dst, CV_8UC1); // IplImage: IPL_DEPTH_8U; Mat: CV_8UC1, CV_8UC2, CV_8UC3, CV_8UC4
    }
};
int main()
{
    Mat img, dst;

    // Load the image
    img = imread("mrl.jpg", 0);

    int k_w = 5; // kernel width
    int k_h = 5; // kernel height

    Filteration F1;
    // NOTE: as written, "gaussian" selects the padding type (and falls back to
    // zero padding) while "box" selects the kernel - these arguments look swapped.
    F1.convolve(img, dst, k_w, k_h, "gaussian", "box");

    namedWindow("dst", WINDOW_AUTOSIZE);
    imshow("dst", dst);
    waitKey(0);
}
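This question has no posted answer here; one possible approach (my sketch, not from the thread) is to drop the padding step entirely and clamp neighbour coordinates at the image borders inside the convolution loop, which reproduces replicate-style borders without ever allocating a padded matrix:
// Sketch: convolution on the unpadded image; out-of-range neighbour
// coordinates are clamped to the border (replicate behaviour).
void convolveNoPad(const Mat& scr, Mat& dst, const Mat& kernel)
{
    Mat src64;
    scr.convertTo(src64, CV_64FC1);
    int pr = (kernel.rows - 1) / 2;
    int pc = (kernel.cols - 1) / 2;

    Mat output = Mat::zeros(src64.size(), CV_64FC1);
    for (int i = 0; i < src64.rows; i++)
    {
        for (int j = 0; j < src64.cols; j++)
        {
            double acc = 0.0;
            for (int u = -pr; u <= pr; u++)
            {
                for (int v = -pc; v <= pc; v++)
                {
                    int y = i + u, x = j + v;
                    if (y < 0) y = 0; if (y >= src64.rows) y = src64.rows - 1; // clamp row
                    if (x < 0) x = 0; if (x >= src64.cols) x = src64.cols - 1; // clamp col
                    acc += kernel.at<double>(u + pr, v + pc) * src64.at<double>(y, x);
                }
            }
            output.at<double>(i, j) = acc;
        }
    }
    output.convertTo(dst, CV_8UC1);
}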

OpenCV: How to do Delaunay Triangulation and return an adjacency matrix?

I am trying to do Delaunay triangulation for a set of points in OpenCV, but I have encountered a problem.
The function takes a matrix of coordinates and returns an adjacency matrix. (If there is an edge connecting point i and point j, then adj(i,j) = 1, otherwise 0.)
I can't get it working. The code below gives strange results.
Could you please help?
An example of Delaunay triangulation is given here.
Thank you in advance.
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;

/// Return the Delaunay triangulation, under the form of an adjacency matrix
/// points is a Nx2 mat containing the coordinates (x, y) of the points
Mat delaunay(const Mat& points, int imRows, int imCols)
{
    Mat adj(points.rows, points.rows, CV_32S, Scalar(0));

    /// Create subdiv and insert the points to it
    Subdiv2D subdiv(Rect(0, 0, imCols, imRows));
    for (int p = 0; p < points.rows; p++)
    {
        float xp = points.at<float>(p, 0);
        float yp = points.at<float>(p, 1);
        Point2f fp(xp, yp);
        subdiv.insert(fp);
    }

    /// Get the number of edges
    vector<Vec4f> edgeList;
    subdiv.getEdgeList(edgeList);
    int nE = edgeList.size();

    /// Check adjacency
    for (int e = 1; e <= nE; e++)
    {
        int p = subdiv.edgeOrg(e); // Edge's origin
        int q = subdiv.edgeDst(e); // Edge's destination
        if (p < points.rows && q < points.rows)
            adj.at<int>(p, q) = 1;
        // else
        // {
        //     cout << p << ", " << q << endl;
        //     assert(p < points.rows && q < points.rows);
        // }
    }
    return adj;
}

int main()
{
    Mat points = Mat(100, 2, CV_32F);
    randu(points, 0, 99);

    int rows = 100, cols = 100;
    Mat im(rows, cols, CV_8UC3, Scalar::all(0));

    Mat adj = delaunay(points, rows, cols);
    for (int i = 0; i < points.rows; i++)
    {
        int xi = points.at<float>(i, 0);
        int yi = points.at<float>(i, 1);

        /// Draw the edges
        for (int j = i + 1; j < points.rows; j++)
        {
            if (adj.at<int>(i, j) > 0)
            {
                int xj = points.at<float>(j, 0);
                int yj = points.at<float>(j, 1);
                line(im, Point(xi, yi), Point(xj, yj), Scalar(255, 0, 0), 1);
            }
            /// Draw the nodes
            circle(im, Point(xi, yi), 1, Scalar(0, 0, 255), -1);
        }
    }
    namedWindow("im", CV_WINDOW_NORMAL);
    imshow("im", im);
    waitKey();
    return 0;
}
You are inserting into the adjacency matrix the indices of the Subdiv2D edges, which don't correspond to the indices of the points.
You can fix this, for example, by storing the points and their indices in a std::map. When you retrieve an edge from the Subdiv2D, you check that the edge is formed by your points and not by the boundary points that Subdiv2D adds internally. Having stored the point indices, you're now able to build the adjacency matrix correctly.
Have a look at the code:
#include <map>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

struct lessPoint2f
{
    bool operator()(const Point2f& lhs, const Point2f& rhs) const
    {
        return (lhs.x == rhs.x) ? (lhs.y < rhs.y) : (lhs.x < rhs.x);
    }
};

/// Return the Delaunay triangulation, under the form of an adjacency matrix
/// points is a Nx2 mat containing the coordinates (x, y) of the points
Mat delaunay(const Mat1f& points, int imRows, int imCols)
{
    map<Point2f, int, lessPoint2f> mappts;
    Mat1b adj(points.rows, points.rows, uchar(0));

    /// Create subdiv and insert the points to it
    Subdiv2D subdiv(Rect(0, 0, imCols, imRows));
    for (int p = 0; p < points.rows; p++)
    {
        float xp = points(p, 0);
        float yp = points(p, 1);
        Point2f fp(xp, yp);

        // Don't add duplicates
        if (mappts.count(fp) == 0)
        {
            // Save point and index
            mappts[fp] = p;
            subdiv.insert(fp);
        }
    }

    /// Get the number of edges
    vector<Vec4f> edgeList;
    subdiv.getEdgeList(edgeList);
    int nE = edgeList.size();

    /// Check adjacency
    for (int i = 0; i < nE; i++)
    {
        Vec4f e = edgeList[i];
        Point2f pt0(e[0], e[1]);
        Point2f pt1(e[2], e[3]);

        if (mappts.count(pt0) == 0 || mappts.count(pt1) == 0)
        {
            // Not a valid point (added internally by Subdiv2D)
            continue;
        }

        int idx0 = mappts[pt0];
        int idx1 = mappts[pt1];

        // Symmetric matrix
        adj(idx0, idx1) = 1;
        adj(idx1, idx0) = 1;
    }
    return adj;
}

int main()
{
    Mat1f points(10, 2);
    randu(points, 0, 99);

    int rows = 100, cols = 100;
    Mat3b im(rows, cols, Vec3b(0, 0, 0));

    Mat1b adj = delaunay(points, rows, cols);
    for (int i = 0; i < points.rows; i++)
    {
        int xi = points(i, 0);
        int yi = points(i, 1);

        /// Draw the edges
        for (int j = i + 1; j < points.rows; j++)
        {
            if (adj(i, j))
            {
                int xj = points(j, 0);
                int yj = points(j, 1);
                line(im, Point(xi, yi), Point(xj, yj), Scalar(255, 0, 0), 1);
            }
        }
    }
    for (int i = 0; i < points.rows; i++)
    {
        int xi = points(i, 0);
        int yi = points(i, 1);

        /// Draw the nodes
        circle(im, Point(xi, yi), 1, Scalar(0, 0, 255), -1);
    }
    imshow("im", im);
    waitKey();
    return 0;
}

Max entropy thresholding using OpenCV [closed]

I'm trying to convert this Matlab code for maximum entropy thresholding:
%**************************************************************************
%
% maxentropie is a function for thresholding using Maximum Entropy
%
% input  = I ==> Image in gray level
% output =
%   I1        ==> binary image
%   threshold ==> the threshold chosen by maxentropie
%
% F.Gargouri
%
%**************************************************************************
function [threshold, I1] = maxentropie(I)
    [n, m] = size(I);
    h = imhist(I);

    % normalize the histogram ==> hn(k)=h(k)/(n*m) ==> k in [1 256]
    hn = h / (n*m);

    % Cumulative distribution function
    c(1) = hn(1);
    for l = 2:256
        c(l) = c(l-1) + hn(l);
    end

    hl = zeros(1, 256);
    hh = zeros(1, 256);
    for t = 1:256
        % low range entropy
        cl = double(c(t));
        if cl > 0
            for i = 1:t
                if hn(i) > 0
                    hl(t) = hl(t) - (hn(i)/cl)*log(hn(i)/cl);
                end
            end
        end

        % high range entropy
        ch = double(1.0 - cl); % constraint cl + ch = 1
        if ch > 0
            for i = t+1:256
                if hn(i) > 0
                    hh(t) = hh(t) - (hn(i)/ch)*log(hn(i)/ch);
                end
            end
        end
    end

    % choose best threshold
    h_max = hl(1) + hh(1);
    threshold = 0;
    entropie(1) = h_max;
    for t = 2:256
        entropie(t) = hl(t) + hh(t);
        if entropie(t) > h_max
            h_max = entropie(t);
            threshold = t - 1;
        end
    end

    % Display
    I1 = zeros(size(I));
    I1(I < threshold) = 0;
    I1(I > threshold) = 255;
    %imshow(I1)
end
The problem is that I'm getting a floating point exception error in the code, and I cannot understand why.
This is my implementation:
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
using namespace cv;
using namespace std;

int main()
{
    cout.setf(std::ios_base::fixed, std::ios_base::floatfield);
    cout.precision(9);

    Mat old_image = imread("2.png", CV_LOAD_IMAGE_GRAYSCALE);
    double minval, maxval;
    minMaxLoc(old_image, &minval, &maxval);
    cout << minval << " " << maxval << endl;

    Mat image;
    old_image.convertTo(image, CV_8UC1, 255.0/(maxval-minval), -minval*255.0/(maxval-minval));
    minMaxLoc(image, &minval, &maxval);
    cout << minval << " " << maxval;

    int k = 0;
    imshow("im", image);
    waitKey(0);

    for (int y = 0; y < image.rows; y++) {
        for (int x = 0; x < image.cols; x++) {
            if ((int)image.at<uchar>(y, x) == 0) {
                k++;
            }
        }
    }
    cout << k << endl << endl;

    int i, l, j, t;
    int histSize = 256;
    float range[] = { 0, 255 };
    const float *ranges[] = { range };
    Mat hist, histogram, c, ctmp, hl, hh, hhtmp, entropy;
    calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false);

    for (int h = 1; h < histSize; h++) {
        histogram.push_back(hist.at<float>(h, 0));
        cout << histogram.rows << endl;
        cout << histogram.row(h-1) << endl;
        cout << hist.row(h) << endl;
    }
    histogram = histogram / (image.rows*image.cols - hist.at<float>(0, 0));

    // cumulative distribution function
    float cl, ch;
    ctmp.push_back(histogram.row(0));
    c.push_back(histogram.row(0));
    cout << c.row(0) << endl;
    for (l = 1; l < 255; l++) {
        c.push_back(ctmp.at<float>(0) + histogram.at<float>(l));
        ctmp.push_back(c.row(l));
        cout << c.at<float>(l) << endl;
        //c.row(l) = c.row(l-1) + histogram.row(l);
    }

    Mat hltmp = Mat::zeros(1, 256, CV_8U);

    // THE PROBLEM IS IN THESE TWO FOR CYCLES
    for (t = 0; t < 255; t++) {
        // low range entropy
        cl = c.at<float>(t);
        if (cl > 0) {
            for (i = 0; i <= t; i++) {
                if (histogram.at<float>(t) > 0) {
                    printf("here\n");
                    hl.push_back(hltmp.at<float>(0) - (histogram.at<float>(i)/cl)*log(histogram.at<float>(i)/cl));
                    printf("here\n");
                    cout << hl.at<float>(i);
                    printf("here\n");
                    hltmp.push_back(hl.row(t));
                    printf("here\n");
                }
            }
        }
        printf("here\n");

        // high range entropy
        ch = 1.0 - cl;
        if (ch > 0) {
            for (i = t+1; i < 255; i++) {
                if (histogram.at<float>(i) > 0) {
                    hh.push_back(hh.at<float>(t) - (histogram.at<float>(i)/ch)*log(histogram.at<float>(i)/ch));
                }
            }
        }
    }

    // choose the best threshold
    float h_max = hl.at<float>(0, 0) + hh.at<float>(0, 0);
    float threshold = 0;
    entropy.at<float>(0, 0) = h_max;
    for (t = 1; t < 255; t++) {
        entropy.at<float>(t) = hl.at<float>(t) + hh.at<float>(t);
        if (entropy.at<float>(t) > h_max) {
            h_max = entropy.at<float>(t);
            threshold = t - 1;
        }
        cout << threshold << endl;
    }

    // display
    Mat I1 = Mat::zeros(image.rows, image.cols, CV_8UC1);
    for (int y = 0; y < image.rows; y++) {
        for (int x = 0; x < image.cols; x++) {
            if ((int)image.at<uchar>(y, x) < threshold) {
                I1.at<uchar>(y, x) = 0;
            }
            else {
                I1.at<uchar>(y, x) = 255;
            }
        }
    }
    imshow("image", I1);
    waitKey(0);
    return 0;
}
Your problem is that you're reading float elements from a CV_8U (aka uchar) Mat.
Mat hltmp = Mat::zeros(1, 256, CV_8U);
...
hltmp.at<float>(0)
You should learn how to use a debugger; it will help you spot these problems very quickly.
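As a minimal patch for that single line (my suggestion, not part of the original answer), the matrix type and the element access just need to agree:
// Either store float and read float ...
Mat hltmp = Mat::zeros(1, 256, CV_32F);
float v = hltmp.at<float>(0);
// ... or keep CV_8U and read with hltmp.at<uchar>(0)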
Since you over-complicated things in your implementation, made some errors, and cluttered the code with debug prints, I propose the code below rather than correcting your (not many, but mainly conceptual) errors one by one. You can see that, written properly, the conversion from Matlab to OpenCV is almost 1:1.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;

uchar maxentropie(const Mat1b& src, Mat1b& dst)
{
    // Histogram
    Mat1d hist(1, 256, 0.0);
    for (int r = 0; r < src.rows; ++r)
        for (int c = 0; c < src.cols; ++c)
            hist(src(r, c))++;

    // Normalize
    hist /= double(src.rows * src.cols);

    // Cumulative histogram
    Mat1d cumhist(1, 256, 0.0);
    float sum = 0;
    for (int i = 0; i < 256; ++i)
    {
        sum += hist(i);
        cumhist(i) = sum;
    }

    Mat1d hl(1, 256, 0.0);
    Mat1d hh(1, 256, 0.0);

    for (int t = 0; t < 256; ++t)
    {
        // low range entropy
        double cl = cumhist(t);
        if (cl > 0)
        {
            for (int i = 0; i <= t; ++i)
            {
                if (hist(i) > 0)
                {
                    hl(t) = hl(t) - (hist(i) / cl) * log(hist(i) / cl);
                }
            }
        }

        // high range entropy
        double ch = 1.0 - cl; // constraint cl + ch = 1
        if (ch > 0)
        {
            for (int i = t+1; i < 256; ++i)
            {
                if (hist(i) > 0)
                {
                    hh(t) = hh(t) - (hist(i) / ch) * log(hist(i) / ch);
                }
            }
        }
    }

    // choose best threshold
    Mat1d entropie(1, 256, 0.0);
    double h_max = hl(0) + hh(0);
    uchar threshold = 0;
    entropie(0) = h_max;

    for (int t = 1; t < 256; ++t)
    {
        entropie(t) = hl(t) + hh(t);
        if (entropie(t) > h_max)
        {
            h_max = entropie(t);
            threshold = uchar(t);
        }
    }

    // Create output image
    dst = src > threshold;
    return threshold;
}

int main()
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);

    Mat1b res;
    uchar th = maxentropie(img, res);

    imshow("Original", img);
    imshow("Result", res);
    waitKey();

    return 0;
}

From float array to Mat, concatenate blocks of image

I have an 800x800 image which is broken down into 16 blocks of 200x200 (you can see the previous post here).
These blocks are stored in: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now I want to be able to get the same images/blocks back, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        imshow( "Display window", mf );
        waitKey(0);
    }
}
So, the problem is that I am receiving an OpenCV Error: Assertion failed in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800, 800, CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        Rect roi( j, i, 200, 200 );
        mf.copyTo( big(roi) );
    }
}
imwrite( "testing" , big );
This gives me: OpenCV Error: Assertion failed (!fixedSize()) in release, at the line mf.copyTo( big(roi) );.
First, you need to know where your subimages belong in the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can use pointers (keep in mind that the subimages are not continuous), or simply use copyTo into the correct place:
Have a look:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;

int main()
{
    Mat3b img = imread("path_to_image");
    resize(img, img, Size(800, 800));

    Mat grayImg;
    cvtColor(img, grayImg, COLOR_BGR2GRAY);
    grayImg.convertTo(grayImg, CV_32F);

    int N = 4;
    if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
    {
        // Error
        return -1;
    }

    Size graySize = grayImg.size();
    Size smallSize(grayImg.cols / N, grayImg.rows / N);

    vector<Mat> smallImages;
    vector<Rect> smallImageRois;
    for (int i = 0; i < graySize.height; i += smallSize.height)
    {
        for (int j = 0; j < graySize.width; j += smallSize.width)
        {
            Rect rect = Rect(j, i, smallSize.width, smallSize.height);
            smallImages.push_back(grayImg(rect));
            smallImageRois.push_back(rect);
        }
    }

    // Option 1. Using pointer to subimage data.
    Mat big1(800, 800, CV_32F);
    int big1step = big1.step1();
    float* pbig1 = big1.ptr<float>(0);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        float* pdata = (float*)smallImages[idx].data;
        int step = smallImages[idx].step1();
        Rect roi = smallImageRois[idx];
        for (int i = 0; i < smallSize.height; ++i)
        {
            for (int j = 0; j < smallSize.width; ++j)
            {
                pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
            }
        }
    }

    // Option 2. Using copyTo
    Mat big2(800, 800, CV_32F);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        smallImages[idx].copyTo(big2(smallImageRois[idx]));
    }

    return 0;
}
For concatenating the sub-images into a single squared image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat>& patches)
{
    assert(patches.size() > 0);

    // make it square
    const int patch_width  = patches[0].cols;
    const int patch_height = patches[0].rows;
    const int patch_stride = ceil(sqrt(patches.size()));
    Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());

    for (size_t i = 0, iend = patches.size(); i < iend; i++)
    {
        Mat& patch = patches[i];
        const int offset_x = (i % patch_stride) * patch_width;
        const int offset_y = (i / patch_stride) * patch_height;

        // copy the patch to the output image
        patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
    }
    return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a squared image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = { Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255) };
// fill the vector with circles of different colours
for (int i = 0; i < 16; i++)
{
    Mat patch = Mat::zeros(100, 100, CV_32FC3);
    circle(patch, Point(50, 50), 40, colours[i % 3], -1);
    patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce a square image containing a 4x4 grid of the coloured circles.
Print the values of i and j before creating Mat mf and I believe you will soon find the error.
Hint 1: i and j will be 0 the first time
Hint 2: Use copyTo() with a ROI like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi))
Edit:
Hint 3: Try not to do such pointer fiddling; you will get into trouble, especially if you're ignoring the step (as you seem to do).
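Putting the hints together, the reassembly loop from the question could be written without any pointer arithmetic at all (a sketch, assuming 16 contiguous 200x200 CV_32F blocks stored row by row in subImages):
// Sketch: rebuild the 800x800 image purely with ROI copies
Mat big(800, 800, CV_32F);
int Idx = 0;
for (int i = 0; i < 800; i += 200)
{
    for (int j = 0; j < 800; j += 200, Idx++)
    {
        Rect roi(j, i, 200, 200);         // destination ROI in the big image
        subImages[Idx].copyTo(big(roi));  // source and ROI sizes must match
    }
}
imshow("big", big / 255.0); // imshow expects CV_32F data in [0,1]
waitKey(0);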

Error during convolution of image with a filter in OpenCV C++

I am new to OpenCV C++. I am getting an error with convolution code (taken from the internet) which is equivalent to conv2 in Matlab. The problem is that all the pixel values become 255. The filter I am using has the same size as the image. Can anybody please help me correct the problem? My OpenCV C++ code is given below:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <cv.hpp>
using namespace cv;
using namespace std;

Mat gd, img, bimgFiltered, gimgFiltered, rimgFiltered, fin_img;
Mat b, g, r, cr, cb, cg, B, G, R;
Mat b_logplane, b_plane, b_logfiltered, b_log, g_logplane, g_plane, g_logfiltered;
Mat g_log, r_logplane, r_plane, r_logfiltered, r_log;
Mat kernel, dest;
int m, n, m1, m2, n1, n2;
int c = 120;
double mysum = 0.0, mysum1 = 0.0, k = 0;
int cent = 0, radius = 0;

enum ConvolutionType {
    /* Return the full convolution, including border */
    CONVOLUTION_FULL,
    /* Return only the part that corresponds to the original image */
    CONVOLUTION_SAME,
    /* Return only the submatrix containing elements that were not influenced by the border */
    CONVOLUTION_VALID
};

void conv2(const Mat& img, const Mat& kernel, ConvolutionType type, Mat& dest)
{
    Mat source = img;
    if (CONVOLUTION_FULL == type)
    {
        source = Mat();
        const int additionalRows = kernel.rows - 1, additionalCols = kernel.cols - 1;
        copyMakeBorder(img, source, (additionalRows + 1) / 2, additionalRows / 2,
                       (additionalCols + 1) / 2, additionalCols / 2, BORDER_CONSTANT, Scalar(0));
    }

    // flip the kernel into a local copy so that filter2D (which correlates)
    // performs a true convolution, without writing through the const reference
    Mat fkernel;
    flip(kernel, fkernel, -1);

    Point anchor(fkernel.cols - fkernel.cols / 2 - 1, fkernel.rows - fkernel.rows / 2 - 1);
    int borderMode = BORDER_CONSTANT;
    filter2D(source, dest, img.depth(), fkernel, anchor, 0, borderMode);

    if (CONVOLUTION_VALID == type)
    {
        dest = dest.colRange((kernel.cols - 1) / 2, dest.cols - kernel.cols / 2)
                   .rowRange((kernel.rows - 1) / 2, dest.rows - kernel.rows / 2);
    }
}
int main()
{
    img = imread("milla.bmp", CV_LOAD_IMAGE_COLOR);

    b.create(img.size(), img.type());
    g.create(img.size(), img.type());
    r.create(img.size(), img.type());
    cr.create(img.size(), img.type());
    cg.create(img.size(), img.type());
    cb.create(img.size(), img.type());

    Mat planes[3];
    split(img, planes);

    bimgFiltered.create(img.size(), img.type());
    gimgFiltered.create(img.size(), img.type());
    rimgFiltered.create(img.size(), img.type());
    dest.create(img.size(), img.type());
    gd.create(img.size(), img.type());

    for (int j = 0; j < img.rows; j++)
    {
        for (int i = 0; i < img.cols; i++)
        {
            radius = ((cent - i)^2 + (cent - j)^2);
            gd.at<float>(j, i) = exp((-(radius) / c^2));
            mysum = mysum + gd.at<float>(j, i);
        }
        mysum1 = mysum1 + mysum;
    }
    k = 1 / mysum1;
    cout << endl << k << "\n" << endl;

    for (int j = 0; j < img.rows; j++)
    {
        for (int i = 0; i < img.cols; i++)
        {
            gd.at<float>(j, i) = k * gd.at<float>(j, i);
        }
    }

    planes[0].convertTo(planes[0], CV_32F, 1.0/255.0);
    planes[1].convertTo(planes[1], CV_32F, 1.0/255.0);
    planes[2].convertTo(planes[2], CV_32F, 1.0/255.0);

    conv2(planes[0], gd, CONVOLUTION_SAME, bimgFiltered);
    conv2(planes[1], gd, CONVOLUTION_SAME, gimgFiltered);
    conv2(planes[2], gd, CONVOLUTION_SAME, rimgFiltered);

    imshow("img", gimgFiltered);
    waitKey(0);
    return 0;
}
There are a few problems with the code:
Issue 1:
In the following two lines:
radius = ((cent - i)^2 + (cent - j)^2);
gd.at<float>(j, i) = exp((-(radius) / c^2));
You are using the ^ operator, which is the bitwise XOR operator in C/C++. I think you are mistaking it for the power operator. To take the power of a number you have to use the pow function, as follows:
radius = powf((cent - i),2) + powf((cent - j),2);
gd.at<float>(j, i) = expf((-(radius) / (c*c)));
Issue 2:
The gd matrix is assumed to contain floating point values, since it is accessed as gd.at<float>(j, i), but it is declared with the same type as that of the image, i.e. CV_8UC3. So gd should be created as follows:
gd.create(img.size(), CV_32FC1);
Issue 3:
Another possible logical error may be present in the first nested loop. You may have to set mysum = 0; before starting the inner loop like this:
for(int j = 0; j < img.rows; j++)
{
    mysum = 0;
    for(int i = 0; i < img.cols; i++)
    {
        radius = powf((cent - i),2) + powf((cent - j),2);
        gd.at<float>(j, i) = expf((-(radius) / (c*c)));
        mysum = mysum + gd.at<float>(j, i);
    }
    mysum1 = mysum1 + mysum;
}
Issue 4:
Output filtered images should be created as single-channel instead of 3-channel images:
bimgFiltered.create(img.size(),CV_8UC1);
gimgFiltered.create(img.size(),CV_8UC1);
rimgFiltered.create(img.size(),CV_8UC1);
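Putting Issues 1-3 together, the kernel construction might look like this (a sketch reusing the question's own variables cent, c, img, gd, mysum and mysum1; not a full rewrite of the program):
// Single-channel float kernel, powers via powf/expf, row sums reset properly
gd.create(img.size(), CV_32FC1);
mysum1 = 0.0;
for (int j = 0; j < gd.rows; j++)
{
    mysum = 0;
    for (int i = 0; i < gd.cols; i++)
    {
        float radius = powf(cent - i, 2) + powf(cent - j, 2); // local float, not the global int
        gd.at<float>(j, i) = expf(-radius / (c * c));
        mysum = mysum + gd.at<float>(j, i);
    }
    mysum1 = mysum1 + mysum;
}
gd = gd * (1.0 / mysum1); // normalize so the kernel sums to 1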