I am converting this depth image to a pcl::PointCloud using the following:
PointCloud::Ptr PointcloudUtils::RGBDtoPCL(cv::Mat depth_image, Eigen::Matrix3f& _intrinsics)
{
    PointCloud::Ptr pointcloud(new PointCloud);

    float fx = _intrinsics(0, 0);
    float fy = _intrinsics(1, 1);
    float cx = _intrinsics(0, 2);
    float cy = _intrinsics(1, 2);
    float factor = 1;

    depth_image.convertTo(depth_image, CV_32F); // convert the image data to float type
    if (!depth_image.data) {
        std::cerr << "No depth data!!!" << std::endl;
        exit(EXIT_FAILURE);
    }

    pointcloud->width = depth_image.cols; // dimensions must be initialized to use 2-D indexing
    pointcloud->height = depth_image.rows;
    pointcloud->resize(pointcloud->width * pointcloud->height);

#pragma omp parallel for
    for (int v = 0; v < depth_image.rows; v += 4)
    {
        for (int u = 0; u < depth_image.cols; u += 4)
        {
            float Z = depth_image.at<float>(v, u) / factor;

            PointT p;
            p.z = Z;
            p.x = (u - cx) * Z / fx;
            p.y = (v - cy) * Z / fy;

            p.z = p.z / 1000;
            p.x = p.x / 1000;
            p.y = p.y / 1000;

            // assign into the already-resized, organized cloud;
            // push_back here would both append duplicates after the resize
            // and race under the OpenMP parallel loop
            pointcloud->at(u, v) = p;
        }
    }

    return pointcloud;
}
This works great. I have run some processing on the cloud, and now I need to convert the point cloud back to a cv::Mat depth image. I cannot find an example for this, and am having trouble getting my head around it. What is the opposite of the above function?
How can I convert a pcl::PointCloud back to a cv::Mat?
Thank you.
This is untested code, since I don't have PCL installed on my machine.
From your own conversion code I am assuming your image is a single-channel image.
void PCL2Mat(PointCloud::Ptr pointcloud, cv::Mat& depth_image, int original_width, int original_height)
{
    if (!depth_image.empty())
        depth_image.release();
    depth_image.create(original_height, original_width, CV_32F);

#pragma omp parallel for
    for (int v = 0; v < depth_image.rows; ++v)
    {
        for (int u = 0; u < depth_image.cols; ++u)
        {
            // index the organized cloud directly; a shared running counter
            // would not be safe under the OpenMP parallel loop
            depth_image.at<float>(v, u) = pointcloud->points.at(v * original_width + u).z * 1000;
        }
    }

    // note: values above 255 saturate when converting to 8-bit
    depth_image.convertTo(depth_image, CV_8U);
}
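For example, assuming the original depth image was 640 x 480 (the dimensions here are only illustrative, and cloud stands for whatever PointCloud::Ptr you processed), the call would look like:

cv::Mat depth;
PCL2Mat(cloud, depth, 640, 480); // writes a 480 x 640 CV_8U image into depth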
I don't know about OpenCV methods, but in case you do something that makes your point cloud unstructured, your process could be something like this:
% rescale the points by 1000
p.x = p.x * 1000; p.y = p.y * 1000; p.z = p.z * 1000;
% project points onto the image plane and shift by the principal point
% (the forward code uses factor = 1, so it is omitted here)
image_p.x = p.x * fx / p.z + cx;
image_p.y = p.y * fy / p.z + cy;
Now depending on what you have done with the point cloud, the points might not map exactly to pixel centers (or top-left corners, for some applications), or you might be missing points, leaving NaN/0-value pixels. How you handle that is up to you, but the simplest way is to cast image_p.x and image_p.y to integers, make sure they are within the image boundaries, and set
depth_image.at<float>(image_p.y, image_p.x) = p.z;
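A minimal sketch of that loop (my own illustration; the function name is made up, and fx, fy, cx, cy are assumed to be the same intrinsics used in the forward conversion):

void unorderedPCL2Mat(PointCloud::Ptr pointcloud, cv::Mat& depth_image,
                      int width, int height,
                      float fx, float fy, float cx, float cy)
{
    depth_image = cv::Mat::zeros(height, width, CV_32F); // missing points stay 0
    for (const PointT& p : pointcloud->points)
    {
        if (p.z <= 0) continue;                       // behind the camera / invalid
        // undo the /1000 scaling, then project onto the image plane
        float X = p.x * 1000.0f, Y = p.y * 1000.0f, Z = p.z * 1000.0f;
        int u = (int)std::round(X * fx / Z + cx);     // needs <cmath>
        int v = (int)std::round(Y * fy / Z + cy);
        if (u < 0 || u >= width || v < 0 || v >= height) continue;
        depth_image.at<float>(v, u) = Z;              // depth back in the original units
    }
}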
How to create a Gaussian kernel by only specifying its width w (3,5,7,9...), and without specifying its variance sigma?
In other words, how do I adapt sigma so that the Gaussian distribution 'fits' w well?
I would be interested in a C++ implementation:
void create_gaussian_kernel(int w, std::vector<std::vector<float>>& kernel)
{
    kernel = std::vector<std::vector<float>>(w, std::vector<float>(w, 0.f)); // 2D array of size w x w
    const float sigma = 1.0f; // how to adapt sigma to w ???
    const int hw = (w - 1) / 2; // half width

    for (int di = -hw; di <= +hw; ++di)
    {
        const int i = hw + di;
        for (int dj = -hw; dj <= +hw; ++dj)
        {
            const int j = hw + dj;
            kernel[i][j] = gauss2D(di, dj, sigma); // gauss2D evaluates the 2-D Gaussian (not shown)
        }
    }
}
Everything I see on the Internet uses a fixed size w and a fixed variance sigma:
geeksforgeeks.org/gaussian-filter-generation-c/
tutorialspoint.com/gaussian-filter-generation-in-cplusplus
stackoverflow.com/a/8204880/5317819
stackoverflow.com/q/42186498/5317819
stackoverflow.com/a/54615770/5317819
I found a simple (arbitrary) relation between sigma and w.
I want the first value outside the kernel (along one axis) to be below a small value epsilon:
exp( - (half_width + 1)^2 / (2 * sigma^2) ) < epsilon
with half_width the kernel 'half width'.
The result is
sigma^2 = - (half_width + 1)^2 / (2 * log(epsilon))
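For example, with half_width = 2 (a 5 x 5 kernel) and epsilon = 0.1, this gives sigma^2 = -(2 + 1)^2 / (2 * log(0.1)) = 9 / (2 * log(10)) ≈ 1.95, so sigma ≈ 1.4 (natural logarithm, as in std::log).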
I use the following C++ code:
#include <vector>
#include <cmath>
#include <cassert>

using Matrix = std::vector<std::vector<float>>;

// compute sigma^2 that 'fits' the kernel half width
float compute_squared_variance(int half_width, float epsilon = 0.001)
{
    assert(0 < epsilon && epsilon < 1); // small value required
    return -(half_width + 1.0) * (half_width + 1.0) / 2.0 / std::log(epsilon);
}

float gaussian_exp(float y, float x, float sigma2)
{
    assert(0 < sigma2);
    return std::exp(-(x * x + y * y) / (2 * sigma2));
}

// create a Gaussian kernel of size (2*half_width+1) x (2*half_width+1)
Matrix make_gaussian_kernel(int half_width)
{
    if (half_width <= 0)
    {
        // kernel of size 1 x 1
        Matrix kernel(1, std::vector<float>(1, 1.0));
        return kernel;
    }

    Matrix kernel(2 * half_width + 1, std::vector<float>(2 * half_width + 1, 0.0));

    const float sigma2 = compute_squared_variance(half_width, 0.1);

    float sum = 0;
    for (int di = -half_width; di <= +half_width; ++di)
    {
        const int i = half_width + di;
        for (int dj = -half_width; dj <= +half_width; ++dj)
        {
            const int j = half_width + dj;
            kernel[i][j] = gaussian_exp(di, dj, sigma2);
            sum += kernel[i][j];
        }
    }
    assert(0 < sum);

    // normalize
    for (int i = 0; i < 2 * half_width + 1; ++i)
    {
        for (int j = 0; j < 2 * half_width + 1; ++j)
        {
            kernel[i][j] /= sum;
        }
    }

    return kernel;
}
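A small usage sketch (the main function below is mine, added only to show the call and print the normalized weights):

#include <cstdio>

int main()
{
    const Matrix kernel = make_gaussian_kernel(2); // 5 x 5 kernel
    for (const auto& row : kernel)
    {
        for (float value : row)
            std::printf("%.4f ", value);
        std::printf("\n");
    }
    return 0;
}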
I've been working on drawing the Julia set using a distance estimator instead of the normalized iteration count. I usually use the code below and play around with the iteration count until I get a decent enough picture:
double Mandelbrot::getJulia(double x, double y)
{
    complex<double> z(x, y);
    complex<double> c(-0.7269, 0.1889);

    double iterations = 0;
    while (iterations < MAX)
    {
        z = z * z + c;
        if (abs(z) > 2) {
            // smooth (normalized) iteration count
            return iterations + 1.0 - log(log2(abs(z)));
        }
        iterations++;
    }
    return double(MAX);
}
I then call this for each point and draw to a bitmap:
ZoomTool zt(WIDTH, HEIGHT);
zt.add(Zoom(WIDTH / 2, HEIGHT / 2, 4.0 / WIDTH));

for (int y = 0; y < HEIGHT; y++) {
    for (int x = 0; x < WIDTH; x++) {
        pair<double, double> coords = zt.zoomIn(x, y);
        double iterations = Mandelbrot::getJulia(coords.first, coords.second);
        double ratio = iterations / Mandelbrot::MAX;

        double h = 0;
        double s = 0;
        double v = 0;
        if (ratio != 1)
        {
            h = 360.0 * ratio;
            s = 1.0;
            v = 1.0;
        }

        HSV hsv(h, s, v);
        RGB rgb(0, 0, 0);
        rgb = toRGB(hsv);
        bitmap.setPixel(x, y, rgb._r, rgb._g, rgb._b);
    }
}
At 600 iterations, I get this:
This is not great, but it is better than what I get with the distance estimator, which I am now attempting to use. I've implemented the distance estimator as below:
double Mandelbrot::getJulia(double x, double y)
{
    complex<double> z(x, y);
    complex<double> c(-0.7269, 0.1889);
    complex<double> dz = 0;

    double iterations = 0;
    while (iterations < MAX)
    {
        dz = 2.0 * dz * z + 1.0;
        z = z * z + c;
        if (abs(z) > 2)
        {
            return abs(z) * log(abs(z)) / abs(dz);
        }
        iterations++;
    }
    return Mandelbrot::MAX;
}
At 600 iterations, I get the following image:
Am I not normalizing the colors correctly? I'm guessing this is happening because I'm normalizing to 360.0 and doing a conversion from HSV to RGB. Since the distances are quite small, I get a very condensed distribution of colors.
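One common way to deal with that (a sketch of my own, not from the original post; the constant k is an arbitrary tuning parameter) is to remap the distance through an exponential curve so the small values spread over [0, 1) before they are scaled to a hue:

#include <algorithm>
#include <cmath>

// map a distance-estimator value onto [0, 1): tiny distances stay near 0,
// larger ones saturate toward 1, so the hue range is actually used
double distanceToRatio(double distance, double k = 50.0)
{
    return 1.0 - std::exp(-k * std::max(distance, 0.0));
}

You would then use distanceToRatio(iterations) in place of iterations / Mandelbrot::MAX when computing the hue; points that never escape return MAX, which maps to exactly 1.0, so they stay black with the existing ratio != 1 check.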
I have a cv::Mat that is a depth image.
I am converting it to a PCL pointcloud like this:
PointCloud::Ptr RGBDtoPCL3(cv::Mat depth_image)
{
    PointCloud::Ptr pointcloud(new PointCloud);

    float fx = 481.20;
    float fy = 480.00;
    float cx = 319.50;
    float cy = 239.50;
    float factor = 1;

    depth_image.convertTo(depth_image, CV_32F); // convert the image data to float type
    if (!depth_image.data) {
        std::cerr << "No depth data!!!" << std::endl;
        exit(EXIT_FAILURE);
    }

    pointcloud->width = depth_image.cols; // dimensions must be initialized to use 2-D indexing
    pointcloud->height = depth_image.rows;
    pointcloud->resize(pointcloud->width * pointcloud->height);

    for (int v = 0; v < depth_image.rows; v++)
    {
        for (int u = 0; u < depth_image.cols; u++)
        {
            float Z = depth_image.at<float>(v, u) / factor;

            PointT p;
            p.z = Z;
            p.x = (u - cx) * Z / fx;
            p.y = (v - cy) * Z / fy;

            p.z = p.z / 1000;
            p.x = p.x / 1000;
            p.y = p.y / 1000;

            // assign into the already-resized, organized cloud instead of push_back,
            // which would append duplicates after the resize above
            pointcloud->at(u, v) = p;
        }
    }

    return pointcloud;
}
I am also getting the data from the Mat like this:
unsigned short* dataMat1 = depth_image.ptr<unsigned short>();
I then do some processing on the point cloud.
What I need to do now is convert the point cloud into an unsigned short* that matches dataMat1 above.
What would this conversion look like? Do I need to convert back to a cv::Mat and then use its ptr, or can I convert directly from the point cloud to an unsigned short*?
Thank you.
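For what it's worth, one possible sketch (my own illustration, not an answer from the thread): rebuild a CV_16U cv::Mat from the organized cloud by mirroring the forward loop, then take its pointer.

cv::Mat PCLtoDepthMat(PointCloud::Ptr pointcloud, int width, int height)
{
    cv::Mat depth(height, width, CV_16U, cv::Scalar(0));
    for (int v = 0; v < height; v++)
    {
        for (int u = 0; u < width; u++)
        {
            // undo the /1000 scaling applied in RGBDtoPCL3
            depth.at<unsigned short>(v, u) = (unsigned short)(pointcloud->at(u, v).z * 1000.0f);
        }
    }
    return depth;
}

// usage:
// cv::Mat depth = PCLtoDepthMat(cloud, depth_image.cols, depth_image.rows);
// unsigned short* dataMat2 = depth.ptr<unsigned short>();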
I have this function, taken from here:
bool interpolate(const Mat &im, float ofsx, float ofsy, float a11, float a12, float a21, float a22, Mat &res)
{
    bool ret = false;
    // input size (-1 for the safe bilinear interpolation)
    const int width = im.cols - 1;
    const int height = im.rows - 1;
    // output size
    const int halfWidth = res.cols >> 1;
    const int halfHeight = res.rows >> 1;
    int dim = res.rows * res.cols;
    float *out = res.ptr<float>(0);

    for (int j = -halfHeight; j <= halfHeight; ++j)
    {
        const float rx = ofsx + j * a12;
        const float ry = ofsy + j * a22;
        #pragma omp simd
        for (int i = -halfWidth; i <= halfWidth; ++i)
        {
            float wx = rx + i * a11;
            float wy = ry + i * a21;
            const int x = (int) floor(wx);
            const int y = (int) floor(wy);
            if (x >= 0 && y >= 0 && x < width && y < height)
            {
                // compute weights
                wx -= x; wy -= y;
                // bilinear interpolation
                *out++ =
                    (1.0f - wy) * ((1.0f - wx) * im.at<float>(y, x)     + wx * im.at<float>(y, x + 1)) +
                    (       wy) * ((1.0f - wx) * im.at<float>(y + 1, x) + wx * im.at<float>(y + 1, x + 1));
            } else {
                *out++ = 0;
                ret = true; // touching boundary of the input
            }
        }
    }
    return ret;
}
I don't know the details of interpolation, but looking at this OpenCV page, it seems to be a bilinear interpolation using INTER_LINEAR. The point is that I don't know how to call an equivalent OpenCV function for the code above.
This function is called at two different points here.
You can't apply a simple interpolation on its own using OpenCV; it has to be part of an image processing operation, e.g. a warp or resize operation.
I think you are trying to vectorize a warp affine. The easiest way, and the most efficient code on an Intel platform, would be to use IPP.
Otherwise, I'd let OpenCV's warpAffine do the job.
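For what it's worth, a rough sketch of the warpAffine route (my own guess at the mapping, untested; the function name is illustrative, and it does not reproduce the boundary flag that interpolate() returns): the loop samples the source at (ofsx + i*a11 + j*a12, ofsy + i*a21 + j*a22) for output pixel (i + halfWidth, j + halfHeight), which is a destination-to-source affine map, so it can be handed to cv::warpAffine with WARP_INVERSE_MAP:

void interpolateWithWarpAffine(const cv::Mat& im, float ofsx, float ofsy,
                               float a11, float a12, float a21, float a22,
                               cv::Mat& res)
{
    const int halfWidth = res.cols >> 1;
    const int halfHeight = res.rows >> 1;

    // destination-to-source transform: src = A * (dst - half) + ofs
    cv::Mat M = (cv::Mat_<double>(2, 3) <<
        a11, a12, ofsx - a11 * halfWidth - a12 * halfHeight,
        a21, a22, ofsy - a21 * halfWidth - a22 * halfHeight);

    cv::warpAffine(im, res, M, res.size(),
                   cv::INTER_LINEAR | cv::WARP_INVERSE_MAP,
                   cv::BORDER_CONSTANT, cv::Scalar(0));
}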
I am using the Objective-C language.
I want to convert my image to a cylindrical shape. I am using the C++ code below to convert the image.
cv::Mat CylindricalWarper2(Mat img)
{
    cv::Mat destImgMat(img.size(), CV_8U);

    for (int y = 0; y < img.rows; y++)
    {
        for (int x = 0; x < img.cols; x++)
        {
            cv::Point2f current_pos(x, y);
            current_pos = convert_pt1dd(current_pos, img.cols, img.rows);

            cv::Point2i top_left((int)current_pos.x, (int)current_pos.y);
            if (top_left.x < 0 || top_left.x > img.cols - 2 ||
                top_left.y < 0 || top_left.y > img.rows - 2)
            {
                continue;
            }

            // bilinear interpolation
            float dx = current_pos.x - top_left.x;
            float dy = current_pos.y - top_left.y;

            float weight_tl = (1.0 - dx) * (1.0 - dy);
            float weight_tr = (dx)       * (1.0 - dy);
            float weight_bl = (1.0 - dx) * (dy);
            float weight_br = (dx)       * (dy);

            uchar value = weight_tl * img.at<uchar>(top_left) +
                          weight_tr * img.at<uchar>(top_left.y, top_left.x + 1) +
                          weight_bl * img.at<uchar>(top_left.y + 1, top_left.x) +
                          weight_br * img.at<uchar>(top_left.y + 1, top_left.x + 1);

            destImgMat.at<uchar>(y, x) = value;
        }
    }
    return destImgMat;
}
cv::Point2f convert_pt1dd(cv::Point2f point, int w, int h)
{
    cv::Point2f pc(point.x - w / 2, point.y - h / 2);

    float f = w;
    float r = w;
    float omega = w / 2;
    float z0 = f - sqrt(r * r - omega * omega);

    float zc = (2 * z0 + sqrt(4 * z0 * z0 - 4 * (pc.x * pc.x / (f * f) + 1) * (z0 * z0 - r * r))) /
               (2 * (pc.x * pc.x / (f * f) + 1));

    cv::Point2f final_point(pc.x * zc / f, pc.y * zc / f);
    final_point.x += w / 2;
    final_point.y += h / 2;
    return final_point;
}
With this code I get the cylindrical shape, but my image is cut off. I do not get the full cylindrical projection image; my image looks like the one below:
Image link
I want to display my full image in cylindrical shape.
Any source or help would be greatly appreciated.
Thanks in advance.