I have a cv::Mat that is a depth image.
I am converting it to a PCL pointcloud like this:
PointCloud::Ptr RGBDtoPCL3(cv::Mat depth_image)
{
PointCloud::Ptr pointcloud(new PointCloud);
float fx = 481.20;
float fy = 480.00;
float cx = 319.50;
float cy = 239.50;
float factor = 1;
depth_image.convertTo(depth_image, CV_32F); // convert the image data to float type
if (!depth_image.data) {
std::cerr << "No depth data!!!" << std::endl;
exit(EXIT_FAILURE);
}
pointcloud->width = depth_image.cols; //Dimensions must be initialized to use 2-D indexing
pointcloud->height = depth_image.rows;
pointcloud->points.resize(pointcloud->height * pointcloud->width); // allocate once; points are assigned by index below
for (int v = 0; v < depth_image.rows; v++)
{
for (int u = 0; u < depth_image.cols; u++)
{
float Z = depth_image.at<float>(v, u) / factor;
PointT p;
p.z = Z;
p.x = (u - cx) * Z / fx;
p.y = (v - cy) * Z / fy;
p.z = p.z / 1000;
p.x = p.x / 1000;
p.y = p.y / 1000;
pointcloud->points[v * pointcloud->width + u] = p; // assign in place; push_back after resize would double the cloud
}
}
return pointcloud;
}
I am also getting the data from the Mat like this:
unsigned short* dataMat1 = depth_image.ptr<unsigned short>();
I then do some processing on the point cloud.
What I need to do now is convert the point cloud into an unsigned short* that matches the dataMat1 above.
What would this conversion look like? Do I need to convert back to a cv::Mat and then use its ptr, or can I convert directly from the point cloud to an unsigned short*?
Thank you.
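One way to get there (a minimal, untested sketch): rebuild a CV_16U cv::Mat from the cloud and take its ptr. This assumes the cloud is still organized (width and height kept from the conversion above) and that z is in metres, hence the multiplication by 1000 to return to the original millimetre units:

    // Untested sketch: rebuild a 16-bit depth Mat from the (still organized)
    // cloud, then take its pointer. Assumes z is in metres, as produced above.
    cv::Mat depth_out(pointcloud->height, pointcloud->width, CV_16UC1);
    for (int v = 0; v < (int)pointcloud->height; ++v)
    {
        for (int u = 0; u < (int)pointcloud->width; ++u)
        {
            depth_out.at<unsigned short>(v, u) =
                (unsigned short)(pointcloud->at(u, v).z * 1000.0f);
        }
    }
    unsigned short* dataMat2 = depth_out.ptr<unsigned short>(); // same layout as dataMat1

Going through a cv::Mat keeps the memory layout identical to dataMat1; writing straight into a raw unsigned short buffer would also work, but the Mat is more convenient if further OpenCV processing follows.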
I have a 1-D array (size = 4 * width * height + 1) of the pixels of an RGBA PNG image. I want to rotate the image by X degrees clockwise. I already know how to do it for 90 degrees, but I guess I have some problem with the trigonometry.
Here's the code:
std::pair<int, int> move(int x, int y, double rad) {
return {x * cos(rad) - y * sin(rad), x * cos(rad) + y * sin(rad)};
}
void turn(int deg) {
if (deg < 0) {
deg = 360 + deg;
}
double rad = deg * (M_PI / (double)180);
unsigned int oldWidth = width;
width = lround(sqrt(height * height + width * width));
height = lround(sqrt(height * height + oldWidth * oldWidth));
std::vector<unsigned char> output(rawPixels.size());
for (int X = 0; X < width; ++X) {
for (int Y = 0; Y < height; ++Y) {
for (int chan = 0; chan < CHANNELS_COUNT; ++chan) {
std::pair<int, int> xy = move(X, Y, rad);
output[(X * height + Y) * CHANNELS_COUNT + chan] = rawPixels[
((height - 1 - xy.second) * width + xy.first) * CHANNELS_COUNT + chan];
}
}
}
rawPixels = output;
}
It's ok to use additional arrays, but I don't want to use OpenCV or any other libraries.
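For reference, a sketch (untested) of the usual approach, reusing the question's names as assumptions: iterate over output pixels and apply the inverse rotation about the image centre, so every output pixel gets a value. Note that the second component of a rotation is x*sin(rad) + y*cos(rad), not x*cos(rad) + y*sin(rad) as in the move() above:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Hypothetical fixed version; rawPixels, width, height and
    // CHANNELS_COUNT are taken from the question.
    void turnFixed(int deg, std::vector<unsigned char>& rawPixels,
                   int& width, int& height, const int CHANNELS_COUNT)
    {
        const double rad = deg * M_PI / 180.0;
        const double c = std::cos(rad), s = std::sin(rad);
        // Bounding box of the rotated image (tighter than the diagonal).
        const int newW = (int)std::lround(std::fabs(width * c) + std::fabs(height * s));
        const int newH = (int)std::lround(std::fabs(width * s) + std::fabs(height * c));
        std::vector<unsigned char> output((std::size_t)newW * newH * CHANNELS_COUNT, 0);
        const double cx = width / 2.0, cy = height / 2.0;   // input centre
        const double ncx = newW / 2.0, ncy = newH / 2.0;    // output centre
        for (int Y = 0; Y < newH; ++Y) {
            for (int X = 0; X < newW; ++X) {
                // Inverse rotation: which input pixel lands on (X, Y)?
                const int xs = (int)std::lround( c * (X - ncx) + s * (Y - ncy) + cx);
                const int ys = (int)std::lround(-s * (X - ncx) + c * (Y - ncy) + cy);
                if (xs < 0 || xs >= width || ys < 0 || ys >= height)
                    continue; // outside the source image; leave transparent
                for (int chan = 0; chan < CHANNELS_COUNT; ++chan)
                    output[((std::size_t)Y * newW + X) * CHANNELS_COUNT + chan] =
                        rawPixels[((std::size_t)ys * width + xs) * CHANNELS_COUNT + chan];
            }
        }
        rawPixels.swap(output);
        width = newW;
        height = newH;
    }

Depending on whether your y axis points down and which direction you call clockwise, the sign of rad may need flipping.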
I am converting this depth image to a pcl::PointCloud using the following:
PointCloud::Ptr PointcloudUtils::RGBDtoPCL(cv::Mat depth_image, Eigen::Matrix3f& _intrinsics)
{
PointCloud::Ptr pointcloud(new PointCloud);
float fx = _intrinsics(0, 0);
float fy = _intrinsics(1, 1);
float cx = _intrinsics(0, 2);
float cy = _intrinsics(1, 2);
float factor = 1;
depth_image.convertTo(depth_image, CV_32F); // convert the image data to float type
if (!depth_image.data) {
std::cerr << "No depth data!!!" << std::endl;
exit(EXIT_FAILURE);
}
pointcloud->width = (depth_image.cols + 3) / 4; // the loop below samples every 4th pixel
pointcloud->height = (depth_image.rows + 3) / 4;
pointcloud->resize(pointcloud->width * pointcloud->height); // preallocate so the parallel loop can write by index
#pragma omp parallel for
for (int v = 0; v < depth_image.rows; v += 4)
{
for (int u = 0; u < depth_image.cols; u += 4)
{
float Z = depth_image.at<float>(v, u) / factor;
PointT p;
p.z = Z;
p.x = (u - cx) * Z / fx;
p.y = (v - cy) * Z / fy;
p.z = p.z / 1000;
p.x = p.x / 1000;
p.y = p.y / 1000;
(*pointcloud)(u / 4, v / 4) = p; // indexed write is safe under OpenMP; push_back is not
}
}
return pointcloud;
}
this works great. I have run some processing on the cloud, and now I need to convert the point cloud back to a cv::Mat depth image. I cannot find an example for this, and am having trouble getting my head around it. What is the opposite of the above function?
How can I convert a pcl::PointCloud back to a cv::Mat?
Thank you.
This is untested code, since I don't have the point cloud library on my machine.
From your own conversion code I am assuming your image is a single-channel image.
void PCL2Mat(PointCloud::Ptr pointcloud, cv::Mat& depth_image, int original_width, int original_height)
{
if (!depth_image.empty())
depth_image.release();
depth_image.create(original_height, original_width, CV_32F);
#pragma omp parallel for
for (int v = 0; v < depth_image.rows; ++v)
{
for (int u = 0; u < depth_image.cols; ++u)
{
// index the organized cloud directly; a shared counter (count++) would race under OpenMP
depth_image.at<float>(v, u) = pointcloud->points.at(v * depth_image.cols + u).z * 1000;
}
}
depth_image.convertTo(depth_image, CV_16U); // 16-bit keeps millimetre depths; CV_8U would saturate at 255
}
I don't know about OpenCV methods, but in case you do something that makes your point cloud unstructured, your process could be something like this:
% rescale the points by 1000
p.x = p.x * 1000; p.y = p.y * 1000; p.z = p.z * 1000;
% project points onto the image plane and shift by the principal point
image_p.x = ( p.x * fx / p.z + cx ) * factor;
image_p.y = ( p.y * fy / p.z + cy ) * factor;
Now, depending on what you have done with the point cloud, the points might not map exactly to pixel centres (or top-left corners, for some applications), and you might be missing points, leaving NaN/0-valued pixels. How you handle that is up to you, but the simplest way is to cast image_p.x and image_p.y to integers, make sure they are within the image boundaries, and set
depth_image.at<float>(image_p.y, image_p.x) = p.z;
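In C++, the unstructured case might look like this (an untested sketch under the same assumptions as the pseudocode above, with factor taken as 1; the function name and parameters are hypothetical):

    #include <cmath>
    #include <opencv2/core.hpp>

    // Untested sketch: project an unstructured cloud back into a depth Mat.
    // fx, fy, cx, cy must be the same intrinsics used by RGBDtoPCL;
    // z is metres in the cloud, millimetres in the output Mat.
    cv::Mat PCL2DepthUnstructured(PointCloud::Ptr pointcloud,
                                  int original_width, int original_height,
                                  float fx, float fy, float cx, float cy)
    {
        cv::Mat depth_image = cv::Mat::zeros(original_height, original_width, CV_32F);
        for (const PointT& p : pointcloud->points)
        {
            if (p.z <= 0.0f)
                continue; // invalid or behind the camera
            const float Z = p.z * 1000.0f; // metres back to millimetres
            const int u = (int)std::lround(p.x * fx / p.z + cx);
            const int v = (int)std::lround(p.y * fy / p.z + cy);
            if (u < 0 || u >= original_width || v < 0 || v >= original_height)
                continue; // projects outside the image
            depth_image.at<float>(v, u) = Z; // pixels no point hits stay 0
        }
        return depth_image;
    }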
I am writing a program which uses bicubic interpolation, and I am using EasyBMP, but I have a problem. The input image is peppers.
My output looks like this.
#define BOUNDS(val, min, max) if (val < min) { val = min; } else if (val > max) { val = max; }
void resize(float value)
{
BMP* temp = new BMP();
int in_w = image->TellWidth();
int in_h = image->TellHeight();
temp->SetSize(in_w*value, in_h*value);
RGBApixel input;
for (int y = 0; y < temp->TellHeight(); ++y)
{
float v = float(y) / float(temp->TellHeight() - 1);
for (int x = 0; x < temp->TellWidth(); ++x)
{
float u = float(x) / float(temp->TellWidth() - 1);
input = this->bicubicInterpolation(u,v);
temp->SetPixel(x,y,input);
}
}
delete image;
image=temp;
}
RGBApixel bicubicInterpolation(float u, float v)
{
RGBApixel p[4][4];
int q,w;
float x = (u * image->TellWidth()) - 0.5;
int xint = int(x);
float dx = x - floor(x);
float y = (v * image->TellHeight()) - 0.5;
int yint = int(y);
float dy = y - floor(y);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
q=xint - 1 + j;
BOUNDS(q,0,image->TellWidth()-1)
w=yint - 1 + i;
BOUNDS(w,0,image->TellHeight()-1)
p[j][i] = image->GetPixel(q,w);
}
}
RGBApixel toReturn;
RGBApixel el1 = this->interpolation(p[0][0],p[1][0], p[2][0], p[3][0], dx);
RGBApixel el2 = this->interpolation(p[0][1],p[1][1], p[2][1], p[3][1], dx);
RGBApixel el3 = this->interpolation(p[0][2],p[1][2], p[2][2], p[3][2], dx);
RGBApixel el4 = this->interpolation(p[0][3],p[1][3], p[2][3], p[3][3], dx);
RGBApixel value = this->interpolation(el1, el2, el3, el4, dy);
return value;
}
RGBApixel interpolation(RGBApixel A, RGBApixel B, RGBApixel C, RGBApixel D, float t)
{
float a[3],b[3],c[3],d[3];
RGBApixel toRet;
a[0]=A.Red;
b[0]=B.Red;
c[0]=C.Red;
d[0]=D.Red;
a[1]=A.Green;
b[1]=B.Green;
c[1]=C.Green;
d[1]=D.Green;
a[2]=A.Blue;
b[2]=B.Blue;
c[2]=C.Blue;
d[2]=D.Blue;
float w[3];
float x[3];
float y[3];
float z[3];
float color[3];
for(int i=0; i<3; i++)
{
w[i]= -a[i] / 2.0f + (3.0f*b[i]) / 2.0f - (3.0f*c[i]) / 2.0f + d[i] / 2.0f;
x[i]= a[i] - (5.0f*b[i]) / 2.0f + 2.0f*c[i] - d[i] / 2.0f;
y[i]= -a[i] / 2.0f + c[i] / 2.0f;
z[i]= b[i];
color[i]= w[i] * t*t*t + x[i] * t*t + y[i] * t +z[i];
}
toRet.Red=color[0];
toRet.Green=color[1];
toRet.Blue=color[2];
toRet.Alpha=255;
return toRet;
}
Have you noticed the mistake that I made?
The values were going out of range: cubic interpolation can overshoot the [0, 255] range, so the result needs clamping. Just need to:
color[i]= w[i] * t*t*t + x[i] * t*t + y[i] * t +z[i];
BOUNDS(color[i],0,255);
I have this function, taken from here:
bool interpolate(const Mat &im, float ofsx, float ofsy, float a11, float a12, float a21, float a22, Mat &res)
{
bool ret = false;
// input size (-1 for the safe bilinear interpolation)
const int width = im.cols-1;
const int height = im.rows-1;
// output size
const int halfWidth = res.cols >> 1;
const int halfHeight = res.rows >> 1;
int dim = res.rows * res.cols;
float *out = res.ptr<float>(0);
for (int j=-halfHeight; j<=halfHeight; ++j)
{
const float rx = ofsx + j * a12;
const float ry = ofsy + j * a22;
#pragma omp simd
for(int i=-halfWidth; i<=halfWidth; ++i)
{
float wx = rx + i * a11;
float wy = ry + i * a21;
const int x = (int) floor(wx);
const int y = (int) floor(wy);
if (x >= 0 && y >= 0 && x < width && y < height)
{
// compute weights
wx -= x; wy -= y;
// bilinear interpolation
*out++ =
(1.0f - wy) * ((1.0f - wx) * im.at<float>(y,x) + wx * im.at<float>(y,x+1)) +
( wy) * ((1.0f - wx) * im.at<float>(y+1,x) + wx * im.at<float>(y+1,x+1));
} else {
*out++ = 0;
ret = true; // touching boundary of the input
}
}
}
return ret;
}
I don't know interpolation in detail, but looking at this OpenCV page, it seems to be a bilinear interpolation, as with INTER_LINEAR. The point is that I don't know how to call an equivalent OpenCV function for the code above.
This function is called at two different points here.
You can't apply an interpolation on its own in OpenCV; it has to be part of an image-processing operation, e.g. a warp or resize.
I think you are trying to vectorize a warp affine. The easiest way, and the most efficient code on an Intel platform, would be to use IPP.
Otherwise, I'd let OpenCV's warpAffine do the job.
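For reference, a minimal sketch (untested) of what that might look like with cv::warpAffine. The original code maps each output pixel through the affine to find where to sample the input, i.e. the matrix is the destination-to-source map, which is exactly what WARP_INVERSE_MAP expects; the half-width/half-height offsets fold the patch-centred coordinates into the translation column. One behavioural difference: warpAffine won't report boundary touches the way the original's bool return value does.

    #include <opencv2/imgproc.hpp>

    // Hypothetical replacement for interpolate(). The original samples the
    // input at src = A * dst_centred + ofs, so A is the dst->src map and
    // WARP_INVERSE_MAP applies it directly.
    void interpolateCV(const cv::Mat& im, float ofsx, float ofsy,
                       float a11, float a12, float a21, float a22, cv::Mat& res)
    {
        const int halfWidth  = res.cols >> 1;
        const int halfHeight = res.rows >> 1;
        // Fold the output patch's centre offset into the translation column:
        // src = A * (dst - half) + ofs
        cv::Mat M = (cv::Mat_<double>(2, 3) <<
            a11, a12, ofsx - a11 * halfWidth - a12 * halfHeight,
            a21, a22, ofsy - a21 * halfWidth - a22 * halfHeight);
        cv::warpAffine(im, res, M, res.size(),
                       cv::INTER_LINEAR | cv::WARP_INVERSE_MAP,
                       cv::BORDER_CONSTANT, cv::Scalar(0));
    }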
I am using Objective-C. I want to warp my image into a cylindrical shape, and I am using the C++ code below to convert the image:
cv::Mat CylindricalWarper2 (Mat img)
{
cv::Mat destImgMat = cv::Mat::zeros(img.size(), CV_8U); // zero-initialise so skipped pixels stay black instead of uninitialised
for(int y = 0; y < img.rows; y++)
{
for(int x = 0; x < img.cols; x++)
{
cv::Point2f current_pos(x,y);
current_pos = convert_pt1dd(current_pos, img.cols, img.rows);
cv::Point2i top_left((int)current_pos.x,(int)current_pos.y);
if(top_left.x < 0 || top_left.x > img.cols-2 || top_left.y < 0 ||
top_left.y > img.rows-2)
{
continue;
}
//bilinear interpolation
float dx = current_pos.x-top_left.x;
float dy = current_pos.y-top_left.y;
float weight_tl = (1.0 - dx) * (1.0 - dy);
float weight_tr = (dx) * (1.0 - dy);
float weight_bl = (1.0 - dx) * (dy);
float weight_br = (dx) * (dy);
uchar value = weight_tl * img.at<uchar>(top_left) +
weight_tr * img.at<uchar>(top_left.y,top_left.x+1) +
weight_bl * img.at<uchar>(top_left.y+1,top_left.x) +
weight_br * img.at<uchar>(top_left.y+1,top_left.x+1);
destImgMat.at<uchar>(y,x) = value;
}
}
return destImgMat;
}
cv::Point2f convert_pt1dd(cv::Point2f point,int w,int h)
{
cv::Point2f pc(point.x-w/2,point.y-h/2);
float f = w;
float r = w;
float omega = w/2;
float z0 = f - sqrt(r*r-omega*omega);
float zc = (2*z0+sqrt(4*z0*z0-4*(pc.x*pc.x/(f*f)+1)*(z0*z0-r*r)))/(2*
(pc.x*pc.x/(f*f)+1));
cv::Point2f final_point(pc.x*zc/f,pc.y*zc/f);
final_point.x += w/2;
final_point.y += h/2;
return final_point;
}
With this code I get the cylindrical shape, but my image is cut off. I don't get the full cylindrical projection image; my image looks like the one below:
Image link
I want to display my full image in cylindrical shape.
Any source or help would be greatly appreciated.
Thanks in advance.
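One hedged idea (untested): since the warp maps destination pixels to source pixels, any part of the source that is only reachable from destination coordinates outside the original canvas is silently dropped. Rendering into a padded canvas and shifting the coordinates back may recover it. The pad value here is a hypothetical margin; for an exact fit it could be derived from where the image corners land under the mapping.

    // Hypothetical padded variant of CylindricalWarper2 (untested); reuses
    // convert_pt1dd from the question.
    cv::Mat CylindricalWarperPadded(cv::Mat img, int pad)
    {
        cv::Mat dst = cv::Mat::zeros(img.rows + 2 * pad, img.cols + 2 * pad, CV_8U);
        for (int y = 0; y < dst.rows; y++)
        {
            for (int x = 0; x < dst.cols; x++)
            {
                // Evaluate the warp in the original image's coordinate frame.
                cv::Point2f pos = convert_pt1dd(cv::Point2f(x - pad, y - pad),
                                                img.cols, img.rows);
                cv::Point2i tl((int)std::floor(pos.x), (int)std::floor(pos.y));
                if (tl.x < 0 || tl.x > img.cols - 2 || tl.y < 0 || tl.y > img.rows - 2)
                    continue; // source position outside the input image
                // Bilinear interpolation, as in the original code.
                float dx = pos.x - tl.x, dy = pos.y - tl.y;
                dst.at<uchar>(y, x) = (uchar)(
                    (1 - dx) * (1 - dy) * img.at<uchar>(tl) +
                    dx       * (1 - dy) * img.at<uchar>(tl.y, tl.x + 1) +
                    (1 - dx) * dy       * img.at<uchar>(tl.y + 1, tl.x) +
                    dx       * dy       * img.at<uchar>(tl.y + 1, tl.x + 1));
            }
        }
        return dst;
    }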