pcl supervoxel clustering: unable to visualise voxels - c++

I am trying to incorporate supervoxel clustering into my 3D point cloud application. However, the point clouds can't be visualised in RViz. When I tried debugging with gdb, I realised that my map object still has no data in it even after the line
std::multimap<uint32_t, uint32_t> supervoxel_adjacency; super.getSupervoxelAdjacency(supervoxel_adjacency);
The tutorial I am referencing is this one: https://pointclouds.org/documentation/tutorials/supervoxel_clustering.html
What is the problem? The call should fill the multimap with data for me to iterate over, but it doesn't.
void PointCloudProcessor::voxel_clustering(type_pclcloudptr& input, ros::Publisher& Publisher)
{
    float voxel_resolution = 0.008f;
    float seed_resolution = 0.1f;
    float color_importance = 0.2f;
    float spatial_importance = 0.4f;
    float normal_importance = 1.0f;

    pcl::SupervoxelClustering<pcl::PointXYZ> super(voxel_resolution, seed_resolution);
    super.setUseSingleCameraTransform(false);
    super.setInputCloud(input);
    super.setColorImportance(color_importance);
    super.setSpatialImportance(spatial_importance);
    super.setNormalImportance(normal_importance);

    std::map<uint32_t, pcl::Supervoxel<pcl::PointXYZ>::Ptr> supervoxel_clusters;

    if (!(input->points.empty()))
    {
        super.extract(supervoxel_clusters);
        type_pclcloudptr voxel_centroid_cloud = super.getVoxelCentroidCloud();
        pcl::PointCloud<pcl::PointXYZL>::Ptr labeled_voxel_cloud = super.getLabeledVoxelCloud();

        std::multimap<uint32_t, uint32_t> supervoxel_adjacency;
        super.getSupervoxelAdjacency(supervoxel_adjacency);

        // To make a graph of the supervoxel adjacency, we need to iterate through the supervoxel adjacency multimap
        std::multimap<uint32_t, uint32_t>::iterator label_itr = supervoxel_adjacency.begin();
        int idx = 0;
        //PointCloudProcessor::mMarkerArray.markers.resize(supervoxel_adjacency.size());
        for (; label_itr != supervoxel_adjacency.end(); )
        {
            //First get the label
            uint32_t supervoxel_label = label_itr->first;
            Eigen::Vector4f centroid;
            Eigen::Vector4f min;
            Eigen::Vector4f max;

            //Now get the supervoxel corresponding to the label
            pcl::Supervoxel<pcl::PointXYZ>::Ptr supervoxel = supervoxel_clusters.at(supervoxel_label);

            //Now we need to iterate through the adjacent supervoxels and make a point cloud of them
            type_pclcloudptr adjacent_supervoxel_centers;
            std::multimap<uint32_t, uint32_t>::iterator adjacent_itr = supervoxel_adjacency.equal_range(supervoxel_label).first;
            for (; adjacent_itr != supervoxel_adjacency.equal_range(supervoxel_label).second; ++adjacent_itr)
            {
                pcl::Supervoxel<pcl::PointXYZ>::Ptr neighbor_supervoxel = supervoxel_clusters.at(adjacent_itr->second);
                pcl::PointXYZ neighbour_centroid_xyz;
                pcl::PointXYZRGBA neighbour_centroid_xyzrgba = neighbor_supervoxel->centroid_;
                pcl::copyPoint(neighbour_centroid_xyzrgba, neighbour_centroid_xyz);
                adjacent_supervoxel_centers->push_back(neighbour_centroid_xyz);
            }

            // pcl::compute3DCentroid(*adjacent_supervoxel_centers, centroid);
            // pcl::getMinMax3D(*adjacent_supervoxel_centers, min, max);
            // PointCloudProcessor::mark_cluster(centroid, min, max, idx);

            adjacent_supervoxel_centers->width = adjacent_supervoxel_centers->size();
            adjacent_supervoxel_centers->height = 1;
            adjacent_supervoxel_centers->is_dense = true;
            adjacent_supervoxel_centers->header.frame_id = "rearAxle";
            Publisher.publish(adjacent_supervoxel_centers);

            //Move iterator forward to next label
            label_itr = supervoxel_adjacency.upper_bound(supervoxel_label);
            idx++;
        }
    }
}
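For reference, here is a stripped-down sketch of what I expect to work, using the same parameters and the same typedefs as above; if extract() reports zero supervoxels at this point, the adjacency multimap will necessarily stay empty as well. The printed sizes are just my own sanity checks, not part of the tutorial:

#include <pcl/segmentation/supervoxel_clustering.h>
#include <iostream>

void check_supervoxels(type_pclcloudptr& input)
{
    // Same resolutions as in voxel_clustering() above.
    pcl::SupervoxelClustering<pcl::PointXYZ> super(0.008f, 0.1f);
    super.setUseSingleCameraTransform(false);
    super.setInputCloud(input);

    std::map<uint32_t, pcl::Supervoxel<pcl::PointXYZ>::Ptr> supervoxel_clusters;
    super.extract(supervoxel_clusters);
    std::cout << "supervoxels found: " << supervoxel_clusters.size() << std::endl;

    // If the count above is 0, this multimap stays empty too.
    std::multimap<uint32_t, uint32_t> supervoxel_adjacency;
    super.getSupervoxelAdjacency(supervoxel_adjacency);
    std::cout << "adjacency entries: " << supervoxel_adjacency.size() << std::endl;
}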

Related

OpenCasCade BrepFeat_MakeCylindricalHole(); function

Currently I'm working on a project to drill cylindrical holes into a given shape.
To do that I used the BRepFeat_MakeCylindricalHole() function with Standard_EXPORT void PerformThruNext (const Standard_Real Radius, const Standard_Boolean WithControl = Standard_True);
because in this scenario I don't have the depth of the cylinder; using PerformThruNext, the hole is drilled from the top face to the bottom face.
The user will give the start and the end points, and the holes will be drilled using those.
1. When the start and end points have decimal fractional parts (gp_Pnt StartPnt(328.547,-4.325,97.412); gp_Pnt EndPnt(291.841,-7.586,101.651);), it doesn't drill the face correctly.
2. When the diameter gets to a higher value, the same thing happens.
Following is my code; any help would be highly appreciated. Thanks!
void COCCAPPDoc::OnBox2()
{
ofstream out;
out.open("C:/Users/Dell/Desktop/Blade/areaInit.txt");
CString str;
TopoDS_Shape bladeWithCore,DrilledShape,DrilledShape2;
/* IMPORT bladeWithCore.brep FILE */
Standard_CString strFile = "C:/Users/Dell/Desktop/Blade/bladeWithCore.brep";
BRep_Builder brepS;
BRepTools::Read(bladeWithCore,strFile,brepS);
TopoDS_Face Face_52;
Standard_CString strFace = "C:/Users/Dell/Desktop/Blade/Face_52.brep";
BRep_Builder brepF;
BRepTools::Read(Face_52,strFace,brepF);
BRepClass3d_SolidClassifier classify;
classify.Load(bladeWithCore);
classify.PerformInfinitePoint(1.0e-6);
TopAbs_State state = classify.State();
if(state==TopAbs_IN) bladeWithCore.Reverse();
DrilledShape = bladeWithCore;
gp_Vec v1(gp_Pnt( 330,11,108),gp_Pnt(291,4,109)); //FINDING VECTOR FROM START TO THE END POINT
gp_Pnt point(330,11,108);
gp_Pnt startpoint (330,11,108);
TopoDS_Vertex aV1 = BRepBuilderAPI_MakeVertex (startpoint);
BRepTools::Write(aV1,"C:/Users/Dell/Desktop/Blade/startpoint.brep");
/* gp_Vec v1(gp_Pnt( 330.208114,11.203318,108.480255),gp_Pnt(291.103558,4.265285,109.432663));
gp_Pnt point(330.208114,11.203318,108.480255);*/
gp_Dir d1(v1); //GETTING DIRECTION USING FOUND VECTOR
gp_Vec UnitVector(d1); //FINDING UNITVECTOR
gp_Dir dir_line(0,0,1);
gp_Vec UnitVector_line(dir_line);
gp_Pnt minPoint;
Standard_Real diameter = 0.1;
int noOfInstances =3;
int gap = -10;
for(int i=0; i<noOfInstances; i++)
{
point.Translate(UnitVector);
gp_Pnt newPoint = point;
newPoint.Translate(UnitVector_line*10);
BRepBuilderAPI_MakeEdge LINE(newPoint, point);
BRepBuilderAPI_MakeWire lineWire(LINE);
TopoDS_Wire wire = TopoDS::Wire(lineWire);
TopoDS_Shape lineShape = TopoDS_Shape(wire);
BRepTools::Write(lineShape,"C:/Users/Dell/Desktop/Blade/linlineShapee.brep");
intersection( bladeWithCore,lineShape, point, minPoint);
TopoDS_Vertex checkPoint = BRepBuilderAPI_MakeVertex (minPoint);
BRepTools::Write(checkPoint,"C:/Users/Dell/Desktop/Blade/checkPoint.brep");
gp_Ax1 location =gp_Ax1(minPoint,gp_Dir(0,0,1));
BRepFeat_MakeCylindricalHole drilled;
drilled.Init(DrilledShape,location);
drilled.PerformThruNext(diameter/2.0);
BRepFeat_Status status = drilled.Status();
if(status==BRepFeat_NoError)
{
DrilledShape = drilled.Shape();
BRepTools::Write(DrilledShape,"C:/Users/Dell/Desktop/Blade/bladeWithCoreDrilled.brep");
}
}
Handle(AIS_InteractiveObject) obj3 = (new AIS_Shape(DrilledShape));
myAISContext->Display(obj3);
COCCAPPDoc:: Fit();
}
void COCCAPPDoc::intersection(TopoDS_Shape bladeWithCore,TopoDS_Shape lineShape,gp_Pnt point,gp_Pnt& minPoint)
{
BRepExtrema_DistShapeShape interSection( bladeWithCore, lineShape, Extrema_ExtFlag_MIN, Extrema_ExtAlgo_Grad);
interSection.Perform();
Standard_Integer Solutions = interSection.NbSolution();
CArray<double,double> disArray;
int minindex = 1;
for(int i = 1; i <= Solutions; i++)
{
double distance = point.Distance((interSection.PointOnShape1(i)));
disArray.Add(distance);
}
for(int i = 1; i < Solutions; i++)
{
if(disArray[0] > disArray[i])
{
disArray[0] = disArray[i];
minindex = i;
}
}
minPoint = interSection.PointOnShape1(minindex);
TopoDS_Vertex aV = BRepBuilderAPI_MakeVertex (minPoint);
BRepTools::Write(aV,"C:/Users/Dell/Desktop/Blade/minPoint.brep");
}
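For clarity, this is a minimal sketch of how I understand the drilling step in isolation; StartPnt and EndPnt are example inputs here, and the hole axis is built from them rather than from the fixed (0,0,1) direction used in the code above:

// Sketch only: the hole axis is taken from the user-given start/end points.
gp_Pnt StartPnt(328.547, -4.325, 97.412);
gp_Pnt EndPnt(291.841, -7.586, 101.651);
gp_Dir holeDir(gp_Vec(StartPnt, EndPnt));   // direction from start to end
gp_Ax1 holeAxis(StartPnt, holeDir);         // axis of the cylindrical hole
Standard_Real radius = diameter / 2.0;      // diameter as above

BRepFeat_MakeCylindricalHole drill;
drill.Init(DrilledShape, holeAxis);
drill.PerformThruNext(radius);              // drill through to the next face
if (drill.Status() == BRepFeat_NoError)
    DrilledShape = drill.Shape();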

vtkResliceImageViewer get bounds image in view

Situation:
I'm using vtkResliceImageViewer to display the three MPR views, based on this example.
So I can rotate the cursors to generate a new view on all three screens.
Question:
I want to know the bounds of my image on the viewer, even after rotating some of the cursors.
Observation:
I tried to get the values from the imageActor and ask my renderer for the positions, but this works only when I have no interaction from the other cursors.
Example for axial:
void UpdatePointWordToViewer(vtkRenderer* rend, double p[4])
{
rend->SetWorldPoint(p);
rend->WorldToDisplay();
rend->GetDisplayPoint(p);
}
void UpdateBoxAxial()
{
auto bounds = pMPR->GetViewer(2)->GetImageActor()->GetBounds();
auto ren = pMPR->GetViewer(2)->GetRenderer();
auto size = pMPR->GetViewer(2)->GetInteractor()->GetSize();
double pIni1[]{ 0,0,0,1 };
double pIni2[]{ 0,0,0,1 };
double pIni3[]{ 0,0,0,1 };
double pIni4[]{ 0,0,0,1 };
double pConvert1[]{ 0,0,0 };
double pConvert2[]{ 0,0,0 };
//check the bounds
pIni1[0] = bounds[0];
pIni1[1] = bounds[3];
pIni1[2] = bounds[4];
pIni2[0] = bounds[1];
pIni2[1] = bounds[3];
pIni2[2] = bounds[4];
pIni3[0] = bounds[0];
pIni3[1] = bounds[2];
pIni3[2] = bounds[4];
pIni4[0] = bounds[1];
pIni4[1] = bounds[2];
pIni4[2] = bounds[4];
//convert the points for viewer coordinates
UpdatePointWordToViewer(ren, pIni1);
UpdatePointWordToViewer(ren, pIni2);
UpdatePointWordToViewer(ren, pIni3);
UpdatePointWordToViewer(ren, pIni4);
//P1
if (pIni1[0] < pIni3[0])
pConvert1[0] = pIni1[0];
else
pConvert1[0] = pIni3[0];
if (pIni3[1] < pIni4[1])
pConvert1[1] = pIni3[1];
else
pConvert1[1] = pIni4[1];
//P2
if (pIni2[0] > pIni4[0])
pConvert2[0] = pIni2[0];
else
pConvert2[0] = pIni4[0];
if (pIni1[1] > pIni2[1])
pConvert2[1] = pIni1[1];
else
pConvert2[1] = pIni2[1];
}
So I know the minimum point (bottom-left) and the maximum point (top-right).
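For reference, this is a sketch of the same conversion applied to all eight corners of the actor bounds (same pMPR accessors and the UpdatePointWordToViewer helper as above), keeping the min/max in display coordinates instead of picking four corners by hand:

void UpdateBoxAxialAllCorners()   // hypothetical variant of UpdateBoxAxial above
{
    double bounds[6];
    pMPR->GetViewer(2)->GetImageActor()->GetBounds(bounds);
    auto ren = pMPR->GetViewer(2)->GetRenderer();
    double minX = 1e30, minY = 1e30, maxX = -1e30, maxY = -1e30;
    for (int ix = 0; ix < 2; ++ix)
        for (int iy = 0; iy < 2; ++iy)
            for (int iz = 0; iz < 2; ++iz)
            {
                double p[4] = { bounds[ix], bounds[2 + iy], bounds[4 + iz], 1.0 };
                UpdatePointWordToViewer(ren, p);   // world -> display, helper from above
                if (p[0] < minX) minX = p[0];
                if (p[1] < minY) minY = p[1];
                if (p[0] > maxX) maxX = p[0];
                if (p[1] > maxY) maxY = p[1];
            }
    // (minX, minY) is the bottom-left and (maxX, maxY) the top-right point in viewer coordinates.
}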

Ray-triangle intersection algorithm not intersecting (C++)

I've been trying to implement the Moller-Trumbore ray-triangle intersection algorithm in my raytracing code. The code is supposed to read in a mesh and light sources, fire off rays from the light source, and return the triangle from the mesh which each ray intersects. Here is my implementation of the algorithm:
//Moller-Trumbore intersection algorithm
void getFaceIntersect(modelStruct m, ray r, hitFaceStruct& hitFaces)
{
// Constant thoughout loop
point origin = r.p0;
point direction = r.u;
hitFaces.isHit = false;
for (int i = 0; i < m.faces; i++)
{
// Get face vertices
point v1 = m.vertList[m.faceList[i].v1];
point v2 = m.vertList[m.faceList[i].v2];
point v3 = m.vertList[m.faceList[i].v3];
// Get two edges
point edge1 = v2 - v1;
point edge2 = v3 - v1;
// Get p
point p = direction.cross(direction, edge2);
// Use p to find determinant
double det = p.dot(edge1, p);
// If the determinant is about 0, the ray lies in the plane of the triangle
if (abs(det) < 0.00000000001)
{
continue;
}
double inverseDet = 1 / det;
point v1ToOrigin = (origin - v1);
double u = v1ToOrigin.dot(v1ToOrigin, p) * inverseDet;
// If u is not between 0 and 1, no hit
if (u < 0 || u > 1)
{
continue;
}
// Used for calculating v
point q = v1ToOrigin.cross(v1ToOrigin, edge1);
double v = direction.dot(direction, q) * inverseDet;
if (v < 0 || (u + v) > 1)
{
continue;
}
double t = q.dot(edge2, q) * inverseDet;
// gets closest face
if (t < abs(hitFaces.s)) {
hitFaceStruct goodStruct = hitFaceStruct();
goodStruct.face = i;
goodStruct.hitPoint = p;
goodStruct.isHit = true;
goodStruct.s = t;
hitFaces = goodStruct;
break;
}
}
}
The relevant code for hitFaceStruct and modelStruct is as follows:
typedef struct _hitFaceStruct
{
int face; // the index of the sphere in question in the list of faces
float s; // the distance from the ray that hit it
bool isHit;
point hitPoint;
} hitFaceStruct;
typedef struct _modelStruct {
char *fileName;
float scale;
float rot_x, rot_y, rot_z;
float x, y, z;
float r_amb, g_amb, b_amb;
float r_dif, g_dif, b_dif;
float r_spec, g_spec, b_spec;
float k_amb, k_dif, k_spec, k_reflective, k_refractive;
float spec_exp, index_refraction;
int verts, faces, norms = 0; // Number of vertices, faces, normals, and spheres in the system
point *vertList, *normList; // Vertex and Normal Lists
faceStruct *faceList; // Face List
} modelStruct;
Whenever I shoot a ray, the values of u or v in the algorithm code always come out to a large negative number, rather than the expected small, positive one. The direction vector of the ray is normalized before I pass it on to the intersection code, and I'm positive I'm firing rays that would normally hit the mesh. Can anyone please help me spot my error here?
Thanks!
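For comparison, here is a minimal self-contained sketch of the algorithm as I understand it, written with a small hypothetical vec3 type instead of my point class, so the operand order of every dot and cross product is explicit:

#include <cmath>

struct vec3 { double x, y, z; };
static vec3   sub  (vec3 a, vec3 b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static double dot  (vec3 a, vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static vec3   cross(vec3 a, vec3 b) { return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x }; }

// Returns true and fills t, u, v when the ray origin + t*dir hits triangle (v1, v2, v3).
bool rayTriangle(vec3 origin, vec3 dir, vec3 v1, vec3 v2, vec3 v3,
                 double& t, double& u, double& v)
{
    const double EPS = 1e-9;
    vec3 edge1 = sub(v2, v1);
    vec3 edge2 = sub(v3, v1);
    vec3 p = cross(dir, edge2);              // p = dir x edge2
    double det = dot(edge1, p);              // det = edge1 . p
    if (std::fabs(det) < EPS) return false;  // ray parallel to the triangle plane
    double invDet = 1.0 / det;
    vec3 tvec = sub(origin, v1);             // ray origin relative to the first vertex
    u = dot(tvec, p) * invDet;
    if (u < 0.0 || u > 1.0) return false;
    vec3 q = cross(tvec, edge1);             // q = tvec x edge1
    v = dot(dir, q) * invDet;
    if (v < 0.0 || u + v > 1.0) return false;
    t = dot(edge2, q) * invDet;              // distance along the ray
    return t > EPS;                          // the hit point itself is origin + t*dir
}

In my own code above, point::dot and point::cross are member functions called with two extra arguments, so one thing I still want to rule out is that the object they are called on is also used as an operand.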

clustering image segments in opencv

I am working on motion detection with non-static camera using opencv.
I am using a pretty basic background subtraction and thresholding approach to get a broad sense of all that's moving in a sample video. After thresholding, I enlist all separable "patches" of white pixels, store them as independent components and color them randomly with red, green or blue. The image below shows this for a football video where all such components are visible.
I create rectangles over these detected components and I get this image:
So I can see the challenge here. I want to cluster all the "similar" and close-by components into a single entity so that the rectangles in the output image show a player moving as a whole (and not his independent limbs). I tried doing K-means clustering but since ideally I would not know the number of moving entities, I could not make any progress.
Please guide me on how I can do this. Thanks
This problem can be solved almost perfectly by the DBSCAN clustering algorithm. Below I provide the implementation and the result image. A gray blob means an outlier or noise according to DBSCAN. I simply used the boxes as input data. Initially, the box centers were used for the distance function; however, for boxes, that is insufficient to characterize distance correctly. So the current distance function uses the minimum distance over all 8 corners of the two boxes.
#include "opencv2/opencv.hpp"
using namespace cv;
#include <map>
#include <sstream>
template <class T>
inline std::string to_string (const T& t)
{
std::stringstream ss;
ss << t;
return ss.str();
}
class DbScan
{
public:
std::map<int, int> labels;
vector<Rect>& data;
int C;
double eps;
int mnpts;
double* dp;
//memoization table in case of complex dist functions
#define DP(i,j) dp[(data.size()*i)+j]
DbScan(vector<Rect>& _data,double _eps,int _mnpts):data(_data)
{
C=-1;
for(int i=0;i<data.size();i++)
{
labels[i]=-99;
}
eps=_eps;
mnpts=_mnpts;
}
void run()
{
dp = new double[data.size()*data.size()];
for(int i=0;i<data.size();i++)
{
for(int j=0;j<data.size();j++)
{
if(i==j)
DP(i,j)=0;
else
DP(i,j)=-1;
}
}
for(int i=0;i<data.size();i++)
{
if(!isVisited(i))
{
vector<int> neighbours = regionQuery(i);
if(neighbours.size()<mnpts)
{
labels[i]=-1;//noise
}else
{
C++;
expandCluster(i,neighbours);
}
}
}
delete [] dp;
}
void expandCluster(int p,vector<int> neighbours)
{
labels[p]=C;
for(int i=0;i<neighbours.size();i++)
{
if(!isVisited(neighbours[i]))
{
labels[neighbours[i]]=C;
vector<int> neighbours_p = regionQuery(neighbours[i]);
if (neighbours_p.size() >= mnpts)
{
expandCluster(neighbours[i],neighbours_p);
}
}
}
}
bool isVisited(int i)
{
return labels[i]!=-99;
}
vector<int> regionQuery(int p)
{
vector<int> res;
for(int i=0;i<data.size();i++)
{
if(distanceFunc(p,i)<=eps)
{
res.push_back(i);
}
}
return res;
}
double dist2d(Point2d a,Point2d b)
{
return sqrt(pow(a.x-b.x,2) + pow(a.y-b.y,2));
}
double distanceFunc(int ai,int bi)
{
if(DP(ai,bi)!=-1)
return DP(ai,bi);
Rect a = data[ai];
Rect b = data[bi];
/*
Point2d cena= Point2d(a.x+a.width/2,
a.y+a.height/2);
Point2d cenb = Point2d(b.x+b.width/2,
b.y+b.height/2);
double dist = sqrt(pow(cena.x-cenb.x,2) + pow(cena.y-cenb.y,2));
DP(ai,bi)=dist;
DP(bi,ai)=dist;*/
Point2d tla =Point2d(a.x,a.y);
Point2d tra =Point2d(a.x+a.width,a.y);
Point2d bla =Point2d(a.x,a.y+a.height);
Point2d bra =Point2d(a.x+a.width,a.y+a.height);
Point2d tlb =Point2d(b.x,b.y);
Point2d trb =Point2d(b.x+b.width,b.y);
Point2d blb =Point2d(b.x,b.y+b.height);
Point2d brb =Point2d(b.x+b.width,b.y+b.height);
double minDist = 9999999;
minDist = min(minDist,dist2d(tla,tlb));
minDist = min(minDist,dist2d(tla,trb));
minDist = min(minDist,dist2d(tla,blb));
minDist = min(minDist,dist2d(tla,brb));
minDist = min(minDist,dist2d(tra,tlb));
minDist = min(minDist,dist2d(tra,trb));
minDist = min(minDist,dist2d(tra,blb));
minDist = min(minDist,dist2d(tra,brb));
minDist = min(minDist,dist2d(bla,tlb));
minDist = min(minDist,dist2d(bla,trb));
minDist = min(minDist,dist2d(bla,blb));
minDist = min(minDist,dist2d(bla,brb));
minDist = min(minDist,dist2d(bra,tlb));
minDist = min(minDist,dist2d(bra,trb));
minDist = min(minDist,dist2d(bra,blb));
minDist = min(minDist,dist2d(bra,brb));
DP(ai,bi)=minDist;
DP(bi,ai)=minDist;
return DP(ai,bi);
}
vector<vector<Rect> > getGroups()
{
vector<vector<Rect> > ret;
for(int i=0;i<=C;i++)
{
ret.push_back(vector<Rect>());
for(int j=0;j<data.size();j++)
{
if(labels[j]==i)
{
ret[ret.size()-1].push_back(data[j]);
}
}
}
return ret;
}
};
cv::Scalar HSVtoRGBcvScalar(int H, int S, int V) {
int bH = H; // H component
int bS = S; // S component
int bV = V; // V component
double fH, fS, fV;
double fR, fG, fB;
const double double_TO_BYTE = 255.0f;
const double BYTE_TO_double = 1.0f / double_TO_BYTE;
// Convert from 8-bit integers to doubles
fH = (double)bH * BYTE_TO_double;
fS = (double)bS * BYTE_TO_double;
fV = (double)bV * BYTE_TO_double;
// Convert from HSV to RGB, using double ranges 0.0 to 1.0
int iI;
double fI, fF, p, q, t;
if( bS == 0 ) {
// achromatic (grey)
fR = fG = fB = fV;
}
else {
// If Hue == 1.0, then wrap it around the circle to 0.0
if (fH>= 1.0f)
fH = 0.0f;
fH *= 6.0; // sector 0 to 5
fI = floor( fH ); // integer part of h (0,1,2,3,4,5 or 6)
iI = (int) fH; // " " " "
fF = fH - fI; // factorial part of h (0 to 1)
p = fV * ( 1.0f - fS );
q = fV * ( 1.0f - fS * fF );
t = fV * ( 1.0f - fS * ( 1.0f - fF ) );
switch( iI ) {
case 0:
fR = fV;
fG = t;
fB = p;
break;
case 1:
fR = q;
fG = fV;
fB = p;
break;
case 2:
fR = p;
fG = fV;
fB = t;
break;
case 3:
fR = p;
fG = q;
fB = fV;
break;
case 4:
fR = t;
fG = p;
fB = fV;
break;
default: // case 5 (or 6):
fR = fV;
fG = p;
fB = q;
break;
}
}
// Convert from doubles to 8-bit integers
int bR = (int)(fR * double_TO_BYTE);
int bG = (int)(fG * double_TO_BYTE);
int bB = (int)(fB * double_TO_BYTE);
// Clip the values to make sure it fits within the 8bits.
if (bR > 255)
bR = 255;
if (bR < 0)
bR = 0;
if (bG >255)
bG = 255;
if (bG < 0)
bG = 0;
if (bB > 255)
bB = 255;
if (bB < 0)
bB = 0;
// Set the RGB cvScalar with G B R, you can use this values as you want too..
return cv::Scalar(bB,bG,bR); // R component
}
int main(int argc,char** argv )
{
Mat im = imread("c:/data/football.png",0);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
findContours(im.clone(), contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Rect> boxes;
for(size_t i = 0; i < contours.size(); i++)
{
Rect r = boundingRect(contours[i]);
boxes.push_back(r);
}
DbScan dbscan(boxes,20,2);
dbscan.run();
//done, perform display
Mat grouped = Mat::zeros(im.size(),CV_8UC3);
vector<Scalar> colors;
RNG rng(3);
for(int i=0;i<=dbscan.C;i++)
{
colors.push_back(HSVtoRGBcvScalar(rng(255),255,255));
}
for(int i=0;i<dbscan.data.size();i++)
{
Scalar color;
if(dbscan.labels[i]==-1)
{
color=Scalar(128,128,128);
}else
{
int label=dbscan.labels[i];
color=colors[label];
}
putText(grouped,to_string(dbscan.labels[i]),dbscan.data[i].tl(), FONT_HERSHEY_COMPLEX,.5,color,1);
drawContours(grouped,contours,i,color,-1);
}
imshow("grouped",grouped);
imwrite("c:/data/grouped.jpg",grouped);
waitKey(0);
}
I agree with Sebastian Schmitz: you probably shouldn't be looking for clustering.
Don't expect an uninformed method such as k-means to work magic for you, in particular one that is as crude a heuristic as k-means and that lives in an idealized mathematical world rather than in messy, real data.
You have a good understanding of what you want. Try to put this intuition into code. In your case, you seem to be looking for connected components.
Consider downsampling your image to a lower resolution, then rerunning the same process! Or running it on the lower resolution right away (to reduce compression artifacts, and improve performance). Or adding filters, such as blurring.
I'd expect best and fastest results by looking at connected components in the downsampled/filtered image.
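A minimal sketch of that idea, assuming OpenCV 3+ (where connectedComponentsWithStats is available) and a binary foreground mask produced by your thresholding step:

#include "opencv2/opencv.hpp"
using namespace cv;

int main()
{
    Mat mask = imread("mask.png", 0);                         // assumed binary mask from thresholding
    Mat down, labels, stats, centroids;
    resize(mask, down, Size(), 0.5, 0.5, INTER_NEAREST);      // downsample
    medianBlur(down, down, 3);                                // light filtering against specks
    int n = connectedComponentsWithStats(down, labels, stats, centroids, 8);
    for (int i = 1; i < n; i++)                               // label 0 is the background
    {
        Rect r(stats.at<int>(i, CC_STAT_LEFT),  stats.at<int>(i, CC_STAT_TOP),
               stats.at<int>(i, CC_STAT_WIDTH), stats.at<int>(i, CC_STAT_HEIGHT));
        // scale back to full resolution and draw the component's box
        rectangle(mask, Rect(r.x * 2, r.y * 2, r.width * 2, r.height * 2), Scalar(255), 2);
    }
    imshow("components", mask);
    waitKey(0);
    return 0;
}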
I am not entirely sure if you are really looking for clustering (in the Data Mining sense).
Clustering is used to group similar objects according to a distance function. In your case the distance function would only use the spatial qualities. Besides, in k-means clustering you have to specify a k, that you probably don't know beforehand.
It seems to me you just want to merge all rectangles whose borders are closer together than some predetermined threshold. So, as a first idea, try to merge all rectangles that are touching or that are closer together than half a player's height.
You probably want to include a size check to minimize the risk of merging two players into one.
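A rough sketch of that merge rule; the gap threshold is a placeholder you would tune (e.g. to roughly half a player's height in pixels), and the size check could be added inside the inner loop:

#include "opencv2/opencv.hpp"
#include <algorithm>
#include <cmath>
#include <vector>
using namespace cv;

// Gap in pixels between two rectangles (0 if they touch or overlap).
static double rectGap(const Rect& a, const Rect& b)
{
    int dx = std::max(0, std::max(a.x - (b.x + b.width),  b.x - (a.x + a.width)));
    int dy = std::max(0, std::max(a.y - (b.y + b.height), b.y - (a.y + a.height)));
    return std::sqrt(double(dx * dx + dy * dy));
}

// Repeatedly merge any two boxes whose gap is below maxGap until nothing changes.
static void mergeClose(std::vector<Rect>& boxes, double maxGap)
{
    bool merged = true;
    while (merged)
    {
        merged = false;
        for (size_t i = 0; i < boxes.size() && !merged; i++)
            for (size_t j = i + 1; j < boxes.size() && !merged; j++)
                if (rectGap(boxes[i], boxes[j]) <= maxGap)
                {
                    boxes[i] |= boxes[j];              // bounding box of the union
                    boxes.erase(boxes.begin() + j);
                    merged = true;
                }
    }
}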
Edit: If you really want to use a clustering algorithm use one that estimates the number of clusters for you.
I guess you can improve your original attempt by using morphological transformations. Take a look at http://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html#gsc.tab=0. Probably you can end up with a single closed blob for each entity after that, especially for the separate players as in your original image.
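For instance, a minimal closing operation on the thresholded mask (kernel size picked arbitrarily here) might already fuse a player's limbs into one blob before you extract contours:

#include "opencv2/opencv.hpp"
using namespace cv;

int main()
{
    Mat mask = imread("mask.png", 0);                          // assumed binary mask from thresholding
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(9, 9));
    Mat closed;
    morphologyEx(mask, closed, MORPH_CLOSE, kernel);           // closing: dilate, then erode
    imshow("closed", closed);                                  // run findContours / boundingRect on 'closed'
    waitKey(0);
    return 0;
}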

How to unify normal orientation

I've been trying to produce a mesh that has all face normals pointing outward.
To achieve this, I load a mesh from a *.ctm file, then walk over all
triangles to determine the normal using a cross product, and if the normal
is pointing in the negative z direction, I flip v1 and v2 (and thus the normal orientation).
After this is done I save the result to a *.ctm file and view it in Meshlab.
The result in Meshlab still shows normals pointing in both the positive and
negative z direction (as can be seen from the black triangles). Also, when viewing
the normals in Meshlab they really are pointing backwards.
Can anyone give me some advice on how to solve this?
The source code for the normalization part is:
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud, *cloud1);
for(std::vector<pcl::Vertices>::iterator it = meshFixed.polygons.begin(); it != meshFixed.polygons.end(); ++it)
{
alglib::real_2d_array v0;
double _v0[] = {cloud1->points[it->vertices[0]].x,cloud1->points[it->vertices[0]].y,cloud1->points[it->vertices[0]].z};
v0.setcontent(3,1,_v0); //3 rows, 1col
alglib::real_2d_array v1;
double _v1[] = {cloud1->points[it->vertices[1]].x,cloud1->points[it->vertices[1]].y,cloud1->points[it->vertices[1]].z};
v1.setcontent(3,1,_v1); //3 rows, 1col
alglib::real_2d_array v2;
double _v2[] = {cloud1->points[it->vertices[2]].x,cloud1->points[it->vertices[2]].y,cloud1->points[it->vertices[2]].z};
v2.setcontent(1,3,_v2); //3 rows, 1col
alglib::real_2d_array normal;
normal = cross(v1-v0,v2-v0);
//if z<0 change indices order v1->v2 and v2->v1
alglib::real_2d_array normalizedNormal;
if(normal[2][0]<0)
{
int index1,index2;
index1 = it->vertices[1];
index2 = it->vertices[2];
it->vertices[1] = index2;
it->vertices[2] = index1;
//make normal of length 1
double normalScaling = 1.0/sqrt(dot(normal,normal));
normal[0][0] = -1*normal[0][0];
normal[1][0] = -1*normal[1][0];
normal[2][0] = -1*normal[2][0];
normalizedNormal = normalScaling * normal;
}
else
{
//make normal of length 1
double normalScaling = 1.0/sqrt(dot(normal,normal));
normalizedNormal = normalScaling * normal;
}
//add to normal cloud
pcl::Normal pclNormalizedNormal;
pclNormalizedNormal.normal_x = normalizedNormal[0][0];
pclNormalizedNormal.normal_y = normalizedNormal[1][0];
pclNormalizedNormal.normal_z = normalizedNormal[2][0];
normalsFixed.push_back(pclNormalizedNormal);
}
The result from this code is:
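As a side note, the same per-face flip test can be written without alglib; this is just a sketch of the test restated with a hand-rolled cross product (same cloud1 and meshFixed.polygons as above), not a fix for the orientation problem itself:

// Sketch: flip the winding of any triangle whose normal has a negative z component.
for (std::vector<pcl::Vertices>::iterator it = meshFixed.polygons.begin(); it != meshFixed.polygons.end(); ++it)
{
    const pcl::PointXYZRGBA& a = cloud1->points[it->vertices[0]];
    const pcl::PointXYZRGBA& b = cloud1->points[it->vertices[1]];
    const pcl::PointXYZRGBA& c = cloud1->points[it->vertices[2]];
    float ux = b.x - a.x, uy = b.y - a.y;              // edge a->b (x, y components)
    float vx = c.x - a.x, vy = c.y - a.y;              // edge a->c (x, y components)
    float nz = ux * vy - uy * vx;                      // z component of (b-a) x (c-a)
    if (nz < 0.0f)
        std::swap(it->vertices[1], it->vertices[2]);   // std::swap from <utility>: flip winding
}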
I've found some code in the VCG library to orient the face and vertex normals.
After using this a large part of the mesh has correct face normals, but not all.
The new code:
// VCG library implementation
MyMesh m;
// Convert pcl::PolygonMesh to VCG MyMesh
m.Clear();
// Create temporary cloud in to have handy struct object
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud,*cloud1);
// Now convert the vertices to VCG MyMesh
int vertCount = cloud1->width*cloud1->height;
vcg::tri::Allocator<MyMesh>::AddVertices(m, vertCount);
for(unsigned int i=0;i<vertCount;++i)
m.vert[i].P()=vcg::Point3f(cloud1->points[i].x,cloud1->points[i].y,cloud1->points[i].z);
// Now convert the polygon indices to VCG MyMesh => make VCG faces..
int triCount = meshFixed.polygons.size();
if(triCount==1)
{
if(meshFixed.polygons[0].vertices[0]==0 && meshFixed.polygons[0].vertices[1]==0 && meshFixed.polygons[0].vertices[2]==0)
triCount=0;
}
Allocator<MyMesh>::AddFaces(m, triCount);
for(unsigned int i=0;i<triCount;++i)
{
m.face[i].V(0)=&m.vert[meshFixed.polygons[i].vertices[0]];
m.face[i].V(1)=&m.vert[meshFixed.polygons[i].vertices[1]];
m.face[i].V(2)=&m.vert[meshFixed.polygons[i].vertices[2]];
}
vcg::tri::UpdateBounding<MyMesh>::Box(m);
vcg::tri::UpdateNormal<MyMesh>::PerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
printf("Input mesh vn:%i fn:%i\n",m.VN(),m.FN());
// Start to flip all normals to outside
vcg::face::FFAdj<MyMesh>::FFAdj();
vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
bool oriented, orientable;
if ( vcg::tri::Clean<MyMesh>::CountNonManifoldEdgeFF(m)>0 ) {
std::cout << "Mesh has some not 2-manifold faces, Orientability requires manifoldness" << std::endl; // text
return; // can't continue, mesh can't be processed
}
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented,orientable);
vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
// now convert VCG back to pcl::PolygonMesh
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
cloud->is_dense = false;
cloud->width = vertCount;
cloud->height = 1;
cloud->points.resize (vertCount);
// Now fill the pointcloud of the mesh
for(int i=0; i<vertCount; i++)
{
cloud->points[i].x = m.vert[i].P()[0];
cloud->points[i].y = m.vert[i].P()[1];
cloud->points[i].z = m.vert[i].P()[2];
}
pcl::toROSMsg(*cloud,meshFixed.cloud);
std::vector<pcl::Vertices> polygons;
// Now fill the indices of the triangles/faces of the mesh
for(int i=0; i<triCount; i++)
{
pcl::Vertices vertices;
vertices.vertices.push_back(m.face[i].V(0)-&*m.vert.begin());
vertices.vertices.push_back(m.face[i].V(1)-&*m.vert.begin());
vertices.vertices.push_back(m.face[i].V(2)-&*m.vert.begin());
polygons.push_back(vertices);
}
meshFixed.polygons = polygons;
Which results in: (Meshlab still shows normals are facing both sides)
I finally solved the problem; I'm still using the VCG library. Starting from the new code above, I slightly updated the following section:
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented,orientable);
//vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
//vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
I then updated the vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh() function in clean.h. The update orients the first polygon of each group correctly; also, after swapping an edge, the normal of the face is recalculated and updated.
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
RequireFFAdjacency(m);
assert(&Oriented != &Orientable);
assert(m.face.back().FFp(0)); // This algorithms require FF topology initialized
Orientable = true;
Oriented = true;
tri::UpdateSelection<MeshType>::FaceClear(m);
std::stack<FacePointer> faces;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD() && !fi->IsS())
{
// each face put in the stack is selected (and oriented)
fi->SetS();
// New section of code to orient the initial face correctly
if(fi->N()[2]>0.0)
{
face::SwapEdge<FaceType,true>(*fi, 0);
face::ComputeNormal(*fi);
}
// End of new code section.
faces.push(&(*fi));
// empty the stack
while (!faces.empty())
{
FacePointer fp = faces.top();
faces.pop();
// make consistently oriented the adjacent faces
for (int j = 0; j < 3; j++)
{
//get one of the adjacent face
FacePointer fpaux = fp->FFp(j);
int iaux = fp->FFi(j);
if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
{
if (!CheckOrientation(*fpaux, iaux))
{
Oriented = false;
if (!fpaux->IsS())
{
face::SwapEdge<FaceType,true>(*fpaux, iaux);
// New line to update face normal
face::ComputeNormal(*fpaux);
// end of new section.
assert(CheckOrientation(*fpaux, iaux));
}
else
{
Orientable = false;
break;
}
}
// put the oriented face into the stack
if (!fpaux->IsS())
{
fpaux->SetS();
faces.push(fpaux);
}
}
}
}
}
if (!Orientable) break;
}
}
Besides that, I also updated the function bool CheckOrientation(FaceType &f, int z) to base the check on the normals' z-directions.
template <class FaceType>
bool CheckOrientation(FaceType &f, int z)
{
// Added next section to calculate the difference between normal z-directions
FaceType *original = f.FFp(z);
double nf2,ng2;
nf2=f.N()[2];
ng2=original->N()[2];
// End of additional section
if (IsBorder(f, z))
return true;
else
{
FaceType *g = f.FFp(z);
int gi = f.FFi(z);
// changed if statement from: if (f.V0(z) == g->V1(gi))
if (nf2/abs(nf2)==ng2/abs(ng2))
return true;
else
return false;
}
}
The result is now what I expect and desire from the algorithm.