Using a custom C++ extension with Python

I have a problem with this code:
#include "PolyVoxCore/MaterialDensityPair.h"
#include "PolyVoxCore/CubicSurfaceExtractorWithNormals.h"
#include "PolyVoxCore/SurfaceMesh.h"
#include "PolyVoxCore/SimpleVolume.h"
#include "geomVertexData.h"
#include "geomVertexWriter.h"
#include "geomTriangles.h"
#include "py_panda.h"
#include "pandaNode.h"
#include "geomNode.h"
//Use the PolyVox namespace
using namespace PolyVox;
using namespace std;
static void createSphereInVolume(SimpleVolume<MaterialDensityPair44>& volData, float fRadius)
{
    //This vector holds the position of the center of the volume
    Vector3DFloat v3dVolCenter(volData.getWidth() / 2, volData.getHeight() / 2, volData.getDepth() / 2);
    //These three nested loops iterate over every voxel in the volume
    //(note: z runs over the depth and x over the width; the original had these swapped,
    //which only works because the volume here happens to be cubic)
    for (int z = 0; z < volData.getDepth(); z++)
    {
        for (int y = 0; y < volData.getHeight(); y++)
        {
            for (int x = 0; x < volData.getWidth(); x++)
            {
                //Store our current position as a vector...
                Vector3DFloat v3dCurrentPos(x, y, z);
                //And compute how far the current position is from the center of the volume
                float fDistToCenter = (v3dCurrentPos - v3dVolCenter).length();
                uint8_t uDensity = 0;
                uint8_t uMaterial = 0;
                //If the current voxel is less than 'radius' units from the center, we make it solid
                if (fDistToCenter <= fRadius)
                {
                    //Our new density value
                    uDensity = VoxelTypeTraits<MaterialDensityPair44>::maxDensity();
                    uMaterial = 1;
                }
                //Get the old voxel
                MaterialDensityPair44 voxel = volData.getVoxelAt(x, y, z);
                //Modify the density and material
                voxel.setDensity(uDensity);
                voxel.setMaterial(uMaterial);
                //Write the voxel value back into the volume
                volData.setVoxelAt(x, y, z, voxel);
            }
        }
    }
}
static PyObject* CreateSphere(PyObject* self, PyObject* args)
{
    PyObject* pandaNodeWrapper;
    //"O" (letter O), not "0" (zero): a zero is not a valid format character
    if (!PyArg_ParseTuple(args, "O", &pandaNodeWrapper))
        return NULL;
    //Create an empty volume and then place a sphere in it
    SimpleVolume<MaterialDensityPair44> volData(PolyVox::Region(Vector3DInt32(0, 0, 0), Vector3DInt32(63, 63, 63)));
    createSphereInVolume(volData, 30);
    //Extract the surface
    SurfaceMesh<PositionMaterialNormal> mesh;
    CubicSurfaceExtractorWithNormals<SimpleVolume, MaterialDensityPair44> surfaceExtractor(&volData, volData.getEnclosingRegion(), &mesh);
    surfaceExtractor.execute();
    const vector<PositionMaterialNormal>& vertices = mesh.getVertices();
    const vector<uint32_t>& indices = mesh.getIndices();
    CPT(GeomVertexFormat) vertexFormat = GeomVertexFormat::get_v3n3();
    PT(GeomVertexData) vertexData = new GeomVertexData("ProceduralMesh", vertexFormat, GeomEnums::UH_static);
    GeomVertexWriter vertex, normal;
    vertex = GeomVertexWriter(vertexData, "vertex");
    normal = GeomVertexWriter(vertexData, "normal");
    for (vector<PositionMaterialNormal>::const_iterator it = vertices.begin(); it != vertices.end(); ++it)
    {
        vertex.add_data3f(it->position.getX(), it->position.getY(), it->position.getZ());
        //was normal.getX(), position.getY(), position.getZ() -- a copy/paste slip
        normal.add_data3f(it->normal.getX(), it->normal.getY(), it->normal.getZ());
    }
    PT(GeomTriangles) chunk = new GeomTriangles(GeomEnums::UH_static);
    for (size_t i = 0; i < indices.size(); ++i)
    {
        chunk->add_vertex(indices[i]);
    }
    PT(Geom) geom = new Geom(vertexData);
    geom->add_primitive(chunk);
    PT(GeomNode) geomNode = new GeomNode("chunk");
    //the Geom was never attached to the GeomNode in the original
    geomNode->add_geom(geom);
    PandaNode* node = (PandaNode*)((Dtool_PyInstDef*)pandaNodeWrapper)->_ptr_to_object;
    node->add_child(geomNode);
    Py_RETURN_NONE;
}
static PyMethodDef module_methods[] = {
    {"CreateSphere", CreateSphere, METH_VARARGS, "CreateSphere"},
    {NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC
initpolyvox(void)
{
    (void) Py_InitModule("polyvox", module_methods);
}
It's a C++ extension module I'm trying to write. I compiled and installed it with the usual setup.py method, but when I import it and run it, Python says "SystemError: dynamic module not initialized properly".
I know this error usually means the module name doesn't match the init function name, but as you can see from the code, that isn't the case here. So what could be the problem?
I'm on Windows 7 with Python 2.7.2. The module is compiled with VS2008, but it links against some .lib files I had to build with VS2010, since they require some modern C++ features.
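One way to narrow this down is to strip the module to a bare skeleton and see whether that imports. Below is a minimal sketch under that assumption: same module name, same init function, no PolyVox or Panda3D dependencies. If this imports cleanly but the full build does not, the failure is most likely coming from the VS2010-built .lib files (mixing CRTs across VS2008 and VS2010 in one extension is a classic way to break module init on Windows), not from the naming.
#include <Python.h>
static PyObject* Ping(PyObject* self, PyObject* args)
{
    Py_RETURN_NONE;
}
static PyMethodDef minimal_methods[] = {
    {"Ping", Ping, METH_VARARGS, "Ping"},
    {NULL, NULL, 0, NULL}
};
//For "import polyvox" to work on Python 2, the file must be named
//polyvox.pyd and the init function must be named initpolyvox.
PyMODINIT_FUNC
initpolyvox(void)
{
    (void) Py_InitModule("polyvox", minimal_methods);
}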

Related

ScalableTSDFVolume Integrate from TUM-RGBD Dataset

I am using Open3D 0.15 and C++11 on Ubuntu 18.04.
The main function I'm interested in is the ScalableTSDFVolume Integrate() function, used with the TUM RGBD dataset (the xyz set, to be exact) and based on the IntegrateRGBD example from the Open3D repo.
Since the TUM-RGBD dataset does not provide an association file matching the RGBD images to the trajectory info, I've written a small piece of code that matches the timestamps of the TUM image data against the trajectory information, and converts the 7-dimensional [x y z rx ry rz rw] trajectory entries into an Eigen::Matrix4d, using the same equation that Open3D's FileTUM.cpp uses:
do
{
    // Read the timestamp first
    gt >> p_gt.timestamp;
    double poseArr[7];
    // push the remaining 7 numbers into poseArr
    for (int i = 0; i < 7; i++)
        gt >> poseArr[i];
    // copy-paste of the TUM trajectory reader
    Eigen::Matrix4d transform;
    transform.setIdentity();
    transform.topLeftCorner<3, 3>() =
        Eigen::Quaterniond(poseArr[6], poseArr[3], poseArr[4], poseArr[5]).toRotationMatrix();
    transform.topRightCorner<3, 1>() = Eigen::Vector3d(poseArr[0], poseArr[1], poseArr[2]);
    p_gt.pose = transform.inverse();
    gtF.push_back(p_gt);
} while (std::getline(gt, line));
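One thing worth double-checking here (a sketch, not part of the original reader): TUM stores the quaternion as qx qy qz qw, while Eigen's Quaterniond constructor takes (w, x, y, z). With the timestamp consumed separately, poseArr = {tx, ty, tz, qx, qy, qz, qw}, so poseArr[6] really is w, but it's cheap to assert the quaternion is unit-length to catch a mis-parsed line:
// needs <cassert> and <cmath>
Eigen::Quaterniond q(poseArr[6], poseArr[3], poseArr[4], poseArr[5]);
assert(std::abs(q.norm() - 1.0) < 1e-3 && "groundtruth quaternion is not normalised");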
The code runs fine, but the issue appears when I integrate multiple frames into the same volume and extract its pointcloud or mesh.
I can tell that the RGBD information is being fed into the program correctly, since extracting the mesh at the very first frame looks right:
first frame mesh extraction
But there is a significant artifact when I extract the mesh after more frames have been integrated, like this:
30 frames mesh extraction
From previous experience, this probably has to do with the transformation matrices not being in the correct axis convention. If anyone has used the TUM dataset with Open3D and run into the same problem, I would greatly appreciate any info on this.
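For reference, Open3D's Integrate() takes an extrinsic, i.e. a world-to-camera matrix, while the TUM groundtruth stores camera-to-world poses, so exactly one inversion should happen between the file and the call. A quick hedged experiment (variable names taken from the code below) is to toggle that inversion in one place and see which variant removes the ghosting:
// as read from the groundtruth file (camera-to-world)
Eigen::Matrix4d cam_to_world = transform;
// what Integrate() expects (world-to-camera)
Eigen::Matrix4d extrinsic = cam_to_world.inverse();
volume.Integrate(*rgbd, camera_intrinsic, extrinsic);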
Edit:
For reference, this is the modified code I'm using for the reconstruction.
int main(int argc, char *argv[]) {
    using namespace open3d;
    std::string filebase("/home/geometry/Documents/rgbd_dataset_freiburg1_xyz");
    VirtualSensor::CameraParameters kinect{525.0, 525.0, 319.5, 239.5, 5000};
    VirtualSensor::CameraParameters camPar = kinect;
    VirtualSensor v1(filebase, camPar);
    bool save_pointcloud = true;
    bool save_mesh = true;
    bool save_voxel = false;
    int every_k_frames = 50;
    double length = 4.0;
    double uLength = 6.0;
    int resolution = 512;
    double sdf_trunc_percentage = 0.01;
    int verbose = 2;
    utility::SetVerbosityLevel((utility::VerbosityLevel)verbose);
    auto camera_intrinsic = camera::PinholeCameraIntrinsic(640, 480, 525.0, 525.0, 319.5, 239.5);
    int index = 0;
    int save_index = 0;
    int pairSize = 30;
    // initialise TSDF
    pipelines::integration::ScalableTSDFVolume volume(
            length / (double)resolution, length * sdf_trunc_percentage,
            pipelines::integration::TSDFVolumeColorType::RGB8);
    //pipelines::integration::UniformTSDFVolume uVolume(uLength, resolution, uLength*sdf_trunc_percentage, pipelines::integration::TSDFVolumeColorType::RGB8);
    utility::FPSTimer timer("Process RGBD stream", pairSize);
    geometry::Image depth, color;
    // start loop (a stray closing brace in the original cut the loop short here;
    // rgbd and i are used below, so the loop must extend to the end of main)
    for (int i = 0; i < pairSize; i++) {
        utility::LogInfo("Processing frame {:d} ...", index);
        io::ReadImage(v1.GetDepthPath(i), depth);
        io::ReadImage(v1.GetColorPath(i), color);
        auto rgbd = geometry::RGBDImage::CreateFromColorAndDepth(
                color, depth, 5000.0, 6.0, false);
        if (index == 0 ||
            (every_k_frames > 0 && index % every_k_frames == 0))
            volume.Reset();
        volume.Integrate(*rgbd,
                         camera_intrinsic,    // intrinsic never changes
                         v1.GetCounterGT(i)); // get the groundtruth pose from my class
        index++;
        // saving mesh/pc logic
        if (index == pairSize ||
            (every_k_frames > 0 && index % every_k_frames == 0)) {
            utility::LogInfo("Saving fragment {:d} ...", save_index);
            std::string save_index_str = std::to_string(save_index);
            if (save_pointcloud) {
                utility::LogInfo("Saving pointcloud {:d} ...", save_index);
                auto pcd = volume.ExtractPointCloud();
                io::WritePointCloud("pointcloud_" + save_index_str + ".ply", *pcd);
            }
            if (save_mesh) {
                utility::LogInfo("Saving mesh {:d} ...", save_index);
                auto mesh = volume.ExtractTriangleMesh();
                io::WriteTriangleMesh("mesh_" + save_index_str + ".ply", *mesh);
            }
            if (save_voxel) {
                utility::LogInfo("Saving voxel {:d} ...", save_index);
                auto voxel = volume.ExtractVoxelPointCloud();
                io::WritePointCloud("voxel_" + save_index_str + ".ply", *voxel);
            }
            save_index++;
        }
        timer.Signal();
    }
    return 0;
}

GEOS (JTS Topology Suite) BufferOp / Offset Curve producing additional points

I am using GEOS (the C/C++ port of the JTS Topology Suite) to produce an offset curve of a linestring.
I have successfully produced the offset curve; however, in some cases (namely where the start and end lines are both horizontal and end at the same x position, or both vertical and end at the same y position), an additional point is created at the beginning and end of the offset.
It's easiest to explain this in images:
Example Image
Example Image 2
I can't work out if there is something I have failed to do or if this is a bug in the library. Here's my code:
#include "geos_c.h"
std::vector<vec2> Geos::offsetLine(const std::vector<vec2>& points, float offset, int quadrantSegments, int joinStyle, double mitreLimit)
{
// make coord sequence from points
GEOSCoordSequence* seq = makeCoordSequence(points);
// Define line string
GEOSGeometry* lineString = GEOSGeom_createLineString(seq);
if(!lineString) return {};
// offset line
GEOSGeometry* bufferOp = GEOSOffsetCurve(lineString, offset, quadrantSegments, joinStyle, mitreLimit);
if(!bufferOp) return {};
// put coords into vector
std::vector<vec2> output = outputCoords(bufferOp, (offset < 0.0f));
// Frees memory of all as memory ownership is passed along
GEOSGeom_destroy(bufferOp);
return move(output);
}
GEOSCoordSequence* Geos::makeCoordSequence(const std::vector<vec2>& points)
{
    GEOSCoordSequence* seq = GEOSCoordSeq_create(points.size(), 2);
    if (!seq) return {};
    for (size_t i = 0; i < points.size(); i++) {
        GEOSCoordSeq_setX(seq, i, points[i].x);
        GEOSCoordSeq_setY(seq, i, points[i].y);
    }
    return seq;
}
std::vector<vec2> Geos::outputCoords(const GEOSGeometry* points, bool reversePoints)
{
    // convert to coord sequence
    const GEOSCoordSequence* coordSeq = GEOSGeom_getCoordSeq(points);
    if (!coordSeq) return {};
    // get number of points
    int nPoints = GEOSGeomGetNumPoints(points);
    if (nPoints == -1) return {};
    // output vector to return
    std::vector<vec2> output;
    // build vector
    for (size_t i = 0; i < (size_t)nPoints; i++) {
        // points are in reverse order if the offset is negative
        size_t index = reversePoints ? nPoints - i - 1 : i;
        double xCoord, yCoord;
        GEOSCoordSeq_getX(coordSeq, index, &xCoord);
        GEOSCoordSeq_getY(coordSeq, index, &yCoord);
        output.push_back({ xCoord, yCoord });
    }
    return output;
}
The latest release of GEOS resolves this now.
See here: Issue
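If upgrading isn't an option, one hedged workaround is to collapse the spurious collinear points with a near-zero Douglas-Peucker tolerance before reading the coordinates back out; exactly collinear points lie within any positive tolerance of the chord, so they are removed. A sketch only, and the tolerance value is an assumption:
// drop collinear artifact points from the offset result
GEOSGeometry* cleaned = GEOSSimplify(bufferOp, 1e-9);
if (cleaned) {
    std::vector<vec2> output = outputCoords(cleaned, (offset < 0.0f));
    GEOSGeom_destroy(cleaned);
    GEOSGeom_destroy(bufferOp);
    GEOSGeom_destroy(lineString);
    return output;
}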

GDALWarpRegionToBuffer & Tiling when Dst Frame not strictly contained in Src Frame

I'm currently working with the GDAL C/C++ API and I'm facing an issue with the warp-region-to-buffer functionality (WarpRegionToBuffer).
When my destination dataset is not strictly contained within the frame of my source dataset, the area that should hold no-data values is filled with random data (see out_code.tif enclosed). However, the gdalwarp command-line tool, which also uses the WarpRegionToBuffer function, does not seem to have this problem.
1/ Here is the code I use:
#include <iostream>
#include <string>
#include <vector>
#include "gdal.h"
#include "gdalwarper.h"
#include "cpl_conv.h"
int main(void)
{
    std::string pathSrc = "in.dt1";
    //these values will be provided on the command line
    std::string pathDst = "out_code.tif";
    double resolutionx = 0.000833333;
    double resolutiony = 0.000833333;
    //destination corner coordinates: top left (tl), bottom right (br)
    float_t xtl = -1;
    float_t ytl = 45;
    float_t xbr = 2;
    float_t ybr = 41;
    //tile size defined by the user
    int tilesizex = 256;
    int tilesizey = 256;
    float width = std::ceil((xbr - xtl) / resolutionx);
    float height = std::ceil((ytl - ybr) / resolutiony);
    double adfDstGeoTransform[6] = {xtl, resolutionx, 0, ytl, 0, -resolutiony};
    GDALDatasetH hSrcDS, hDstDS;
    // Open input file
    GDALAllRegister();
    hSrcDS = GDALOpen(pathSrc.c_str(), GA_ReadOnly);
    GDALDataType eDT = GDALGetRasterDataType(GDALGetRasterBand(hSrcDS, 1));
    // Create output file, using the same spatial reference as the input image, but a new geotransform
    GDALDriverH hDriver = GDALGetDriverByName("GTiff");
    hDstDS = GDALCreate(hDriver, pathDst.c_str(), width, height, GDALGetRasterCount(hSrcDS), eDT, NULL);
    OGRSpatialReference oSRS;
    char *pszWKT = NULL;
    //force geographic projection
    oSRS.SetWellKnownGeogCS("WGS84");
    oSRS.exportToWkt(&pszWKT);
    GDALSetProjection(hDstDS, pszWKT);
    //Set the coefficients for transforming between pixel/line (P,L) raster space
    //and projection coordinates (Xp,Yp) space.
    GDALSetGeoTransform(hDstDS, adfDstGeoTransform);
    // Setup warp options
    GDALWarpOptions *psWarpOptions = GDALCreateWarpOptions();
    psWarpOptions->hSrcDS = hSrcDS;
    psWarpOptions->hDstDS = hDstDS;
    psWarpOptions->nBandCount = 1;
    psWarpOptions->panSrcBands = (int *) CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
    psWarpOptions->panSrcBands[0] = 1;
    psWarpOptions->panDstBands = (int *) CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
    psWarpOptions->panDstBands[0] = 1;
    psWarpOptions->pfnProgress = GDALTermProgress;
    //these values will be calculated in order to warp tile by tile
    //current tile size
    int cursizex = 0;
    int cursizey = 0;
    double nbtilex = std::ceil(width / tilesizex);
    double nbtiley = std::ceil(height / tilesizey);
    int starttilex = 0;
    int starttiley = 0;
    // Establish reprojection transformer
    psWarpOptions->pTransformerArg =
        GDALCreateGenImgProjTransformer(hSrcDS,
                                        GDALGetProjectionRef(hSrcDS),
                                        hDstDS,
                                        GDALGetProjectionRef(hDstDS),
                                        FALSE, 0.0, 1);
    psWarpOptions->pfnTransformer = GDALGenImgProjTransform;
    // Initialize and execute the warp operation region by region
    GDALWarpOperation oOperation;
    oOperation.Initialize(psWarpOptions);
    for (int ty = 0; ty < nbtiley; ty++) {
        for (int tx = 0; tx < nbtilex; tx++) {
            //if it is the last tile, shrink the size; otherwise keep tilesize
            starttiley = ty * tilesizey;
            starttilex = tx * tilesizex;
            cursizex = std::min(starttilex + tilesizex, (int)width) - starttilex;
            cursizey = std::min(starttiley + tilesizey, (int)height) - starttiley;
            float *buffer = new float[cursizex * cursizey];
            //note: memset counts bytes, so the size must include sizeof(float)
            memset(buffer, 0, cursizex * cursizey * sizeof(float));
            //warp source
            CPLErr ret = oOperation.WarpRegionToBuffer(
                starttilex, starttiley, cursizex, cursizey,
                buffer,
                eDT);
            if (ret != 0) {
                CEA_SIMONE_ERROR(CPLGetLastErrorMsg());
                throw std::runtime_error("warp error");
            }
            //write the fused tile into the destination
            ret = GDALRasterIO(GDALGetRasterBand(hDstDS, 1),
                               GF_Write,
                               starttilex, starttiley, cursizex, cursizey,
                               buffer, cursizex, cursizey,
                               eDT,
                               0, 0);
            if (ret != 0) {
                CEA_SIMONE_ERROR("raster io write error");
                throw std::runtime_error("raster io write error");
            }
            //arrays allocated with new[] must be released with delete[]
            delete[] buffer;
        }
    }
    // Clean up
    GDALDestroyGenImgProjTransformer(psWarpOptions->pTransformerArg);
    GDALDestroyWarpOptions(psWarpOptions);
    GDALClose(hDstDS);
    GDALClose(hSrcDS);
    return 0;
}
The result:
output image of previous sample of code (as png, as I can't enclose TIF img)
The GdalWarp command line:
gdalwarp -te -1 41 2 45 -tr 0.000833333 0.000833333 in.dt1 out_cmd_line.tif
The command line result:
output image of previous command line (as png, as I can't enclose TIF img)
Can you please help me find what is wrong with my use of the GDAL C/C++ API, so that it behaves the same way as the gdalwarp command line? There is probably an algorithm in gdalwarp that computes a mask of useful pixels in the destination frame before calling WarpRegionToBuffer, but I didn't find it.
I would really appreciate help with this problem!
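From the warper documentation, I suspect the missing pieces are a destination no-data value plus the INIT_DEST warp option: gdalwarp sets these when -dstnodata is given, so destination pixels with no source coverage are initialised to no-data instead of whatever happens to be in the buffer. A hedged sketch of what I mean, to be placed before oOperation.Initialize() (the no-data value itself is an arbitrary assumption here, and CSLSetNameValue needs cpl_string.h):
psWarpOptions->padfDstNoDataReal = (double *) CPLMalloc(sizeof(double) * psWarpOptions->nBandCount);
psWarpOptions->padfDstNoDataReal[0] = -32767.0; //assumed no-data value
psWarpOptions->papszWarpOptions = CSLSetNameValue(
    psWarpOptions->papszWarpOptions, "INIT_DEST", "NO_DATA");
GDALSetRasterNoDataValue(GDALGetRasterBand(hDstDS, 1), -32767.0);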
Best regards

Extracting skin data from an FBX file

I need to convert animation data from Autodesk's FBX file format to one that is compatible with DirectX; specifically, I need to calculate the offset matrices for my skinned mesh. I have written a converter (which in this case converts .fbx to my own 'scene' format) in which I would like to calculate an offset matrix for my mesh. Here is the code:
//
// Skin
//
if (bHasDeformer)
{
    // iterate deformers( TODO: ACCOUNT FOR MULTIPLE DEFORMERS )
    for (int i = 0; i < ncDeformers && i < 1; ++i)
    {
        // skin
        FbxSkin *pSkin = (FbxSkin*)pMesh->GetDeformer(i, FbxDeformer::eSkin);
        if (pSkin == NULL)
            continue;
        // bone count
        int ncBones = pSkin->GetClusterCount();
        // iterate bones
        for (int boneIndex = 0; boneIndex < ncBones; ++boneIndex)
        {
            // cluster
            FbxCluster* cluster = pSkin->GetCluster(boneIndex);
            // bone ref
            FbxNode* pBone = cluster->GetLink();
            // Get the bind pose
            FbxAMatrix bindPoseMatrix, transformMatrix;
            cluster->GetTransformMatrix(transformMatrix);
            cluster->GetTransformLinkMatrix(bindPoseMatrix);
            // decomposed transform components (vS/vR/vT are FbxVector4s declared elsewhere)
            vS = bindPoseMatrix.GetS();
            vR = bindPoseMatrix.GetR();
            vT = bindPoseMatrix.GetT();
            int *pVertexIndices = cluster->GetControlPointIndices();
            double *pVertexWeights = cluster->GetControlPointWeights();
            // Iterate through all the vertices which are affected by the bone
            int ncVertexIndices = cluster->GetControlPointIndicesCount();
            for (int iBoneVertexIndex = 0; iBoneVertexIndex < ncVertexIndices; iBoneVertexIndex++)
            {
                // vertex
                int niVertex = pVertexIndices[iBoneVertexIndex];
                // weight
                float fWeight = (float)pVertexWeights[iBoneVertexIndex];
            }
        }
    }
}
How do I convert the FBX transforms into a bone offset matrix?
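For context, the usual construction (a sketch, not taken from the question's code) combines the two cluster matrices queried above: the offset (inverse bind pose) is the inverse of the link's bind-pose transform times the mesh's bind-pose transform, optionally post-multiplied by the node's geometric transform, which is often identity but not always:
// hedged sketch -- variable names follow the snippet above
FbxNode* pNode = pMesh->GetNode();
FbxAMatrix geometryTransform(
    pNode->GetGeometricTranslation(FbxNode::eSourcePivot),
    pNode->GetGeometricRotation(FbxNode::eSourcePivot),
    pNode->GetGeometricScaling(FbxNode::eSourcePivot));
// takes a mesh-space vertex into the bone's local space at bind time,
// which is what DirectX-style skinning expects as the offset matrix
FbxAMatrix offsetMatrix =
    bindPoseMatrix.Inverse() * transformMatrix * geometryTransform;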

How to unify normal orientation

I've been trying to produce a mesh that has all face normals pointing outward.
To do this, I load a mesh from a *.ctm file, then walk over all triangles to determine each normal using a cross product; if the normal points in the negative z direction, I swap v1 and v2 (thus flipping the normal orientation).
After this is done I save the result to a *.ctm file and view it with Meshlab.
The result in Meshlab still shows normals pointing in both the positive and negative z direction (as can be seen from the black triangles), and when viewing the normals in Meshlab they really are pointing backwards.
Can anyone give me some advice on how to solve this?
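As an aside, the per-face test itself is easier to see without the alglib matrices. This hedged sketch is the same check in plain C++, and it also hints at why it can't fully succeed: on a closed surface, faces on the far side legitimately have outward normals with z < 0, so flipping every face toward +z cannot give a globally consistent orientation.
struct Vec3 { double x, y, z; };
static Vec3 sub(Vec3 a, Vec3 b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static Vec3 cross(Vec3 a, Vec3 b) {
    // right-handed cross product
    return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x };
}
// per-face flip toward +z (the approach described above):
// if (cross(sub(v1, v0), sub(v2, v0)).z < 0) std::swap(index1, index2);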
The source code for the normalization part is:
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud, *cloud1);
for (std::vector<pcl::Vertices>::iterator it = meshFixed.polygons.begin(); it != meshFixed.polygons.end(); ++it)
{
    alglib::real_2d_array v0;
    double _v0[] = {cloud1->points[it->vertices[0]].x, cloud1->points[it->vertices[0]].y, cloud1->points[it->vertices[0]].z};
    v0.setcontent(3, 1, _v0); //3 rows, 1 col
    alglib::real_2d_array v1;
    double _v1[] = {cloud1->points[it->vertices[1]].x, cloud1->points[it->vertices[1]].y, cloud1->points[it->vertices[1]].z};
    v1.setcontent(3, 1, _v1); //3 rows, 1 col
    alglib::real_2d_array v2;
    double _v2[] = {cloud1->points[it->vertices[2]].x, cloud1->points[it->vertices[2]].y, cloud1->points[it->vertices[2]].z};
    v2.setcontent(3, 1, _v2); //3 rows, 1 col (was 1,3 -- inconsistent with v0 and v1)
    alglib::real_2d_array normal;
    normal = cross(v1 - v0, v2 - v0);
    //if z<0 change indices order v1->v2 and v2->v1
    alglib::real_2d_array normalizedNormal;
    if (normal[2][0] < 0)
    {
        int index1, index2;
        index1 = it->vertices[1];
        index2 = it->vertices[2];
        it->vertices[1] = index2;
        it->vertices[2] = index1;
        //make normal of length 1
        double normalScaling = 1.0 / sqrt(dot(normal, normal));
        normal[0][0] = -1 * normal[0][0];
        normal[1][0] = -1 * normal[1][0];
        normal[2][0] = -1 * normal[2][0];
        normalizedNormal = normalScaling * normal;
    }
    else
    {
        //make normal of length 1
        double normalScaling = 1.0 / sqrt(dot(normal, normal));
        normalizedNormal = normalScaling * normal;
    }
    //add to normal cloud
    pcl::Normal pclNormalizedNormal;
    pclNormalizedNormal.normal_x = normalizedNormal[0][0];
    pclNormalizedNormal.normal_y = normalizedNormal[1][0];
    pclNormalizedNormal.normal_z = normalizedNormal[2][0];
    normalsFixed.push_back(pclNormalizedNormal);
}
The result from this code is:
I've found some code in the VCG library to orient the face and vertex normals.
After using it, a large part of the mesh has correct face normals, but not all of it.
The new code:
// VCG library implementation
MyMesh m;
// Convert pcl::PolygonMesh to VCG MyMesh
m.Clear();
// Create temporary cloud to have a handy struct object
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud, *cloud1);
// Now convert the vertices to VCG MyMesh
int vertCount = cloud1->width * cloud1->height;
vcg::tri::Allocator<MyMesh>::AddVertices(m, vertCount);
for (unsigned int i = 0; i < vertCount; ++i)
    m.vert[i].P() = vcg::Point3f(cloud1->points[i].x, cloud1->points[i].y, cloud1->points[i].z);
// Now convert the polygon indices to VCG MyMesh => make VCG faces..
int triCount = meshFixed.polygons.size();
if (triCount == 1)
{
    if (meshFixed.polygons[0].vertices[0] == 0 && meshFixed.polygons[0].vertices[1] == 0 && meshFixed.polygons[0].vertices[2] == 0)
        triCount = 0;
}
Allocator<MyMesh>::AddFaces(m, triCount);
for (unsigned int i = 0; i < triCount; ++i)
{
    m.face[i].V(0) = &m.vert[meshFixed.polygons[i].vertices[0]];
    m.face[i].V(1) = &m.vert[meshFixed.polygons[i].vertices[1]];
    m.face[i].V(2) = &m.vert[meshFixed.polygons[i].vertices[2]];
}
vcg::tri::UpdateBounding<MyMesh>::Box(m);
vcg::tri::UpdateNormal<MyMesh>::PerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
printf("Input mesh vn:%i fn:%i\n", m.VN(), m.FN());
// Start to flip all normals to outside
vcg::face::FFAdj<MyMesh>::FFAdj();
vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
bool oriented, orientable;
if (vcg::tri::Clean<MyMesh>::CountNonManifoldEdgeFF(m) > 0) {
    std::cout << "Mesh has some non-2-manifold faces; orientability requires manifoldness" << std::endl;
    return; // can't continue, mesh can't be processed
}
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented, orientable);
vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
// now convert VCG back to pcl::PolygonMesh
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
cloud->is_dense = false;
cloud->width = vertCount;
cloud->height = 1;
cloud->points.resize(vertCount);
// Now fill the pointcloud of the mesh
for (int i = 0; i < vertCount; i++)
{
    cloud->points[i].x = m.vert[i].P()[0];
    cloud->points[i].y = m.vert[i].P()[1];
    cloud->points[i].z = m.vert[i].P()[2];
}
pcl::toROSMsg(*cloud, meshFixed.cloud);
std::vector<pcl::Vertices> polygons;
// Now fill the indices of the triangles/faces of the mesh
for (int i = 0; i < triCount; i++)
{
    pcl::Vertices vertices;
    vertices.vertices.push_back(m.face[i].V(0) - &*m.vert.begin());
    vertices.vertices.push_back(m.face[i].V(1) - &*m.vert.begin());
    vertices.vertices.push_back(m.face[i].V(2) - &*m.vert.begin());
    polygons.push_back(vertices);
}
meshFixed.polygons = polygons;
Which results in: (Meshlab still shows normals facing both ways)
I finally solved the problem, still using the VCG library. Starting from the new code above, I slightly updated the following section:
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented,orientable);
//vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
//vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
I then updated the vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh() function in clean.h. The update orients the first polygon of each group correctly, and after swapping an edge the face normal is recalculated and updated.
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
    RequireFFAdjacency(m);
    assert(&Oriented != &Orientable);
    assert(m.face.back().FFp(0)); // This algorithm requires FF topology initialized
    Orientable = true;
    Oriented = true;
    tri::UpdateSelection<MeshType>::FaceClear(m);
    std::stack<FacePointer> faces;
    for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    {
        if (!fi->IsD() && !fi->IsS())
        {
            // each face put in the stack is selected (and oriented)
            fi->SetS();
            // New section of code to orient the initial face correctly
            if (fi->N()[2] > 0.0)
            {
                face::SwapEdge<FaceType,true>(*fi, 0);
                face::ComputeNormal(*fi);
            }
            // End of new code section.
            faces.push(&(*fi));
            // empty the stack
            while (!faces.empty())
            {
                FacePointer fp = faces.top();
                faces.pop();
                // make the adjacent faces consistently oriented
                for (int j = 0; j < 3; j++)
                {
                    // get one of the adjacent faces
                    FacePointer fpaux = fp->FFp(j);
                    int iaux = fp->FFi(j);
                    if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
                    {
                        if (!CheckOrientation(*fpaux, iaux))
                        {
                            Oriented = false;
                            if (!fpaux->IsS())
                            {
                                face::SwapEdge<FaceType,true>(*fpaux, iaux);
                                // New line to update the face normal
                                face::ComputeNormal(*fpaux);
                                // end of new section.
                                assert(CheckOrientation(*fpaux, iaux));
                            }
                            else
                            {
                                Orientable = false;
                                break;
                            }
                        }
                        // put the oriented face onto the stack
                        if (!fpaux->IsS())
                        {
                            fpaux->SetS();
                            faces.push(fpaux);
                        }
                    }
                }
            }
        }
        if (!Orientable) break;
    }
}
I also updated the function bool CheckOrientation(FaceType &f, int z) to base its decision on the z-direction of the normals.
template <class FaceType>
bool CheckOrientation(FaceType &f, int z)
{
    // Added section to compare the z-directions of the two face normals
    FaceType *original = f.FFp(z);
    double nf2, ng2;
    nf2 = f.N()[2];
    ng2 = original->N()[2];
    // End of additional section
    if (IsBorder(f, z))
        return true;
    else
    {
        FaceType *g = f.FFp(z);
        int gi = f.FFi(z);
        // changed if statement from: if (f.V0(z) == g->V1(gi))
        // note: std::abs (from <cmath>) so the double overload is picked
        if (nf2 / std::abs(nf2) == ng2 / std::abs(ng2))
            return true;
        else
            return false;
    }
}
The result is as I expect and desire from the algorithm: