I am using GEOS (the C/C++ port of the JTS Topology Suite) to produce an offset curve of a linestring.
I have successfully produced the offset curve; however, in some cases (namely where the start and end segments are both horizontal and end at the same x position, or both vertical and end at the same y position), an additional point is created at the beginning and end of the offset.
It's easiest to explain this in images:
Example Image
Example Image 2
I can't work out if there is something I have failed to do or if this is a bug in the library. Here's my code:
#include "geos_c.h"
std::vector<vec2> Geos::offsetLine(const std::vector<vec2>& points, float offset, int quadrantSegments, int joinStyle, double mitreLimit)
{
    // Make a coord sequence from the points
    GEOSCoordSequence* seq = makeCoordSequence(points);
    // Define the line string (takes ownership of seq)
    GEOSGeometry* lineString = GEOSGeom_createLineString(seq);
    if (!lineString) return {};
    // Offset the line
    GEOSGeometry* bufferOp = GEOSOffsetCurve(lineString, offset, quadrantSegments, joinStyle, mitreLimit);
    // GEOSOffsetCurve does not consume its input, so free the line string here
    GEOSGeom_destroy(lineString);
    if (!bufferOp) return {};
    // Put the coords into a vector
    std::vector<vec2> output = outputCoords(bufferOp, (offset < 0.0f));
    // Free the offset geometry
    GEOSGeom_destroy(bufferOp);
    return output;
}
GEOSCoordSequence* Geos::makeCoordSequence(const std::vector<vec2>& points)
{
    GEOSCoordSequence* seq = GEOSCoordSeq_create(points.size(), 2);
    if (!seq) return nullptr;
    for (size_t i = 0; i < points.size(); i++) {
        GEOSCoordSeq_setX(seq, i, points[i].x);
        GEOSCoordSeq_setY(seq, i, points[i].y);
    }
    return seq;
}
std::vector<vec2> Geos::outputCoords(const GEOSGeometry* points, bool reversePoints)
{
    // Convert to a coord sequence so the points can be read back out
    const GEOSCoordSequence* coordSeq = GEOSGeom_getCoordSeq(points);
    if (!coordSeq) return {};
    // Get the number of points
    int nPoints = GEOSGeomGetNumPoints(points);
    if (nPoints == -1) return {};
    // Output vector to return
    std::vector<vec2> output;
    for (size_t i = 0; i < (size_t)nPoints; i++) {
        // Points are in reverse order if the offset is negative
        size_t index = reversePoints ? nPoints - i - 1 : i;
        double xCoord, yCoord;
        GEOSCoordSeq_getX(coordSeq, index, &xCoord);
        GEOSCoordSeq_getY(coordSeq, index, &yCoord);
        output.push_back({ xCoord, yCoord });
    }
    return output;
}
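For context, a hedged usage sketch of offsetLine (assuming vec2 has float x and y members, the Geos class initialises GEOS via initGEOS elsewhere, and using GEOSBUF_JOIN_ROUND, the round join style from geos_c.h):
// Hypothetical call site: offset an L-shaped polyline by 1.0.
std::vector<vec2> line = { {0.0f, 0.0f}, {10.0f, 0.0f}, {10.0f, 10.0f} };
Geos geos;
std::vector<vec2> offsetPts =
    geos.offsetLine(line, 1.0f, /*quadrantSegments*/ 8,
                    GEOSBUF_JOIN_ROUND, /*mitreLimit*/ 5.0);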
The latest release of GEOS resolves this now.
See here: Issue
I am trying to get the left polygonal chain given a set of consecutive points. (NOTE: edges are non-intersecting.)
Image 1. Sample polygon and its bound.
What I did was:
1. Get the minY, maxY and minX (the bound).
2. Find the point that has minY (or maxY), then save it as the first point.
3. Save the following points until a point with minY or maxY is found, while checking for the point with minX.
4. If the same Y is found first, save it as the new first point and repeat from #3.
5. If the other Y is found first and the saved points contain minX, this is the chain. Otherwise, save it as the new first point and repeat from #3.
Image 2. The left chain of points.
But using these steps can give the wrong result for some polygons, like this one:
Since one point is (minX, maxY), either of the two sides could be returned.
EDIT:
Using the idea of the left-bottom-most and left-top-most points, here is the code that I am currently using:
Get the min (left-bottom-most) and max (left-top-most) point.
std::vector<Coord> ret;
size_t i = 0;
Coord minCoord = poly[i];
Coord maxCoord = poly[i];
size_t minIdx = 0;   // poly[0] is the initial candidate for the bottom
size_t maxIdx = 0;   // poly[0] is the initial candidate for the top
size_t cnt = poly.size();
i++;
for (; i < cnt; i++)
{
    Coord c = poly[i];
    if (c.y < minCoord.y) // new bottom
    {
        minCoord = c;
        minIdx = i;
    }
    else if (c.y == minCoord.y) // same bottom
    {
        if (c.x < minCoord.x) // left most
        {
            minCoord = c;
            minIdx = i;
        }
    }
    if (c.y > maxCoord.y) // new top
    {
        maxCoord = c;
        maxIdx = i;
    }
    else if (c.y == maxCoord.y) // same top
    {
        if (c.x < maxCoord.x) // left most
        {
            maxCoord = c;
            maxIdx = i;
        }
    }
}
Get the points connected to the max point.
i = maxIdx;
Coord mid = poly[i];
Coord ray1 = poly[(i + cnt - 1) % cnt];
Coord ray2 = poly[(i + 1) % cnt];
Find which one has the smallest angle. This is the direction we will follow.
double rad1 = Pts2Rad(mid, ray1);
double rad2 = Pts2Rad(mid, ray2);
int step = 1;
if (rad1 < rad2)
    step = cnt - 1;
Save the points.
while (i != minIdx)
{
    ret.push_back(poly[i]);
    i = (i + step) % cnt;
}
ret.push_back(poly[minIdx]);
To be specific, I am assuming that no vertex is duplicated, and I define the "left chain" as the sequence of vertices from the original polygon loop that goes from the leftmost vertex on the top side of the bounding box to the leftmost vertex on the bottom side of the bounding box. [In case the top and bottom sides coincide, these two vertices also coincide; I leave it to you what to return in this case.]
To obtain these, you can scan all vertices and keep the left-topmost and left-bottommost found so far, comparing each to the next vertex: if it is above the left-topmost, it becomes the new left-topmost; if it is at the same level and further to the left, it also becomes the new left-topmost. Similarly for the left-bottommost.
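A minimal sketch of that scan, assuming the Coord struct from the question (with x and y members) and a non-empty poly with no duplicated vertices:
#include <vector>

// Returns the indices of the left-topmost and left-bottommost vertices.
void findChainEndpoints(const std::vector<Coord>& poly, size_t& topIdx, size_t& bottomIdx)
{
    topIdx = 0;
    bottomIdx = 0;
    for (size_t i = 1; i < poly.size(); i++)
    {
        const Coord& c = poly[i];
        // New left-topmost: strictly higher, or equally high but further left
        if (c.y > poly[topIdx].y || (c.y == poly[topIdx].y && c.x < poly[topIdx].x))
            topIdx = i;
        // New left-bottommost: strictly lower, or equally low but further left
        if (c.y < poly[bottomIdx].y || (c.y == poly[bottomIdx].y && c.x < poly[bottomIdx].x))
            bottomIdx = i;
    }
}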
I've been trying to implement the Möller-Trumbore ray-triangle intersection algorithm in my ray-tracing code. The code is supposed to read in a mesh and light sources, fire rays from the light sources, and return the triangle of the mesh that each ray intersects. Here is my implementation of the algorithm:
//Moller-Trumbore intersection algorithm
void getFaceIntersect(modelStruct m, ray r, hitFaceStruct& hitFaces)
{
// Constant throughout the loop
point origin = r.p0;
point direction = r.u;
hitFaces.isHit = false;
for (int i = 0; i < m.faces; i++)
{
// Get face vertices
point v1 = m.vertList[m.faceList[i].v1];
point v2 = m.vertList[m.faceList[i].v2];
point v3 = m.vertList[m.faceList[i].v3];
// Get two edges
point edge1 = v2 - v1;
point edge2 = v3 - v1;
// Get p
point p = direction.cross(direction, edge2);
// Use p to find determinant
double det = p.dot(edge1, p);
// If the determinant is about 0, the ray lies in the plane of the triangle
if (abs(det) < 0.00000000001)
{
continue;
}
double inverseDet = 1 / det;
point v1ToOrigin = (origin - v1);
double u = v1ToOrigin.dot(v1ToOrigin, p) * inverseDet;
// If u is not between 0 and 1, no hit
if (u < 0 || u > 1)
{
continue;
}
// Used for calculating v
point q = v1ToOrigin.cross(v1ToOrigin, edge1);
double v = direction.dot(direction, q) * inverseDet;
if (v < 0 || (u + v) > 1)
{
continue;
}
double t = q.dot(edge2, q) * inverseDet;
// gets closest face
if (t < abs(hitFaces.s)) {
hitFaceStruct goodStruct = hitFaceStruct();
goodStruct.face = i;
goodStruct.hitPoint = p;
goodStruct.isHit = true;
goodStruct.s = t;
hitFaces = goodStruct;
break;
}
}
}
The relevant code for hitFaceStruct and modelStruct is as follows:
typedef struct _hitFaceStruct
{
int face; // the index of the face in question in the list of faces
float s; // the distance along the ray at which it was hit
bool isHit;
point hitPoint;
} hitFaceStruct;
typedef struct _modelStruct {
char *fileName;
float scale;
float rot_x, rot_y, rot_z;
float x, y, z;
float r_amb, g_amb, b_amb;
float r_dif, g_dif, b_dif;
float r_spec, g_spec, b_spec;
float k_amb, k_dif, k_spec, k_reflective, k_refractive;
float spec_exp, index_refraction;
int verts, faces, norms = 0; // Number of vertices, faces, and normals in the system
point *vertList, *normList; // Vertex and Normal Lists
faceStruct *faceList; // Face List
} modelStruct;
Whenever I shoot a ray, the values of u or v in the algorithm code always come out to a large negative number, rather than the expected small, positive one. The direction vector of the ray is normalized before I pass it on to the intersection code, and I'm positive I'm firing rays that would normally hit the mesh. Can anyone please help me spot my error here?
Thanks!
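For reference, here is a minimal, self-contained sketch of the standard Möller-Trumbore test written with free functions; the vec3 type and the sub/dot/cross helpers are illustrative, not the question's point class. Comparing it term by term with the loop above may help locate where the two computations diverge.
#include <cmath>

struct vec3 { double x, y, z; };

static vec3 sub(const vec3& a, const vec3& b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static double dot(const vec3& a, const vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static vec3 cross(const vec3& a, const vec3& b)
{
    return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x };
}

// Returns true and sets t (distance along the ray) if the ray hits triangle (v1, v2, v3).
bool rayTriangle(const vec3& origin, const vec3& direction,
                 const vec3& v1, const vec3& v2, const vec3& v3, double& t)
{
    const double eps = 1e-9;
    vec3 edge1 = sub(v2, v1);
    vec3 edge2 = sub(v3, v1);
    vec3 p = cross(direction, edge2);
    double det = dot(edge1, p);               // determinant is edge1 . (direction x edge2)
    if (std::fabs(det) < eps) return false;   // ray is parallel to the triangle plane
    double invDet = 1.0 / det;
    vec3 tvec = sub(origin, v1);              // origin relative to the first vertex
    double u = dot(tvec, p) * invDet;
    if (u < 0.0 || u > 1.0) return false;
    vec3 q = cross(tvec, edge1);
    double v = dot(direction, q) * invDet;
    if (v < 0.0 || u + v > 1.0) return false;
    t = dot(edge2, q) * invDet;
    return t > eps;                           // only count hits in front of the origin
}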
I have a set of points that I'm trying to sort in CCW or CW order by their angle. I want the points to be sorted so that they form a polygon with no splits in its region and no self-intersections. This is difficult because, in most cases, it would be a concave polygon.
point centroid;
int main( int argc, char** argv )
{
// I read a set of points into a struct point array: points[n]
// Find centroid
double sx = 0; double sy = 0;
for (int i = 0; i < n; i++)
{
sx += points[i].x;
sy += points[i].y;
}
centroid.x = sx/n;
centroid.y = sy/n;
// sort points in polar order using the centroid as the reference
std::qsort(&points, n, sizeof(point), polarOrder);
}
// -1 ccw, 1 cw, 0 collinear
int orientation(point a, point b, point c)
{
double area2 = (b.x-a.x)*(c.y-a.y) - (b.y-a.y)*(c.x-a.x);
if (area2 < 0) return -1;
else if (area2 > 0) return +1;
else return 0;
}
// compare other points relative to polar angle they make with this point
// (where the polar angle is between 0 and 2pi)
int polarOrder(const void *vp1, const void *vp2)
{
point *p1 = (point *)vp1;
point *p2 = (point *)vp2;
// translation
double dx1 = p1->x - centroid.x;
double dy1 = p1->y - centroid.y;
double dx2 = p2->x - centroid.x;
double dy2 = p2->y - centroid.y;
if (dy1 >= 0 && dy2 < 0) { return -1; } // p1 above and p2 below
else if (dy2 >= 0 && dy1 < 0) { return 1; } // p1 below and p2 above
else if (dy1 == 0 && dy2 ==0) { // 3-collinear and horizontal
if (dx1 >= 0 && dx2 < 0) { return -1; }
else if (dx2 >= 0 && dx1 < 0) { return 1; }
else { return 0; }
}
else return -orientation(centroid,*p1,*p2); // both above or below
}
It looks like the points are sorted accurately (pink) until they "cave in", in which case the algorithm skips over these points and then continues. Can anyone point me in the right direction to sort the points so that they form the polygon I'm looking for?
Raw Point Plot - Blue, Pink Points - Sorted
Point List: http://pastebin.com/N0Wdn2sm (You can ignore the 3rd component, since all these points lie on the same plane.)
The code below (sorry, it's C rather than C++) sorts correctly, as you wish, using atan2.
The problem with your code may be that it attempts to use the included angle between the two vectors being compared. This is doomed to fail because the array is not circular: it has a first and a final element. Sorting an array requires a total polar order with respect to the centroid: a range of angles such that each point corresponds to a unique angle, regardless of the other point. The atan2 angles are exactly such a total polar order, and comparing them as scalars provides the sort comparison function.
In this manner, the algorithm you proposed is guaranteed to produce a star-shaped polyline. It may oscillate wildly between different radii (which your data do! Is this what you meant by "caved in"? If so, it's a feature of your algorithm and data, not an implementation error), and points corresponding to exactly the same angle might produce edges that coincide (lie directly on top of each other), but the edges won't cross.
I believe that your choice of the centroid as the polar origin is sufficient to guarantee that connecting the ends of the polyline generated as above will produce a full star-shaped polygon; however, I don't have a proof.
Result plotted with Excel
Note you can guess from the nearly radial edges where the centroid is! This is the "star shape" I referred to above.
To illustrate that this really is a star-shaped polygon, here is a zoom-in on the confusing lower left corner:
If you want a polygon that is "nicer" in some sense, you will need a fancier (probably much fancier) algorithm, e.g. the Delaunay triangulation-based ones others have referred to.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

struct point {
    double x, y;
};

void print(FILE *f, struct point *p) {
    fprintf(f, "%f,%f\n", p->x, p->y);
}

// Return polar angle of p with respect to origin o
double to_angle(const struct point *p, const struct point *o) {
    return atan2(p->y - o->y, p->x - o->x);
}

void find_centroid(struct point *c, struct point *pts, int n_pts) {
    double x = 0, y = 0;
    for (int i = 0; i < n_pts; i++) {
        x += pts[i].x;
        y += pts[i].y;
    }
    c->x = x / n_pts;
    c->y = y / n_pts;
}

static struct point centroid[1];

int by_polar_angle(const void *va, const void *vb) {
    double theta_a = to_angle(va, centroid);
    double theta_b = to_angle(vb, centroid);
    return theta_a < theta_b ? -1 : theta_a > theta_b ? 1 : 0;
}

void sort_by_polar_angle(struct point *pts, int n_pts) {
    find_centroid(centroid, pts, n_pts);
    qsort(pts, n_pts, sizeof pts[0], by_polar_angle);
}

int main(void) {
    FILE *f = fopen("data.txt", "r");
    if (!f) return 1;
    struct point pts[10000];
    int n_pts, n_read;
    for (n_pts = 0;
         (n_read = fscanf(f, "%lf%lf%*f", &pts[n_pts].x, &pts[n_pts].y)) != EOF;
         ++n_pts)
        if (n_read != 2) return 2;
    fclose(f);
    sort_by_polar_angle(pts, n_pts);
    for (int i = 0; i < n_pts; i++)
        print(stdout, pts + i);
    return 0;
}
Well, first and foremost, I see centroid declared as a local variable in main. Yet inside polarOrder you are also accessing some centroid variable.
Judging by the code you posted, that second centroid is a file-scope variable that you never initialized to any specific value. Hence the meaningless results from your comparison function.
The second strange detail in your code is that you do return -orientation(centroid,*p1,*p2) if both points are above or below. Since orientation returns -1 for CCW and +1 for CW, it should be just return orientation(centroid,*p1,*p2). Why did you feel the need to negate the result of orientation?
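A minimal sketch of the two changes suggested above, reusing the names from the question (point, orientation, polarOrder) and assuming #include <cstdlib> for std::qsort:
point centroid;                     // single file-scope centroid that polarOrder reads

// polarOrder as in the question, except its last line becomes:
//     return orientation(centroid, *p1, *p2);
int polarOrder(const void* vp1, const void* vp2);

void sortPolar(point* points, int n)
{
    double sx = 0, sy = 0;
    for (int i = 0; i < n; i++) { sx += points[i].x; sy += points[i].y; }
    centroid.x = sx / n;            // initialise the centroid *before* sorting
    centroid.y = sy / n;
    std::qsort(points, n, sizeof(point), polarOrder);
}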
Your original points don't appear to form a convex polygon, so simply ordering them by angle around a fixed centroid will not necessarily result in a clean polygon. This is a non-trivial problem; you may want to research Delaunay triangulation and/or gift-wrapping algorithms, although both would have to be modified because your polygon is concave. The answer here is an interesting example of a modified gift-wrapping algorithm for concave polygons. There is also a C++ library called PCL that may do what you need.
But... if you really do want to do a polar sort, your sorting functions seem more complex than necessary. I would sort using atan2 first, then optimize later once you get the result you want, if necessary. Here is an example using lambda functions:
#include <algorithm>
#include <math.h>
#include <vector>
int main()
{
    struct point
    {
        double x;
        double y;
    };
    std::vector<point> points;
    point centroid;
    // fill in your data...
    auto sort_predicate = [&centroid](const point& a, const point& b) -> bool {
        return atan2(a.x - centroid.x, a.y - centroid.y) <
               atan2(b.x - centroid.x, b.y - centroid.y);
    };
    std::sort(points.begin(), points.end(), sort_predicate);
}
I've been trying to produce a mesh that has all face normals pointing outward.
To do this, I load a mesh from a *.ctm file, then walk over all triangles to determine each normal using a cross product; if the normal points in the negative z direction, I flip v1 and v2 (and thus the normal orientation).
After this is done, I save the result to a *.ctm file and view it in Meshlab.
The result in Meshlab still shows normals pointing in both the positive and negative z directions (as can be seen from the black triangles). Also, when viewing the normals in Meshlab, they really are pointing backwards.
Can anyone give me some advice on how to solve this?
The source code for the normalization part is:
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud,*cloud1);
for(std::vector<pcl::Vertices>::iterator it = meshFixed.polygons.begin(); it != meshFixed.polygons.end(); ++it)
{
alglib::real_2d_array v0;
double _v0[] = {cloud1->points[it->vertices[0]].x,cloud1->points[it->vertices[0]].y,cloud1->points[it->vertices[0]].z};
v0.setcontent(3,1,_v0); //3 rows, 1col
alglib::real_2d_array v1;
double _v1[] = {cloud1->points[it->vertices[1]].x,cloud1->points[it->vertices[1]].y,cloud1->points[it->vertices[1]].z};
v1.setcontent(3,1,_v1); //3 rows, 1col
alglib::real_2d_array v2;
double _v2[] = {cloud1->points[it->vertices[2]].x,cloud1->points[it->vertices[2]].y,cloud1->points[it->vertices[2]].z};
v2.setcontent(1,3,_v2); //1 row, 3 cols (note: v0 and v1 use 3 rows, 1 col)
alglib::real_2d_array normal;
normal = cross(v1-v0,v2-v0);
//if z<0 change indices order v1->v2 and v2->v1
alglib::real_2d_array normalizedNormal;
if(normal[2][0]<0)
{
int index1,index2;
index1 = it->vertices[1];
index2 = it->vertices[2];
it->vertices[1] = index2;
it->vertices[2] = index1;
//make normal of length 1
double normalScaling = 1.0/sqrt(dot(normal,normal));
normal[0][0] = -1*normal[0][0];
normal[1][0] = -1*normal[1][0];
normal[2][0] = -1*normal[2][0];
normalizedNormal = normalScaling * normal;
}
else
{
//make normal of length 1
double normalScaling = 1.0/sqrt(dot(normal,normal));
normalizedNormal = normalScaling * normal;
}
//add to normal cloud
pcl::Normal pclNormalizedNormal;
pclNormalizedNormal.normal_x = normalizedNormal[0][0];
pclNormalizedNormal.normal_y = normalizedNormal[1][0];
pclNormalizedNormal.normal_z = normalizedNormal[2][0];
normalsFixed.push_back(pclNormalizedNormal);
}
The result from this code is:
I've found some code in the VCG library to orient the face and vertex normals.
After using this, a large part of the mesh has correct face normals, but not all of it.
The new code:
// VCG library implementation
MyMesh m;
// Convert pcl::PolygonMesh to VCG MyMesh
m.Clear();
// Create temporary cloud in to have handy struct object
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZRGBA> ());
pcl::fromROSMsg (meshFixed.cloud,*cloud1);
// Now convert the vertices to VCG MyMesh
int vertCount = cloud1->width*cloud1->height;
vcg::tri::Allocator<MyMesh>::AddVertices(m, vertCount);
for(unsigned int i=0;i<vertCount;++i)
m.vert[i].P()=vcg::Point3f(cloud1->points[i].x,cloud1->points[i].y,cloud1->points[i].z);
// Now convert the polygon indices to VCG MyMesh => make VCG faces..
int triCount = meshFixed.polygons.size();
if(triCount==1)
{
if(meshFixed.polygons[0].vertices[0]==0 && meshFixed.polygons[0].vertices[1]==0 && meshFixed.polygons[0].vertices[2]==0)
triCount=0;
}
Allocator<MyMesh>::AddFaces(m, triCount);
for(unsigned int i=0;i<triCount;++i)
{
m.face[i].V(0)=&m.vert[meshFixed.polygons[i].vertices[0]];
m.face[i].V(1)=&m.vert[meshFixed.polygons[i].vertices[1]];
m.face[i].V(2)=&m.vert[meshFixed.polygons[i].vertices[2]];
}
vcg::tri::UpdateBounding<MyMesh>::Box(m);
vcg::tri::UpdateNormal<MyMesh>::PerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
printf("Input mesh vn:%i fn:%i\n",m.VN(),m.FN());
// Start to flip all normals to outside
vcg::face::FFAdj<MyMesh>::FFAdj();
vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
bool oriented, orientable;
if ( vcg::tri::Clean<MyMesh>::CountNonManifoldEdgeFF(m)>0 ) {
std::cout << "Mesh has some not 2-manifold faces, Orientability requires manifoldness" << std::endl; // text
return; // can't continue, mesh can't be processed
}
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented,orientable);
vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
// now convert VCG back to pcl::PolygonMesh
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
cloud->is_dense = false;
cloud->width = vertCount;
cloud->height = 1;
cloud->points.resize (vertCount);
// Now fill the pointcloud of the mesh
for(int i=0; i<vertCount; i++)
{
cloud->points[i].x = m.vert[i].P()[0];
cloud->points[i].y = m.vert[i].P()[1];
cloud->points[i].z = m.vert[i].P()[2];
}
pcl::toROSMsg(*cloud,meshFixed.cloud);
std::vector<pcl::Vertices> polygons;
// Now fill the indices of the triangles/faces of the mesh
for(int i=0; i<triCount; i++)
{
pcl::Vertices vertices;
vertices.vertices.push_back(m.face[i].V(0)-&*m.vert.begin());
vertices.vertices.push_back(m.face[i].V(1)-&*m.vert.begin());
vertices.vertices.push_back(m.face[i].V(2)-&*m.vert.begin());
polygons.push_back(vertices);
}
meshFixed.polygons = polygons;
Which results in: (Meshlab still shows normals are facing both sides)
I finally solved the problem. I'm still using the VCG library; starting from the new code above, I slightly updated the following section:
vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh(m, oriented,orientable);
//vcg::tri::Clean<MyMesh>::FlipNormalOutside(m);
//vcg::tri::Clean<MyMesh>::FlipMesh(m);
//vcg::tri::UpdateTopology<MyMesh>::FaceFace(m);
//vcg::tri::UpdateTopology<MyMesh>::TestFaceFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexNormalizedPerFace(m);
vcg::tri::UpdateNormal<MyMesh>::PerVertexFromCurrentFaceNormal(m);
Now I've updated the vcg::tri::Clean<MyMesh>::OrientCoherentlyMesh() function in clean.h. The update here is to orient the first polygon of a group correctly. Also, after swapping the edge, the normal of the face is recalculated and updated.
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
RequireFFAdjacency(m);
assert(&Oriented != &Orientable);
assert(m.face.back().FFp(0)); // This algorithms require FF topology initialized
Orientable = true;
Oriented = true;
tri::UpdateSelection<MeshType>::FaceClear(m);
std::stack<FacePointer> faces;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD() && !fi->IsS())
{
// each face put in the stack is selected (and oriented)
fi->SetS();
// New section of code to orient the initial face correctly
if(fi->N()[2]>0.0)
{
face::SwapEdge<FaceType,true>(*fi, 0);
face::ComputeNormal(*fi);
}
// End of new code section.
faces.push(&(*fi));
// empty the stack
while (!faces.empty())
{
FacePointer fp = faces.top();
faces.pop();
// make consistently oriented the adjacent faces
for (int j = 0; j < 3; j++)
{
//get one of the adjacent face
FacePointer fpaux = fp->FFp(j);
int iaux = fp->FFi(j);
if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
{
if (!CheckOrientation(*fpaux, iaux))
{
Oriented = false;
if (!fpaux->IsS())
{
face::SwapEdge<FaceType,true>(*fpaux, iaux);
// New line to update face normal
face::ComputeNormal(*fpaux);
// end of new section.
assert(CheckOrientation(*fpaux, iaux));
}
else
{
Orientable = false;
break;
}
}
// put the oriented face into the stack
if (!fpaux->IsS())
{
fpaux->SetS();
faces.push(fpaux);
}
}
}
}
}
if (!Orientable) break;
}
}
Besides this, I also updated the function bool CheckOrientation(FaceType &f, int z) to base the check on the normals' z-directions.
template <class FaceType>
bool CheckOrientation(FaceType &f, int z)
{
// Added next section to calculate the difference between normal z-directions
FaceType *original = f.FFp(z);
double nf2,ng2;
nf2=f.N()[2];
ng2=original->N()[2];
// End of additional section
if (IsBorder(f, z))
return true;
else
{
FaceType *g = f.FFp(z);
int gi = f.FFi(z);
// changed if statement from: if (f.V0(z) == g->V1(gi))
if (nf2/abs(nf2)==ng2/abs(ng2))
return true;
else
return false;
}
}
The result is as I expect and desire from the algorithm:
I have a line that is defined by two points.
start = (xs,ys)
end = (xe, ye)
The drawing function that I'm using only accepts lines that are fully inside the screen coordinates.
Screen size is (xSize, ySize).
Top left corner is (0,0). Bottom right corner is (xSize, ySize).
Some other functions give me a line that is defined, for example, as start(-50, -15), end(5000, 200), so its ends are outside of the screen.
In C++
struct Vec2
{
    int x, y;
};
Vec2 start, end;      // This is all a little bit pseudo code
Vec2 screenSize;      // You can access coordinates like start.x, end.y
How can I calculate a new start and end that lie on the screen edge, not outside the screen?
I know how to do it on paper, but I can't transfer it to C++.
On paper I search for the point that belongs to both the edge and the line, but that is too much calculation to translate directly into C++.
Can you help?
There are many line clipping algorithms like:
Cohen–Sutherland wikipedia page with implementation
Liang–Barsky wikipedia page
Nicholl–Lee–Nicholl (NLN)
and many more; see Line Clipping on Wikipedia.
[EDIT1]
See below figure:
There are 3 kinds of start point:
1. sx > 0 and sy < 0 (red line)
2. sx < 0 and sy > 0 (yellow line)
3. sx < 0 and sy < 0 (green and violet lines)
In situations 1 and 2, simply find the Xintersect or Yintersect respectively and choose it as the new start point.
As you can see, there are 2 kinds of lines in situation 3. In this situation, find both Xintersect and Yintersect and choose the intersection point nearer to the end point, i.e. the one with the minimum distance to endPoint:
min(distance(Xintersect, endPoint), distance(Yintersect, endPoint))
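A minimal sketch of that idea for the left and top screen edges (the lines x = 0 and y = 0); the Vec2d type and helper names are illustrative only:
struct Vec2d { double x, y; };

// Intersection of the segment (s, e) with the vertical line x = 0 (the left edge)
static Vec2d intersectLeft(Vec2d s, Vec2d e) {
    double t = (0.0 - s.x) / (e.x - s.x);
    return { 0.0, s.y + t * (e.y - s.y) };
}

// Intersection of the segment (s, e) with the horizontal line y = 0 (the top edge)
static Vec2d intersectTop(Vec2d s, Vec2d e) {
    double t = (0.0 - s.y) / (e.y - s.y);
    return { s.x + t * (e.x - s.x), 0.0 };
}

static double dist2(Vec2d a, Vec2d b) {
    double dx = a.x - b.x, dy = a.y - b.y;
    return dx * dx + dy * dy;
}

// Situation 3 (sx < 0 and sy < 0): compute both intersections and keep
// whichever is nearer to the end point.
Vec2d clipStartCase3(Vec2d s, Vec2d e) {
    Vec2d xi = intersectLeft(s, e);
    Vec2d yi = intersectTop(s, e);
    return dist2(xi, e) < dist2(yi, e) ? xi : yi;
}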
[EDIT2]
// Liang-Barsky function by Daniel White # http://www.skytopia.com/project/articles/compsci/clipping.html
// This function inputs 8 numbers, and outputs 4 new numbers (plus a boolean value to say whether the clipped line is drawn at all).
//
bool LiangBarsky (double edgeLeft, double edgeRight, double edgeBottom, double edgeTop,   // Define the x/y clipping values for the border.
                  double x0src, double y0src, double x1src, double y1src,                 // Define the start and end points of the line.
                  double &x0clip, double &y0clip, double &x1clip, double &y1clip)         // The output values, so declare these outside.
{
    double t0 = 0.0; double t1 = 1.0;
    double xdelta = x1src-x0src;
    double ydelta = y1src-y0src;
    double p,q,r;
    for(int edge=0; edge<4; edge++) {   // Traverse through left, right, bottom, top edges.
        if (edge==0) { p = -xdelta; q = -(edgeLeft-x0src); }
        if (edge==1) { p = xdelta;  q =  (edgeRight-x0src); }
        if (edge==2) { p = -ydelta; q = -(edgeBottom-y0src); }
        if (edge==3) { p = ydelta;  q =  (edgeTop-y0src); }
        r = q/p;
        if(p==0 && q<0) return false;   // Don't draw line at all. (parallel line outside)
        if(p<0) {
            if(r>t1) return false;      // Don't draw line at all.
            else if(r>t0) t0=r;         // Line is clipped!
        } else if(p>0) {
            if(r<t0) return false;      // Don't draw line at all.
            else if(r<t1) t1=r;         // Line is clipped!
        }
    }
    x0clip = x0src + t0*xdelta;
    y0clip = y0src + t0*ydelta;
    x1clip = x0src + t1*xdelta;
    y1clip = y0src + t1*ydelta;
    return true;                        // (clipped) line is drawn
}
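A possible call for the example in the question, assuming xSize and ySize hold the screen dimensions and the top-left-origin convention, so "bottom" and "top" here are simply the smaller and larger y bounds of the screen rectangle:
double x0c, y0c, x1c, y1c;
// Clip the segment start(-50, -15), end(5000, 200) to the screen rectangle.
if (LiangBarsky(0.0, xSize - 1.0, 0.0, ySize - 1.0,
                -50.0, -15.0, 5000.0, 200.0,
                x0c, y0c, x1c, y1c))
{
    // (x0c, y0c) - (x1c, y1c) is the visible part of the line; draw that.
}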
Here is a function I wrote. It cycles through all 4 planes (left, top, right, bottom) and clips each point by the plane.
// Clips a line segment to an axis-aligned rectangle
// Returns true if clipping is successful
// Returns false if line segment lies outside the rectangle
bool clipLineToRect(int a[2], int b[2],
                    int xmin, int ymin, int xmax, int ymax)
{
    int mins[2] = {xmin, ymin};
    int maxs[2] = {xmax, ymax};
    int normals[2] = {1, -1};
    for (int axis=0; axis<2; axis++) {
        for (int plane=0; plane<2; plane++) {
            // Check both points
            for (int pt=1; pt<=2; pt++) {
                int* pt1 = pt==1 ? a : b;
                int* pt2 = pt==1 ? b : a;
                // If both points are outside the same plane, the line is
                // outside the rectangle
                if ( (a[0]<xmin && b[0]<xmin) || (a[0]>xmax && b[0]>xmax) ||
                     (a[1]<ymin && b[1]<ymin) || (a[1]>ymax && b[1]>ymax)) {
                    return false;
                }
                const int n = normals[plane];
                if ( (n==1 && pt1[axis]<mins[axis]) ||   // check left/top plane
                     (n==-1 && pt1[axis]>maxs[axis]) ) { // check right/bottom plane
                    // Calculate interpolation factor t using ratio of signed distance
                    // of each point from the plane
                    const float p = (n==1) ? mins[axis] : maxs[axis];
                    const float q1 = pt1[axis];
                    const float q2 = pt2[axis];
                    const float d1 = n * (q1-p);
                    const float d2 = n * (q2-p);
                    const float t = d1 / (d1-d2);
                    // t should always be between 0 and 1
                    if (t<0 || t >1) {
                        return false;
                    }
                    // Interpolate to find the new point
                    pt1[0] = (int)(pt1[0] + (pt2[0] - pt1[0]) * t );
                    pt1[1] = (int)(pt1[1] + (pt2[1] - pt1[1]) * t );
                }
            }
        }
    }
    return true;
}
Example Usage:
void testClipLineToRect()
{
int screenWidth = 320;
int screenHeight = 240;
int xmin=0;
int ymin=0;
int xmax=screenWidth-1;
int ymax=screenHeight-1;
int a[2] = {-10, 10};
int b[2] = {300, 250};
printf("Before clipping:\n\ta={%d, %d}\n\tb=[%d, %d]\n",
a[0], a[1], b[0], b[1]);
if (clipLineToRect(a, b, xmin, ymin, xmax, ymax)) {
printf("After clipping:\n\ta={%d, %d}\n\tb=[%d, %d]\n",
a[0], a[1], b[0], b[1]);
}
else {
printf("clipLineToRect returned false\n");
}
}
Output:
Before clipping:
a={-10, 10}
b=[300, 250]
After clipping:
a={0, 17}
b=[285, 239]