DirectX/C++: Marching Cubes Indexing

I've implemented the Marching Cubes algorithm in a DirectX environment (to test it and have some fun). Upon completion, I noticed that the resulting model looks heavily distorted, as if the indices were off.
I've attempted to check the indices, but I think the vertices are already ordered correctly by the lookup tables (examples at http://paulbourke.net/geometry/polygonise/ ). The current build uses a 15^3 volume.
Marching cubes iterates over the array as normal:
for (float iX = 0; iX < CellFieldSize.x; iX++) {
    for (float iY = 0; iY < CellFieldSize.y; iY++) {
        for (float iZ = 0; iZ < CellFieldSize.z; iZ++) {
            MarchCubes(XMFLOAT3(iX * StepSize, iY * StepSize, iZ * StepSize), StepSize);
        }
    }
}
The MarchCubes function is called as above; its body (with in_Position and Scale as the cell origin and size) looks like this:
void MC::MarchCubes(XMFLOAT3 in_Position, float Scale) {
    ...
    int Corner, Vertex, VertexTest, Edge, Triangle, FlagIndex, EdgeFlags;
    float Offset;
    XMFLOAT3 Color;
    float CubeValue[8];
    XMFLOAT3 EdgeVertex[12];
    XMFLOAT3 EdgeNorm[12];
    //Local copy of the field values at the cube's eight corners.
    for (Vertex = 0; Vertex < 8; Vertex++) {
        CubeValue[Vertex] = (this->*fSample)(
            in_Position.x + VertexOffset[Vertex][0] * Scale,
            in_Position.y + VertexOffset[Vertex][1] * Scale,
            in_Position.z + VertexOffset[Vertex][2] * Scale
        );
    }
    FlagIndex = 0;
Intersection calculations:
...
//Test vertices for intersection.
for (VertexTest = 0; VertexTest < 8; VertexTest++) {
    if (CubeValue[VertexTest] <= TargetValue)
        FlagIndex |= 1 << VertexTest;
}
//Find which edges are intersected by the surface.
EdgeFlags = CubeEdgeFlags[FlagIndex];
if (EdgeFlags == 0) {
    return;
}
for (Edge = 0; Edge < 12; Edge++) {
    if (EdgeFlags & (1 << Edge)) {
        Offset = GetOffset(CubeValue[EdgeConnection[Edge][0]], CubeValue[EdgeConnection[Edge][1]], TargetValue); // Get offset function definition. Needed!
        EdgeVertex[Edge].x = in_Position.x + VertexOffset[EdgeConnection[Edge][0]][0] + Offset * EdgeDirection[Edge][0] * Scale;
        EdgeVertex[Edge].y = in_Position.y + VertexOffset[EdgeConnection[Edge][0]][1] + Offset * EdgeDirection[Edge][1] * Scale;
        EdgeVertex[Edge].z = in_Position.z + VertexOffset[EdgeConnection[Edge][0]][2] + Offset * EdgeDirection[Edge][2] * Scale;
        GetNormal(EdgeNorm[Edge], EdgeVertex[Edge].x, EdgeVertex[Edge].y, EdgeVertex[Edge].z); //Need normal values
    }
}
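For reference, the GetOffset used above is the standard linear interpolation of where the isosurface crosses an edge; a minimal sketch, following the reference code linked above (the member signature is an assumption):
// Approximate the fraction along the edge (0..1) at which the sampled field
// crosses ValueDesired, by linear interpolation between the two corner samples.
float MC::GetOffset(float Value1, float Value2, float ValueDesired)
{
    float Delta = Value2 - Value1;
    if (Delta == 0.0f)
        return 0.5f; // Degenerate edge: fall back to the midpoint.
    return (ValueDesired - Value1) / Delta;
}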
And the resulting triangle data gets pushed into a holding struct for DirectX:
for (Triangle = 0; Triangle < 5; Triangle++) {
    if (TriangleConnectionTable[FlagIndex][3 * Triangle] < 0) break;
    for (Corner = 0; Corner < 3; Corner++) {
        Vertex = TriangleConnectionTable[FlagIndex][3 * Triangle + Corner];
        GetColor(Color, EdgeVertex[Vertex], EdgeNorm[Vertex]);
        Data.VertexData.push_back(XMFLOAT3(EdgeVertex[Vertex].x, EdgeVertex[Vertex].y, EdgeVertex[Vertex].z));
        Data.NormalData.push_back(XMFLOAT3(EdgeNorm[Vertex].x, EdgeNorm[Vertex].y, EdgeNorm[Vertex].z));
        Data.ColorData.push_back(XMFLOAT4(Color.x, Color.y, Color.z, 1.0f));
    }
}
(This is the same ordering as the original GL implementation)

Turns out, I missed a pair of parentheses enforcing the intended operator precedence: the corner offset needs to be multiplied by Scale as well, not only the Offset * EdgeDirection term.
EdgeVertex[Edge].x = in_Position.x + (VertexOffset[EdgeConnection[Edge][0]][0] + Offset * EdgeDirection[Edge][0]) * Scale;
EdgeVertex[Edge].y = in_Position.y + (VertexOffset[EdgeConnection[Edge][0]][1] + Offset * EdgeDirection[Edge][1]) * Scale;
EdgeVertex[Edge].z = in_Position.z + (VertexOffset[EdgeConnection[Edge][0]][2] + Offset * EdgeDirection[Edge][2]) * Scale;
Corrected, obtained Visine; resumed fun.

Related

Adding Ozone to my sky simulation

I implemented a simulation of the colour of the sky a while ago by following the Scratchapixel tutorial: https://www.scratchapixel.com/lessons/procedural-generation-virtual-worlds/simulating-sky
I adapted it to the actual sun position and am able to get realistic sky colours during the day. However, I noticed that after sunset / before sunrise the colours are greyish when they should be deep blue. After researching this, I read that it is due to ozone absorption not being present in my model.
I used the extinction coefficients (3.426, 8.298, 0.356) * 0.06e-5 found in https://media.contentapi.ea.com/content/dam/eacom/frostbite/files/s2016-pbs-frostbite-sky-clouds-new.pdf
and also read that, since ozone does not scatter, it should only be added to the transmittance value.
Therefore, I modified the code from scratchapixel as follows:
for (uint32_t i = 0; i < numSamples; ++i) {
    vec3 samplePosition = ray_in2.origin() + (tCurrent + segmentLength * 0.5f) * ray_in2.direction();
    float height = samplePosition.length() - atmosphere.earthRadius;
    // compute optical depth for light
    float hr = exp(-height / atmosphere.Hr) * segmentLength;
    float hm = exp(-height / atmosphere.Hm) * segmentLength;
    float ho = exp(-height / atmosphere.Hr) * segmentLength * (6e-7);
    opticalDepthR += hr;
    opticalDepthM += hm;
    opticalDepthO += ho;
    // light optical depth
    float t0Light, t1Light;
    ...
    for (j = 0; j < numSamplesLight; ++j) {
        vec3 samplePositionLight = samplePosition + (tCurrentLight + segmentLengthLight * 0.5f) * sunDir;
        float heightLight = samplePositionLight.length() - atmosphere.earthRadius;
        if (heightLight < 0) break;
        opticalDepthLightR += exp(-heightLight / atmosphere.Hr) * segmentLengthLight;
        opticalDepthLightM += exp(-heightLight / atmosphere.Hm) * segmentLengthLight;
        opticalDepthLightO += exp(-heightLight / atmosphere.Hr) * segmentLengthLight * (6e-7);
        tCurrentLight += segmentLengthLight;
    }
    if (j == numSamplesLight) {
        vec3 tau = betaR * (opticalDepthR + opticalDepthLightR) +
                   betaM * 1.1f * (opticalDepthM + opticalDepthLightM) +
                   betaO * (opticalDepthO + opticalDepthLightO);
        vec3 attenuation(exp(-tau.x()), exp(-tau.y()), exp(-tau.z()));
Summary: I added the variables opticalDepthO and opticalDepthLightO, which are calculated the same way as the Rayleigh optical depths but multiplied by 6e-7.
Then the sum of opticalDepthO and opticalDepthLightO is multiplied by the extinction coefficient for ozone (betaO) and added to tau.
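For reference, a minimal sketch of how the ozone extinction coefficient from the Frostbite slides could be defined in this code, assuming the same three-component vec3 constructor used for attenuation above:
// Ozone absorption coefficients from the Frostbite sky/atmosphere slides:
// (3.426, 8.298, 0.356) * 0.06e-5.
const vec3 betaO(3.426f * 0.06e-5f, 8.298f * 0.06e-5f, 0.356f * 0.06e-5f);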
The problem is, I see no difference in my sky colour before and after adding ozone. Can someone point me to what I'm doing wrong?

UV mapping issue: artifact on sphere in OpenGL

I am UV mapping a 2D texture onto a 3D sphere from its X, Y, Z coordinates, using the formula
u = (0.5 + atan2(X, Y) / (2 * glm::pi<double>()));
v = (0.5 - asin(Z) / glm::pi<double>());
in modern OpenGL with C++.
I don't know why there is this artifact on the sphere; I can't figure it out.
OK, I have figured this out and corrected it, so I thought I would finally answer here.
Big thanks to BDL and Rabbid76.
Whenever u == 0, I added the same vertex position (X, Y, Z) to the vertices vector (or array) again and also increased the index, but hard-coded the texture u to 1.0f for that duplicate.
No issues now; the seam looks perfect.
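A minimal sketch of that seam fix, using the question's formula and glm; the helper name and the separate position/UV containers are illustrative assumptions:
#include <cmath>
#include <vector>
#include <glm/glm.hpp>
#include <glm/gtc/constants.hpp>

// Append one sphere vertex, duplicating it where the texture seam (u == 0) lies.
void appendSphereVertex(double X, double Y, double Z,
                        std::vector<glm::vec3>& positions,
                        std::vector<glm::vec2>& uvs)
{
    double u = 0.5 + std::atan2(X, Y) / (2.0 * glm::pi<double>());
    double v = 0.5 - std::asin(Z) / glm::pi<double>();

    positions.push_back(glm::vec3(X, Y, Z));
    uvs.push_back(glm::vec2((float)u, (float)v));

    if (u == 0.0)
    {
        // Duplicate the seam vertex with u forced to 1.0f, so triangles on the
        // far side of the seam can index this copy instead of wrapping the
        // texture coordinate all the way back to 0.
        positions.push_back(glm::vec3(X, Y, Z));
        uvs.push_back(glm::vec2(1.0f, (float)v));
    }
}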
This is a detail of a textured sphere geometry which is indexed; you should use an index buffer for better performance.
m_meridians and m_latitudes are the detail level of the sphere.
for (size_t i = 0; i < m_meridians + 1; i++)
{
    for (size_t j = 0; j < m_latitudes + 2; j++)
    {
        // texCoord in the range [(0,0), (1,1)]
        QVector2D texCoord((float)i / m_meridians, (float)j / (m_latitudes+1));
        // theta = longitude from 0 to 2pi
        // phi = latitude from -pi/2 to pi/2
        double theta, phi;
        theta = 2*M_PI * texCoord.x();
        phi = M_PI * texCoord.y() - M_PI_2;
        QVector3D pos;
        pos.setY((float)std::sin(phi));
        pos.setX((float)std::cos(phi) * std::cos(theta));
        pos.setZ((float)std::cos(phi) * std::sin(theta));
        m_vertices.push_back({pos, texCoord});
    }
}
// Calculate triangle indices
for (size_t i = 0; i < m_meridians; i++)
{
    // Construct triangles between successive meridians
    for (size_t j = 0; j < m_latitudes + 1; j++)
    {
        m_indices.push_back(i * (m_latitudes+2) + j);
        m_indices.push_back(i * (m_latitudes+2) + j+1);
        m_indices.push_back((i+1) * (m_latitudes+2) + j+1);
        m_triangleCount++;
        m_indices.push_back((i+1) * (m_latitudes+2) + j+1);
        m_indices.push_back((i+1) * (m_latitudes+2) + j);
        m_indices.push_back(i * (m_latitudes+2) + j);
        m_triangleCount++;
    }
}
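Once those buffers are uploaded, drawing is a normal indexed draw call; a minimal sketch, assuming m_indices holds unsigned int values and the vertex attributes and element array buffer are already bound:
glDrawElements(GL_TRIANGLES,                    // the index list describes triangles
               (GLsizei)(m_triangleCount * 3),  // 3 indices per triangle
               GL_UNSIGNED_INT,                 // assumed element type of m_indices
               nullptr);                        // start of the bound index buffer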

Why doesn't my OpenCL 3D image lookup work?

I have been having trouble with an OpenCL kernel which I've written producing incorrect results (compared to a reference brute-force CPU implementation).
I tracked the problem down to a 3D lookup table I'm using which seems to be returning garbage results, rather than the values which I passed in.
I have the following (simplified) OpenCL kernel for reading a precomputed function from a 3D image type:
__constant sampler_t legSampler = CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;

inline float normalizedLegendre(int n, int m, float z, image3d_t legendreLUT)
{
    float nCoord = (((float) n) / get_image_width(legendreLUT));
    float mCoord = (((float) m) / get_image_height(legendreLUT));
    float zCoord = ((z + 1.0f) / 2.0f);
    float4 coord = (float4)(floor(nCoord) + 0.5f, floor(mCoord) + 0.5f, zCoord, 0.0f);
    return read_imagef(legendreLUT, legSampler, coord).x;
}

__kernel void noiseMain(__read_only image3d_t legendreLUT, __global float* outLegDump)
{
    //k is the linear index into the array.
    int k = get_global_id(0);
    if(k < get_image_depth(legendreLUT))
    {
        float z = ((float) k / (float) get_image_depth(legendreLUT)) * 2.0 - 1.0;
        float legLookup = normalizedLegendre(5, 4, z, legendreLUT);
        float texCoord = ((float) k / 1024.0) * 2 - 1;
        outLegDump[k] = legLookup;
    }
}
On the host side, I generate the 3D image, legendreLUT, using the following code:
static const size_t NLEGPOLYBINS = 1024;
static const size_t NLEGPOLYORDERS = 16;

boost::scoped_array<float> legendreHostBuffer(new float[NLEGPOLYORDERS * NLEGPOLYORDERS * NLEGPOLYBINS]);
float stepSize = 1.0 / (((float) NLEGPOLYBINS/2.0) - 0.5);
float z = -1.0;
std::cout << "Generating legendre polynomials..." << std::endl;
for(size_t n = 0; n < NLEGPOLYORDERS; n++)
{
    for(size_t m = 0; m < NLEGPOLYORDERS; m++)
    {
        for(size_t zI = 0; zI < NLEGPOLYBINS; zI++)
        {
            using namespace boost::math;
            size_t index = (n * NLEGPOLYORDERS * NLEGPOLYBINS) + (m * NLEGPOLYBINS) + zI;
            //-1..1 in NLEGPOLYBINS steps...
            float val;
            if(m > n)
            {
                legendreHostBuffer[index] = 0;
                continue;
            }
            else
            {
                //boost::math::legendre_p
                val = legendre_p<float>(n,m,z);
            }
            float nPm = n+m;
            float nMm = n-m;
            float factNum;
            float factDen;
            factNum = factorial<float>(n-m);
            factDen = factorial<float>(n+m);
            float nrmTerm;
            nrmTerm = pow(-1.0, m) * sqrt((n + 0.5) * (factNum/factDen));
            legendreHostBuffer[index] = val;
            z += stepSize;
            if(z > 1.0) z + 1.0;
        }
        z = -1.0;
    }
}

//DEBUGGING STEP: Dump everything we've just generated for m = 4, n = 5, z=-1..1
std::ofstream legDump("legDump.txt");
for(size_t i = 0; i < NLEGPOLYBINS; i++)
{
    int n = 5; int m = 4;
    size_t index = (n * NLEGPOLYORDERS * NLEGPOLYBINS) + (m * NLEGPOLYBINS) + i;
    float texCoord = ((float) i / (float) NLEGPOLYBINS) * 2 - 1;
    legDump << i << " " << texCoord << " " << legendreHostBuffer[index] << std::endl;
}
legDump.close();

std::cout << "Creating legendre polynomial look up table image..." << std::endl;
cl::ImageFormat legFormat(CL_R, CL_FLOAT);
//Generate out legendre polynomials image...
m_legendreTable = cl::Image3D(m_clContext,
                              CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                              legFormat,
                              NLEGPOLYORDERS,
                              NLEGPOLYORDERS,
                              NLEGPOLYBINS,
                              0,
                              0,
                              legendreHostBuffer.get());
Other than the index, the actual generation of the values is more or less irrelevant, but I've included it here for completeness.
And here is how I execute the kernel and read back the results:
cl::Buffer outLegDump = cl::Buffer(m_clContext, CL_MEM_WRITE_ONLY, NLEGPOLYBINS * sizeof(float));
//Create out kernel...
cl::Kernel kernel(m_program, "noiseMain");
kernel.setArg(0, m_legendreTable);
kernel.setArg(1, outLegDump);
size_t kernelSize = 1024;
cl::NDRange globalRange(kernelSize);
cl::NDRange localRange(1);
m_commandQueue.enqueueNDRangeKernel(kernel, cl::NullRange, globalRange, cl::NullRange);
m_commandQueue.finish();
boost::scoped_array<float> legDumpHost(new float[NLEGPOLYBINS]);
m_commandQueue.enqueueReadBuffer(outLegDump, CL_TRUE, 0, NLEGPOLYBINS * sizeof(float), legDumpHost.get());
std::ofstream legreadback("legreadback.txt");
for(size_t i = 0; i < NLEGPOLYBINS; i++)
{
    legreadback << i << " " << legDumpHost[i] << std::endl;
}
legreadback.close();
When I look at the dumped data (i.e. what was written to legDump.txt from the host-side buffer), I get the expected values. However, when I compare it to the data read back from the device (i.e. what was looked up by the kernel and written to legreadback.txt), I get incorrect values.
Since I'm calculating 1024 values in both cases, I'll spare everyone the whole dump; here are the first and last few values of each:
legDump.txt (host-side sanity check):
0 -0
1 -0.0143913
2 -0.0573401
3 -0.12851
4 -0.227566
5 -0.354175
..
..
1020 0.12859
1021 0.0144185
1022 0.0144185
1023 1.2905e-8
legreadback.txt (device-side lookup and readback):
0 1
1 1
2 1
3 1
4 0.5
5 0
..
..
1020 7.74249e+11
1021 -1.91171e+15
1022 -3.81029e+15
1023 -1.91173e+15
Note that these values are the same across multiple runs of the code, so I don't think it's an initialization problem.
I can only assume that I'm calculating indices wrong somewhere, but I don't know where. I've checked the calculation of the Z coordinate (which naturally is defined on -1..1), its conversion to texture coordinates (0..1 range), and the conversion of M and N to texture coordinates (which should be done without interpolation), and found nothing to be wrong.
So my question is thus:
What is the proper way to create and index a 3D lookup table in OpenCL?
As suspected, the problem turned out to be in the host-side indexing used to generate the lookup table.
The previous index calculation:
size_t index = (n * NLEGPOLYORDERS * NLEGPOLYBINS) + (m * NLEGPOLYBINS) + zI;
was based on C++ 3D array indexing, which is not how addressing works for an OpenCL 3D image. A 3D image can be thought of as a "stack" of 2D images on top of each other, where the depth coordinate (the zI bin in this case) selects the image, and the horizontal and vertical coordinates (n and m respectively, in this case) select the pixel within the selected image.
The correct indexing calculation is:
size_t index = m * NLEGPOLYORDERS + n + (zI * NLEGPOLYORDERS * NLEGPOLYORDERS);
As one can see, this new approach fits the "stacked image" layout described previously, whereas the previous calculation does not.
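A small helper makes that layout explicit; a minimal sketch (the function name is illustrative), with x varying fastest, then y, then the slice index z:
#include <cstddef>

// Linear index into a host buffer destined for an OpenCL 3D image of
// dimensions width x height x depth: x selects the pixel within a row,
// y selects the row within a slice, and z selects the slice.
inline std::size_t image3DIndex(std::size_t x, std::size_t y, std::size_t z,
                                std::size_t width, std::size_t height)
{
    return z * (width * height) + y * width + x;
}

// For the table above: x = n, y = m, z = zI, with width = height = NLEGPOLYORDERS,
// which reproduces m * NLEGPOLYORDERS + n + zI * NLEGPOLYORDERS * NLEGPOLYORDERS.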

Using glColorPointer with glDrawElements results in nothing being drawn

I'm working on making uniformly colored spheres for a project and I'm running into an issue. The spheres render fine, but when I try to color them with glColorPointer they stop appearing. OpenGL isn't reporting any errors when I call glGetError, so I'm at a loss as to why this happens.
The code to generate the vertices, colors etc:
void SphereObject::setupVertices()
{
    //determine the array sizes
    //vertices per row (+1 for the repeated one at the end) * three for each coordinate
    //times the number of rows
    int arraySize = myNumVertices * 3;
    myNumIndices = (myVerticesPerRow + 1) * myRows * 2;
    myVertices = new GLdouble[arraySize];
    myIndices = new GLuint[myNumIndices];
    myNormals = new GLdouble[arraySize];
    myColors = new GLint[myNumVertices * 4];

    //use spherical coordinates to calculate the vertices
    double phiIncrement = 360 / myVerticesPerRow;
    double thetaIncrement = 180 / (double)myRows;
    int arrayIndex = 0;
    int colorArrayIndex = 0;
    int indicesIndex = 0;
    double x, y, z = 0;

    for(double theta = 0; theta <= 180; theta += thetaIncrement)
    {
        //loop including the repeat for the last vertex
        for(double phi = 0; phi <= 360; phi += phiIncrement)
        {
            //make sure that the last vertex is repeated
            if(360 - phi < phiIncrement)
            {
                x = myRadius * sin(radians(theta)) * cos(radians(0));
                y = myRadius * sin(radians(theta)) * sin(radians(0));
                z = myRadius * cos(radians(theta));
            }
            else
            {
                x = myRadius * sin(radians(theta)) * cos(radians(phi));
                y = myRadius * sin(radians(theta)) * sin(radians(phi));
                z = myRadius * cos(radians(theta));
            }
            myColors[colorArrayIndex] = myColor.getX();
            myColors[colorArrayIndex + 1] = myColor.getY();
            myColors[colorArrayIndex + 2] = myColor.getZ();
            myColors[colorArrayIndex + 3] = 1;
            myVertices[arrayIndex] = x;
            myVertices[arrayIndex + 1] = y;
            myVertices[arrayIndex + 2] = z;
            if(theta <= 180 - thetaIncrement)
            {
                myIndices[indicesIndex] = arrayIndex / 3;
                myIndices[indicesIndex + 1] = (arrayIndex / 3) + myVerticesPerRow + 1;
                indicesIndex += 2;
            }
            arrayIndex += 3;
            colorArrayIndex += 4;
        }
    }
}
And the code to actually render the thing
void SphereObject::render()
{
    glPushMatrix();
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);

    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(4, GL_INT, 0, myColors);

    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_DOUBLE, 0, myVertices);

    glDrawElements(GL_QUAD_STRIP, myNumIndices, GL_UNSIGNED_INT, myIndices);

    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glPopClientAttrib();
    glPopMatrix();
}
Any and all help would be appreciated. I'm really having a hard time for some reason.
When you use GL_INT (or any integer type) for the color pointer, OpenGL linearly maps the largest possible integer value to 1.0f (maximum color) and 0 to 0.0f (minimum color).
Therefore, unless your R, G, B and A values are in the billions, they will likely come out completely black (or transparent, if blending is enabled). I see that you've set alpha = 1, which will essentially become zero after conversion to float.
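A minimal sketch of the usual fix: store the colors as floats in the 0..1 range and tell glColorPointer they are GL_FLOAT (this assumes myColor's components are already in 0..1; the changed member type is illustrative):
// In setupVertices(): allocate float colors instead of GLint.
GLfloat* myColors = new GLfloat[myNumVertices * 4];

// Inside the vertex loop:
myColors[colorArrayIndex]     = (GLfloat)myColor.getX();
myColors[colorArrayIndex + 1] = (GLfloat)myColor.getY();
myColors[colorArrayIndex + 2] = (GLfloat)myColor.getZ();
myColors[colorArrayIndex + 3] = 1.0f;

// In render(): declare the component type as float.
glColorPointer(4, GL_FLOAT, 0, myColors);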

A method for indexing triangles from a loaded heightmap?

I am currently writing a method to load in a noisy heightmap, but I'm missing the triangle indexing to do so. I want to make an algorithm that takes an image, plus its width and height, and constructs a terrain node out of it.
Here's what I have so far, in somewhat pseudo-code:
Vertex* vertices = new Vertices[image.width * image.height];
Index* indices; // How do I judge how many indices I will have?
float scaleX = 1 / image.width;
float scaleY = 1 / image.height;
float currentYScale = 0;
for(int y = 0; y < image.height; ++y) {
    float currentXScale = 0;
    for (int x = 0; x < image.width; ++x) {
        Vertex* v = vertices[x * y];
        v.x = currentXScale;
        v.y = currentYScale;
        v.z = image[x,y];
        currentXScale += scaleX;
    }
    currentYScale += scaleY;
}
This works well enough for my needs. My only problem is this: how would I calculate the number of indices and their values for drawing the triangles? I have some familiarity with indices, but not with how to calculate them programmatically; I can only do that statically.
As far as your code above goes, using vertices[x * y] isn't right - if you use that, then e.g. vert(2,3) == vert(3,2). What you want is something like vertices[y * image.width + x], but you can do it more efficiently by incrementing a counter (see below).
Here's the equivalent code I use. It's in C# unfortunately, but hopefully it should illustrate the point:
/// <summary>
/// Constructs the vertex and index buffers for the terrain (for use when rendering the terrain).
/// </summary>
private void ConstructBuffers()
{
    int heightmapHeight = Heightmap.GetLength(0);
    int heightmapWidth = Heightmap.GetLength(1);
    int gridHeight = heightmapHeight - 1;
    int gridWidth = heightmapWidth - 1;

    // Construct the individual vertices for the terrain.
    var vertices = new VertexPositionTexture[heightmapHeight * heightmapWidth];
    int vertIndex = 0;
    for(int y = 0; y < heightmapHeight; ++y)
    {
        for(int x = 0; x < heightmapWidth; ++x)
        {
            var position = new Vector3(x, y, Heightmap[y,x]);
            var texCoords = new Vector2(x * 2f / heightmapWidth, y * 2f / heightmapHeight);
            vertices[vertIndex++] = new VertexPositionTexture(position, texCoords);
        }
    }

    // Create the vertex buffer and fill it with the constructed vertices.
    this.VertexBuffer = new VertexBuffer(Renderer.GraphicsDevice, typeof(VertexPositionTexture), vertices.Length, BufferUsage.WriteOnly);
    this.VertexBuffer.SetData(vertices);

    // Construct the index array.
    var indices = new short[gridHeight * gridWidth * 6];    // 2 triangles per grid square x 3 vertices per triangle
    int indicesIndex = 0;
    for(int y = 0; y < gridHeight; ++y)
    {
        for(int x = 0; x < gridWidth; ++x)
        {
            int start = y * heightmapWidth + x;
            indices[indicesIndex++] = (short)start;
            indices[indicesIndex++] = (short)(start + 1);
            indices[indicesIndex++] = (short)(start + heightmapWidth);
            indices[indicesIndex++] = (short)(start + 1);
            indices[indicesIndex++] = (short)(start + 1 + heightmapWidth);
            indices[indicesIndex++] = (short)(start + heightmapWidth);
        }
    }

    // Create the index buffer.
    this.IndexBuffer = new IndexBuffer(Renderer.GraphicsDevice, typeof(short), indices.Length, BufferUsage.WriteOnly);
    this.IndexBuffer.SetData(indices);
}
I guess the key point is that given a heightmap of size heightmapHeight * heightmapWidth, you need (heightmapHeight - 1) * (heightmapWidth - 1) * 6 indices, since you're drawing:
2 triangles per grid square
3 vertices per triangle
(heightmapHeight - 1) * (heightmapWidth - 1) grid squares in your terrain.
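Since the question's pseudo-code is C++-style, here is a minimal translation of the index generation above (image.width/image.height follow the question's naming; unsigned int indices are assumed):
// 2 triangles per grid square, 3 indices per triangle.
int gridWidth  = image.width  - 1;
int gridHeight = image.height - 1;
unsigned int* indices = new unsigned int[gridWidth * gridHeight * 6];

int indicesIndex = 0;
for (int y = 0; y < gridHeight; ++y) {
    for (int x = 0; x < gridWidth; ++x) {
        int start = y * image.width + x;   // top-left vertex of this grid square
        // First triangle of the square.
        indices[indicesIndex++] = start;
        indices[indicesIndex++] = start + 1;
        indices[indicesIndex++] = start + image.width;
        // Second triangle of the square.
        indices[indicesIndex++] = start + 1;
        indices[indicesIndex++] = start + 1 + image.width;
        indices[indicesIndex++] = start + image.width;
    }
}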