The program that I am writing takes in the vertex data of a 3D mesh, performs a series of calculations (forgive the vagueness; I'll try to explain in better detail later), and outputs a binary file that defines where the edges are on the mesh. My program then draws a colored line wherever there is an edge. Without the appropriate vertex shader this would look like a regular triangulated mesh, but once the vertex shader is applied, lines are drawn only on the edges that are "sharp" (the dot product of their two face normals falls at or below a threshold close to zero), along with the edges on the outside of the figure. My implementation for the outline is not correct: I assumed that if an edge wasn't behind the object and didn't define a sharp edge, it had to be an outline edge. I haven't found a satisfactory answer to this elsewhere, and I don't want to rely on the old trick of re-drawing the mesh in a solid color, scaled slightly larger than the original mesh. This approach was to be entirely math-based, relying only on the vertex data of the mesh. I am writing a program that uses the following vertex shader:
uniform mat4 worldMatrix;
uniform mat4 projMatrix;
uniform mat4 viewProjMatrix;
uniform vec4 eyepos;
attribute vec3 a;
attribute vec3 b;
attribute vec3 n1;
attribute vec3 n2;
attribute float w;
void main()
{
float a_vertex = dot(eyepos.xyz - a, n1);
float b_vertex = dot(eyepos.xyz - a, n2);
if (a_vertex * b_vertex > 0.0) // signs are the same: both faces point the same way relative to the eye, so this is not a silhouette edge
{
gl_Position = vec4(2.0,2.0,2.0,1.0);
}
else // the outline of the figure
{
if(w == 0.0)
{
vec4 p = vec4(a.x, a.y, a.z, 1.0);
p = p * worldMatrix * viewProjMatrix;
gl_Position = p;
}
else
{
vec4 p = vec4(b.x, b.y, b.z, 1.0);
p = p * worldMatrix * viewProjMatrix;
gl_Position = p;
}
}
if(dot(n1, n2) <= 0.2) // there is a sharp edge
{
if(w == 0.0)
{
vec4 p = vec4(a.x, a.y, a.z, 1.0);
p = p * worldMatrix * viewProjMatrix;
gl_Position = p;
}
else
{
vec4 p = vec4(b.x, b.y, b.z, 1.0);
p = p * worldMatrix * viewProjMatrix;
gl_Position = p;
}
}
}
... to take information from a binary file that is written using this program in C++:
#include <iostream>
#include "llgl.h"
#include <fstream>
#include <vector>
#include <map>
#include <set>
#include "SuperMesh.h"
using namespace std;
using namespace llgl;
struct Vertex
{
float x,y,z,w;
float s,t,p,q;
float nx,ny,nz,nw;
};
bool isFileAlright(string fName)
{
ifstream in(fName.c_str());
if(!in.good())
return false;
return true;
}
int main(int argc, char* argv[])
{
// INPUT FILE NAME //
string fName;
cout << "Enter the path to your spec.mesh file here: ";
cin >> fName;
while(!isFileAlright(fName))
{
cout << "Enter the path to your spec.mesh file here: ";
cin >> fName;
}
SuperMesh* Model = new SuperMesh(fName.c_str());
// END INPUT //
Model->load();
Model->draw();
string fname = Model->fname;
string FileName = fname.substr(0, fname.size() - 10); // cut the last 10 characters off of the string, removing ".spec.mesh"...
FileName = FileName + ".bin"; // ... and then we make it a .bin file
cout << FileName << endl;
ofstream out(FileName.c_str(), ios::binary);
for (unsigned w = 0; w < Model->m.size(); w++)
{
vector<float> &vdata = Model->m[w]->vdata;
vector<char> &idata = Model->m[w]->idata;
// Create vertex and index views into the raw data and a map for the edge mesh, then loop over all triangles to build the edge map, and loop over the map to write its edges to the file. //
Vertex* V = (Vertex*)(&vdata[0]);
unsigned short* I16 = (unsigned short*)(&idata[0]);
unsigned char* I8 = (unsigned char*)(&idata[0]);
unsigned int* I32 = (unsigned int*)(&idata[0]);
map<set<int>, vector<vec3> > EM;
for(unsigned i = 0; i < Model->m[w]->ic; i += 3) // 3 because we're looking at triangles //
{
Mesh* foo = Model->m[w];
int i1;
int i2;
int i3;
if( Model->m[w]->ise == GL_UNSIGNED_BYTE)
{
i1 = I8[i];
i2 = I8[i + 1];
i3 = I8[i + 2];
}
else if( Model->m[w]->ise == GL_UNSIGNED_SHORT)
{
i1 = I16[i];
i2 = I16[i + 1];
i3 = I16[i + 2];
}
else
{
i1 = I32[i];
i2 = I32[i + 1];
i3 = I32[i + 2];
}
vec3 p = vec3(V[i1].x, V[i1].y, V[i1].z); // to represent the point in 3D space of each vertex on every triangle on the mesh
vec3 q = vec3(V[i2].x, V[i2].y, V[i2].z);
vec3 r = vec3(V[i3].x, V[i3].y, V[i3].z);
vec3 v1 = p - q;
vec3 v2 = r - q;
vec3 n = cross(v2,v1); //important to make sure the order is correct here: cross VECTOR TWO with VECTOR ONE//
set<int> tmp;
tmp.insert(i1); tmp.insert(i2);
EM[tmp].push_back(n);
set<int> tmp2;
tmp2.insert(i2); tmp2.insert(i3);
EM[tmp2].push_back(n);
set<int> tmp3;
tmp3.insert(i3); tmp3.insert(i1);
EM[tmp3].push_back(n);
//we have now pushed every needed point into our edge map
}
int edgeNumber = 0;
cout << "There should be 12 edges on a lousy cube." << endl;
for(map<set<int>, vector<vec3> >::iterator it = EM.begin(); it != EM.end(); ++it)
{
//Now we will take our edge map and write its data to the file!//
/* Information is written to the file in this form:
Vertex One, Vertex Two, Normal One, Normal Two, r (where r, depending on its value, determines whether one edge is on top of the other in the case
where two edges are aligned with one another)
*/
set<int>::iterator tmp = it->first.begin();
int pi = *tmp;
tmp++;
int qi = *tmp;
Vertex One = V[pi];
Vertex Two = V[qi];
vec3 norm1 = it->second[0];
vec3 norm2;
if(it->second.size() == 1)
norm2 = -1 * norm1;
else
norm2 = it->second[1];
out.write((char*) &One, 12);
out.write((char*) &Two, 12);
out.write((char*) &norm1, 12);
out.write((char*) &norm2, 12);
float r = 0;
out.write((char*) &r, 4);
out.write((char*) &One, 12);
out.write((char*) &Two, 12);
out.write((char*) &norm1, 12);
out.write((char*) &norm2, 12);
r = 1;
out.write((char*) &r, 4);
edgeNumber++;
cout << "Wrote edge #" << edgeNumber << endl;
}
}
return 0;
}
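For reference, each edge produces two 52-byte records in the .bin file (one with r = 0, one with r = 1), built from the five write calls above. A sketch of the implied record layout (EdgeRecord is a name I'm introducing; it doesn't appear in the project):
// 13 floats = 52 bytes per record, matching the five write() calls.
struct EdgeRecord
{
    float a[3];  // Vertex One: x, y, z (the first 12 bytes of Vertex)
    float b[3];  // Vertex Two: x, y, z
    float n1[3]; // normal of the first face sharing the edge
    float n2[3]; // normal of the second face (-n1 when the edge borders only one face)
    float r;     // 0.0 in the first copy of the edge, 1.0 in the second
};
Presumably the renderer reads these back as the shader attributes a, b, n1, n2, and w.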
The problem that this program has is that it does neither of these two essential things in the test case where I use it to draw a simple box with outlines:
It does not draw outlines. The vertex shader is not sufficient to determine anything more than where the edges of the object are. The binary file that makes this happen is pre-computed in a separate program using code from the second snippet posted above, and then it is saved as a .bin file along with the mesh assets to which it belongs. However, raw vertex data would only take me so far, and I seek a way to draw a line around the outside of the mesh without using more traditional methods.
It does not draw ALL of the edges that I need. In my test case, two of the edges are missing, and I cannot figure out for the life of me why. I figure I must have done something wrong in writing the edge map.
A couple notes about the above code:
llgl is an OpenGL wrapper that I have used to simplify many elements of OpenGL. It is not used extensively here, but rather in the creation of meshes, done elsewhere.
Things like Mesh and SuperMesh (a collection of meshes into one rigid body) are meant to be 3D objects in my scene. In my test case, there is only one Mesh in my scene, and defining a SuperMesh of a single Mesh is essentially just creating a single Mesh.
The "draw" call in the second snippet, which pre-computes a Mesh's edge map, does not actually draw anything. It is necessary to gain access to the Mesh's vertex data.
The variable "ise" is taken from the individual Meshes in the SuperMesh, and is a variable found by reading it in from the original Blender .OBJ file. It is related to how much memory should be used to store the important vertex data. It generally isn't a good idea to allocate more space than is needed for these values, as I've been told by friends and mentors who work with Blender.
The code isn't well commented, since I'm not the only one who has worked on it, and I unfortunately have only a limited understanding of how the second snippet can iterate through all of the triangles on a mesh and still miss the last two edges. Once I understand better what this code should do when properly written, I plan on heavily commenting it and using it in future applications.
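For what it's worth, here is a minimal, standalone sketch of the edge-map structure I'm aiming for, boiled down to one quad split into two triangles (Vec3 and its helpers are hypothetical stand-ins for the llgl types, and I key the map on a sorted index pair, which for two indices is equivalent to the std::set<int> keys above):
#include <algorithm>
#include <cstdio>
#include <map>
#include <utility>
#include <vector>
struct Vec3 { float x, y, z; };
Vec3 sub(Vec3 a, Vec3 b)   { Vec3 r = { a.x - b.x, a.y - b.y, a.z - b.z }; return r; }
Vec3 cross(Vec3 a, Vec3 b) { Vec3 r = { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x }; return r; }
int main()
{
    // One face of a cube split into two triangles sharing the diagonal (0,2).
    Vec3 verts[4] = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    int tris[2][3] = { {0,1,2}, {0,2,3} };
    // Edge map: each edge, keyed by its sorted vertex-index pair, collects the
    // normals of every triangle that touches it (1 normal = border, 2 = shared).
    std::map<std::pair<int,int>, std::vector<Vec3> > EM;
    for (int t = 0; t < 2; ++t)
    {
        Vec3 p = verts[tris[t][0]], q = verts[tris[t][1]], r = verts[tris[t][2]];
        Vec3 n = cross(sub(r, q), sub(p, q)); // same cross(v2, v1) order as above
        for (int e = 0; e < 3; ++e)
        {
            int i1 = tris[t][e], i2 = tris[t][(e + 1) % 3];
            EM[std::make_pair(std::min(i1, i2), std::max(i1, i2))].push_back(n);
        }
    }
    std::printf("%u edges (expected 5: four border edges plus one shared diagonal)\n",
                (unsigned) EM.size());
    return 0;
}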
Order of multiplication between a matrix and a vector is not commutative, so your vertex shader has to output Projection * Model * Vertex and not the opposite.
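Concretely, with GLSL's default column-vector convention that means (a minimal sketch reusing the question's matrix and attribute names):
// Column vectors are transformed right to left: the world matrix is
// applied to the vertex first, then the view-projection matrix.
vec4 p = vec4(a, 1.0);
gl_Position = viewProjMatrix * worldMatrix * p;
(Since v * M equals transpose(M) * v, the question's row-vector order only gives the same result when the matrices are supplied transposed; as the follow-up below notes, the order in the question turned out to be alright, which implies the matrices were supplied in that form.)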
I solved the mystery of the undrawn lines by allocating more space for writing vertex data in a different part of my code.
As for my other problems: although the order of multiplication being done in my vertex shader was actually alright, I had messed up another fundamental concept of vector math. The dot product of two face normals is a negative number when the normals make an obtuse angle... the way a sharp point on my model would. There was also the faulty logic above that basically said that if the face is visible, draw all of the lines on it. I re-wrote my shader to test first whether a face is visible, and then, in that same conditional block, do the test for sharp edges. Now, if a face is visible BUT it doesn't create a sharp edge, the shader will ignore that edge. Also, outlines appear now, just not perfectly. Here is a modified version of the above vertex shader:
uniform mat4 worldMatrix; /* the matrix that defines how to project a point from
object space to world space.*/
uniform mat4 viewProjMatrix; // the view matrix (where the camera looks) times the projection matrix (how 3D points are projected onto the 2D screen).
uniform vec4 eyepos; // the position of the eye, given by the program.
attribute vec3 a; // one vertex on an edge, with x, y, and z coordinates.
attribute vec3 b; // the other edge vertex.
attribute vec3 n1; // the normal of the face the edge is on.
attribute vec3 n2; // another normal in the case that an edge shares two faces... otherwise, this is the same as n1.
attribute float w; // an attribute given to make a binary choice between two edges when they draw on top of one another.
void main()
{
// WORLD SPACE ATTRIBUTES //
vec4 eye_world = eyepos * worldMatrix;
vec4 a_world = vec4(a.x, a.y,a.z,1.0) * worldMatrix;
vec4 b_world = vec4(b.x, b.y,b.z,1.0) * worldMatrix;
vec4 n1_world = normalize(vec4(n1.x, n1.y,n1.z,0.0) * worldMatrix);
vec4 n2_world = normalize(vec4(n2.x, n2.y,n2.z,0.0) * worldMatrix);
// END WORLD SPACE ATTRIBUTES //
// TEST CASE ATTRIBUTES //
float a_vertex = dot(eye_world - a_world, n1_world);
float b_vertex = dot(eye_world - b_world, n2_world);
float normalDot = dot(n1_world.xyz, n2_world.xyz);
float vertProduct = a_vertex * b_vertex;
float hardness = 0.0; // this would be the value for an object made of sharp angles, like a box. Take a look at its use below.
// END TEST CASE ATTRIBUTES //
gl_Position = vec4(2.0,2.0,2.0,1.0); // if all else fails, keeping this here will discard unwanted data.
if (vertProduct >= 0.1) // NOTE: face is behind the viewable portion of the object, normally uses 0.0 when not checking for silhouette
{
gl_Position = vec4(2.0,2.0,2.0,1.0);
}
else if(vertProduct < 0.1 && vertProduct >= -0.1) // NOTE: face makes almost a right angle with the eye vector
{
if(w == 0.0)
{
vec4 p = vec4(a_world.x, a_world.y, a_world.z, 1.0);
p = p * viewProjMatrix;
gl_Position = p;
}
else
{
vec4 p = vec4(b_world.x, b_world.y, b_world.z, 1.0);
p = p * viewProjMatrix;
gl_Position = p;
}
}
else // NOTE: this is the case where you can very clearly see a face.
{ // NOTE: the number that normalDot compares to should be its "hardness" value. The more negative the value, the smoother the surface.
// a.k.a. the less we care about hard edges (when the normals of the faces make an obtuse angle) on the object, the more negative
// hardness becomes on a scale of 0.0 to -1.0.
if(normalDot <= hardness) // NOTE: the dot product of the two normals is obtuse, so we are looking at a sharp edge.
{
if(w == 0.0)
{
vec4 p = vec4(a_world.x, a_world.y, a_world.z, 1.0);
p = p * viewProjMatrix;
gl_Position = p;
}
else
{
vec4 p = vec4(b_world.x, b_world.y, b_world.z, 1.0);
p = p * viewProjMatrix;
gl_Position = p;
}
}
else // NOTE: not sharp enough, just throw the vertex away
{
gl_Position = vec4(2.0,2.0,2.0,1.0);
}
}
}
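As a quick sanity check on the hardness threshold (a throwaway C++ sketch, not code from the project): adjacent faces of a box meet at 90 degrees, so their unit normals have a dot product of exactly 0.0, which passes a hardness of 0.0, while nearly coplanar faces on a smooth surface give a dot product close to 1.0 and are ignored.
#include <cstdio>
// Dot product of two unit face normals; the shader compares this to "hardness".
float dot3(const float a[3], const float b[3])
{
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}
int main()
{
    const float top[3]   = { 0.0f, 1.0f, 0.0f };     // top face of a box
    const float side[3]  = { 1.0f, 0.0f, 0.0f };     // side face: a 90-degree edge
    const float slope[3] = { 0.0f, 0.996f, 0.087f }; // ~5 degrees off: a smooth edge
    std::printf("box edge:    dot = %.3f (<= 0.0, drawn as sharp)\n", dot3(top, side));
    std::printf("smooth edge: dot = %.3f (> 0.0, ignored)\n", dot3(top, slope));
    return 0;
}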
So, I'm encountering a really bizarre (at least to me as a compute shader noob) phenomenon when I compile my compute shader and check it with glGetShaderiv(m_shaderID, GL_COMPILE_STATUS, &status). Inexplicably, my compute shader takes much longer to compile when I increase the size of my work groups! When I have one-dimensional work groups, it compiles in less than a second, but when I increase the size of my work groups to 4x1x6, the compute shader takes 10+ minutes to compile! How strange.
For background, I'm trying to implement a light clustering algorithm (essentially the one shown here: http://www.aortiz.me/2018/12/21/CG.html#tiled-shading--forward), and my compute shader is this monster:
// TODO: Figure out optimal tile size, currently using a 16x9x24 subdivision
#define FLT_MAX 3.402823466e+38
#define FLT_MIN 1.175494351e-38
#define DBL_MAX 1.7976931348623158e+308
#define DBL_MIN 2.2250738585072014e-308
layout(local_size_x = 4, local_size_y = 9, local_size_z = 4) in;
// TODO: Change to reflect my light structure
// struct PointLight{
// vec4 position;
// vec4 color;
// uint enabled;
// float intensity;
// float range;
// };
// TODO: Pack this more efficiently
struct Light {
vec4 position;
vec4 direction;
vec4 ambientColor;
vec4 diffuseColor;
vec4 specularColor;
vec4 attributes;
vec4 intensity;
ivec4 typeIndexAndFlags;
// uint flags;
};
// Array containing offset and number of lights in a cluster
struct LightGrid{
uint offset;
uint count;
};
struct VolumeTileAABB{
vec4 minPoint;
vec4 maxPoint;
};
layout(std430, binding = 0) readonly buffer LightBuffer {
Light data[];
} lightBuffer;
layout (std430, binding = 1) buffer clusterAABB{
VolumeTileAABB cluster[ ];
};
layout (std430, binding = 2) buffer screenToView{
mat4 inverseProjection;
uvec4 tileSizes;
uvec2 screenDimensions;
};
// layout (std430, binding = 3) buffer lightSSBO{
// PointLight pointLight[];
// };
// SSBO of active light indices
layout (std430, binding = 4) buffer lightIndexSSBO{
uint globalLightIndexList[];
};
layout (std430, binding = 5) buffer lightGridSSBO{
LightGrid lightGrid[];
};
layout (std430, binding = 6) buffer globalIndexCountSSBO{
uint globalIndexCount;
};
// Shared variables, shared between all invocations WITHIN A WORK GROUP
// TODO: See if I can use gl_WorkGroupSize for this, gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z
// A group-shared array which contains all the lights being evaluated
shared Light sharedLights[4*9*4]; // size is the thread count of one work group
uniform mat4 viewMatrix;
bool testSphereAABB(uint light, uint tile);
float sqDistPointAABB(vec3 point, uint tile);
bool testConeAABB(uint light, uint tile);
float getLightRange(uint lightIndex);
bool isEnabled(uint lightIndex);
// Runs in batches of multiple Z slices at once
// In this implementation, 6 batches, since each thread group contains four z slices (24/4=6)
// We begin by each thread representing a cluster
// Then in the light traversal loop they change to representing lights
// Then change again near the end to represent clusters
// NOTE: Tiles actually mean clusters, it's just a legacy name from tiled shading
void main(){
// Reset every frame
globalIndexCount = 0; // How many lights are active in this scene
uint threadCount = gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z; // Number of threads in a group, same as local_size_x, local_size_y, local_size_z
uint lightCount = lightBuffer.data.length(); // Number of total lights in the scene
uint numBatches = uint((lightCount + threadCount -1) / threadCount); // Number of groups of lights that will be completed, i.e., number of passes
uint tileIndex = gl_LocalInvocationIndex + gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z * gl_WorkGroupID.z;
// uint tileIndex = gl_GlobalInvocationID; // doesn't work, is uvec3
// Local thread variables
uint visibleLightCount = 0;
uint visibleLightIndices[100]; // local light index list, to be transferred to global list
// Every light is being checked against every cluster in the view frustum
// TODO: Perform active cluster determination
// Each individual thread will be responsible for loading a light and writing it to shared memory so other threads can read it
for( uint batch = 0; batch < numBatches; ++batch){
uint lightIndex = batch * threadCount + gl_LocalInvocationIndex;
//Prevent overflow by clamping to last light which is always null
lightIndex = min(lightIndex, lightCount);
//Populating shared light array
// NOTE: It is VERY important that lightBuffer.data not be referenced after this point,
// since that is not thread-safe
sharedLights[gl_LocalInvocationIndex] = lightBuffer.data[lightIndex];
barrier(); // Synchronize read/writes between invocations within a work group
//Iterating within the current batch of lights
for( uint light = 0; light < threadCount; ++light){
if( isEnabled(light)){
uint lightType = uint(sharedLights[light].typeIndexAndFlags[0]);
if(lightType == 0){
// Point light
if( testSphereAABB(light, tileIndex) ){
visibleLightIndices[visibleLightCount] = batch * threadCount + light;
visibleLightCount += 1;
}
}
else if(lightType == 1){
// Directional light
visibleLightIndices[visibleLightCount] = batch * threadCount + light;
visibleLightCount += 1;
}
else if(lightType == 2){
// Spot light
if( testConeAABB(light, tileIndex) ){
visibleLightIndices[visibleLightCount] = batch * threadCount + light;
visibleLightCount += 1;
}
}
}
}
}
// We want all thread groups to have completed the light tests before continuing
barrier();
// Back to every thread representing a cluster
// Adding the light indices to the cluster light index list
uint offset = atomicAdd(globalIndexCount, visibleLightCount);
for(uint i = 0; i < visibleLightCount; ++i){
globalLightIndexList[offset + i] = visibleLightIndices[i];
}
// Updating the light grid for each cluster
lightGrid[tileIndex].offset = offset;
lightGrid[tileIndex].count = visibleLightCount;
}
// Return whether or not the specified light intersects with the specified tile (cluster)
bool testSphereAABB(uint light, uint tile){
float radius = getLightRange(light);
vec3 center = vec3(viewMatrix * sharedLights[light].position);
float squaredDistance = sqDistPointAABB(center, tile);
return squaredDistance <= (radius * radius);
}
// TODO: Different test for spot-lights
// Has been done by using several AABBs for spot-light cone, this could be a good approach, or even just use one to start.
bool testConeAABB(uint light, uint tile){
// Light light = lightBuffer.data[lightIndex];
// float innerAngleCos = light.attributes[0];
// float outerAngleCos = light.attributes[1];
// float innerAngle = acos(innerAngleCos);
// float outerAngle = acos(outerAngleCos);
// FIXME: Actually do something clever here
return true;
}
// Get range of light given the specified light index
float getLightRange(uint lightIndex){
int lightType = sharedLights[lightIndex].typeIndexAndFlags[0];
float range;
if(lightType == 0){
// Point light
float brightness = 0.01; // cutoff for end of range
float c = sharedLights[lightIndex].attributes.x;
float lin = sharedLights[lightIndex].attributes.y;
float quad = sharedLights[lightIndex].attributes.z;
range = (-lin + sqrt(lin*lin - 4.0 * c * quad + (4.0/brightness)* quad)) / (2.0 * quad);
}
else if(lightType == 1){
// Directional light
range = FLT_MAX;
}
else{
// Spot light
range = FLT_MAX;
}
return range;
}
// Whether the light at the specified index is enabled
bool isEnabled(uint lightIndex){
uint flags = sharedLights[lightIndex].typeIndexAndFlags[2];
return (flags & 1) != 0; // the enabled state lives in the low bit
}
// Get squared distance from a point to the AABB of the specified tile (cluster)
float sqDistPointAABB(vec3 point, uint tile){
float sqDist = 0.0;
VolumeTileAABB currentCell = cluster[tile];
cluster[tile].maxPoint[3] = tile;
for(int i = 0; i < 3; ++i){
float v = point[i];
if(v < currentCell.minPoint[i]){
sqDist += (currentCell.minPoint[i] - v) * (currentCell.minPoint[i] - v);
}
if(v > currentCell.maxPoint[i]){
sqDist += (v - currentCell.maxPoint[i]) * (v - currentCell.maxPoint[i]);
}
}
return sqDist;
}
Edit: Whoops, lost the bottom part of this!
What I don't understand is why changing the size of the work groups affects compilation time at all? It sort of defeats the point of the algorithm if my work group sizes are too small for the compute shader to run efficiently, so I'm hoping there's something that I'm missing.
As a last note, I'd like to avoid using glGetProgramBinary as a solution. Not only because it merely circumvents the issue instead of solving it, but because pre-compiling shaders will not play nicely with the engine's current architecture.
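For context, the cluster grid is 16x9x24 and the local size is 4x9x4, which, if I have the arithmetic right, is where the 4x1x6 figure above comes from: the number of work groups dispatched (a quick arithmetic sketch):
#include <cstdio>
int main()
{
    // Cluster grid from the TODO comment at the top of the shader.
    const int gridX = 16, gridY = 9, gridZ = 24;
    // local_size_x/y/z from the layout declaration.
    const int localX = 4, localY = 9, localZ = 4;
    // Work groups needed to cover the grid: prints "dispatch = 4 x 1 x 6".
    std::printf("dispatch = %d x %d x %d\n",
                gridX / localX, gridY / localY, gridZ / localZ);
    return 0;
}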
So, I'm figuring that this must be a bug in the compiler, since I've replaced the loop in my sqDistPointAABB function with:
vec3 minPoint = currentCell.minPoint.xyz;
vec3 maxPoint = currentCell.maxPoint.xyz;
vec3 t1 = vec3(lessThan(point, minPoint));
vec3 t2 = vec3(greaterThan(point, maxPoint));
vec3 sqDist = t1 * (minPoint - point) * (minPoint - point) + t2 * (maxPoint - point) * (maxPoint - point);
return sqDist.x + sqDist.y + sqDist.z;
And it compiles just fine now, in less than a second! So strange.
I am manually raytracing a 3D image. I have noticed that the farther from the 3D image I am, the worse the aliasing gets.
This 3D image is basically a voxelized representation of the Stanford dragon. I have placed the volume centered at the origin (the diagonals cross at (0,0,0)), meaning that one of the corners is at (-cube_dim, -cube_dim, -cube_dim) and the other is at (cube_dim, cube_dim, cube_dim).
At close range the image is fine:
(The minor "aliasing" you see here is due to me doing a ray marching algorithm, this is not the aliasing I am worried about, this was expected and acceptabel)
However if we get far away enough some aliasing starts to be seen:
(This is a completely different kind of aliasing)
The fragment shader used to generate the image is this:
#version 430
in vec2 f_coord;
out vec4 fragment_color;
uniform layout(binding=0, rgba8) image3D volume_data;
uniform vec3 camera_pos;
uniform float aspect_ratio;
uniform float cube_dim;
uniform int voxel_resolution;
#define EPSILON 0.01
// Check whether the position is inside of the specified box
bool inBoxBounds(vec3 corner, float size, vec3 position)
{
bool inside = true;
//Put the position in the coordinate frame of the box
position-=corner;
//The point is inside only if all of its components are inside
for(int i=0; i<3; i++)
{
inside = inside && (position[i] > -EPSILON);
inside = inside && (position[i] < size+EPSILON);
}
return inside;
}
//Calculate the distance to the intersection with a box, or infinity if the box cannot be hit
float boxIntersection(vec3 origin, vec3 dir, vec3 corner0, float size)
{
//dir = normalize(dir);
//calculate opposite corner
vec3 corner1 = corner0 + vec3(size,size,size);
//Set the ray plane intersections
float coeffs[6];
coeffs[0] = (corner0.x - origin.x)/(dir.x);
coeffs[1] = (corner0.y - origin.y)/(dir.y);
coeffs[2] = (corner0.z - origin.z)/(dir.z);
coeffs[3] = (corner1.x - origin.x)/(dir.x);
coeffs[4] = (corner1.y - origin.y)/(dir.y);
coeffs[5] = (corner1.z - origin.z)/(dir.z);
float t = 1.f/0.f;
//Check for the smallest valid intersection distance
//We allow negative values up to -size to create correct sorting if the origin is
//inside the box
for(uint i=0; i<6; i++)
t = (coeffs[i]>=0) && inBoxBounds(corner0,size,origin+dir*coeffs[i])?
min(coeffs[i],t) : t;
return t;
}
void main()
{
float v_size = cube_dim/voxel_resolution;
vec3 r = (vec3(f_coord.xy,1.f/tan(radians(40))));
r.y /= aspect_ratio;
vec3 dir = normalize(r);//;*v_size*0.5;
r+= camera_pos;
float t = boxIntersection(r, dir, -vec3(cube_dim), cube_dim*2);
if(isinf(t))
discard;
if(!((r.x>=-cube_dim) && (r.x<=cube_dim) && (r.y>=-cube_dim) &&
(r.y<=cube_dim) && (r.z>=-cube_dim) && (r.z<=cube_dim)))
r += dir*t;
vec4 color = vec4(0);
int c=0;
while((r.x>=-cube_dim) && (r.x<=cube_dim) && (r.y>=-cube_dim) &&
(r.y<=cube_dim) && (r.z>=-cube_dim) && (r.z<=cube_dim))
{
r += dir*v_size*0.5;
vec4 val = imageLoad(volume_data, ivec3(((r)*0.5/cube_dim+vec3(0.5))*(voxel_resolution-1)));
if(val.w > 0)
{
color = val;
break;
}
c++;
}
fragment_color = color;
}
Understanding the algorithm
First, we create a ray based on the screen coordinates (we use the standard raytracing technique, where the focal length is 1/tan(angle)).
We then start the ray at the camera's current position.
We check the intersection of the ray with the box containing our object (we basically assume that our 3D texture is a big cube in the scene and check whether we hit it).
If we don't hit it, we discard the fragment. If we do hit it and we're outside, we move along the ray until we are at the surface of the box. If we hit it and are inside, we stay where we are.
At this point we are guaranteed that the position of our ray is inside the box.
Now we move by small segments along the ray until we either find a non-zero value or we hit the end of the box.
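To make the sampling step explicit, here is the voxel-coordinate mapping from the imageLoad line above, pulled out into a small CPU-side sketch (same cube_dim and voxel_resolution as the shader's uniforms; worldToVoxel and IVec3 are names I'm introducing for illustration):
#include <cstdio>
// Mirrors the shader's imageLoad coordinate computation: world positions in
// [-cube_dim, cube_dim] map to voxel indices in [0, voxel_resolution - 1].
struct IVec3 { int x, y, z; };
IVec3 worldToVoxel(float rx, float ry, float rz, float cube_dim, int voxel_resolution)
{
    IVec3 v;
    v.x = (int)((rx * 0.5f / cube_dim + 0.5f) * (voxel_resolution - 1));
    v.y = (int)((ry * 0.5f / cube_dim + 0.5f) * (voxel_resolution - 1));
    v.z = (int)((rz * 0.5f / cube_dim + 0.5f) * (voxel_resolution - 1));
    return v;
}
int main()
{
    // The corner (-cube_dim, -cube_dim, -cube_dim) maps to voxel (0,0,0),
    // and (cube_dim, cube_dim, cube_dim) maps to (voxel_resolution - 1, ...).
    IVec3 lo = worldToVoxel(-1.f, -1.f, -1.f, 1.f, 256);
    IVec3 hi = worldToVoxel( 1.f,  1.f,  1.f, 1.f, 256);
    std::printf("(%d,%d,%d) .. (%d,%d,%d)\n", lo.x, lo.y, lo.z, hi.x, hi.y, hi.z);
    return 0;
}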
I have a vtkPolyData filled with points and cells that I want to draw on the screen. My polydata represents brain fibers (list of lines in 3D). A cell is a fiber. It's working, but I need to add colors between all points. We decided to color the polydata using a shader because there will be a lot of coloring methods. My vertex shader is:
vtkShader2 *shader = vtkShader2::New();
shader->SetType(VTK_SHADER_TYPE_VERTEX);
shader->SetSourceCode(R"VertexShader(
#version 120
attribute vec3 next_point;
varying vec3 vColor; // Pass to fragment shader
void main() {
float r = gl_Vertex.x - next_point.x;
float g = gl_Vertex.y - next_point.y;
float b = gl_Vertex.z - next_point.z;
if (r < 0.0) { r *= -1.0; }
if (g < 0.0) { g *= -1.0; }
if (b < 0.0) { b *= -1.0; }
float norm = 1.0 / sqrt(r*r + g*g + b*b); // GLSL 1.20 requires const locals to have constant initializers
vColor = vec3(r * norm, g * norm, b * norm);
gl_Position = ftransform();
}
)VertexShader");
shader->SetContext(shader_program->GetContext());
shader_program->GetShaders()->AddItem(shader);
The goal here is, for each point, get the next point to calculate the color of the line between them. The problem is that I can't find a way to set the value of "next_point". I'm pretty sure it's always filled with 0.0 because the output image is red, blue and green on the sides.
I tried using vtkProperty::AddShaderVariable() but I never saw any change and the method's documentation hints about a "uniform variable" so it's probably not the right way.
// Split in 3 because I'm not sure how to pass a vtkPoints object to AddShaderVariable
fibersActor->GetProperty()->AddShaderVariable("next_x", nb_points, next_x);
fibersActor->GetProperty()->AddShaderVariable("next_y", nb_points, next_y);
fibersActor->GetProperty()->AddShaderVariable("next_z", nb_points, next_z);
I also tried using a vtkFloatArray filled with my points, then setting it as a data array.
vtkFloatArray *next_point = vtkFloatArray::New();
next_point->SetName("next_point");
next_point->SetNumberOfComponents(3);
next_point->Resize(nb_points);
// Fill next_point ...
polydata->GetPointData()->AddArray(next_point);
// Tried the vtkAssignAttribute class. Did nothing.
tl;dr Can you please tell me how to pass a list of points into a GLSL attribute variable? Thanks for your time.
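The closest lead I've found so far is vtkPolyDataMapper::MapDataArrayToVertexAttribute, which is supposed to bind a named point-data array to a vertex attribute. Something like this sketch (untested on my side; fibersMapper stands for whatever mapper draws the polydata):
#include <vtkPolyDataMapper.h>
#include <vtkDataObject.h>
// Bind the "next_point" point-data array (added with AddArray above)
// to the GLSL attribute of the same name.
void bindNextPointAttribute(vtkPolyDataMapper *fibersMapper)
{
    fibersMapper->MapDataArrayToVertexAttribute(
        "next_point",                            // attribute name in the shader
        "next_point",                            // array name set via SetName()
        vtkDataObject::FIELD_ASSOCIATION_POINTS, // per-point data
        -1);                                     // use all components (x, y, z)
}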
I'm trying to implement Sketchy Drawings. I'm at the part of the process which calls for the use of the noise texture to derive uncertainty values that will provide an offset into the edge map.
Here is a picture of my edge map for a torus:
And here is the noise texture I've gotten using the Perlin function as suggested:
I have these saved as textures in edgeTexture and noiseTexture respectively.
Now I'm stuck on the section where you have to offset the texture coordinates of the edge map by uncertainty values derived from the noise texture. These offset equations are from the book:
offs = turbulence(s, t);
offt = turbulence(1 - s, 1 - t);
I'm ignoring the 2x2 matrix for the time being. Here is my current fragment shader attempt and the result it produces:
#version 330
out vec4 vFragColor;
uniform sampler2D edgeTexture;
uniform sampler2D noiseTexture;
smooth in vec2 vTexCoords;
float turbulence(float s, float t)
{
float sum = 0;
float scale = 1;
float s1 = 1;
vec2 coords = vec2(s,t);
for (int i=0; i < 10; i++)
{
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords);
sum += scale * noise.x;
scale = scale / 2;
s1 = s1 * 2;
}
return sum;
}
void main( void )
{
float off_s = turbulence(vTexCoords.s, vTexCoords.t);
float off_t = turbulence(1 - vTexCoords.s, 1 - vTexCoords.t);
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t));
}
Clearly my addition to the vTexCoords is way off, but I can't see why. I have tried several other turbulence function definitions but none were close to the desired output so I'm thinking my overall approach is flawed somewhere. Any help here is greatly appreciated, and please comment if I haven't been clear. The desired output for a torus would just look like a roughly drawn circle I would imagine.
Your turbulence function will return values in the range (0, 1). First you need to change this to get values centered on 0, and that should be done inside the loop in the function or you'll end up with a strange distribution. So I think you should change the line:
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords);
to
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords) * 2.0 - 1.0;
You then need to scale the offset so that you're not sampling the edge texture too far away from the fragment being drawn. Change:
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t));
to
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t) * off_scale);
where off_scale is some small value (perhaps around 0.05) chosen by experimentation.
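Putting both changes together, the turbulence function and main would look something like this (off_scale hard-coded here only as a starting point for experimentation):
float turbulence(float s, float t)
{
    float sum = 0.0;
    float scale = 1.0;
    float s1 = 1.0;
    vec2 coords = vec2(s, t);
    for (int i = 0; i < 10; i++)
    {
        // remap each octave from (0,1) to (-1,1) before summing
        vec4 noise = texture(noiseTexture, 0.25 * s1 * coords) * 2.0 - 1.0;
        sum += scale * noise.x;
        scale = scale / 2.0;
        s1 = s1 * 2.0;
    }
    return sum;
}
void main(void)
{
    const float off_scale = 0.05; // tune by experimentation
    float off_s = turbulence(vTexCoords.s, vTexCoords.t);
    float off_t = turbulence(1.0 - vTexCoords.s, 1.0 - vTexCoords.t);
    vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t) * off_scale);
}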
I am attempting to add features to a ray tracer in C++. Namely, I am trying to add texture mapping to the spheres. For simplicity, I am using an array to store the texture data. I obtained the texture data by using a hex editor and copying the correct byte values into an array in my code. This was just for my testing purposes. When the values of this array correspond to an image that is simply red, it appears to work close to what is expected except there is no shading.
first image http://dl.dropbox.com/u/367232/Texture.jpg
The bottom right of the image shows what a correct sphere should look like. That sphere is coloured using one set colour, not a texture map.
Another problem is that when the texture map is of something other than one solid colour, it turns white. My test image is a picture of water, and when it maps, it shows only one ring of bluish pixels surrounding the white colour.
bmp http://dl.dropbox.com/u/367232/vPoolWater.bmp
When this is done, it simply appears as this:
second image http://dl.dropbox.com/u/367232/texture2.jpg
Here are a few code snippets:
Color getColor(const Object *object,const Ray *ray, float *t)
{
if (object->materialType == TEXTDIF || object->materialType == TEXTMATTE) {
float distance = *t;
Point pnt = ray->origin + ray->direction * distance;
Point oc = object->center;
Vector ve = Point(oc.x,oc.y,oc.z+1) - oc;
Normalize(&ve);
Vector vn = Point(oc.x,oc.y+1,oc.z) - oc;
Normalize(&vn);
Vector vp = pnt - oc;
Normalize(&vp);
double phi = acos(-vn.dot(vp));
float v = phi / M_PI;
float u;
float num1 = (float)acos(vp.dot(ve));
float num = (num1 /(float) sin(phi));
float theta = num /(float) (2 * M_PI);
if (theta < 0 || isnan(theta)) {theta = 0;} // comparing against NAN with == is always false
if (vn.cross(ve).dot(vp) > 0) {
u = theta;
}
else {
u = 1 - theta;
}
int x = (u * IMAGE_WIDTH) -1;
int y = (v * IMAGE_WIDTH) -1;
int p = (y * IMAGE_WIDTH + x)*3;
return Color(TEXT_DATA[p+2],TEXT_DATA[p+1],TEXT_DATA[p]);
}
else {
return object->color;
}
};
I call the colour code here in Trace:
if (object->materialType == MATTE)
return getColor(object, ray, &t);
Ray shadowRay;
int isInShadow = 0;
shadowRay.origin.x = pHit.x + nHit.x * bias;
shadowRay.origin.y = pHit.y + nHit.y * bias;
shadowRay.origin.z = pHit.z + nHit.z * bias;
shadowRay.direction = light->object->center - pHit;
float len = shadowRay.direction.length();
Normalize(&shadowRay.direction);
float LdotN = shadowRay.direction.dot(nHit);
if (LdotN < 0)
return 0;
Color lightColor = light->object->color;
for (int k = 0; k < numObjects; k++) {
if (Intersect(objects[k], &shadowRay, &t) && !objects[k]->isLight) {
if (objects[k]->materialType == GLASS)
lightColor *= getColor(objects[k], &shadowRay, &t); // attenuate light color by glass color
else
isInShadow = 1;
break;
}
}
lightColor *= 1.f/(len*len);
return (isInShadow) ? 0 : getColor(object, &shadowRay, &t) * lightColor * LdotN;
}
I left out the rest of the code so as not to bog down the post, but it can be seen here. Any help is greatly appreciated. The only portion not included in the code is where I define the texture data, which, as I said, is simply taken straight from a bitmap file of the above image.
Thanks.
It could be that the texture is just washed out because the light is so bright and so close. Notice how in the solid red case, there doesn't seem to be any gradation around the sphere. The red looks like it's saturated.
Your u,v mapping looks right, but there could be a mistake there. I'd add some assert statements to make sure u and v are really between 0 and 1, and that the p index into your TEXT_DATA array is also within range.
If you're debugging your textures, you should use a constant material whose color is determined only by the texture and not the lights. That way you can make sure you are correctly mapping your texture to your primitive and filtering it properly before doing any lighting on it. Then you know that part isn't the problem.
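For example, a sketch of those checks as a helper you could call from getColor right before the TEXT_DATA lookup (checkTexelLookup and textDataSize are hypothetical names; I use IMAGE_WIDTH for both axes because the question's mapping does):
#include <cassert>
// Bounds checks for the sphere's u,v mapping and the computed texel index.
void checkTexelLookup(float u, float v, int x, int y, int p,
                      int imageWidth, int textDataSize)
{
    assert(u >= 0.0f && u <= 1.0f);
    assert(v >= 0.0f && v <= 1.0f);
    assert(x >= 0 && x < imageWidth);
    assert(y >= 0 && y < imageWidth);       // the question maps y with IMAGE_WIDTH too
    assert(p >= 0 && p + 2 < textDataSize); // three bytes are read: p, p+1, p+2
}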