I am looking for the best way to create a billboard in Qt3D. I would like a plane which faces the camera wherever it is and does not change size when the camera dollies forward or back. I have read how to do this using GLSL vertex and geometry shaders, but I am looking for the Qt3D way, unless custom shaders are the most efficient and best way of billboarding.
I have looked, and it appears I can set the matrix on a QTransform via properties, but it isn't clear to me how I would manipulate the matrix, or whether there is a better way. I am using the C++ API, but a QML answer would do; I could port it to C++.
If you want to draw just one billboard, you can add a plane and rotate it whenever the camera moves. However, if you want to do this efficiently with thousands or millions of billboards, I recommend using custom shaders. We did this to draw impostor spheres in Qt3D.
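For the single-billboard case, a minimal C++ sketch could look like the following. It assumes a plane entity whose local +Z axis is its normal; the connection setup, the 0.1 size factor and the function name are illustrative, not an established Qt3D recipe:
#include <Qt3DCore/QTransform>
#include <Qt3DRender/QCamera>
#include <QObject>
#include <QQuaternion>

// Keep planeTransform oriented towards the camera and scale it with distance,
// so the plane keeps roughly the same size on screen when the camera dollies.
void keepFacingCamera(Qt3DRender::QCamera *camera, Qt3DCore::QTransform *planeTransform)
{
    auto update = [camera, planeTransform]() {
        const QVector3D toCamera = camera->position() - planeTransform->translation();
        // Rotate the plane's local +Z axis towards the camera.
        planeTransform->setRotation(QQuaternion::fromDirection(toCamera.normalized(), camera->upVector()));
        // Counteract perspective shrinking; 0.1 is an arbitrary size factor.
        planeTransform->setScale(toCamera.length() * 0.1f);
    };
    QObject::connect(camera, &Qt3DRender::QCamera::positionChanged, planeTransform,
                     [update](const QVector3D &) { update(); });
    QObject::connect(camera, &Qt3DRender::QCamera::upVectorChanged, planeTransform,
                     [update](const QVector3D &) { update(); });
    update();
}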
For the instanced approach, however, we didn't use a geometry shader, because we were targeting systems without geometry shader support. Instead, we used only the vertex shader, placing four vertices at the origin and moving them in the shader. To create many copies, we used instanced drawing and offset each set of four vertices by the position of its sphere. Finally, each of the four vertices is moved so that the resulting quad always faces the camera.
Start out by subclassing QGeometry and creating a buffer functor that creates four points, all at the origin (see spherespointgeometry.cpp). Give each point an ID that we can use later. If you use geometry shaders, the ID is not needed and you can get away with creating only one vertex.
class SpheresPointVertexDataFunctor : public Qt3DRender::QBufferDataGenerator
{
public:
SpheresPointVertexDataFunctor()
{
}
QByteArray operator ()() Q_DECL_OVERRIDE
{
const int verticesCount = 4;
// vec3 pos + float vertexId
const quint32 vertexSize = (3+1) * sizeof(float);
QByteArray verticesData;
verticesData.resize(vertexSize*verticesCount);
float *verticesPtr = reinterpret_cast<float*>(verticesData.data());
// Vertex 1
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 1
*verticesPtr++ = 0.0;
// Vertex 2
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 2
*verticesPtr++ = 1.0;
// Vertex 3
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID3
*verticesPtr++ = 2.0;
// Vertex 4
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 4
*verticesPtr++ = 3.0;
return verticesData;
}
bool operator ==(const QBufferDataGenerator &other) const Q_DECL_OVERRIDE
{
Q_UNUSED(other);
return true;
}
QT3D_FUNCTOR(SpheresPointVertexDataFunctor)
};
For the real positions, we used a separate QBuffer. We also set color and scale, but I have omitted those here (see spheredata.cpp):
void SphereData::setPositions(QVector<QVector3D> positions, QVector3D color, float scale)
{
QByteArray ba;
ba.resize(positions.size() * sizeof(QVector3D));
QVector3D *vboData = reinterpret_cast<QVector3D *>(ba.data());
for(int i=0; i<positions.size(); i++) {
QVector3D &position = vboData[i];
position = positions[i];
}
m_buffer->setData(ba);
m_count = positions.count();
}
Then, in QML, we connected the geometry with the buffer in a QGeometryRenderer. This can also be done in C++ if you prefer; a rough C++ sketch follows the QML below (see Spheres.qml):
GeometryRenderer {
id: spheresMeshInstanced
primitiveType: GeometryRenderer.TriangleStrip
enabled: instanceCount != 0
instanceCount: sphereData.count
geometry: SpheresPointGeometry {
attributes: [
Attribute {
name: "pos"
attributeType: Attribute.VertexAttribute
vertexBaseType: Attribute.Float
vertexSize: 3
byteOffset: 0
byteStride: (3 + 3 + 1) * 4
divisor: 1
buffer: sphereData ? sphereData.buffer : null
}
]
}
}
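For reference, a rough C++ equivalent of the QML wiring above might look like this. SpheresPointGeometry and sphereData come from this answer, the count() and buffer() accessors on SphereData are assumed, and the rest is plain Qt3DRender:
auto *renderer = new Qt3DRender::QGeometryRenderer;
renderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::TriangleStrip);
renderer->setInstanceCount(sphereData->count());

auto *geometry = new SpheresPointGeometry;

auto *posAttribute = new Qt3DRender::QAttribute;
posAttribute->setName("pos");
posAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
posAttribute->setVertexBaseType(Qt3DRender::QAttribute::Float);
posAttribute->setVertexSize(3);
posAttribute->setByteOffset(0);
posAttribute->setByteStride((3 + 3 + 1) * sizeof(float)); // pos + col + scale, matching the QML
posAttribute->setDivisor(1);                              // one value per instance
posAttribute->setBuffer(sphereData->buffer());
geometry->addAttribute(posAttribute);

renderer->setGeometry(geometry);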
Finally, we created custom shaders to draw the billboards. Note that because we were drawing impostor spheres, the billboard size was increased so that the ray tracing in the fragment shader still covers the sphere when viewed from awkward angles. You likely do not need the 2.0*0.6 factor in general.
Vertex shader:
#version 330
in vec3 vertexPosition;
in float vertexId;
in vec3 pos;
in vec3 col;
in float scale;
uniform vec3 eyePosition = vec3(0.0, 0.0, 0.0);
uniform mat4 modelMatrix;
uniform mat4 mvp;
out vec3 modelSpherePosition;
out vec3 modelPosition;
out vec3 color;
out vec2 planePosition;
out float radius;
vec3 makePerpendicular(vec3 v) {
if(v.x == 0.0 && v.y == 0.0) {
if(v.z == 0.0) {
return vec3(0.0, 0.0, 0.0);
}
return vec3(0.0, 1.0, 0.0);
}
return vec3(-v.y, v.x, 0.0);
}
void main() {
vec3 position = vertexPosition + pos;
color = col;
radius = scale;
modelSpherePosition = (modelMatrix * vec4(position, 1.0)).xyz;
vec3 view = normalize(position - eyePosition);
vec3 right = normalize(makePerpendicular(view));
vec3 up = cross(right, view);
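// Map vertexId 0..3 to the quad corners (-1,-1), (1,-1), (-1,1) and (1,1) without branching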
float texCoordX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
float texCoordY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
planePosition = vec2(texCoordX, texCoordY);
position += 2*0.6*(-up - right)*(scale*float(vertexId==0.0));
position += 2*0.6*(-up + right)*(scale*float(vertexId==1.0));
position += 2*0.6*(up - right)*(scale*float(vertexId==2.0));
position += 2*0.6*(up + right)*(scale*float(vertexId==3.0));
vec4 modelPositionTmp = modelMatrix * vec4(position, 1.0);
modelPosition = modelPositionTmp.xyz;
gl_Position = mvp*vec4(position, 1.0);
}
Fragment shader:
#version 330
in vec3 modelPosition;
in vec3 modelSpherePosition;
in vec3 color;
in vec2 planePosition;
in float radius;
out vec4 fragColor;
uniform mat4 modelView;
uniform mat4 inverseModelView;
uniform mat4 inverseViewMatrix;
uniform vec3 eyePosition;
uniform vec3 viewVector;
void main(void) {
vec3 rayDirection = eyePosition - modelPosition;
vec3 rayOrigin = modelPosition - modelSpherePosition;
vec3 E = rayOrigin;
vec3 D = rayDirection;
// Sphere equation
// x^2 + y^2 + z^2 = r^2
// Ray equation is
// P(t) = E + t*D
// We substitute ray into sphere equation to get
// (Ex + Dx * t)^2 + (Ey + Dy * t)^2 + (Ez + Dz * t)^2 = r^2
float r2 = radius*radius;
float a = D.x*D.x + D.y*D.y + D.z*D.z;
float b = 2.0*E.x*D.x + 2.0*E.y*D.y + 2.0*E.z*D.z;
float c = E.x*E.x + E.y*E.y + E.z*E.z - r2;
// discriminant of sphere equation
float d = b*b - 4.0*a*c;
if(d < 0.0) {
discard;
}
float t = (-b + sqrt(d))/(2.0*a);
vec3 sphereIntersection = rayOrigin + t * rayDirection;
vec3 normal = normalize(sphereIntersection);
vec3 normalDotCamera = color*dot(normal, normalize(rayDirection));
float pi = 3.1415926535897932384626433832795;
vec3 position = modelSpherePosition + sphereIntersection;
// flat red
fragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
It has been some time since we first implemented this, and there might be easier ways to do it now, but this should give you an idea of the pieces you need.
I'm rendering a sphere with instanced drawing while rotating the model-view matrix around the Y axis.
It looks ok at the beginning:
But at another angle, things get worse:
It looks to me like a problem with the normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
v_position = transformedPosition;
v_normal = u_normal_matrix * a_normal;
v_color = a_color;
v_coord = a_coord;
gl_Position = u_projection_matrix * transformedPosition;
}
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec3 lightPosition = vec3(0.0); // XXX
// set diffuse and specular colors
vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
vec3 cSpecular = vec3(0.3);
// lighting calculations
vec3 N = normalize(v_normal);
vec3 L = normalize(lightPosition - v_position.xyz);
vec3 E = normalize(-v_position.xyz);
vec3 H = normalize(L + E);
// Calculate coefficients.
float phong = max(dot(N, L), 0.0);
const float kMaterialShininess = 20.0;
const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
// diffuse coefficient
vec3 diffuse = phong * cDiffuse;
// specular coefficient
vec3 specular = blinn * cSpecular;
gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL in the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
float sectorStep = TWO_PI / sectorCount;
float stackStep = PI / stackCount;
for(int i = 0; i <= stackCount; ++i) {
float stackAngle = HALF_PI - i * stackStep;
float xy = radius * cosf(stackAngle);
float z = radius * sinf(stackAngle);
for(int j = 0; j <= sectorCount; ++j) {
float sectorAngle = j * sectorStep;
float x = xy * cosf(sectorAngle);
float y = xy * sinf(sectorAngle);
float nx = x / radius;
float ny = y / radius;
float nz = z / radius;
float s = (float)j / sectorCount;
float t = (float)i / stackCount;
batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
}
}
for(int i = 0; i < stackCount; ++i) {
float k1 = i * (sectorCount + 1);
float k2 = k1 + sectorCount + 1;
for(int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
if (i != 0) {
if (frontFace == CCW) {
batch.addIndices(k1, k1 + 1, k2);
} else {
batch.addIndices(k1, k2, k1 + 1);
}
}
if (i != (stackCount - 1)) {
if (frontFace == CCW) {
batch.addIndices(k1 + 1, k2 + 1, k2);
} else {
batch.addIndices(k1 + 1, k2, k2 + 1);
}
}
}
}
}
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
.setIdentity()
.translate(0, -150, -600)
.rotateY(clock()->getTime() * 0.5f);
State()
.setShader(shader)
.setShaderMatrix<MV>(camera.getMVMatrix())
.setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
.setShaderMatrix<NORMAL>(camera.getNormalMatrix())
.apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework, which provides, among other things, high-level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it solves the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
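// w = 0.0: the normal is a direction, so the translation part of the matrices must not affect it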
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
I'm trying to construct a render engine using OpenGL and C++, but I can't seem to get past this problem. The same model is being rendered 5 different times using different shaders; in 4 out of the 5 shaders the backface culling works properly. In the tessellation shader, however, it does not: any outward faces are invisible, so you can see directly through to the rear ones. Does anyone know why this shader flips the faces?
Vertex Shader
void main()
{
worldVertexPosition_cs = (transformationMatrix * vec4(position_vs, 1.0)).xyz;
worldTextureCoords_cs = textureCoords_vs;
worldNormal_cs = mat3(transpose(inverse(transformationMatrix))) * normal_vs;
}
Control Shader
float getTessLevel(float distance0, float distance1)
{
float avgDistance = (distance0 + distance1) / 2.0;
avgDistance = (100 - avgDistance) / 20;
if (avgDistance < 1) {
avgDistance = 1;
}
return avgDistance;
}
void main()
{
worldTextureCoords_es[gl_InvocationID] = worldTextureCoords_cs[gl_InvocationID];
worldNormal_es[gl_InvocationID] = worldNormal_cs[gl_InvocationID];
worldVertexPosition_es[gl_InvocationID] = worldVertexPosition_cs[gl_InvocationID];
float eyeToVertexDistance0 = distance(eyePos, worldVertexPosition_es[0]);
float eyeToVertexDistance1 = distance(eyePos, worldVertexPosition_es[1]);
float eyeToVertexDistance2 = distance(eyePos, worldVertexPosition_es[2]);
gl_TessLevelOuter[0] = getTessLevel(eyeToVertexDistance1, eyeToVertexDistance2);
gl_TessLevelOuter[1] = getTessLevel(eyeToVertexDistance2, eyeToVertexDistance0);
gl_TessLevelOuter[2] = getTessLevel(eyeToVertexDistance0, eyeToVertexDistance1);
gl_TessLevelInner[0] = gl_TessLevelOuter[2];
}
Evaluation Shader
vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2)
{
return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2;
}
vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2)
{
return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2;
}
void main()
{
worldTextureCoords_fs = interpolate2D(worldTextureCoords_es[0], worldTextureCoords_es[1], worldTextureCoords_es[2]);
worldNormal_fs = interpolate3D(worldNormal_es[0], worldNormal_es[1], worldNormal_es[2]);
worldNormal_fs = normalize(worldNormal_fs);
worldVertexPosition_fs = interpolate3D(worldVertexPosition_es[0], worldVertexPosition_es[1], worldVertexPosition_es[2]);
float displacement = texture(texture_displacement0, worldTextureCoords_fs.xy).x;
worldVertexPosition_fs += worldNormal_fs * (displacement / 1.0f);
gl_Position = projectionMatrix * viewMatrix * vec4(worldVertexPosition_fs.xyz, 1.0);
}
Fragment Shader
void main()
{
vec3 unitNormal = normalize(worldNormal_fs);
vec3 unitLightVector = normalize(lightPosition - worldVertexPosition_fs);
float dotResult = dot(unitNormal, unitLightVector);
float brightness = max(dotResult, blackPoint);
vec3 diffuse = brightness * lightColor;
FragColor = vec4(diffuse, 1.0) * texture(texture_diffuse0, worldTextureCoords_fs);
FragColor.rgb = pow(FragColor.rgb, vec3(1.0/gamma));
}
In the Tessellation Evaluation Shader you have to define the winding order of the generated triangles.
This is done via the cw and ccw input layout qualifiers. The default is ccw.
Either generate clockwise primitives:
layout(triangles, cw) in;
Or generate counterclockwise primitives:
layout(triangles, ccw) in;
I'm trying to implement Oren-Nayar lighting in the fragment shader as shown here.
However, I'm getting some strange lighting effects on the terrain as shown below.
I am currently sending the shader the 'view direction' uniform as the camera's 'front' vector. I am not sure if this is correct, as moving the camera around changes the artifacts.
Multiplying the 'front' vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from some angles. It is particularly noticeable in dark areas and around the edges of the screen.
What could be causing this effect?
Artifact example
How the scene should look
Vertex Shader
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
out VS_OUT {
vec3 normal;
} vert_out;
void main() {
vert_out.normal = normal;
gl_Position = vec4(position, 1.0);
}
Tessellation Control Shader
#version 450
layout(vertices = 3) out;
in VS_OUT {
vec3 normal;
} tesc_in[];
out TESC_OUT {
vec3 normal;
} tesc_out[];
void main() {
if(gl_InvocationID == 0) {
gl_TessLevelInner[0] = 1.0;
gl_TessLevelInner[1] = 1.0;
gl_TessLevelOuter[0] = 1.0;
gl_TessLevelOuter[1] = 1.0;
gl_TessLevelOuter[2] = 1.0;
gl_TessLevelOuter[3] = 1.0;
}
tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation Evaluation Shader
#version 450
layout(triangles, equal_spacing) in;
in TESC_OUT {
vec3 normal;
} tesc_in[];
out TESE_OUT {
vec3 normal;
float height;
vec4 shadow_position;
} tesc_out;
uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;
vec3 lerp(vec3 v0, vec3 v1, vec3 v2) {
return (
(vec3(gl_TessCoord.x) * v0) +
(vec3(gl_TessCoord.y) * v1) +
(vec3(gl_TessCoord.z) * v2)
);
}
vec4 lerp(vec4 v0, vec4 v1, vec4 v2) {
return (
(vec4(gl_TessCoord.x) * v0) +
(vec4(gl_TessCoord.y) * v1) +
(vec4(gl_TessCoord.z) * v2)
);
}
void main() {
gl_Position = lerp(
gl_in[0].gl_Position,
gl_in[1].gl_Position,
gl_in[2].gl_Position
);
tesc_out.normal = normal_matrix * lerp(
tesc_in[0].normal,
tesc_in[1].normal,
tesc_in[2].normal
);
tesc_out.height = gl_Position.y;
tesc_out.shadow_position = depth_matrix * gl_Position;
gl_Position = model_view_perspective * gl_Position;
}
Fragment Shader
#version 450
in TESE_OUT {
vec3 normal;
float height;
vec4 shadow_position;
} frag_in;
out vec4 colour;
uniform vec3 view_direction;
uniform vec3 light_position;
#define PI 3.141592653589793
void main() {
const vec3 ambient = vec3(0.1, 0.1, 0.1);
const float roughness = 0.8;
const vec4 water = vec4(0.0, 0.0, 0.8, 1.0);
const vec4 sand = vec4(0.93, 0.87, 0.51, 1.0);
const vec4 grass = vec4(0.0, 0.8, 0.0, 1.0);
const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
const vec4 snow = vec4(0.9, 0.9, 0.9, 1.0);
if(frag_in.height == 0.0) {
colour = water;
} else if(frag_in.height < 0.2) {
colour = sand;
} else if(frag_in.height < 0.575) {
colour = grass;
} else if(frag_in.height < 0.8) {
colour = ground;
} else {
colour = snow;
}
vec3 normal = normalize(frag_in.normal);
vec3 view_dir = normalize(view_direction);
vec3 light_dir = normalize(light_position);
float NdotL = dot(normal, light_dir);
float NdotV = dot(normal, view_dir);
float angleVN = acos(NdotV);
float angleLN = acos(NdotL);
float alpha = max(angleVN, angleLN);
float beta = min(angleVN, angleLN);
float gamma = dot(view_dir - normal * dot(view_dir, normal), light_dir - normal * dot(light_dir, normal));
float roughnessSquared = roughness * roughness;
float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));
// calculate C1, C2 and C3
float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
float C2 = 0.45 * roughnessSquared9;
if(gamma >= 0.0) {
C2 *= sin(alpha);
} else {
C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
}
float powValue = (4.0 * alpha * beta) / (PI * PI);
float C3 = 0.125 * roughnessSquared9 * powValue * powValue;
// now calculate both main parts of the formula
float A = gamma * C2 * tan(beta);
float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);
// put it all together
float L1 = max(0.0, NdotL) * (C1 + A + B);
// also calculate interreflection
float twoBetaPi = 2.0 * beta / PI;
float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) * (1.0 - gamma * twoBetaPi * twoBetaPi);
colour = vec4(colour.xyz * (L1 + L2), 1.0);
}
First, I plugged your fragment shader into my renderer with my own view/normal/light vectors and it works perfectly. So the problem has to be in the way you calculate those vectors.
Next, you say that you set view_dir to your camera's front vector. I assume you meant the camera's front vector in world space, which would be incorrect. Since you calculate the dot products with vectors in camera space, view_dir must be in camera space too; vec3(0, 0, 1) would be an easy way to check that. If that works, we have found your problem.
However, using (0, 0, 1) for the view direction is not strictly correct when you do perspective projection, because the direction from the fragment to the camera then depends on the location of the fragment on the screen. The correct formula would be view_dir = normalize(-pos), where pos is the fragment's position in camera space (that is, with the model-view matrix applied but not the projection). Further, this quantity then depends only on the fragment's location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
flen is the focal length of your camera, which you can calculate as flen = cot(fovx/2).
I know this is a long-dead thread, but I've been having the same problem (for several years) and finally found the solution...
It can be partially solved by fixing the orientation of the surface normals to match the polygon winding direction, but you can also get rid of the artifacts in the shader by changing the following two lines...
float angleVN = acos(cos_nv);
float angleLN = acos(cos_nl);
to this...
float angleVN = acos(clamp(cos_nv, -1.0, 1.0));
float angleLN = acos(clamp(cos_nl, -1.0, 1.0));
Tada!
I'm currently learning about shaders and graphics pipelines and I was wondering if a pixel shader could be used to create, for example, a triangle or a more complex shape like a zigzag.
Could this be done without the use of a vertex shader?
The answer is yes! You can draw anything you want with a pixel shader, for example by implementing a ray tracer. Here is some sample code:
uniform vec3 lightposition;
uniform vec3 cameraposition;
uniform float motion;
struct Ray
{
vec3 org;
vec3 dir;
};
struct Sphere
{
vec3 Center;
float Radius;
vec4 Color;
float MatID;
float id;
};
struct Intersection
{
float t;
vec3 normal;
vec3 hitpos;
vec4 color;
float objectid;
float materialID;
};
bool sphereIntersect(Ray eyeray, Sphere sp, inout Intersection intersection)
{
float t1=0.0;
eyeray.dir = normalize(eyeray.dir);
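// eyeray.dir is normalized, so the quadratic's A coefficient is 1 and the discriminant reduces to B*B - 4*C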
float B = 2.0 *( ( eyeray.dir.x * (eyeray.org.x - sp.Center.x ) )+ ( eyeray.dir.y *(eyeray.org.y - sp.Center.y )) + ( eyeray.dir.z * (eyeray.org.z - sp.Center.z ) ));
float C = pow((eyeray.org.x - sp.Center.x),2.0) + pow((eyeray.org.y - sp.Center.y),2.0) + pow((eyeray.org.z - sp.Center.z),2.0) - pow(sp.Radius,2.0);
float D = B*B - 4.0*C ;
if(D>=0.0)
{
t1= (-B - pow(D, .5)) / 2.0;
if (t1 < 0.0)
{
t1 = (-B + pow(D, .5)) / 2.0;
if( t1 < 0.0)
return false;
else
{
if (t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
{
if(t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
return false;
}
void findIntersection(Ray ray, inout Intersection intersection)
{
intersection.t = 1e10;
intersection.materialID = 0.0;
Sphere sp1 = Sphere(vec3(-2.0,0.0,-5.0),1.5,vec4(0.5, 0.1, 0.5, 1.0),1.0,1.0);
Sphere sp2 = Sphere(vec3( 2.0,0.0,-5.0),1.5,vec4(0.5,0.5,0.1,1.0),1.0,2.0);
Sphere sp3 = Sphere(vec3( 0.0,3.0,-5.0),1.5,vec4(0.1,0.5,0.5,1.0),1.0,3.0);
sphereIntersect(ray, sp1, intersection);
sphereIntersect(ray, sp2, intersection);
sphereIntersect(ray, sp3, intersection);
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal);
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection);
vec4 GetColor(Ray ray)
{
Ray currentRay = ray;
vec4 finalColor = vec4(0.0);
for(int bounce = 1 ; bounce < 4 ; bounce++)
{
Intersection intersection;
intersection.objectid = 0.0;
findIntersection(currentRay, intersection);
if (intersection.materialID == 0.0) // We could not find any object. We return the background color
return finalColor;
else if (intersection.materialID == 1.0)
{
vec3 lv = lightposition - intersection.hitpos;
vec3 nlv = normalize(lv);
Intersection shadowIntersection;
Ray shadowRay = Ray(intersection.hitpos, nlv);
shadowIntersection.objectid = intersection.objectid;
findIntersection(shadowRay, shadowIntersection);
if (shadowIntersection.t > length(lv) || shadowIntersection.t < 1)
{
finalColor = finalColor + float(1.0f/bounce) * CalculateColor(intersection.color, 100.0, intersection.hitpos, intersection.normal);
}
else
{
finalColor = finalColor + float(1.0f/bounce) * intersection.color;
}
//currentRay = Ray(intersection.hitpos, reflect(ray.dir, intersection.normal));
currentRay = ReflectedRay(intersection.normal,ray,intersection.hitpos);
}
}
return finalColor;
}
Ray createRay(float ScreenWidth,float ScreenHeight)
{
Ray toret;
toret.org = cameraposition;
float left = -3.0;
float bottom = -3.0;
float screenZ = -3.0;
float su = -3.0 + gl_FragCoord.x/ScreenWidth * 6; //gl_FragCoord gives you the current x and y component of your current pixel
float sv = -3.0 + gl_FragCoord.y/ScreenHeight * 6;
float sz = screenZ - cameraposition.z;
toret.dir = normalize(vec3(su,sv,sz));
//vec2 p = (gl_FragCoord.xy/resolution) * 2 ;
//toret.dir = normalize(vec3(p, -1.0));
return toret;
}
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection)
{
Ray reflection;
reflection.dir = EyeRay.dir - 2 * Normal * dot(EyeRay.dir,Normal);
reflection.org = intersection + reflection.dir * 0.01;
return reflection;
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal)
{
//intensities
vec3 Idifuse = vec3(1, 1, 1);
vec3 Iambient = vec3(0.8, 0.8, 0.8);
vec3 Ispecular = vec3(1,1,1);
vec3 kDifuse = vec3(0.5,0.5,0.5); //for difuse
vec3 kSpecular = vec3(0.75, 0.6, 0.3); //for specular
vec3 kAmbient = vec3(0.1, 0.2, 0.3); //for ambient
//vec4 kSpecular = vec4(0.5,0.5,0.5,1.0);
//vec4 kDifuse = vec4(0.5,0.5,0.5,1.0);
vec3 ColorDifuse = max(dot(normal,lightposition),0.0) * kDifuse;
//vector calculations
vec3 l = normalize(lightposition - intersection); //light vector
vec3 n = normalize(normal); // normalVector of point in the sea
vec3 v = normalize(cameraposition - intersection); // view Vector
vec3 h = normalize(v + l); // half Vector
vec3 difuse = kDifuse * Idifuse * max(0.0, dot(n, l));
vec3 specular = kSpecular * Ispecular * pow(max(0.0, dot(n, h)), shiness);
vec3 color = ambient.xyz + difuse + specular;
return vec4(color,1.0);
gl_FragColor = vec4(color,1.0);
}
void main()
{
if(lightposition == vec3(0.0,0.0,0.0))
gl_FragColor = vec4(0.0,1.0,0.0,1.0);
Ray eyeray = createRay(600.0,600.0);
gl_FragColor = GetColor(eyeray);
}
A useful technique is to use a fragment shader (I'm an OpenGL guy) with point sprites. Point sprites in OpenGL 3+ get rendered as squares of pixels, with the size of the square (gl_PointSize) set by the vertex shader.
In the fragment shader, gl_PointCoord has the x and y coordinates of the current pixel within the square, from 0.0 to 1.0. So you can draw a circle by testing whether gl_PointCoord.x and gl_PointCoord.y are both within the radius and discarding if not, a framed square by checking that .x and .y are within some distance of the edge, and so on. It's classic maths: define a function f(x, y) that returns true for points within the shape you want and false otherwise.
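As a minimal sketch of the circle case (the version, names and colour are arbitrary):
#version 330
out vec4 fragColor;
void main()
{
    // gl_PointCoord runs from (0,0) to (1,1) across the point sprite.
    vec2 fromCenter = gl_PointCoord - vec2(0.5);
    // Keep fragments inside a circle of radius 0.5; discard the corners of the square.
    if (dot(fromCenter, fromCenter) > 0.25)
        discard;
    fragColor = vec4(1.0, 0.5, 0.0, 1.0); // flat orange disc
}
Remember that on desktop OpenGL you also need glEnable(GL_PROGRAM_POINT_SIZE) for the gl_PointSize written in the vertex shader to take effect.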
The Orange book, OpenGL Shading Language 3rd edition, has some examples (which in turn come from RenderMan) of how to draw such shapes.
Hope this helps.
What you want is called procedural texturing or procedural shading.
You can draw different shapes with simple (and not-so-simple) math; a small zigzag sketch follows below.
Take a look at some examples here:
http://glslsandbox.com/
You can find more via Google.
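For instance, a minimal fragment-shader sketch of a zigzag line (the resolution uniform and the constants are illustrative, not from any particular framework):
#version 330
uniform vec2 resolution; // viewport size in pixels
out vec4 fragColor;
void main()
{
    vec2 uv = gl_FragCoord.xy / resolution;
    // A triangle wave in x gives the zigzag's y position; 6.0 sets the number of teeth.
    float wave = 0.3 + 0.4 * abs(fract(uv.x * 6.0) - 0.5);
    // White within a small thickness of the wave, black elsewhere.
    float line = step(abs(uv.y - wave), 0.01);
    fragColor = vec4(vec3(line), 1.0);
}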
I am having trouble using attribute variables to get a value into the vertex shader. I want to provide the geometry shader with one of the points from the previous primitive (a line) for some calculation. I am providing this point with a vec3 attribute variable (Ppoint) in the vertex shader and then passing it to the geometry shader using an out variable in the vertex shader and an in variable in the geometry shader (pointPass).
The problem is that when I update the attribute variable in the glBegin()/glEnd() block while drawing the lines, the values passed to glVertexAttrib3f are taken as vertices and a line is also rendered to those points. This causes extra lines to be displayed and disturbs all of the geometry shader functionality.
Here is my code for all the shaders and my OpenGL program to draw the lines.
Vertex Shader
#version 330 compatibility
out vec3 pointPass;
attribute vec3 Ppoint;
void main()
{
pointPass = Ppoint;
gl_Position = gl_Vertex;
}
Geometry Shader
#version 330 compatibility
in vec3 pointPass[];
out vec4 colorFrag;
layout(lines) in;
// 100 vertices are not actually required; more were specified for trial purposes
layout(triangle_strip, max_vertices=100) out;
vec3 getA(vec3 axis){
vec3 a;
a.x = 1.0;
a.y = 1.0;
a.z = -(axis.x + axis.y)/axis.z;
a = normalize(a);
return a;
}
vec3 getB(vec3 axis, vec3 a){
vec3 b;
b.x = (a.y*axis.z - a.z*axis.y);
b.y = (a.z*axis.x - a.x*axis.z);
b.z = (a.x*axis.y - a.y*axis.x );
b = normalize(b);
return b;
}
void main()
{
vec3 axis0, axis1, v0, v1, v2;
float radius = 0.5;
float rotation = 0.0f;
float pi = 3.1416;
int numPoints = 15;
vec3 p1, p2, p3, p4;
int count = 0, i;
float increment = 2*pi/numPoints;
v0 = pointPass[0];
v1 = gl_in[0].gl_Position.xyz;
v2 = gl_in[1].gl_Position.xyz;
axis1 = v1 - v2;
axis1 = normalize(axis1);
vec3 a1 = getA(axis1);
vec3 b1 = getB(axis1, a1);
axis0 = v0-v2;
axis0 = normalize(axis0);
vec3 a0 = getA(axis0);
vec3 b0 = getB(axis0, a0);
// Rotation with theta
for(rotation = 0; rotation<=2*pi; rotation+=increment){
p1 = v1 + radius*cos(rotation)*a0 + radius*sin(rotation)*b0;
p2 = v1 + radius*cos(rotation + increment)*a0 + radius*sin (rotation + increment)*b0;
p3 = v2 + radius*cos(rotation)*a1 + radius*sin(rotation)*b1;
p4 = v2 + radius*cos(rotation + increment)*a1 + radius*sin(rotation + increment)*b1;
// FIRST Triangle
// FIRST vertex
gl_Position = (gl_ModelViewProjectionMatrix*vec4(p3,1.0) );
EmitVertex();
// SECOND vertex
gl_Position = (gl_ModelViewProjectionMatrix*vec4(p1, 1.0) );
EmitVertex();
// THIRD vertex
gl_Position = (gl_ModelViewProjectionMatrix*vec4(p4, 1.0) );
EmitVertex();
// SECOND Triangle
// FIRST vertex
gl_Position = (gl_ModelViewProjectionMatrix*vec4(p2, 1.0) );
EmitVertex();
}
EndPrimitive();
}
Fragment Shader
#version 330 compatibility
in vec4 colorFrag;
void main()
{
gl_FragColor = colorFrag;
}
OpenGL program for drawing lines
// vPoints is a std::vector of 3d vector class created by me.
void drawLines(){
float angle =0.0f;
int numLines = 30;
int count = 0;
float disp = 0.30f;
float radius_x = 5.0;
float radius_y = 5.0;
vPoints.resize(numLines+2);
// Loop around in a circle and specify even points along the spiral
float increment = (float)(2*GL_PI/numLines);
for(angle = 0.0f; angle < (2.0f*GL_PI); angle += increment)
{
// Calculate x and y position of the next vertex
float x1 = radius_x*sin(angle);
float y1 = radius_y*cos(angle);
float z1 = count*disp;
vPoints[count].SetVector(x1, y1, z1);
count ++;
}
// Drawing only first two line segments for testing
glBegin(GL_LINES);
int pointPassLocation = glGetAttribLocation(programID, "Ppoint");
// This is also considered as a vertex and a line is drawn from this point to vPoints[1]
glVertexAttrib3f(pointPassLocation, vPoints[0].GetX(), vPoints[0].GetY(), vPoints[0].GetZ());
glVertex3d(vPoints[1].GetX(), vPoints[1].GetY(), vPoints[1].GetZ());
glVertex3d(vPoints[2].GetX(), vPoints[2].GetY(), vPoints[2].GetZ());
// Again this is also considered as a point and a line is drawn from vPoints[2] to this point.
glVertexAttrib3f(pointPassLocation, vPoints[1].GetX(), vPoints[1].GetY(), vPoints[1].GetZ());
glVertex3d(vPoints[2].GetX(), vPoints[2].GetY(), vPoints[2].GetZ());
glVertex3d(vPoints[3].GetX(), vPoints[3].GetY(), vPoints[3].GetZ());
glEnd();
}
So instead of the 2 lines I wanted to draw, from vPoints[1] to vPoints[2] and from vPoints[2] to vPoints[3], I am getting 3 lines with 6 vertices, because the two glVertexAttrib3f calls are treated as vertices.
Am I doing this correctly, or is there a better or another way to do this?