How to access all vertices within the same patch in a Tessellation Control Shader - opengl

I want to do LOD in the Tessellation Control Shader. My method is to calculate the area each patch occupies in screen coordinates and set a different tessellation level for each patch.
So I need to access all vertices within a patch and I do so like:
for(int i = 0; i < 4; i++)
{
position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
}
where I defined my patch in the TCS like:
#version 400
layout( vertices=4 ) out;
and here is the related code on the OpenGL side:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rect_index_buffer);
glPatchParameteri(GL_PATCH_VERTICES, 4);
glDrawElements(GL_PATCHES, RECT_INDEX_SIZE, GL_UNSIGNED_INT, 0);
However, the result is strange. The tessellation level should depend on the area the patch covers on the screen, but all patches end up with the same tessellation level.
So what's the problem?
I guess the way I'm trying to access the vertices within a patch is wrong. How should I do it?
The following is the code of my Tessellation Control Shader; I hope it helps:
#version 400
layout( vertices=4 ) out;
uniform mat4 ProjectionMatrix;
uniform mat4 ModelViewMatrix;
uniform float window_height;
uniform float window_width;
float PI = 3.14159;
float calcTriangleArea(float l[3]) //Heron's formula
{
float p = (l[0] + l[1] + l[2]) / 2.0;
return sqrt(p * (p - l[0]) * (p - l[1]) * (p - l[2]));
}
float calcSqureArea(vec4 position[4])
{
vec2 position_screen[4];
for(int i=0;i<4;i++)
{
position_screen[i] = position[i].xy;
}
float l[4];
for(int i = 0;i < 4;i++)
{
l[i] = length(position_screen[(i + 1) % 4] - position_screen[i % 4]);
}
float diagonal = length(position_screen[2] - position_screen[0]);
float l1[3];
float l2[3];
l1[0] = l[0];
l1[1] = l[1];
l1[2] = diagonal;
l2[0] = l[2];
l2[1] = l[3];
l2[2] = diagonal;
float area = calcTriangleArea(l1) + calcTriangleArea(l2);
return area;
}
float checkInsideView(vec4 position[4]) //check if the patch is visible
{
int flag = 4;
for(int i=0;i<4;i++)
{
if((position[i].x >= -window_width / 2.0) && (position[i].x <= window_width / 2.0) &&
(position[i].y >= -window_height / 2.0) && (position[i].y <= window_height / 2.0))
{
flag --;
}
}
if(flag == 0) //all 4 vertices are visible
{
return 0.0;
}
else if(flag == 4) //no vertex is visible
{
return 2.0;
}
else //only some vertices are visible
{
return 1.0;
}
}
float calcLODLevel()
{
vec4 position_screen[4];
for(int i = 0; i < 4; i++)
{
position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
}
float in_view_level = checkInsideView(position_screen);
//tess number is decided by the area that this patch covers on
//the screen
float area = calcSqureArea(position_screen);
float level = sqrt(area);
if(in_view_level == 1.0)
{
level /= sqrt(2);
}
//don't do tessellation
//if this patch is not visible
else if(in_view_level == 2.0)
{
level = 1.0;
}
return level;
}
void main()
{
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
float lod_level = calcLODLevel();
gl_TessLevelOuter[0] = lod_level;
gl_TessLevelOuter[1] = lod_level;
gl_TessLevelOuter[2] = lod_level;
gl_TessLevelOuter[3] = lod_level;
gl_TessLevelInner[0] = lod_level;
gl_TessLevelInner[1] = lod_level;
}

I think the problem is with your calculation of the screen coordinates, which results in tessellation levels that are too small. The key part is this:
position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
What you're calculating here are clip coordinates, not screen coordinates. To get screen coordinates from clip coordinates, you need to:
1. Perform the perspective division. This gives you NDC (Normalized Device Coordinates) in the range [-1.0, 1.0].
2. Calculate screen coordinates from the NDC.
In code, the calculation could look like this:
vec4 posClip = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
vec2 posNdc = posClip.xy * (1.0 / posClip.w);
vec2 posScreen = 0.5 * (posNdc + 1.0) * vec2(window_width, window_height);
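Applied to your shader, a minimal sketch of the fix could look like the following (the divisor 16.0 is an arbitrary tuning constant, and note that checkInsideView would also need its bounds changed to [0, window_width] and [0, window_height], since pixel coordinates are no longer centered on the origin):
float calcLODLevel()
{
    vec4 position_screen[4];
    for (int i = 0; i < 4; i++)
    {
        vec4 posClip = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
        vec2 posNdc  = posClip.xy / posClip.w;  // perspective division -> NDC in [-1, 1]
        vec2 posPix  = 0.5 * (posNdc + 1.0) * vec2(window_width, window_height);
        position_screen[i] = vec4(posPix, 0.0, 1.0);
    }
    // calcSqureArea only reads .xy, so it now measures the patch area in
    // pixels, and sqrt(area) grows linearly with the on-screen edge length.
    float area = calcSqureArea(position_screen);
    return max(sqrt(area) / 16.0, 1.0);
}
It is also common (though not required) to guard the writes to gl_TessLevelOuter/gl_TessLevelInner with if (gl_InvocationID == 0) in main, so the per-patch levels are computed only once per patch.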

Related

LWJGL3 Visual artifacts caused by different projections

I have a very simple GLSL vertex shader, which is the following:
#version 430
in vec2 vertices;
in vec2 textures;
out vec2 tex_coords;
uniform mat4 projection;
uniform mat4 texModifier;
void main() {
tex_coords = (texModifier * vec4(textures,0,1)).xy;
gl_Position = projection * vec4(vertices, 0, 1);
}
This shader is used to render text. The text is just a font sheet, and I change texModifier so that the texture coordinates land over the specific letter I want to draw. This works fine, but I noticed that different projection matrices cause different visual artifacts.
Here is an example:
This is the text using its original position.
And this is the same text, only with the projection matrix translated 0.1f / 1920 to the left. The visual artifacts move from being around the commas and the r to being around the u, the S, and more. These artifacts appear even when using linear interpolation.
This is the function used to draw text:
public void drawText(Shader shader, CharSequence text, float x, float y, int letters) {
if(letters == -1)
letters = text.length();
Model model;
Matrix4f projection = new Matrix4f();
Texture fontSheet = texture;
int textHeight = getTextHeight(text);
projection.setOrtho(-1, 1, -1, 1, -1, 1);
float drawX = x;
float drawY = y;
if (textHeight > fontHeight) {
drawY += textHeight - fontHeight;
}
shader.bind();
shader.setUniform("sampler", 0);
fontSheet.bind(0);
for (int i = 0; i < text.length() && i < letters; i++) {
char ch = text.charAt(i);
if (ch == '\n') {
/* Line feed, set x and y to draw at the next line */
drawY -= fontHeight;
drawX = x;
continue;
}
if (ch == '\r') {
/* Carriage return, just skip it */
continue;
}
Glyph g = glyphs.get(ch);
model = Model.createQuad((float) g.width / 2, (float) g.height / 2);
projection.setTranslation(((float) g.width / 2 + drawX) / Main.WINDOW_WIDTH, drawY / Main.WINDOW_HEIGHT, 0);
Matrix4f texModifier =
new Matrix4f()
.translate((float) g.x / fontSheet.getWidth(), 0, 0)
.scale((float) g.width / fontSheet.getWidth(), -(float) g.height / fontSheet.getHeight(), 1);
shader.setUniform("projection", projection);
shader.setUniform("texModifier", texModifier);
model.render();
drawX += g.width;
}
}
Why is the projection causing these artifacts?
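One way to test whether subpixel positioning is the cause is to snap the final vertex position to whole pixels in the vertex shader. A sketch, where windowSize is an assumed new uniform holding the viewport size in pixels:
#version 430
in vec2 vertices;
in vec2 textures;
out vec2 tex_coords;
uniform mat4 projection;
uniform mat4 texModifier;
uniform vec2 windowSize; // assumed uniform: viewport size in pixels
void main() {
    tex_coords = (texModifier * vec4(textures, 0, 1)).xy;
    vec4 p = projection * vec4(vertices, 0, 1);
    // Convert NDC to pixels, round to the nearest pixel, convert back.
    // If the artifacts disappear, the glyph quads were being rasterized at
    // subpixel offsets relative to the font-sheet texels.
    vec2 pixel = (p.xy / p.w * 0.5 + 0.5) * windowSize;
    p.xy = (floor(pixel + 0.5) / windowSize * 2.0 - 1.0) * p.w;
    gl_Position = p;
}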

How to implement refraction light in fragment shader?

I am working on an OpenGL ray-tracer which is capable of loading obj files and ray-tracing them. My application loads the obj file with Assimp and then sends all of the triangle faces (with the primitive coordinates and the material coefficients as well) to the fragment shader using shader storage buffer objects. The basic structure is to render the result from the fragment shader to a quad.
I have trouble with the ray-tracing part in the fragment shader, but first let's introduce it.
For diffuse light, Lambert's cosine law is used; for specular light, the Phong-Blinn model. In the case of total reflection, a weight variable is used so that the reflected light has an effect on other objects as well. The weight is calculated by approximating the Fresnel equation with Schlick's method. In the image below you can see that the plane works like a mirror, reflecting the image of the cube above.
I would like to make the cube appear as a glass object (like a glass sphere) that has both refracting and reflecting effects, or at least refracts the light. In the image above you can see a refraction effect on the cube, but it is not as good as it should be. I searched for examples of how to implement it; so far I have only worked out that the Fresnel equation has to be used, just like in the reflection part.
Here is my fragment shader:
vec3 Fresnel(vec3 F0, float cosTheta) {
return F0 + (vec3(1, 1, 1) - F0) * pow(1-cosTheta, 5);
}
float schlickApprox(float Ni, float cosTheta){
float F0=pow((1-Ni)/(1+Ni), 2);
return F0 + (1 - F0) * pow((1 - cosTheta), 5);
}
vec3 trace(Ray ray){
vec3 weight = vec3(1, 1, 1);
const float epsilon = 0.0001f;
vec3 outRadiance = vec3(0, 0, 0);
int maxdepth=5;
for (int i=0; i < maxdepth; i++){
Hit hit=traverseBvhTree(ray);
if (hit.t<0){ return weight * lights[0].La; }
vec4 textColor = texture(texture1, vec2(hit.u, hit.v));
Ray shadowRay;
shadowRay.orig = hit.orig + hit.normal * epsilon;
shadowRay.dir = normalize(lights[0].direction);
// Ambient Light
outRadiance+= materials[hit.mat].Ka.xyz * lights[0].La*textColor.xyz * weight;
// Diffuse light based on Lambert's cosine law
float cosTheta = dot(hit.normal, normalize(lights[0].direction));
if (cosTheta>0 && traverseBvhTree(shadowRay).t<0) {
outRadiance +=lights[0].La * materials[hit.mat].Kd.xyz * cosTheta * weight;
// Specular light based on Phong-Blinn model
vec3 halfway = normalize(-ray.dir + lights[0].direction);
float cosDelta = dot(hit.normal, halfway);
if (cosDelta > 0){
outRadiance +=weight * lights[0].Le * materials[hit.mat].Ks.xyz * pow(cosDelta, materials[hit.mat].shininess); }
}
float fresnel=schlickApprox(materials[hit.mat].Ni, cosTheta);
// For refractive materials
if (materials[hit.mat].Ni < 3)
{
/*this is the under construction part.*/
ray.orig = hit.orig - hit.normal*epsilon;
ray.dir = refract(ray.dir, hit.normal, materials[hit.mat].Ni);
}
// If the refraction index is 15 or more, treat the material as a mirror.
else if (materials[hit.mat].Ni >= 15) {
weight *= fresnel;
ray.orig=hit.orig+hit.normal*epsilon;
ray.dir=reflect(ray.dir, hit.normal);
}
}
return outRadiance;
}
Update 1
I updated the trace method in the shader. As far as I understand the physics of light, if a material both reflects and refracts light, I have to treat the two cases separately:
In the case of reflection, I added a weight to the light calculation: weight *= fresnel.
In the case of refraction, the weight is weight *= 1 - fresnel.
Moreover, I calculate ray.orig and ray.dir for each case, and the refraction calculation only happens when it is not a case of total internal reflection (fresnel is smaller than 1).
The modified trace method:
vec3 trace(Ray ray){
vec3 weight = vec3(1, 1, 1);
const float epsilon = 0.0001f;
vec3 outRadiance = vec3(0, 0, 0);
int maxdepth=3;
for (int i=0; i < maxdepth; i++){
Hit hit=traverseBvhTree(ray);
if (hit.t<0){ return weight * lights[0].La; }
vec4 textColor = texture(texture1, vec2(hit.u, hit.v));
Ray shadowRay;
shadowRay.orig = hit.orig + hit.normal * epsilon;
shadowRay.dir = normalize(lights[0].direction);
// Ambient Light
outRadiance+= materials[hit.mat].Ka.xyz * lights[0].La*textColor.xyz * weight;
// Diffuse light based on Lambert's cosine law
float cosTheta = dot(hit.normal, normalize(lights[0].direction));
if (cosTheta>0 && traverseBvhTree(shadowRay).t<0) {
outRadiance +=lights[0].La * materials[hit.mat].Kd.xyz * cosTheta * weight;
// Specular light based on Phong-Blinn model
vec3 halfway = normalize(-ray.dir + lights[0].direction);
float cosDelta = dot(hit.normal, halfway);
if (cosDelta > 0){
outRadiance +=weight * lights[0].Le * materials[hit.mat].Ks.xyz * pow(cosDelta, materials[hit.mat].shininess); }
}
float fresnel=schlickApprox(materials[hit.mat].Ni, cosTheta);
// For refractive/reflective materials
if (materials[hit.mat].Ni < 7)
{
bool outside = dot(ray.dir, hit.normal) < 0;
// compute refraction if it is not a case of total internal reflection
if (fresnel < 1) {
ray.orig = outside ? hit.orig-hit.normal*epsilon : hit.orig+hit.normal*epsilon;
ray.dir = refract(ray.dir, hit.normal,materials[hit.mat].Ni);
weight *= 1-fresnel;
continue;
}
// compute reflection
ray.orig= outside ? hit.orig+hit.normal*epsilon : hit.orig-hit.normal*epsilon;
ray.dir= reflect(ray.dir, hit.normal);
weight *= fresnel;
continue;
}
// If the refraction index is 7 or more, treat the material as a mirror: total reflection
else if (materials[hit.mat].Ni >= 7) {
weight *= fresnel;
ray.orig=hit.orig+hit.normal*epsilon;
ray.dir=reflect(ray.dir, hit.normal);
}
}
return outRadiance;
}
Here is a snapshot connected to the update. Slightly better, I guess.
Update 2:
I found an iterative algorithm that uses a stack to render refracted and reflected rays in OpenGL: it is on page 68.
I modified my fragment shader according to this. It is almost fine, except for the back faces, which are completely black. Some pictures are attached.
Here is the trace method of my frag shader:
vec3 trace(Ray ray){
vec3 color = vec3(0); // must be initialized; GLSL leaves uninitialized variables undefined
float epsilon=0.001;
Stack stack[8];// max depth
int stackSize = 0;// current depth
int bounceCount = 0;
vec3 coeff = vec3(1, 1, 1);
bool continueLoop = true;
while (continueLoop){
Hit hit = traverseBvhTree(ray);
if (hit.t>0){
bounceCount++;
//----------------------------------------------------------------------------------------------------------------
Ray shadowRay;
shadowRay.orig = hit.orig + hit.normal * epsilon;
shadowRay.dir = normalize(lights[0].direction);
color+= materials[hit.mat].Ka.xyz * lights[0].La * coeff;
// Diffuse light
float cosTheta = dot(hit.normal, normalize(lights[0].direction)); // based on Lambert's cosine law
if (cosTheta>0 && traverseBvhTree(shadowRay).t<0) {
color +=lights[0].La * materials[hit.mat].Kd.xyz * cosTheta * coeff;
vec3 halfway = normalize(-ray.dir + lights[0].direction);
float cosDelta = dot(hit.normal, halfway);
// Specular light
if (cosDelta > 0){
color +=coeff * lights[0].Le * materials[hit.mat].Ks.xyz * pow(cosDelta, materials[hit.mat].shininess); }
}
//---------------------------------------------------------------------------------------------------------------
if (materials[hit.mat].indicator > 3.0 && bounceCount <=2){
float eta = 1.0/materials[hit.mat].Ni;
Ray refractedRay;
refractedRay.dir = dot(ray.dir, hit.normal) <= 0.0 ? refract(ray.dir, hit.normal, eta) : refract(ray.dir, -hit.normal, 1.0/eta);
bool totalInternalReflection = length(refractedRay.dir) < epsilon;
if(!totalInternalReflection){
refractedRay.orig = hit.orig + hit.normal*epsilon*sign(dot(ray.dir, hit.normal));
refractedRay.dir = normalize(refractedRay.dir);
stack[stackSize].coeff = coeff *(1 - schlickApprox(materials[hit.mat].Ni, dot(ray.dir, hit.normal)));
stack[stackSize].depth = bounceCount;
stack[stackSize++].ray = refractedRay;
}
else{
ray.dir = reflect(ray.dir, -hit.normal);
ray.orig = hit.orig - hit.normal*epsilon;
}
}
else if (materials[hit.mat].indicator == 0){
coeff *= schlickApprox(materials[hit.mat].Ni, dot(-ray.dir, hit.normal));
ray.orig=hit.orig+hit.normal*epsilon;
ray.dir=reflect(ray.dir, hit.normal);
}
else { //Diffuse Material
continueLoop=false;
}
}
else {
color+= coeff * lights[0].La;
continueLoop=false;
}
if (!continueLoop && stackSize > 0){
ray = stack[--stackSize].ray; // pre-decrement so all three reads below hit the element pushed last
bounceCount = stack[stackSize].depth;
coeff = stack[stackSize].coeff;
continueLoop = true;
}
}
return color;
}
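One detail worth checking for the black back faces (an assumption, not something verified here): cosTheta is computed against hit.normal, which points outward, so rays that hit the inside of the glass get their diffuse and specular terms clamped to zero. Flipping the shading normal when the surface is hit from behind would be a quick test:
// Hypothetical tweak inside trace(): shade with a normal facing the incoming ray.
vec3 shadingNormal = dot(ray.dir, hit.normal) < 0.0 ? hit.normal : -hit.normal;
float cosTheta = dot(shadingNormal, normalize(lights[0].direction));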

Recreating scene from depth encoded image in Vulkan vs OpenGL

I'm in the process of translating a piece of OpenGL code to Vulkan. The code recreates a rendered scene from an image (on a hemisphere projection) with depth information encoded. Note that I also load the model view matrix used for the projection to recreate the scene. The translation has been pretty straightforward but I'm running into issues due to the new Vulkan coordinate system.
The original OpenGL shader with comments follows:
#version 430
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
in vec2 posGeom[];
out vec2 texCoord;
uniform mat4 view;
uniform mat4 projection;
uniform float threshold;
uniform vec3 quantization;
uniform mat4 inverseStaticView;
uniform sampler2D rgbdTexture;
//get the image space for each pixel of our hemisphere image
vec3 getSphereRay(const vec2 coord) {
//get length of ray from camera to point on image plane
float len = 1 - coord.x * coord.x - coord.y * coord.y;
if (len > 0)
return vec3(coord, -sqrt(len));//scale to unit length vector as viewing ray
else
return vec3(0);
}
vec4 getPosition(const in vec2 inCoord, const in float depth) {
vec2 coord = inCoord;
//reverse the stretching from sphere to quad (based on y-coordinate)
float percent = sqrt(1.0 - coord.y * coord.y);
coord.x = coord.x * percent;
//scale ray with corresponding depth
vec3 normal = getSphereRay(coord) * depth;
//move from image space to world space by inverse view matrix
return inverseStaticView * vec4(normal, 1);
}
bool hasZeroDepth = false;
//get the real depth from quantized and packed depth by inverting the gamma correction and inverting min, max
float getDepth(int idx) {
float depth = texture(rgbdTexture, posGeom[idx] * 0.5 + 0.5).w;
if(depth == 0)
hasZeroDepth = true;
float minDepth = quantization.x;
float maxDepth = quantization.y;
float gamma = quantization.z;
depth = pow(depth, gamma);
depth = depth * (maxDepth - minDepth) + minDepth;
return depth;
}
//emit the position and texcoord
void emitPosition(int idx, float depth) {
texCoord = posGeom[idx] * 0.5 + 0.5;
gl_Position = projection * view * getPosition(posGeom[idx], depth);
EmitVertex();
}
void main() {
float d0 = getDepth(0);
float d1 = getDepth(1);
float d2 = getDepth(2);
//do not emit tris with zero (invalid) depth
if(!hasZeroDepth) {
float minDepth = min(d0, min(d1, d2));
float maxDepth = max(d0, max(d1, d2));
float minDist = maxDepth - minDepth;
float avgDepth = (d0 + d1 + d2) / 3;
float thres = threshold;
//look at tri stretching factor
if(minDist / avgDepth < thres) {
//emit original tri
emitPosition(0, d0);
emitPosition(1, d1);
emitPosition(2, d2);
} else {
//emit tri with maxDepth to only show background
emitPosition(0, maxDepth);
emitPosition(1, maxDepth);
emitPosition(2, maxDepth);
}
}
}
In the Vulkan shader, I account for the Vulkan coordinate system by inverting the y value. I also must normalize the world values for reasons that are unclear to me (otherwise what is rendered is complete nonsense). The shader code follows:
#version 450
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
layout(binding = 0) uniform UniformBufferObject {
mat4 modelView;
mat4 inverseStaticModelView;
float quantization;
} ubo;
layout(binding = 1) uniform sampler2D texSampler;
layout(location = 0) in vec2 posGeom[];
layout(location = 0) out vec2 texCoord;
bool hasZeroDepth = false;
float minDepth = 0;
float maxDepth = 1.0;
vec3 unproject(vec2 win) {
float scale = 1 - win.y * win.y;
// Invert y to account for Vulkan coordinate system.
float y = win.y * -1;
// Scale x to account for hemisphere projection.
float x = win.x * scale;
float z = -sqrt(1 - x * x - y * y);
if(z < 0){
vec4 outVals = ubo.inverseStaticModelView * vec4(x, y, z, 1.0);
return vec3(outVals[0], outVals[1], outVals[2]) / outVals.w;
}else
return vec3(0);
}
vec3 reconstructWorldPosition(vec2 ndc, float depth) {
vec3 pos = unproject(ndc);
return depth * normalize(pos);
}
float getDepth(int idx) {
float depth = texture(texSampler, posGeom[idx] * 0.5 + 0.5).w;
if(depth == 0)
hasZeroDepth = true;
depth = pow(depth, ubo.quantization);
return depth;
}
void emitPosition(int idx, float depth) {
vec2 pos = posGeom[idx].xy;
texCoord = pos * 0.5 + 0.5;
vec3 positionFromDepth = reconstructWorldPosition(pos, depth);
gl_Position = ubo.modelView * vec4(positionFromDepth,1);
EmitVertex();
}
void main() {
float d0 = getDepth(0);
float d1 = getDepth(1);
float d2 = getDepth(2);
if(!hasZeroDepth) {
float minDepth = min(d0, min(d1, d2));
float maxDepth = max(d0, max(d1, d2));
float minDist = maxDepth - minDepth;
float avgDepth = (d0 + d1 + d2) / 3.0;
float thres = 0.1;
if(minDist / avgDepth < thres ) {
emitPosition(0, d0);
emitPosition(1, d1);
emitPosition(2, d2);
} else {
emitPosition(0, maxDepth);
emitPosition(1, maxDepth);
emitPosition(2, maxDepth);
}
}
}
Images of the output of the two programs are contained in this album: http://imgur.com/a/KUl57
The Vulkan output appears to be almost correct, except for some odd artifacts in the lower left of the scene. My suspicion is that the scaling of the x coordinate to account for the hemisphere projection is causing the issue. I've played around with the scaling and other parts of the shader, but I can't seem to get it right. Am I overlooking something else that is different between Vulkan and OpenGL, especially with regard to the coordinate system?
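For reference, there are exactly two fixed-function differences to account for: Vulkan's NDC y axis points down, and the clip-space z range maps to [0, 1] instead of [-1, 1]. One way to isolate them is to keep the unprojection identical to the OpenGL version and apply a correction once, after projection. A sketch of emitPosition under the assumption that ubo.modelView ends in an OpenGL-convention projection:
void emitPosition(int idx, float depth) {
    vec2 pos = posGeom[idx].xy;
    texCoord = pos * 0.5 + 0.5;
    vec3 positionFromDepth = reconstructWorldPosition(pos, depth);
    vec4 clipPos = ubo.modelView * vec4(positionFromDepth, 1.0);
    clipPos.y = -clipPos.y;                    // Vulkan NDC: y points down
    clipPos.z = (clipPos.z + clipPos.w) * 0.5; // remap z from [-w, w] to [0, w]
    gl_Position = clipPos;
    EmitVertex();
}
As a side observation, unproject computes scale = 1 - win.y * win.y, while the OpenGL getPosition used sqrt(1.0 - coord.y * coord.y) for the same stretch factor; that difference alone could distort one side of the hemisphere.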

How to Make 2D Lighting Better in OpenGL

I want to ask a question about my lighting effect in OpenGL.
I am trying to add lighting, but I don't think it's good and I've seen some 2D lighting pictures which are so much better than mine.
Question: I have made a spotlight, but I want it to get dimmer toward the edge of its range, more like a natural light, and I can't figure out how.
I am using an orthographic matrix with (800, 600) as the window size, and I build my meshes with real x, y coordinates. I send my lightPos and my PlayerPos to the fragment shader, and I use the vertex position within the mesh (its width and height) so that I can compute lighting for every pixel.
The light is just a basic circle, and I don't know how to make it look better. Here are some images. In the fragment shader, I use the Pythagorean theorem to calculate the distance between the two points.
And here are the vertex and fragment shaders.
Vertex shader
#version 330
layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 tCoord;
uniform mat4 mat;
out vec2 tCoord0;
out vec2 vPos;
void main(){
tCoord0 = vec2(tCoord.x, 1 - tCoord.y);
gl_Position = mat * vec4(pos, 1.0);
vPos = vec2(pos.x, pos.y);
}
Fragment shader
#version 330
out vec4 color;
uniform sampler2D sampler;
in vec2 tCoord0;
uniform vec3 objColor;
uniform vec2 lightPos;
uniform vec2 xyPos;
in vec2 vPos;
void main(){
vec4 textureColor = texture2D(sampler, tCoord0);
vec3 ambientLight = vec3(0.3f, 0.3f, 0.3f);
float dx = lightPos.x - (xyPos.x + vPos.x);
float dy = lightPos.y - (xyPos.y + vPos.y);
float dist = sqrt(dx * dx + dy * dy);
if(dist > 0 && dist < 50){
ambientLight = vec3(0.7f, 0.7f, 0.7f) * 0.6f;
}
else if(dist > 50 && dist < 70){
ambientLight = vec3(0.4f, 0.4f, 0.4f) * 0.6f;
}
else{
discard;
}
if((textureColor.x == 0 && textureColor.y == 0 && textureColor.z == 0) || textureColor.a <= 0){
color = vec4(objColor, 1.0) * vec4(ambientLight, 1.0);
}
else{
color = textureColor * vec4(ambientLight, 1.0) * vec4(objColor, 1.0);
}
}
Drawer.cpp
#include <graphics\shader.h>
#include <graphics\texture.h>
#include <graphics\shape.h>
#include <GL\glew.h>
#include <graphics\light.h>
#include <core\TSAContainer.h>
#include <core\drawer.h>
namespace GE{
namespace core{
std::vector<graphics::GraphicComponent*> Drawer::drawables;
GLuint Drawer::buffer;
void Drawer::init(){
glGenFramebuffers(1, &buffer);
}
std::vector<graphics::GraphicComponent*>& Drawer::getAllGraphicComponents(){
return drawables;
}
void Drawer::addDrawable(graphics::GraphicComponent* drawable){
drawables.push_back(drawable);
}
void Drawer::destroy(){
for (unsigned int i = 0; i < drawables.size(); i++)
delete drawables[i];
drawables.clear();
}
void Drawer::render(){
for (std::vector<graphics::GraphicComponent*>::iterator it = drawables.begin(); it != drawables.end(); it++){
if ((*it)->isDraw()){
(*it)->getShader().bind();
int color = getColor(static_cast<graphics::Shape*>(*it)->getColor());
int r = (color >> 16) & 0xff;
int g = (color >> 8) & 0xff;
int b = (color)& 0xff;
(*it)->getShader().setUniform("mat", (*it)->getTransformation().getTransformationMatrix());
(*it)->getShader().setUniform("objColor", r, g, b);
(*it)->getShader().setUniform("xyPos", (*it)->getTransformation().getPosition());
(*it)->getShader().setUniform("sampler", 1);
if (static_cast<graphics::Shape*>(*it)->getLight() != NULL){
static_cast<graphics::Shape*>(*it)->getLight()->update();
}
//(*it)->getShader().setUniform("ambientLight", static_cast<graphics::Shape*>(*it)->getAmbientLight());
glActiveTexture(GL_TEXTURE1);
if ((*it)->getTexture() != NULL)
(*it)->getTexture()->bind();
(*it)->getMesh().draw();
if ((*it)->getTexture() != NULL)
(*it)->getTexture()->unbind();
(*it)->getShader().unbind();
}
}
}
int Drawer::getColor(colorType color){
int col = 0;
if (color == GE_COLOR_BLUE){
col = 0 << 16 | 0 << 8 | 1;
}
else if (GE_COLOR_GREEN == color){
col = 0 << 16 | 1 << 8 | 0;
}
else if (GE_COLOR_RED == color){
col = 1 << 16 | 0 << 8 | 0;
}
else{
col = 1 << 16 | 1 << 8 | 1;
}
return col;
}
Drawer::Drawer(){
}
Drawer::~Drawer(){
}
}
}
float dx = lightPos.x - (xyPos.x + vPos.x);
float dy = lightPos.y - (xyPos.y + vPos.y);
float dist = sqrt(dx * dx + dy * dy);
if(dist > 0 && dist < 50)
{
ambientLight = vec3(0.7f, 0.7f, 0.7f) * 0.6f;
}
else if(dist > 50 && dist < 70)
{
ambientLight = vec3(0.4f, 0.4f, 0.4f) * 0.6f;
}
Here you're using a stepped, constant attenuation based on distance bands. That's what produces the effect of a bright inner circle and a dim outer circle with unnaturally hard edges between them.
If you want a soft kind of gradient effect, you want to avoid the branching and constants here. We can start with a linear falloff:
float dx = lightPos.x - (xyPos.x + vPos.x);
float dy = lightPos.y - (xyPos.y + vPos.y);
float dist = sqrt(dx * dx + dy * dy);
float max_dist = 70.0f;
float percent = clamp(1.0f - dist / max_dist, 0.0, 1.0f);
ambientLight = vec3(percent, percent, percent);
However, that will probably look kind of ugly to you, with a sharp point around the center. We can use a power curve instead, like so:
...
percent *= percent;
ambientLight = vec3(percent, percent, percent);
To make it kind of "rounder", you can multiply again:
...
percent *= percent * percent;
ambientLight = vec3(percent, percent, percent);
If that's the opposite of what you want visually, you can try sqrt:
float percent = clamp(1.0f - dist / max_dist, 0.0, 1.0f);
percent = sqrt(percent);
Since I don't know exactly what you're after visually, these are some things to try initially. Play with these two and see if you like what you get.
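Another option along the same lines is GLSL's built-in smoothstep, which gives a Hermite S-curve (3t^2 - 2t^3) between two distances in a single call; the inner radius of 10.0 here is an arbitrary choice:
// Full brightness within 10 units of the light, fading smoothly to zero at max_dist.
float percent = 1.0f - smoothstep(10.0f, max_dist, dist);
ambientLight = vec3(percent, percent, percent);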
If you really want maximum control over the effect, a cubic Bézier curve interpolation might come in handy:
float bezier4(float p1, float p2, float p3, float p4, float t)
{
const float mum1 = 1.0f - t;
const float mum13 = mum1 * mum1 * mum1;
const float mu3 = t * t * t;
return mum13 * p1 + 3 * t * mum1 * mum1 * p2 + 3 * t * t * mum1 * p3 + mu3 * p4;
}
...
float percent = clamp(1.0f - dist / max_dist, 0.0, 1.0f);
// Can play with the first four arguments to achieve the desired effect.
percent = bezier4(0.0f, 0.25f, 0.75f, 1.0f, percent);
ambientLight = vec3(percent, percent, percent);
That will give you a lot of control over the effect, but maybe overkill. Try the other methods first.

OpenGL ES - texturing sphere

I have a sphere and I can map a texture onto it, but right now the texture is on the outside of the sphere and I need it on the inside. The user sits inside the sphere and views it from within (rotating and zooming), so it's essentially a sky dome, but a full sphere. Maybe I need to fix the UV texture coordinates or enable something?
Here is the code for generating the sphere:
class Sphere : public ParametricSurface {
public:
Sphere(float radius) : m_radius(radius)
{
ParametricInterval interval = { ivec2(20, 20), vec2(Pi, TwoPi), vec2(8, 14) };
SetInterval(interval);
}
vec3 Evaluate(const vec2& domain) const
{
float u = domain.x, v = domain.y;
float x = m_radius * sin(u) * cos(v);
float y = m_radius * cos(u);
float z = m_radius * -sin(u) * sin(v);
return vec3(x, y, z);
}
private:
float m_radius;
};
vec2 ParametricSurface::ComputeDomain(float x, float y) const
{
return vec2(x * m_upperBound.x / m_slices.x, y * m_upperBound.y / m_slices.y);
}
void ParametricSurface::GenerateVertices(float * vertices) const
{
float* attribute = vertices;
for (int j = 0; j < m_divisions.y; j++) {
for (int i = 0; i < m_divisions.x; i++) {
// Compute Position
vec2 domain = ComputeDomain(i, j);
vec3 range = Evaluate(domain);
attribute = range.Write(attribute);
// Compute Normal
if (m_vertexFlags & VertexFlagsNormals) {
float s = i, t = j;
// Nudge the point if the normal is indeterminate.
if (i == 0) s += 0.01f;
if (i == m_divisions.x - 1) s -= 0.01f;
if (j == 0) t += 0.01f;
if (j == m_divisions.y - 1) t -= 0.01f;
// Compute the tangents and their cross product.
vec3 p = Evaluate(ComputeDomain(s, t));
vec3 u = Evaluate(ComputeDomain(s + 0.01f, t)) - p;
vec3 v = Evaluate(ComputeDomain(s, t + 0.01f)) - p;
vec3 normal = u.Cross(v).Normalized();
if (InvertNormal(domain))
normal = -normal;
attribute = normal.Write(attribute);
}
// Compute Texture Coordinates
if (m_vertexFlags & VertexFlagsTexCoords) {
float s = m_textureCount.x * i / m_slices.x;
float t = m_textureCount.y * j / m_slices.y;
attribute = vec2(s, t).Write(attribute);
}
}
}
}
void ParametricSurface::GenerateLineIndices(unsigned short * indices) const
{
unsigned short * index = indices;
for (int j = 0, vertex = 0; j < m_slices.y; j++) {
for (int i = 0; i < m_slices.x; i++) {
int next = (i + 1) % m_divisions.x;
*index++ = vertex + i;
*index++ = vertex + next;
*index++ = vertex + i;
*index++ = vertex + i + m_divisions.x;
}
vertex += m_divisions.x;
}
}
void ParametricSurface::GenerateTriangleIndices(unsigned short * indices) const
{
unsigned short * index = indices;
for (int j = 0, vertex = 0; j < m_slices.y; j++) {
for (int i = 0; i < m_slices.x; i++) {
int next = (i + 1) % m_divisions.x;
*index++ = vertex + i;
*index++ = vertex + next;
*index++ = vertex + i + m_divisions.x;
*index++ = vertex + next;
*index++ = vertex + next + m_divisions.x;
*index++ = vertex + i + m_divisions.x;
}
vertex += m_divisions.x;
}
}
And here is VBO creation:
+ (DrawableVBO *)createVBO:(SurfaceType)surfaceType
{
ISurface * surface = [self createSurface:surfaceType]; // just Sphere type
surface->SetVertexFlags(VertexFlagsNormals | VertexFlagsTexCoords); // which vertexes I need
// Get vertices from the surface.
//
int vertexSize = surface->GetVertexSize();
int vBufSize = surface->GetVertexCount() * vertexSize;
GLfloat * vbuf = new GLfloat[vBufSize];
surface->GenerateVertices(vbuf);
// Get triangle indices from the surface
//
int triangleIndexCount = surface->GetTriangleIndexCount();
unsigned short * triangleBuf = new unsigned short[triangleIndexCount];
surface->GenerateTriangleIndices(triangleBuf);
// Create the VBO for the vertices.
//
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, vBufSize * sizeof(GLfloat), vbuf, GL_STATIC_DRAW);
// Create the VBO for the triangle indices
//
GLuint triangleIndexBuffer;
glGenBuffers(1, &triangleIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangleIndexCount * sizeof(GLushort), triangleBuf, GL_STATIC_DRAW);
delete [] vbuf;
delete [] triangleBuf;
delete surface;
DrawableVBO * vbo = [[DrawableVBO alloc] init];
vbo.vertexBuffer = vertexBuffer;
vbo.triangleIndexBuffer = triangleIndexBuffer;
vbo.vertexSize = vertexSize;
vbo.triangleIndexCount = triangleIndexCount;
return vbo;
}
Here is my light setup:
- (void)setupLights
{
// Set up some default material parameters.
//
glUniform3f(_ambientSlot, 0.04f, 0.04f, 0.04f);
glUniform3f(_specularSlot, 0.5, 0.5, 0.5);
glUniform1f(_shininessSlot, 50);
// Initialize various state.
//
glEnableVertexAttribArray(_positionSlot);
glEnableVertexAttribArray(_normalSlot);
glUniform3f(_lightPositionSlot, 1.0, 1.0, 5.0);
glVertexAttrib3f(_diffuseSlot, 0.8, 0.8, 0.8);
}
And finally shaders:
fragment:
precision mediump float;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
uniform sampler2D Sampler;
void main()
{
gl_FragColor = texture2D(Sampler, vTextureCoordOut) * vDestinationColor;
}
vertex:
uniform mat4 projection;
uniform mat4 modelView;
attribute vec4 vPosition;
attribute vec2 vTextureCoord;
uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
uniform vec3 vAmbientMaterial;
uniform vec3 vSpecularMaterial;
uniform float shininess;
attribute vec3 vNormal;
attribute vec3 vDiffuseMaterial;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
void main(void)
{
gl_Position = projection * modelView * vPosition;
vec3 N = normalMatrix * vNormal;
vec3 L = normalize(vLightPosition);
vec3 E = vec3(0, 0, 1);
vec3 H = normalize(L + E);
float df = max(0.0, dot(N, L));
float sf = max(0.0, dot(N, H));
sf = pow(sf, shininess);
vec3 color = vAmbientMaterial + df * vDiffuseMaterial + sf * vSpecularMaterial;
vDestinationColor = vec4(color, 1);
vTextureCoordOut = vTextureCoord;
}
There was some monkey-see-monkey-do code in here, but I fixed it. First, we enable culling and disable front-face rendering:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
Then I changed the position of the light source:
glUniform3f(_lightPositionSlot, 1.0, 1.0, -2.5);
(I don't even need the light, so as a next step I should disable it entirely.) But finally I have a sphere, the user is inside it, and he can rotate it, zoom in and out, and see the texture!
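One more thing worth checking (an assumption, not part of the fix above): seen from the inside, the texture will appear mirrored left-to-right. If that matters, the u coordinate can be flipped in the fragment shader:
precision mediump float;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
uniform sampler2D Sampler;
void main()
{
    // Flip u so the image reads the right way round from inside the sphere.
    vec2 flipped = vec2(1.0 - vTextureCoordOut.x, vTextureCoordOut.y);
    gl_FragColor = texture2D(Sampler, flipped) * vDestinationColor;
}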