LWJGL3 Visual artifacts caused by different projections - glsl

I have a very simple glsl vertex shader, which is the following:
#version 430
in vec2 vertices;
in vec2 textures;
out vec2 tex_coords;
uniform mat4 projection;
uniform mat4 texModifier;
void main() {
    tex_coords = (texModifier * vec4(textures, 0, 1)).xy;
    gl_Position = projection * vec4(vertices, 0, 1);
}
This shader is used to render text. The text comes from a font sheet, and I change texModifier so that the texture coordinates land over the specific letter I want to draw. This works fine, but I noticed that different projection matrices cause different visual artifacts.
Here is an example:
This is the text at its original position.
And this is the same text with only the projection matrix translated 0.1f / 1920 to the left. The visual artifacts move from being around the commas and the r to being around the u, the S and more. These artifacts appear even when using linear interpolation.
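For scale: with the setOrtho(-1, 1, -1, 1, -1, 1) projection used below, the 2-unit NDC range spans the whole window, so (assuming the window is 1920 pixels wide, as the 0.1f / 1920 figure suggests) that translation amounts to a twentieth of a pixel:
0.1 / 1920 NDC units * (1920 / 2) px per NDC unit = 0.05 px
In other words, the glyph quads move by a sub-pixel amount, which changes where each pixel center samples the font sheet.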
This is the function used to draw text:
public void drawText(Shader shader, CharSequence text, float x, float y, int letters) {
    if (letters == -1)
        letters = text.length();
    Model model;
    Matrix4f projection = new Matrix4f();
    Texture fontSheet = texture;
    int textHeight = getTextHeight(text);
    projection.setOrtho(-1, 1, -1, 1, -1, 1);
    float drawX = x;
    float drawY = y;
    if (textHeight > fontHeight) {
        drawY += textHeight - fontHeight;
    }
    shader.bind();
    shader.setUniform("sampler", 0);
    fontSheet.bind(0);
    for (int i = 0; i < text.length() && i < letters; i++) {
        char ch = text.charAt(i);
        if (ch == '\n') {
            /* Line feed, set x and y to draw at the next line */
            drawY -= fontHeight;
            drawX = x;
            continue;
        }
        if (ch == '\r') {
            /* Carriage return, just skip it */
            continue;
        }
        Glyph g = glyphs.get(ch);
        model = Model.createQuad((float) g.width / 2, (float) g.height / 2);
        projection.setTranslation(((float) g.width / 2 + drawX) / Main.WINDOW_WIDTH, drawY / Main.WINDOW_HEIGHT, 0);
        Matrix4f texModifier =
                new Matrix4f()
                        .translate((float) g.x / fontSheet.getWidth(), 0, 0)
                        .scale((float) g.width / fontSheet.getWidth(), -(float) g.height / fontSheet.getHeight(), 1);
        shader.setUniform("projection", projection);
        shader.setUniform("texModifier", texModifier);
        model.render();
        drawX += g.width;
    }
}
Why is the projection causing these artifacts?

Related

How to generate camera rays for ray casting

I am trying to make a simple voxel engine with OpenGL and C++. My first step is to send out rays from the camera and detect if a ray intersects with something (for testing purposes it's just two planes). I have got it working without camera rotation by creating a full-screen quad and programming the fragment shader to send out a ray for every fragment (for now I'm assuming a fragment is a pixel) in the direction (texCoord.x, texCoord.y, -1). Now I am trying to implement camera rotation.
I have tried to generate a rotation matrix on the CPU and send it to the shader, which multiplies it with every ray. However, when I rotate the camera, the planes start to stretch in a way which I can only describe with this video:
https://www.youtube.com/watch?v=6NScMwnPe8c
Here is the code that creates the matrix and is run every frame:
float pi = 3.141592;
// camRotX and Y are defined elsewhere and can be controlled from the keyboard during runtime.
glm::vec3 camEulerAngles = glm::vec3(camRotX, camRotY, 0);
std::cout << "X: " << camEulerAngles.x << " Y: " << camEulerAngles.y << "\n";
// Convert to radians
camEulerAngles.x = camEulerAngles.x * pi / 180;
camEulerAngles.y = camEulerAngles.y * pi / 180;
camEulerAngles.z = camEulerAngles.z * pi / 180;
// Generate quaternion
glm::quat camRotation;
camRotation = glm::quat(camEulerAngles);
// Generate rotation matrix from quaternion
glm::mat4 camToWorldMatrix = glm::toMat4(camRotation);
// No transformation matrix is created because the rays should be relative to 0,0,0
// Send the rotation matrix to the shader
int camTransformMatrixID = glGetUniformLocation(shader, "cameraTransformationMatrix");
glUniformMatrix4fv(camTransformMatrixID, 1, GL_FALSE, glm::value_ptr(camToWorldMatrix));
And the fragment shader:
#version 330 core
in vec4 texCoord;
layout(location = 0) out vec4 color;
uniform vec3 cameraPosition;
uniform vec3 cameraTR;
uniform vec3 cameraTL;
uniform vec3 cameraBR;
uniform vec3 cameraBL;
uniform mat4 cameraTransformationMatrix;
uniform float fov;
uniform float aspectRatio;
float pi = 3.141592;
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
    if(direction.z != 0)
    {
        float multiplicationFactorFront = cellPosition.z - origin.z;
        if(multiplicationFactorFront > 0){
            vec2 interceptFront = vec2(direction.x * multiplicationFactorFront + origin.x,
                                       direction.y * multiplicationFactorFront + origin.y);
            if(interceptFront.x > cellPosition.x && interceptFront.x < cellPosition.x + cellSize &&
               interceptFront.y > cellPosition.y && interceptFront.y < cellPosition.y + cellSize)
            {
                return 1;
            }
        }
        float multiplicationFactorBack = cellPosition.z + cellSize - origin.z;
        if(multiplicationFactorBack > 0){
            vec2 interceptBack = vec2(direction.x * multiplicationFactorBack + origin.x,
                                      direction.y * multiplicationFactorBack + origin.y);
            if(interceptBack.x > cellPosition.x && interceptBack.x < cellPosition.x + cellSize &&
               interceptBack.y > cellPosition.y && interceptBack.y < cellPosition.y + cellSize)
            {
                return 2;
            }
        }
    }
    return 0;
}
void main()
{
    // For now I'm not accounting for FOV and aspect ratio because I want to get the rotation working first
    vec4 beforeRotateRayDirection = vec4(texCoord.x, texCoord.y, -1, 0);
    // Apply the rotation matrix that was generated on the cpu
    vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
    int t = RayHitCell(cameraPosition, rayDirection, vec3(0, 0, 5), 1);
    if(t == 1)
    {
        // Hit front plane
        color = vec4(0, 0, 1, 0);
    }
    else if(t == 2)
    {
        // Hit back plane
        color = vec4(0, 0, 0.5, 0);
    }
    else
    {
        // background color
        color = vec4(0, 1, 0, 0);
    }
}
Okay, it's really hard to know what is wrong, but I will try nonetheless.
Here are a few tips and notes:
1) You can debug directions by mapping them to RGB color. Keep in mind you should normalize the vectors and map them from (-1, 1) to (0, 1). Just do the dir * 0.5 + 0.5 type of thing. Example:
color = vec4(normalize(rayDirection) * 0.5 + 0.5, 1.0);
2) You can get the rotation matrix in a more straightforward manner. Build the quaternion from the look rotation yourself: first rotate around the Y axis (horizontal look) and then, and only then, around the X axis (vertical look). Keep in mind that the rotation order is implementation dependent if you initialize from Euler angles. Use mat4_cast to avoid the experimental glm extension (gtx) whenever possible. Example:
// Define rotation quaternion starting from look rotation
glm::quat camRotation = glm::quat(glm::vec3(0, 0, 0));
camRotation = glm::rotate(camRotation, glm::radians(camRotY), glm::vec3(0, 1, 0));
camRotation = glm::rotate(camRotation, glm::radians(camRotX), glm::vec3(1, 0, 0));
glm::mat4 camToWorldMatrix = glm::mat4_cast(camRotation);
3) Your beforeRotateRayDirection is a vector that (probably) points anywhere from (-1, -1, -1) to (1, 1, -1), which is not normalized; the length of (1, 1, 1) is √3 ≈ 1.732. Be sure you have taken that into account in your collision math, or just normalize the vector.
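For instance, reusing the names from your main() (this mirrors the call in the revised code further below):
vec3 rayDirection = normalize(vec3(cameraTransformationMatrix * beforeRotateRayDirection));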
My partial answer so far...
Your collision test is a bit weird... It appears you want to cast the ray against the Z-facing quad of the given cell (but twice, once for the front face and once for the back). I have reviewed your code logic and it makes some sense, but without the vertex program, and thus without knowing the range of the texCoord values, it is not possible to be sure. You might want to rethink your logic to something like this:
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
    // Möller–Trumbore-style ray/quad test:
    // solve origin + t * direction = cellPosition + u * tu + v * tv
    // Get quad side vectors
    vec3 tu = vec3(cellSize, 0, 0); // Quad U component
    vec3 tv = vec3(0, cellSize, 0); // Quad V component
    // Determinant for inverse matrix
    vec3 q = cross(direction, tv);
    float det = dot(tu, q);
    //if(abs(det) < 0.0000001) // If too close to zero
    //    return 0;
    float invdet = 1.0 / det;
    // Solve component parameters
    vec3 s = origin - cellPosition;
    float u = dot(s, q) * invdet;
    if(u < 0.0 || u > 1.0)
        return 0;
    vec3 r = cross(s, tu);
    float v = dot(direction, r) * invdet;
    if(v < 0.0 || v > 1.0)
        return 0;
    float t = dot(tv, r) * invdet;
    if(t <= 0.0)
        return 0;
    return 1;
}
void main()
{
    // For now I'm not accounting for FOV and aspect ratio because I want to get the
    // rotation working first
    vec4 beforeRotateRayDirection = vec4(texCoord.x, texCoord.y, -1, 0);
    // Apply the rotation matrix that was generated on the cpu
    vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
    int t = RayHitCell(cameraPosition, normalize(rayDirection), vec3(0, 0, 5), 1);
    if (t == 1)
    {
        // Hit front plane
        color = vec4(0, 0, 1, 0);
    }
    else
    {
        // background color
        color = vec4(0, 1, 0, 0);
    }
}
This should give you a plane; let me know if it works. A cube will be very easy to do (see the sketch below).
PS: u and v can be used for texture mapping.
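Regarding the cube: one way (a sketch, not tested) is to generalize the function above so it takes the quad's corner and side vectors, then call it once per cube face:
int RayHitQuad(vec3 origin, vec3 direction, vec3 corner, vec3 tu, vec3 tv)
{
    vec3 q = cross(direction, tv);
    float det = dot(tu, q);
    if(abs(det) < 0.0000001) // ray parallel to the quad
        return 0;
    float invdet = 1.0 / det;
    vec3 s = origin - corner;
    float u = dot(s, q) * invdet;
    if(u < 0.0 || u > 1.0)
        return 0;
    vec3 r = cross(s, tu);
    float v = dot(direction, r) * invdet;
    if(v < 0.0 || v > 1.0)
        return 0;
    float t = dot(tv, r) * invdet;
    if(t <= 0.0)
        return 0;
    return 1;
}
The front face of a cell is then RayHitQuad(origin, direction, cellPosition, vec3(cellSize, 0, 0), vec3(0, cellSize, 0)), the back face is the same quad shifted by cellSize along Z, and the remaining four faces just swap which axes tu and tv run along.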

Opengl ES 2.0 - Problem with color opacity by glDrawElements(GL_TRIANGLES)

I created a simple C++ class for drawing a square in OpenGL ES 2.0. It puts the square at a specific place with a specific color and opacity. All is fine except the opacity. I set color and opacity with the function setColor. I expect 0.0f to be fully transparent and 1.0f fully visible. It looks like it works for black, but white stays fully visible no matter what I set the opacity to. For other colors the opacity is weird too.
At the beginning I set:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
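For reference, with this blend function the value written to the framebuffer is:
result.rgb = src.rgb * src.a + dst.rgb * (1.0 - src.a)
so a fragment at opacity 0.1 contributes only a tenth of its own color, and what you see also depends on what is already in the framebuffer.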
My class:
GLSquare::GLSquare() {
    vecs = new std::vector<GLfloat>();
    indices = new std::vector<GLshort>();
    colors = new std::vector<GLfloat>();
    indicesCount = 6;
    for (int i = 0; i < 12; i++) {
        vecs->push_back(0.0f);
    }
    for (int i = 0; i < 16; i += 4) {
        colors->push_back(0.0f);
        colors->push_back(0.0f);
        colors->push_back(0.0f);
        colors->push_back(1.0f);
    }
    GLshort ind[] = { 0, 1, 2, 0, 2, 3 };
    for (int i = 0; i < indicesCount; i++) {
        indices->push_back(ind[i]);
    }
}

GLSquare::~GLSquare() {
    delete vecs;
    delete colors;
    delete indices;
}

void GLSquare::draw(Matrix *matrix) {
    glUseProgram(program->id);
    glEnableVertexAttribArray(program->positionHandle);
    glVertexAttribPointer(program->positionHandle, 3, GL_FLOAT, false, 12, &vecs->front());
    glEnableVertexAttribArray(program->colorHandle);
    // Prepare the background coordinate data
    glVertexAttribPointer(program->colorHandle, 4, GL_FLOAT, false, 0, &colors->front());
    glUniformMatrix4fv(program->matrixHandle, 1, false, matrix->projectionAndView);
    glDrawElements(GL_TRIANGLES, indicesCount, GL_UNSIGNED_SHORT, &indices->front());
    glDisableVertexAttribArray(program->positionHandle);
    glDisableVertexAttribArray(program->colorHandle);
}

void GLSquare::set(float left, float top, float width, float height) {
    position[0] = left;
    position[1] = top;
    size[0] = width;
    size[1] = height;
    vecs->at(0) = left;
    vecs->at(1) = top;
    vecs->at(3) = left;
    vecs->at(4) = top + height;
    vecs->at(6) = left + width;
    vecs->at(7) = top + height;
    vecs->at(9) = left + width;
    vecs->at(10) = top;
}

void GLSquare::setColor(Color color, GLfloat opacity) {
    for (int i = 0; i < 16; i += 4) {
        colors->at(i) = color.r;
        colors->at(i + 1) = color.g;
        colors->at(i + 2) = color.b;
        colors->at(i + 3) = opacity;
    }
}
My fragment shader is simple:
precision mediump float;
varying vec4 v_Color;
void main()
{
    gl_FragColor = v_Color;
}
My vertex shader:
uniform mat4 uMVPMatrix;
attribute vec4 vPosition;
attribute vec4 a_Color;
varying vec4 v_Color;
void main()
{
    gl_Position = uMVPMatrix * vPosition;
    v_Color = a_Color;
}
Getting colorHandle from the shader:
program->colorHandle = glGetAttribLocation(programID, "a_Color");
Here is how it looks with:
square->setColor(Color(1.0f, 1.0f, 1.0f), 0.1f);
(white rectangle in the top left corner)
And here it is with black color, opacity 0.1.
I found the problem. The code works correctly; another third-party library was overriding glBlendFunc. Fixing that solved the issue.
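If fixing the other library is not an option, a minimal guard is to re-assert the blend state right before drawing instead of only once at startup (a sketch, assuming only the blend state gets clobbered; square and matrix stand in for the objects used above):
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
square->draw(matrix); // now blends with the expected factors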

openframeworks on RPI2 Vertex shader Texture2D possible?

I'm new to OpenGL and GLSL and am trying to use them on an RPI2.
I did my first practice example with OpenGL 2.1 and GLSL version 120. Now I'm working with OpenGL ES 2.0.
What I want to do is store float values in a texture and send it to the vertex shader to make lines.
So I made one array to store the float values, loaded them into one FBO, and sent this FBO with setUniformTexture. But it seems the vertex shader doesn't get any values.
The OpenGL ES 2.0 document states that "Texture lookup functions are available to both vertex and fragment shaders." I have also checked GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS on the RPI2 and it returns 8.
Unfortunately, I cannot fetch any values from the texture that I send to the vertex shader.
What am I missing here?
I posted this question on the openframeworks forum as well.
Any help would be appreciated.
Thanks!
Vertex shader code:
precision highp float;
uniform mat4 modelViewProjectionMatrix;
uniform sampler2D valTex;
uniform vec2 spaceSize;
attribute vec4 position;
attribute vec2 texcoord;
void main() {
    vec4 pixPos = position;
    vec2 valAmt = texture2D(valTex, texcoord).xy;
    valAmt.x *= spaceSize.x;
    valAmt.y *= spaceSize.y;
    vec2 nPos;
    nPos.x = (pixPos.x + valAmt.x);
    nPos.y = (pixPos.y + valAmt.y);
    if (texcoord.x > 1.0) { // texcoord (1.1, 1.1) tags the fixed center vertex
        gl_Position = modelViewProjectionMatrix * vec4(pixPos.x, pixPos.y, 0, 1);
    } else {
        gl_Position = modelViewProjectionMatrix * vec4(nPos.x, nPos.y, 0, 1);
    }
}
Fragment shader:
void main() {
    gl_FragColor = vec4(1.0, 1.0, 0.5, 1.0);
}
OFX CODE
#include "ofApp.h"
void ofApp::setup(){
string ShaderFolder;
ShaderFolder = "shader_gles";
renderShader.load(ShaderFolder + "/fluidRender.vert", ShaderFolder + "/fluidRender.frag");
renderFbo.allocate(spaceSize, spaceSize, GL_RGB); //Render to Screen
renderFbo.begin();
ofClear(0);
renderFbo.end();
valTex.allocate(gridSize, gridSize, GL_RGB); //Store values from array
valTex.begin();
ofClear(0);
valTex.end();
int vertexNum = gridSize * gridSize;
vector<float> val(vertexNum * 3); //Store values
for (int x = 0; x < gridSize; x++) {
for (int y = 0; y < gridSize; y++) {
val[((x * 3 * gridSize) + y * 3) + 0] = 200.0; //x pos value
val[((x * 3 * gridSize) + y * 3) + 1] = 200.0; //y pos value
val[((x * 3 * gridSize) + y * 3) + 2] = 0.0;
}
}
valTex.getTexture().loadData(val.data(), gridSize, gridSize, GL_RGB);
mesh.setMode(OF_PRIMITIVE_LINES);
for (int x = 0; x < gridSize; x++) {
for (int y = 0; y < gridSize; y++) {
mesh.addVertex(ofVec3f(x * cellSize, y * cellSize)); //center vertex
mesh.addTexCoord(ofVec2f(1.1, 1.1));
mesh.addVertex(ofVec3f(x * cellSize, y * cellSize)); //val vertex
mesh.addTexCoord(ofVec2f(x / gridSize - 1, y / gridSize - 1)); //normalize texcoord
mesh.addIndex((x * 2) * gridSize + (y * 2) + 0);
mesh.addIndex((x * 2) * gridSize + (y * 2) + 1);
}
}
glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, &vertex_texture_units);
}
void ofApp::update(){
    vertexRender();
}

void ofApp::draw(){
    ofBackground(0);
    renderFbo.draw(0, 0);
    ofDrawBitmapString("v_tex_unit : " + ofToString(vertex_texture_units), 15, 15);
}

void ofApp::vertexRender(){
    renderFbo.begin();
    ofClear(0);
    renderShader.begin();
    renderShader.setUniformTexture("valTex", valTex.getTexture(), 0);
    renderShader.setUniform2f("spaceSize", spaceSize, spaceSize);
    mesh.draw();
    renderShader.end();
    renderFbo.end();
}
NOTE: The description above mentions GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, but the code queries GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS.
EDIT
The screenshot below shows what I want to draw using the vertex shader. In this case, I send the mouse x and y position as float values (0 up to the screen size, which is 960.0 here) to a uniform to place the value vertex.
Each line has two vertices. One has a fixed position, which comes from the addVertex(x, y) call:
mesh.addVertex(ofVec3f(x * cellSize, y * cellSize));
The other one is at 'fixed position + mouse position'. So when the mouse moves, a line connects the fixed-position vertex and the 'fixed position + mouse position' vertex.
renderShader.setUniform2f("mousePos", mouseX, mouseY);
In GLSL:
vec2 pixPos = position.xy;
vec2 nPos;
nPos.x = (pixPos.x + mousePos.x);
nPos.y = (pixPos.y + mousePos.y);
(screenshot: vertex_mouse_position; I can't embed pictures directly yet, sorry.)
The other picture is from when I send the texture (FBO) to the vertex shader. I made an array, set a 200.0 float value for each x, y position, and loaded this array into the FBO.
vector<float> val(vertexNum * 3); // Store values
for (int x = 0; x < gridSize; x++) {
    for (int y = 0; y < gridSize; y++) {
        val[((x * 3 * gridSize) + y * 3) + 0] = 200.0; // x pos value
        val[((x * 3 * gridSize) + y * 3) + 1] = 200.0; // y pos value
        val[((x * 3 * gridSize) + y * 3) + 2] = 0.0;
    }
}
valTex.getTexture().loadData(val.data(), gridSize, gridSize, GL_RGB);
Then I send this FBO to the vertex shader:
renderShader.setUniformTexture("velTex", valTex.getTexture(), 0);
But I just get a black screen displaying nothing.
(screenshot: vertex_texture)
I hope this helps you understand my issue better. Thanks for the replies; I really appreciate them.
You are checking the wrong query - check GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS.
On most GLES 2.0 implementations this will return zero; i.e. vertex texturing not supported. It's mandatory in GLES 3.x.
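A minimal capability check along those lines (a sketch for a GLES 2.0 context):
GLint vertexTextureUnits = 0;
glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, &vertexTextureUnits);
if (vertexTextureUnits == 0) {
    // texture2D() in the vertex shader can never return data on this GPU;
    // fall back to e.g. packing the values into a vertex attribute.
}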

How to access all vertexes within the same patch in Tessellation Control Shader

I want to do LOD in the Tessellation Control Shader. My method is to calculate the area each patch occupies in screen coordinates and set a different tessellation level for each patch accordingly.
So I need to access all vertices within a patch, and I do so like this:
for(int i = 0; i < 4; i++)
{
    position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
}
where I defined my patch in the TCS like this:
#version 400
layout( vertices=4 ) out;
and here is the related OpenGL code:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rect_index_buffer);
glPatchParameteri(GL_PATCH_VERTICES, 4);
glDrawElements(GL_PATCHES, RECT_INDEX_SIZE, GL_UNSIGNED_INT, 0);
However, the result is strange. The tessellation level should be related to the area the patch covers on the screen, but all patches end up with the same tessellation level.
So what's the problem?
I guess the way I access the vertices within a patch is wrong. How can I do that correctly?
The following is the code of my Tessellation Control Shader; I hope it helps:
#version 400

layout( vertices=4 ) out;

uniform mat4 ProjectionMatrix;
uniform mat4 ModelViewMatrix;
uniform float window_height;
uniform float window_width;

float PI = 3.14159;

float calcTriangleArea(float l[3]) // Heron's formula
{
    float p = (l[0] + l[1] + l[2]) / 2.0;
    return sqrt(p * (p - l[0]) * (p - l[1]) * (p - l[2]));
}

float calcSqureArea(vec4 position[4])
{
    vec2 position_screen[4];
    for(int i = 0; i < 4; i++)
    {
        position_screen[i] = position[i].xy;
    }
    float l[4];
    for(int i = 0; i < 4; i++)
    {
        l[i] = length(position_screen[(i + 1) % 4] - position_screen[i % 4]);
    }
    float diagonal = length(position_screen[2] - position_screen[0]);
    float l1[3];
    float l2[3];
    l1[0] = l[0];
    l1[1] = l[1];
    l1[2] = diagonal;
    l2[0] = l[2];
    l2[1] = l[3];
    l2[2] = diagonal;
    float area = calcTriangleArea(l1) + calcTriangleArea(l2);
    return area;
}

float checkInsideView(vec4 position[4]) // check if the patch is visible
{
    int flag = 4;
    for(int i = 0; i < 4; i++)
    {
        if((position[i].x >= -window_width / 2.0) && (position[i].x <= window_width / 2.0) &&
           (position[i].y >= -window_height / 2.0) && (position[i].y <= window_height / 2.0))
        {
            flag--;
        }
    }
    if(flag == 0) // all 4 vertices are visible
    {
        return 0.0;
    }
    else if(flag == 4) // no vertices are visible
    {
        return 2.0;
    }
    else // only some vertices are visible
    {
        return 1.0;
    }
}

float calcLODLevel()
{
    vec4 position_screen[4];
    for(int i = 0; i < 4; i++)
    {
        position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
    }
    float in_view_level = checkInsideView(position_screen);
    // tess number is decided by the area that this patch covers on
    // the screen
    float area = calcSqureArea(position_screen);
    float level = sqrt(area);
    if(in_view_level == 1.0)
    {
        level /= sqrt(2);
    }
    // don't do tessellation
    // if this patch is not visible
    else if(in_view_level == 2.0)
    {
        level = 1.0;
    }
    return level;
}

void main()
{
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
    float lod_level = calcLODLevel();
    gl_TessLevelOuter[0] = lod_level;
    gl_TessLevelOuter[1] = lod_level;
    gl_TessLevelOuter[2] = lod_level;
    gl_TessLevelOuter[3] = lod_level;
    gl_TessLevelInner[0] = lod_level;
    gl_TessLevelInner[1] = lod_level;
}
I think the problem is with your calculation of the screen coordinates, resulting in tessellation levels that are too small. The key part is this:
position_screen[i] = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
What you're calculating here are clip coordinates, not screen coordinates. To get screen coordinates from clip coordinates, you need to:
1) Perform the perspective division. This gives you NDC (Normalized Device Coordinates) in the range [-1.0, 1.0].
2) Calculate screen coordinates from the NDC.
In code, the calculation could look like this:
vec4 posClip = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
vec2 posNdc = posClip.xy * (1.0 / posClip.w);
vec2 posScreen = 0.5 * (posNdc + 1.0) * vec2(window_width, window_height);
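Putting it together, calcLODLevel() could build the screen-space positions like this (a sketch, assuming window_width and window_height hold the viewport size in pixels and the four patch vertices are ordered around the quad):
float calcLODLevel()
{
    vec2 p[4];
    for(int i = 0; i < 4; i++)
    {
        vec4 posClip = ProjectionMatrix * ModelViewMatrix * gl_in[i].gl_Position;
        vec2 posNdc = posClip.xy * (1.0 / posClip.w);
        p[i] = 0.5 * (posNdc + 1.0) * vec2(window_width, window_height);
    }
    // Area of the convex screen-space quad: half the cross product
    // of its diagonals.
    vec2 d1 = p[2] - p[0];
    vec2 d2 = p[3] - p[1];
    float area = 0.5 * abs(d1.x * d2.y - d1.y * d2.x);
    return max(1.0, sqrt(area));
}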

OpenGL ES - texturing sphere

I have a sphere and I can map a texture onto it. But right now the texture is on the outside of the sphere, and I need it on the inside: my user sits inside the sphere and views it from within (rotating and zooming). So it is basically a sky dome, but a sphere. Maybe I need to fix the UV texture coordinates or enable something?
Here is the code for generating the sphere:
class Sphere : public ParametricSurface {
public:
    Sphere(float radius) : m_radius(radius)
    {
        ParametricInterval interval = { ivec2(20, 20), vec2(Pi, TwoPi), vec2(8, 14) };
        SetInterval(interval);
    }
    vec3 Evaluate(const vec2& domain) const
    {
        float u = domain.x, v = domain.y;
        float x = m_radius * sin(u) * cos(v);
        float y = m_radius * cos(u);
        float z = m_radius * -sin(u) * sin(v);
        return vec3(x, y, z);
    }
private:
    float m_radius;
};
vec2 ParametricSurface::ComputeDomain(float x, float y) const
{
    return vec2(x * m_upperBound.x / m_slices.x, y * m_upperBound.y / m_slices.y);
}

void ParametricSurface::GenerateVertices(float * vertices) const
{
    float* attribute = vertices;
    for (int j = 0; j < m_divisions.y; j++) {
        for (int i = 0; i < m_divisions.x; i++) {
            // Compute Position
            vec2 domain = ComputeDomain(i, j);
            vec3 range = Evaluate(domain);
            attribute = range.Write(attribute);
            // Compute Normal
            if (m_vertexFlags & VertexFlagsNormals) {
                float s = i, t = j;
                // Nudge the point if the normal is indeterminate.
                if (i == 0) s += 0.01f;
                if (i == m_divisions.x - 1) s -= 0.01f;
                if (j == 0) t += 0.01f;
                if (j == m_divisions.y - 1) t -= 0.01f;
                // Compute the tangents and their cross product.
                vec3 p = Evaluate(ComputeDomain(s, t));
                vec3 u = Evaluate(ComputeDomain(s + 0.01f, t)) - p;
                vec3 v = Evaluate(ComputeDomain(s, t + 0.01f)) - p;
                vec3 normal = u.Cross(v).Normalized();
                if (InvertNormal(domain))
                    normal = -normal;
                attribute = normal.Write(attribute);
            }
            // Compute Texture Coordinates
            if (m_vertexFlags & VertexFlagsTexCoords) {
                float s = m_textureCount.x * i / m_slices.x;
                float t = m_textureCount.y * j / m_slices.y;
                attribute = vec2(s, t).Write(attribute);
            }
        }
    }
}
void ParametricSurface::GenerateLineIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}

void ParametricSurface::GenerateTriangleIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i + m_divisions.x;
            *index++ = vertex + next;
            *index++ = vertex + next + m_divisions.x;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}
And here is the VBO creation:
+ (DrawableVBO *)createVBO:(SurfaceType)surfaceType
{
    ISurface * surface = [self createSurface:surfaceType]; // just Sphere type
    surface->SetVertexFlags(VertexFlagsNormals | VertexFlagsTexCoords); // which vertices I need
    // Get vertices from the surface.
    int vertexSize = surface->GetVertexSize();
    int vBufSize = surface->GetVertexCount() * vertexSize;
    GLfloat * vbuf = new GLfloat[vBufSize];
    surface->GenerateVertices(vbuf);
    // Get triangle indices from the surface.
    int triangleIndexCount = surface->GetTriangleIndexCount();
    unsigned short * triangleBuf = new unsigned short[triangleIndexCount];
    surface->GenerateTriangleIndices(triangleBuf);
    // Create the VBO for the vertices.
    GLuint vertexBuffer;
    glGenBuffers(1, &vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, vBufSize * sizeof(GLfloat), vbuf, GL_STATIC_DRAW);
    // Create the VBO for the triangle indices.
    GLuint triangleIndexBuffer;
    glGenBuffers(1, &triangleIndexBuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleIndexBuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangleIndexCount * sizeof(GLushort), triangleBuf, GL_STATIC_DRAW);
    delete [] vbuf;
    delete [] triangleBuf;
    delete surface;
    DrawableVBO * vbo = [[DrawableVBO alloc] init];
    vbo.vertexBuffer = vertexBuffer;
    vbo.triangleIndexBuffer = triangleIndexBuffer;
    vbo.vertexSize = vertexSize;
    vbo.triangleIndexCount = triangleIndexCount;
    return vbo;
}
Here is my light setup:
- (void)setupLights
{
    // Set up some default material parameters.
    glUniform3f(_ambientSlot, 0.04f, 0.04f, 0.04f);
    glUniform3f(_specularSlot, 0.5, 0.5, 0.5);
    glUniform1f(_shininessSlot, 50);
    // Initialize various state.
    glEnableVertexAttribArray(_positionSlot);
    glEnableVertexAttribArray(_normalSlot);
    glUniform3f(_lightPositionSlot, 1.0, 1.0, 5.0);
    glVertexAttrib3f(_diffuseSlot, 0.8, 0.8, 0.8);
}
And finally, the shaders.
Fragment:
precision mediump float;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
uniform sampler2D Sampler;
void main()
{
    gl_FragColor = texture2D(Sampler, vTextureCoordOut) * vDestinationColor;
}
Vertex:
uniform mat4 projection;
uniform mat4 modelView;
attribute vec4 vPosition;
attribute vec2 vTextureCoord;
uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
uniform vec3 vAmbientMaterial;
uniform vec3 vSpecularMaterial;
uniform float shininess;
attribute vec3 vNormal;
attribute vec3 vDiffuseMaterial;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
void main(void)
{
    gl_Position = projection * modelView * vPosition;
    vec3 N = normalMatrix * vNormal;
    vec3 L = normalize(vLightPosition);
    vec3 E = vec3(0, 0, 1);
    vec3 H = normalize(L + E);
    float df = max(0.0, dot(N, L));
    float sf = max(0.0, dot(N, H));
    sf = pow(sf, shininess);
    vec3 color = vAmbientMaterial + df * vDiffuseMaterial + sf * vSpecularMaterial;
    vDestinationColor = vec4(color, 1);
    vTextureCoordOut = vTextureCoord;
}
It was some monkey code, but I fixed it. First we enable culling and cull the front faces, so only the inside of the sphere is rendered:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
Then I changed the position of the light source:
glUniform3f(_lightPositionSlot, 1.0, 1.0, -2.5);
(I don't even need the light, so the next step is to disable it altogether.) But finally I have a sphere, the user is inside it, can rotate it, zoom in and out, and see the texture!