How to find the normal of a complex shape (more than 4 vertices) and apply lighting to it? - OpenGL

I'm currently trying to apply lighting to a complex shape drawn with the GL_TRIANGLE_STRIP flag. Since every plane in XYZ space can be defined by 3 points, calculating the normal is quite easy, but what happens when I use GL_TRIANGLE_STRIP?
Just to make the following code more readable, the drawing process goes like this:
(x, y) -> (x+1, y) -> (x, y+1) -> (x+1, y+1)
for (int i = 0; i < m_GraphConfig.MAXY; i += 2)
{
    glBegin(GL_TRIANGLE_STRIP);
    for (int j = 0; j < m_GraphConfig.MAXX; j += 2)
    {
        if ((i * m_GraphConfig.MAXX + j + 2) % (m_GraphConfig.MAXX) != 0 && (i + 2) % (m_GraphConfig.MAXY) != 0 && (j + 2) % (m_GraphConfig.MAXX) != 0)
        {
            index = i * (m_GraphConfig.MAXX) + j;
            heightVal = host[index];
            glColor3b(((heightVal % 32) * 7) % 255, ((heightVal % 16) * 14) % 255, ((heightVal % 128) * 2) % 255);
            glTexCoord2f(0.0f, 0.0f);
            glVertex3d(xFactor * j, host[index] / m_GraphConfig.devider, zFactor * i);

            index = i * (m_GraphConfig.MAXX) + j + 2;
            heightVal = host[index];
            glColor3b(((heightVal % 32) * 7) % 255, ((heightVal % 16) * 14) % 255, ((heightVal % 128) * 2) % 255);
            glTexCoord2f(1.0f, 0.0f);
            glVertex3d(xFactor * (j + 2), host[index] / m_GraphConfig.devider, zFactor * i);

            index = i * (m_GraphConfig.MAXX) + j + (2 * m_GraphConfig.MAXX);
            heightVal = host[index];
            glColor3b(((heightVal % 32) * 7) % 255, ((heightVal % 16) * 14) % 255, ((heightVal % 128) * 2) % 255);
            glTexCoord2f(0.0f, 1.0f);
            glVertex3d(xFactor * j, host[index] / m_GraphConfig.devider, zFactor * (i + 2));

            index = i * (m_GraphConfig.MAXX) + j + (2 * m_GraphConfig.MAXX) + 2;
            heightVal = host[index];
            glColor3b(((heightVal % 32) * 7) % 255, ((heightVal % 16) * 14) % 255, ((heightVal % 128) * 2) % 255);
            glTexCoord2f(1.0f, 1.0f);
            glVertex3d(xFactor * (j + 2), host[index] / m_GraphConfig.devider, zFactor * (i + 2));
        }
    }
    glEnd();
    glBindTexture(GL_TEXTURE_2D, 0); // unbind the texture
}

It depends a lot on how you use it. Is it a flat surface (e.g. a cylinder bottom) or a curvy shape (e.g. a cylinder side)?
For the first case you can calculate the normal of every triangle in the strip and use the average of all of them for the polygon.
For the second case you need to split the triangle strip into triangles and treat/render them separately, one by one. If you want to keep the smooth shading and the triangle strip, then you will need to calculate and use per-vertex normals (each of which is the average of the normals of all triangles that share that vertex).
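A minimal sketch of that per-vertex averaging for a regular heightfield like the one in the question (the Vec3 struct and helper names are mine, not the asker's; host[], xFactor, zFactor and devider are the question's names):
#include <cmath>
#include <vector>

struct Vec3 { float x, y, z; };

static Vec3 sub(const Vec3& a, const Vec3& b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static Vec3 cross(const Vec3& a, const Vec3& b)
{
    return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x };
}
static Vec3 normalize(const Vec3& v)
{
    float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return (len > 0.0f) ? Vec3{ v.x / len, v.y / len, v.z / len } : Vec3{ 0.0f, 1.0f, 0.0f };
}

// heights is the row-major height array (host[] in the question), rows x cols;
// positionAt() rebuilds the same positions that are passed to glVertex3d.
std::vector<Vec3> buildVertexNormals(const std::vector<float>& heights, int rows, int cols,
                                     float xFactor, float zFactor, float devider)
{
    auto positionAt = [&](int i, int j) -> Vec3 {
        return { xFactor * j, heights[i * cols + j] / devider, zFactor * i };
    };
    std::vector<Vec3> normals(rows * cols, Vec3{ 0.0f, 0.0f, 0.0f });
    auto addTo = [&](int idx, const Vec3& n) { normals[idx].x += n.x; normals[idx].y += n.y; normals[idx].z += n.z; };

    for (int i = 0; i + 1 < rows; ++i) {
        for (int j = 0; j + 1 < cols; ++j) {
            Vec3 p00 = positionAt(i, j),     p01 = positionAt(i, j + 1);
            Vec3 p10 = positionAt(i + 1, j), p11 = positionAt(i + 1, j + 1);

            // the two triangles of this grid cell, wound so the normals point up (+y)
            Vec3 n1 = cross(sub(p10, p00), sub(p01, p00)); // triangle p00, p10, p01
            Vec3 n2 = cross(sub(p10, p01), sub(p11, p01)); // triangle p01, p10, p11

            int a = i * cols + j,       b = i * cols + (j + 1);
            int c = (i + 1) * cols + j, d = (i + 1) * cols + (j + 1);
            addTo(a, n1); addTo(c, n1); addTo(b, n1);  // accumulate face normal n1 into its 3 vertices
            addTo(b, n2); addTo(c, n2); addTo(d, n2);  // accumulate face normal n2 into its 3 vertices
        }
    }
    for (Vec3& n : normals) n = normalize(n);  // normalized sum of face normals = the average direction
    return normals;
}
In the drawing loop you would then issue glNormal3f(n.x, n.y, n.z) with n = normals[i * MAXX + j] right before the matching glVertex3d call, and the fixed-function lighting will shade the strip smoothly.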

Related

How do I get correct answers using my code with the barycentric formula?

My function getHeightOfTerrain() calls a barycentric formula function that is not returning the correct height for the one test height that I set in heightMapFromArray[][].
I've tried watching the OpenGL Java game tutorials 14, 21, and 22 by "ThinMatrix", and I am confused about how to use my array heightMapforBaryCentric in both of the supplied functions, and how to set the arguments that are passed to the baryCentric() function so that I can solve the problem.
int creaateTerrain(int height, int width)
{
    float holderY[6] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
    float scaleit = 1.5f;
    float holder[6] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
    for (int z = 0, z2 = 0; z < iterationofHeightMap; z2++)
    {
        //each loop is two iterations and creates one quad (two triangles)
        //however because each iteration is by two (i.e. : x=x+2) on bottom
        //the amount of triangles is half the x value
        //
        //number of vertices : 80 x 80 x 6.
        //column
        for (int x = 0, x2 = 0; x < iterationofHeightMap; x2++)
        {
            //relevant - A : first triangle - on left triangle
            //[row][column]
            holder[0] = heightMapFromArray[z][x];
            //holder[0] = (float)imageData[(z / 2 * MAP_Z + (x / 2)) * 3];
            //holder[0] = holder[0] / 255;// *scaleit;
            vertices.push_back(glm::vec3(x, holder[0], z));
            //match height map with online barycentric use
            heightMapforBaryCentric[x2][z2] = holder[0];

            holder[1] = heightMapFromArray[z+2][x];
            //holder[1] = (float)imageData[(((z + 2) / 2 * MAP_Z + ((x) / 2))) * 3];
            //holder[1] = holder[1] / 255;// 6 * scaleit;
            vertices.push_back(glm::vec3(x, holder[1], z + 2));
            //match height map with online barycentric use
            heightMapforBaryCentric[x2][z2+1] = holder[1];

            holder[2] = heightMapFromArray[z+2][x+2];
            //holder[2] = (float)imageData[(((z + 2) / 2 * MAP_Z + ((x + 2) / 2))) * 3];
            //holder[2] = holder[2] / 255;// *scaleit;
            vertices.push_back(glm::vec3(x + 2, holder[2], z + 2));
            //match height map with online barycentric use
            heightMapforBaryCentric[x2+1][z2+1] = holder[2];

            //relevant - B : second triangle (on right side)
            holder[3] = heightMapFromArray[z][x];
            //holder[3] = (float)imageData[((z / 2)*MAP_Z + (x / 2)) * 3];
            //holder[3] = holder[3] / 255;// 256 * scaleit;
            vertices.push_back(glm::vec3(x, holder[3], z));

            holder[4] = heightMapFromArray[x+2][z+2];
            //holder[4] = (float)imageData[(((z + 2) / 2 * MAP_Z + ((x + 2) / 2))) * 3];
            //holder[4] = holder[4] / 255;// *scaleit;
            vertices.push_back(glm::vec3(x + 2, holder[4], z + 2));

            holder[5] = heightMapFromArray[x+2][z];
            //holder[5] = (float)imageData[((z / 2)*MAP_Z + ((x + 2) / 2)) * 3];
            //holder[5] = holder[5] / 255;// *scaleit;
            vertices.push_back(glm::vec3(x + 2, holder[5], z));

            x = x + 2;
        }
        z = z + 2;
    }
    return(1);
}
float getHeightOfTerrain(float worldX, float worldZ) {
    float terrainX = worldX;
    float terrainZ = worldZ;
    int gridSquareSize = 2.0f;
    gridX = (int)floor(terrainX / gridSquareSize);
    gridZ = (int)floor(terrainZ / gridSquareSize);
    xCoord = ((float)(fmod(terrainX, gridSquareSize)) / (float)gridSquareSize);
    zCoord = ((float)(fmod(terrainZ, gridSquareSize)) / (float)gridSquareSize);
    if (xCoord <= (1 - zCoord))
    {
        //left triangle
        answer = baryCentric(
            glm::vec3(0.0f, heightMapforBaryCentric[gridX][gridZ], 0.0f),
            glm::vec3(0.0f, heightMapforBaryCentric[gridX][gridZ+1], 1.0f),
            glm::vec3(1.0f, heightMapforBaryCentric[gridX+1][gridZ+1], 1.0f),
            glm::vec2(xCoord, zCoord));
        // if (answer != 1)
        // {
        //     fprintf(stderr, "Z:gridx: %d gridz: %d answer: %f\n", gridX, gridZ, answer);
        // }
    }
    else
    {
        //right triangle
        answer = baryCentric(glm::vec3(0, heightMapforBaryCentric[gridX][gridZ], 0),
            glm::vec3(1, heightMapforBaryCentric[gridX+1][gridZ+1], 1),
            glm::vec3(1, heightMapforBaryCentric[gridX+1][gridZ], 0),
            glm::vec2(xCoord, zCoord));
    }
    if (answer == 1)
    {
        answer = 0;
    }
    //answer = abs(answer - 1);
    return(answer);
}

float baryCentric(glm::vec3 p1, glm::vec3 p2, glm::vec3 p3, glm::vec2 pos) {
    float det = (p2.z - p3.z) * (p1.x - p3.x) + (p3.x - p2.x) * (p1.z - p3.z);
    float l1 = ((p2.z - p3.z) * (pos.x - p3.x) + (p3.x - p2.x) * (pos.y - p3.z)) / det;
    float l2 = ((p3.z - p1.z) * (pos.x - p3.x) + (p1.x - p3.x) * (pos.y - p3.z)) / det;
    float l3 = 1.0f - l1 - l2;
    return (l1 * p1.y + l2 * p2.y + l3 * p3.y);
}
My expected result was that the height at the center of the test grid would be the set value of 0.5, with gradually smaller values as the heights decline around it. Instead, the returned heights were all the same, varied randomly, or increased. Usually these heights were under the value of one.

What am I doing wrong when executing the Sobel filter function in C++?

Here is my Sobel filter function performed on a grayscale image. Apparently I'm not doing my calculations correctly, because I keep getting an all-black image. I have already turned in the project, but it is bothering me that the results aren't right.
int sobelH[3][3] = { { -1, 0, 1 },
                     { -2, 0, 2 },
                     { -1, 0, 1 } },
    sobelV[3][3] = { {  1,  2,  1 },
                     {  0,  0,  0 },
                     { -1, -2, -1 } };
//variable declaration
int mag;
int pix_x, pix_y = 0;
int img_x, img_y;
for (img_x = 0; img_x < img->x; img_x++)
{
    for (img_y = 0; img_y < img->y; img_y++)
    {
        pix_x = 0;
        pix_y = 0;
        //calculating the X and Y convolutions
        for (int i = -1; i <= 1; i++)
        {
            for (int j = -1; j <= 1; j++)
            {
                pix_x += (img->data[img_y * img->x + img_x].red + img->data[img_y * img->x + img_x].green + img->data[img_y * img->x + img_x].blue) * sobelH[1 + i][1 + j];
                pix_y += (img->data[img_y * img->x + img_x].red + img->data[img_y * img->x + img_x].green + img->data[img_y * img->x + img_x].blue) * sobelV[1 + i][1 + j];
            }
        }
        //Gradient magnitude
        mag = sqrt((pix_x * pix_x) + (pix_y * pix_y));
        if (mag > RGB_COMPONENT_COLOR)
            mag = 255;
        if (mag < 0)
            mag = 0;
        //Setting the new pixel value
        img->data[img_y * img->x + img_x].red = mag;
        img->data[img_y * img->x + img_x].green = mag;
        img->data[img_y * img->x + img_x].blue = mag;
    }
}
Although your code could use some improvement, the main problem is that you compute the convolution at a constant img_y and img_x. What you need to do is:
pix_x += (img->data[img_y * img->x + img_x + i].red + img->data[img_y * img->x + img_x + i].green + img->data[img_y * img->x + img_x + i].blue) * sobelH[1 + i][1 + j];
Indeed, the Sobel kernel coefficients sum to zero, so convolving it with a constant neighbourhood (which is what you get when you always sample the same pixel) gives zero everywhere, hence the all-black image.
Note that in the above example I do not take the border of the image into account. You should make sure not to access pixels that are outside your pixel array.
Another mistake is that you're writing into the input image. You write at location (x,y), then compute the filter result for location (x+1,y) using the modified value at (x,y), which is the wrong value to use.
You need to write your result to a new image.
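A minimal sketch of how the two fixes combine, reusing the img->x / img->y / img->data layout and the sobelH / sobelV kernels from the question (the separate out buffer and the border handling are my additions):
// needs #include <cmath> and #include <vector>
std::vector<int> out(img->x * img->y, 0);   // write results here, not into img->data

for (int y = 1; y < img->y - 1; y++)        // skip the 1-pixel border
{
    for (int x = 1; x < img->x - 1; x++)
    {
        int gx = 0, gy = 0;
        for (int i = -1; i <= 1; i++)
        {
            for (int j = -1; j <= 1; j++)
            {
                // sample the neighbour (x + j, y + i), not always the centre pixel
                int idx = (y + i) * img->x + (x + j);
                int intensity = img->data[idx].red + img->data[idx].green + img->data[idx].blue;
                gx += intensity * sobelH[1 + i][1 + j];
                gy += intensity * sobelV[1 + i][1 + j];
            }
        }
        int mag = (int)std::sqrt((double)(gx * gx + gy * gy));
        if (mag > 255) mag = 255;           // clamp to the displayable range
        out[y * img->x + x] = mag;
    }
}
// only now copy the magnitudes back into the image
for (int k = 0; k < img->x * img->y; k++)
    img->data[k].red = img->data[k].green = img->data[k].blue = out[k];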

BGRA -> RGBA and vertical flip, OptiX

I have a buffer containing a "raw" BGRA texture with one byte per color.
The lines are in reversed order (the texture is upside down).
The BGRA buffer is all green (0, 255, 0, 255).
I need to convert that to RGBA and flip the texture's lines.
I tried this:
// bgra is an unsigned char*
int width = 1366;
int height = 768;
unsigned char* rgba = new unsigned char[width * height * 4];
for (int y = height - 1; y >= 0; y--)
{
    for (int x = 0; x < width; x++)
    {
        rgba[(x * y * 4)]     = bgra[(x * y * 4) + 2];
        rgba[(x * y * 4) + 1] = bgra[(x * y * 4) + 1];
        rgba[(x * y * 4) + 2] = bgra[(x * y * 4)];
        rgba[(x * y * 4) + 3] = bgra[(x * y * 4) + 3];
    }
}
But the result when rendered is not a full green screen, but this:
What might I be doing wrong here?
Your indexing is wrong.
This is how it should be done:
rgba[(x + y * width) * 4] = bgra[(x + y * width) * 4 + 2]
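Putting the corrected indexing together with the vertical flip, a sketch along these lines (using the same width / height / bgra / rgba variables as the question) would be:
for (int y = 0; y < height; y++)
{
    for (int x = 0; x < width; x++)
    {
        int src = (x + y * width) * 4;                 // pixel in the BGRA source
        int dst = (x + (height - 1 - y) * width) * 4;  // same column, flipped row, in the RGBA destination

        rgba[dst + 0] = bgra[src + 2];  // R
        rgba[dst + 1] = bgra[src + 1];  // G
        rgba[dst + 2] = bgra[src + 0];  // B
        rgba[dst + 3] = bgra[src + 3];  // A
    }
}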

Wrong pixel locations with glDrawPixels

I have been playing around with trying to draw a 320 by 240 full-screen-resolution image in OpenGL using Java and LWJGL. I set the resolution to 640 by 480 and doubled the size of the pixels to fill in the space. After a lot of Google searching I found some information about using the glDrawPixels function to speed up drawing to the screen. I wanted to test it by assigning random colors to all the pixels on the screen, but it wouldn't fill the screen. I divided the width into 4 sections of 80 pixels each and colored them red, green, blue, and white. I saw that I was interleaving the colors, but I can't figure out how.
Here is an image of the output:
Here is where I run the OpenGL code:
// init OpenGL
GL11.glMatrixMode(GL11.GL_PROJECTION);
GL11.glLoadIdentity();
GL11.glOrtho(0, 640, 0, 480, 1, -1);
GL11.glMatrixMode(GL11.GL_MODELVIEW);

while (!Display.isCloseRequested()) {
    pollInput();
    // Clear the screen and depth buffer
    GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
    randomizePixels();
    GL11.glRasterPos2i(0, 0);
    GL11.glDrawPixels(320, 240, GL11.GL_RGBA, GL11.GL_UNSIGNED_BYTE, buff);
    GL11.glPixelZoom(2, 2);
    Display.update();
}
Display.destroy();
}
And here is where I create the pixel color data:
public void randomizePixels() {
    for (int y = 0; y < 240; y++) {
        for (int x = 0; x < 320; x += 4) {
            /*
            pixels[x * 320 + y] = (byte)(-128 + ran.nextInt(256));
            pixels[x * 320 + y + 1] = (byte)(-128 + ran.nextInt(256));
            pixels[x * 320 + y + 2] = (byte)(-128 + ran.nextInt(256));
            pixels[x * 320 + y + 3] = (byte)(-128 + ran.nextInt(256));
            */
            if (x >= 0 && x < 80) {
                pixels[y * 240 + x] = (byte)128;
                pixels[y * 240 + x + 1] = (byte)0;
                pixels[y * 240 + x + 2] = (byte)0;
                pixels[y * 240 + x + 3] = (byte)128;
            } else if (x >= 80 && x < 160) {
                pixels[y * 240 + x] = (byte)0;
                pixels[y * 240 + x + 1] = (byte)128;
                pixels[y * 240 + x + 2] = (byte)0;
                pixels[y * 240 + x + 3] = (byte)128;
            } else if (x >= 160 && x < 240) {
                pixels[y * 240 + x] = (byte)0;
                pixels[y * 240 + x + 1] = (byte)0;
                pixels[y * 240 + x + 2] = (byte)128;
                pixels[y * 240 + x + 3] = (byte)128;
            } else if (x >= 240 && x < 320) {
                pixels[y * 240 + x] = (byte)128;
                pixels[y * 240 + x + 1] = (byte)128;
                pixels[y * 240 + x + 2] = (byte)128;
                pixels[y * 240 + x + 3] = (byte)128;
            }
        }
    }
    buff.put(pixels).flip();
}
If you can figure out why I can't get the pixels to line up with the x and y coordinates I want them at, that would be great. I have read that glDrawPixels probably isn't the best or fastest way to draw pixels to the screen, but I want to understand why I'm having this particular issue before I move on to some other method.
Just load your image (unscaled) into a texture and draw a textured quad.
Don't use glDrawPixels. This function was never properly optimized in most drivers, has been deprecated (OpenGL 3.0) and was removed from the core profile in later versions.
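A minimal sketch of that textured-quad approach, written as plain C-style GL for brevity (the GL11.* calls in LWJGL have the same names and arguments); pixels stands for your 320x240x4 byte buffer:
// one-time setup: upload the 320x240 RGBA buffer as a texture
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // keep hard pixel edges when scaled up
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 320, 240, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

// every frame: update the pixels and draw one quad covering the 640x480 ortho view
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 320, 240, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(0,   0);
glTexCoord2f(1, 0); glVertex2f(640, 0);
glTexCoord2f(1, 1); glVertex2f(640, 480);
glTexCoord2f(0, 1); glVertex2f(0,   480);
glEnd();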
I spot 2 issues in your randomizePixels().
1. Indexing Pixel Buffer
The total size of the pixel buffer is 320 x 240 x 4 bytes because the pixel format is GL_RGBA. So, indexing each pixel with the subscript operator, [], would look like this:
for (int y = 0; y < 240; y++)
{
    for (int x = 0; x < 320; x++)
    {
        pixels[y * 320 * 4 + x * 4 + 0] = ... // R
        pixels[y * 320 * 4 + x * 4 + 1] = ... // G
        pixels[y * 320 * 4 + x * 4 + 2] = ... // B
        pixels[y * 320 * 4 + x * 4 + 3] = ... // A
    }
}
2. Colour Value
The max intensity of an 8-bit colour channel is 255; for example, an opaque red pixel would be (255, 0, 0, 255).
You're operating on the texture data directly; it would be better to draw it on a quad, which would yield good results.

Color picking in OpenGL

I've been trying to implement color picking and it just isn't working right. The problem is that if I initially paint my model in the different colors that are used for picking (I mean, I give each triangle a different color, which is its ID color), it works fine (without texture or anything), but if I put the texture on the model and only paint each triangle in a different color when the mouse is clicked, it doesn't work.
Here is the code:
public int selection(int x, int y) {
    GL11.glDisable(GL11.GL_LIGHTING);
    GL11.glDisable(GL11.GL_TEXTURE_2D);
    IntBuffer viewport = BufferUtils.createIntBuffer(16);
    ByteBuffer pixelbuff = BufferUtils.createByteBuffer(16);
    GL11.glGetInteger(GL11.GL_VIEWPORT, viewport);
    this.render(this.mesh);
    GL11.glReadPixels(x, y, 1, 1, GL11.GL_RGB, GL11.GL_UNSIGNED_BYTE, pixelbuff);
    for (int m = 0; m < 3; m++)
        System.out.println(pixelbuff.get(m));
    GL11.glEnable(GL11.GL_TEXTURE_2D);
    GL11.glEnable(GL11.GL_LIGHTING);
    return 0;
}
public void render(GL_Mesh m, boolean inPickingMode)
{
    GLMaterial[] materials = m.materials; // loaded from the .mtl file
    GLMaterial mtl;
    GL_Triangle t;
    int currMtl = -1;
    int i = 0;
    // draw all triangles in object
    for (i = 0; i < m.triangles.length; ) {
        t = m.triangles[i];
        // activate new material and texture
        currMtl = t.materialID;
        mtl = (materials != null && materials.length > 0 && currMtl >= 0) ? materials[currMtl] : defaultMtl;
        mtl.apply();
        GL11.glBindTexture(GL11.GL_TEXTURE_2D, mtl.textureHandle);
        // draw triangles until material changes
        for ( ; i < m.triangles.length && (t = m.triangles[i]) != null && currMtl == t.materialID; i++) {
            drawTriangle(t, i, inPickingMode);
        }
    }
}
private void drawTriangle(GL_Triangle t, int i, boolean inPickingMode) {
    if (inPickingMode) {
        byte[] triColor = this.triangleToColor(i);
        GL11.glColor3ub((byte)triColor[2], (byte)triColor[1], (byte)triColor[0]);
    }
    GL11.glBegin(GL11.GL_TRIANGLES);
    GL11.glTexCoord2f(t.uvw1.x, t.uvw1.y);
    GL11.glNormal3f(t.norm1.x, t.norm1.y, t.norm1.z);
    GL11.glVertex3f((float)t.p1.pos.x, (float)t.p1.pos.y, (float)t.p1.pos.z);
    GL11.glTexCoord2f(t.uvw2.x, t.uvw2.y);
    GL11.glNormal3f(t.norm2.x, t.norm2.y, t.norm2.z);
    GL11.glVertex3f((float)t.p2.pos.x, (float)t.p2.pos.y, (float)t.p2.pos.z);
    GL11.glTexCoord2f(t.uvw3.x, t.uvw3.y);
    GL11.glNormal3f(t.norm3.x, t.norm3.y, t.norm3.z);
    GL11.glVertex3f((float)t.p3.pos.x, (float)t.p3.pos.y, (float)t.p3.pos.z);
    GL11.glEnd();
}
As you can see, I have a selection function that's called every time the mouse is clicked. I disable the lighting and the texture, render the scene again in the unique colors, and then read the pixel buffer, and the call
GL11.glReadPixels(x, y, 1, 1, GL11.GL_RGB, GL11.GL_UNSIGNED_BYTE, pixelbuff);
gives me wrong values, and it's driving me nuts!
By the way, the main render function is render(GL_Mesh m, boolean inPickingMode), as you can see; you can also see that there is a texture on the model before the mouse click.
There are several problems with the example.
First, you're not clearing the color and depth buffer when clicking the mouse (that causes the scene with colored polygons to be mixed into the scene with textured polygons, and then it doesn't work). You need to call:
GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
Second, it is probably a bad idea to use materials when color-picking. I'm not familiar with the GLMaterial class, but it might enable GL_COLOR_MATERIAL or some other state that modifies the final color even when lighting is disabled. Try this:
if (!inPickingMode) { // === add this line ===
    // activate new material and texture
    currMtl = t.materialID;
    mtl = (materials != null && materials.length > 0 && currMtl >= 0) ? materials[currMtl] : defaultMtl;
    mtl.apply();
    GL11.glBindTexture(GL11.GL_TEXTURE_2D, mtl.textureHandle);
} // === and this line ===
Next, and this is not related to color picking, you call glBegin() far more often than necessary. You can call it in render(), before the triangle drawing loop (but that shouldn't change how the result looks):
GL11.glBegin(GL11.GL_TRIANGLES);
// draw triangles until material changes
for ( ; i < m.triangles.length && (t = m.triangles[i]) != null && currMtl == t.materialID; i++) {
    drawTriangle(t, i, inPickingMode);
}
GL11.glEnd();
--- Now I am answering a little beyond the original question ---
The thing about color picking is that the renderer has only a limited number of bits to represent the colors (as few as 5 bits per channel), so you need to use picking colors that do not rely on the bits that get thrown away. It might be a bad idea to do this on a mobile device.
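For example, with only 5 usable bits per channel, an encoder in the spirit of the question's triangleToColor() and its inverse could keep the id in the top bits of each channel (a sketch in C-style code, not the asker's actual implementation):
// pack a 15-bit triangle id (0..32767) into the top 5 bits of R, G and B
void triangleIdToColor(int id, unsigned char rgb[3])
{
    rgb[0] = (unsigned char)(((id >> 10) & 0x1F) << 3); // bits 14..10 -> R
    rgb[1] = (unsigned char)(((id >>  5) & 0x1F) << 3); // bits  9..5  -> G
    rgb[2] = (unsigned char)(( id        & 0x1F) << 3); // bits  4..0  -> B
}

// recover the id from the color read back with glReadPixels
int colorToTriangleId(const unsigned char rgb[3])
{
    return ((rgb[0] >> 3) << 10) | ((rgb[1] >> 3) << 5) | (rgb[2] >> 3);
}
Because only the top 5 bits of each channel carry information, the id survives even if the framebuffer quantizes each channel down to 5 bits.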
If your objects are simple enough (can be represented, say, by a sphere for picking), it might be a good idea to use raytracing for picking objects instead. It is pretty simple: the idea is that you take the inverse of the modelview-projection matrix and transform the points (mouse_x, mouse_y, -1) and (mouse_x, mouse_y, +1) by it, which gives you the position of the mouse at the near and at the far view plane, in object space. All you need to do is subtract them to get the direction of the ray (the origin is at the near plane), and you can pick your objects using this ray (google ray - sphere intersection).
float[] mvp = new float[16]; // this is your modelview-projection
float mouse_x, mouse_y; // those are mouse coordinates (in -1 to +1 range)
// inputs
float[] mvp_inverse = new float[16];
Matrix.invertM(mvp_inverse, 0, mvp, 0);
// inverse the matrix
float nearX = mvp_inverse[0 * 4 + 0] * mouse_x +
mvp_inverse[1 * 4 + 0] * mouse_y +
mvp_inverse[2 * 4 + 0] * -1 +
mvp_inverse[3 * 4 + 0];
float nearY = mvp_inverse[0 * 4 + 1] * mouse_x +
mvp_inverse[1 * 4 + 1] * mouse_y +
mvp_inverse[2 * 4 + 1] * -1 +
mvp_inverse[3 * 4 + 1];
float nearZ = mvp_inverse[0 * 4 + 2] * mouse_x +
mvp_inverse[1 * 4 + 2] * mouse_y +
mvp_inverse[2 * 4 + 2] * -1 +
mvp_inverse[3 * 4 + 2];
float nearW = mvp_inverse[0 * 4 + 3] * mouse_x +
mvp_inverse[1 * 4 + 3] * mouse_y +
mvp_inverse[2 * 4 + 3] * -1 +
mvp_inverse[3 * 4 + 3];
// transform the near point
nearX /= nearW;
nearY /= nearW;
nearZ /= nearW;
// dehomogenize the coordinate
float farX = mvp_inverse[0 * 4 + 0] * mouse_x +
mvp_inverse[1 * 4 + 0] * mouse_y +
mvp_inverse[2 * 4 + 0] * +1 +
mvp_inverse[3 * 4 + 0];
float farY = mvp_inverse[0 * 4 + 1] * mouse_x +
mvp_inverse[1 * 4 + 1] * mouse_y +
mvp_inverse[2 * 4 + 1] * +1 +
mvp_inverse[3 * 4 + 1];
float farZ = mvp_inverse[0 * 4 + 2] * mouse_x +
mvp_inverse[1 * 4 + 2] * mouse_y +
mvp_inverse[2 * 4 + 2] * +1 +
mvp_inverse[3 * 4 + 2];
float farW = mvp_inverse[0 * 4 + 3] * mouse_x +
mvp_inverse[1 * 4 + 3] * mouse_y +
mvp_inverse[2 * 4 + 3] * +1 +
mvp_inverse[3 * 4 + 3];
// transform the far point
farX /= farW;
farY /= farW;
farZ /= farW;
// dehomogenize the coordinate
float rayX = farX - nearX, rayY = farY - nearY, rayZ = farZ - nearZ;
// ray direction
float orgX = nearX, orgY = nearY, orgZ = nearZ;
// ray origin
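From there, the "ray - sphere intersection" the text refers to can look roughly like this (a sketch in C-style code; orgX/orgY/orgZ and rayX/rayY/rayZ are the origin and direction computed above, the sphere centre and radius are whatever bounding sphere you assign to each pickable object):
// needs #include <cmath>
// returns true and writes the ray parameter t if the ray org + t * ray hits a
// sphere of radius r centred at (cx, cy, cz)
bool raySphere(float orgX, float orgY, float orgZ,
               float rayX, float rayY, float rayZ,
               float cx, float cy, float cz, float r, float &t)
{
    // solve |org + t * ray - centre|^2 = r^2, a quadratic a*t^2 + b*t + c = 0
    float ox = orgX - cx, oy = orgY - cy, oz = orgZ - cz;
    float a = rayX * rayX + rayY * rayY + rayZ * rayZ;
    float b = 2.0f * (ox * rayX + oy * rayY + oz * rayZ);
    float c = ox * ox + oy * oy + oz * oz - r * r;
    float disc = b * b - 4.0f * a * c;
    if (disc < 0.0f)
        return false;                      // the ray misses the sphere
    float sq = std::sqrt(disc);
    float t0 = (-b - sq) / (2.0f * a);     // nearer of the two intersections
    float t1 = (-b + sq) / (2.0f * a);
    t = (t0 >= 0.0f) ? t0 : t1;            // prefer the hit in front of the origin
    return t >= 0.0f;
}
Test the ray against each pickable object's bounding sphere and keep the hit with the smallest t; that is the object under the mouse.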
And finally, a debugging suggestion: try to render with inPickingMode set to true so you can see what it is that you are actually drawing on screen. If you see texture or lighting, then something went wrong.