OpenGL: When to use Geometry Shaders - C++

I currently have an OpenGL sprite drawing class that buffers up a bunch of sprite data and then dumps it with glDrawElements. The problem is that creating the sprites that go into the buffer is cumbersome, as I have loads of parameters to pass into the buffer, with even more redundancy for the shaders. I was wondering if I could reduce CPU load by only loading the buffer with the essentials (location, orientation, texture coordinates, etc.) and then letting a geometry shader turn that into quads for the fragment shader.
If there's a different answer, I've added the offending method below so you can see what I mean:
void Machine::draw(key num, BoundingBox loc, float angle){
    SpriteSyncData* props;
    VertexAttribArray* vdata;
    GLushort* idata;
    SpriteProperties* sprite_props;
    int sliceW;
    int sliceH;
    sprite_props = &spriteList[num];
    props = &spriteToSync[sprite_props->atlas];
    props->size++;
    if(props->size > props->capacity){
        props->capacity += COARSE_MEM_SCALE;
        props->data = (VertexAttribArray*) realloc((void*) props->data, (sizeof(VertexAttribArray)*4) * props->capacity);
        props->i_data = (GLushort*) realloc((void*) props->i_data, (sizeof(GLushort)*4) * props->capacity);
    }
    vdata = props->data + (props->size - 1) * 4;
    idata = props->i_data + (props->size - 1) * 4;
    sliceW = sprite_props->location.x1 - sprite_props->location.x0;
    sliceH = sprite_props->location.y1 - sprite_props->location.y0;
    if(sprite_props->flags & DRAW_TILED){
        vdata[0].p = QVector3D(loc.x1, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y1, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x1, loc.y1, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(((float) (loc.x1 - loc.x0)) / sliceW,
                               ((float) (loc.y1 - loc.y0)) / sliceH);
        vdata[0].r = QVector2D(0, 0);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    else{
        vdata[0].p = QVector3D(loc.x0 + sliceW, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x0 + sliceW, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(1, 1);
        vdata[0].r = QVector2D(sliceW, sliceH);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    vdata[0].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[1]);
    vdata[1].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[2].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[3]);
    vdata[3].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[3]);
    vdata[1].s = vdata[0].s;
    vdata[2].s = vdata[0].s;
    vdata[3].s = vdata[0].s;
    vdata[0].s_lo = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[0].s_hi = QVector2D(sprite_props->texCoords[2] - sprite_props->texCoords[0],
                              sprite_props->texCoords[3] - sprite_props->texCoords[1]);
    vdata[1].s_lo = vdata[0].s_lo;
    vdata[1].s_hi = vdata[0].s_hi;
    vdata[2].s_lo = vdata[0].s_lo;
    vdata[2].s_hi = vdata[0].s_hi;
    vdata[3].s_lo = vdata[0].s_lo;
    vdata[3].s_hi = vdata[0].s_hi;
    vdata[0].o = (vdata[1].p + vdata[3].p) * 0.5;
    vdata[1].o = vdata[0].o;
    vdata[2].o = vdata[0].o;
    vdata[3].o = vdata[0].o;
    vdata[0].a = angle;
    vdata[1].a = angle;
    vdata[2].a = angle;
    vdata[3].a = angle;
    idata[0] = (props->size - 1) * 4;
    idata[1] = idata[0] + 1;
    idata[2] = idata[0] + 2;
    idata[3] = idata[0] + 3;
}
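For comparison, here is a minimal sketch (the struct name, field names, and layout are my own, not taken from the class above) of how little would need to be buffered per sprite if a geometry shader expanded each point into a quad:

struct SpritePoint {
    QVector3D center;   // quad centre and depth      (replaces p and o)
    QVector2D halfSize; // half width / half height of the quad
    QVector2D uvLo;     // atlas sub-rectangle origin (replaces s_lo)
    QVector2D uvHi;     // atlas sub-rectangle extent (replaces s_hi)
    float     angle;    // rotation about the centre  (replaces a)
};

With one SpritePoint per sprite the buffer could be drawn as GL_POINTS with glDrawArrays (no index buffer needed), and a geometry shader declared with points in and triangle_strip out, max_vertices = 4 would emit the four rotated corners, so the duplication and corner math would move off the CPU.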

Related

How to make OpenVR multiview rendering work?

I'm optimizing my already working VR rendering by implementing multiview rendering into my custom C++ engine, but can't get the other eye to render. Here's my vertex shader:
layout(set=0, binding=2) Buffer<float3> positions : register(b2);
VSOutput unlitVS( uint vertexId : SV_VertexID, uint viewId : SV_ViewID )
{
    VSOutput vsOut;
    vsOut.uv = uvs[ vertexId ];
    vsOut.pos = mul( data.localToClip[ viewId ], float4( positions[ vertexId ], 1 ) );
    return vsOut;
}
If I use viewId to select a transformation matrix, I'm getting an error:
ERROR: Vertex shader consumes input at location 0 but not provided
If I don't use viewId, my other eye doesn't render at all. Here's how I set up my framebuffer: it has 2 layers, and after rendering the left eye I copy the second layer to the right eye.
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 2;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.samples = sampleCount == 4 ? VK_SAMPLE_COUNT_4_BIT : VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.usage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
imageCreateInfo.flags = 0;
VkResult result = vkCreateImage( dev, &imageCreateInfo, nullptr, &outFramebufferDesc.image );
...
// Bit mask that specifies which view rendering is broadcast to.
// 0011 = Broadcast to first and second view (layer)
const uint32_t viewMask = 0b00000011;
// Bit mask that specifies correlation between views
// An implementation may use this for optimizations (concurrent render)
const uint32_t correlationMask = 0b00000011;
VkRenderPassMultiviewCreateInfo renderPassMultiviewCI{};
renderPassMultiviewCI.sType = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO;
renderPassMultiviewCI.subpassCount = 1;
renderPassMultiviewCI.pViewMasks = &viewMask;
renderPassMultiviewCI.correlationMaskCount = 1;
renderPassMultiviewCI.pCorrelationMasks = &correlationMask;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassCreateInfo.flags = 0;
renderPassCreateInfo.pNext = &renderPassMultiviewCI;
renderPassCreateInfo.attachmentCount = 2;
renderPassCreateInfo.pAttachments = &attachmentDescs[ 0 ];
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subPassCreateInfo;
renderPassCreateInfo.dependencyCount = 0;
renderPassCreateInfo.pDependencies = nullptr;
result = vkCreateRenderPass( dev, &renderPassCreateInfo, nullptr, &outFramebufferDesc.renderPass );
assert( result == VK_SUCCESS );
VkImageView attachments[ 2 ] = { outFramebufferDesc.imageView, outFramebufferDesc.depthStencilImageView };
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = nullptr;
framebufferCreateInfo.renderPass = outFramebufferDesc.renderPass;
framebufferCreateInfo.attachmentCount = 2;
framebufferCreateInfo.pAttachments = &attachments[ 0 ];
framebufferCreateInfo.width = width;
framebufferCreateInfo.height = height;
framebufferCreateInfo.layers = 1;
result = vkCreateFramebuffer( dev, &framebufferCreateInfo, nullptr, &outFramebufferDesc.framebuffer );
...
// After rendering the left eye:
VkImageCopy region = {};
region.extent.width = device.width;
region.extent.height = device.height;
region.extent.depth = 1;
region.srcSubresource.baseArrayLayer = 1;
region.srcSubresource.layerCount = 1;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.dstSubresource.layerCount = 1;
region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdCopyImage( gCurrentDrawCommandBuffer, device.fbDesc.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, device.fbDesc.imageCopy, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region );
Here's my VkPipelineVertexInputStateCreateInfo; I'm using programmable vertex pulling:
VkPipelineVertexInputStateCreateInfo inputState = {};
inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
How can I fix the error and get the right eye working? I'm using an HTC Vive, OpenVR 1.7.15, an AMD R9 Nano, and Vulkan 1.1 on Vulkan SDK 1.1.126.0, Windows 10. I have enabled the device extension VK_KHR_MULTIVIEW_EXTENSION_NAME.
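One detail worth double-checking, sketched below rather than taken from the question: with VK_KHR_multiview the render pass broadcasts to the layers selected by the view mask, so the colour attachment is normally bound through a 2D array image view covering both layers, while the framebuffer itself keeps layers = 1 (as in the code above). The variable names follow the question's snippets; this is a sanity-check sketch, not a confirmed fix.

// Sketch: a 2D-array view over both layers of the 2-layer colour image,
// so the multiview render pass can write one layer per eye.
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.image = outFramebufferDesc.image;
viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;            // not VK_IMAGE_VIEW_TYPE_2D
viewCreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewCreateInfo.subresourceRange.baseMipLevel = 0;
viewCreateInfo.subresourceRange.levelCount = 1;
viewCreateInfo.subresourceRange.baseArrayLayer = 0;
viewCreateInfo.subresourceRange.layerCount = 2;                    // one layer per eye
VkResult viewResult = vkCreateImageView( dev, &viewCreateInfo, nullptr, &outFramebufferDesc.imageView );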

What's the equivalent of pmdGet3DCoordinates for Intel RealSense 3D camera (SR300)?

I am trying to adapt code written for a PMD camera to an Intel RealSense 3D camera (SR300). However, I can't find a method that does the same thing in the RealSense SDK. Is there a hack for this, or what do you suggest?
pmdGet3DCoordinates(PMDHandle hnd, float * data, size_t size);
An example usage is:
float * cartesianDist = new float[nRows*nCols*3];
res = pmdGet3DCoordinates(hnd, cartesianDist, nCols*nRows*3*sizeof(float));
I need this function in order to create an xyzmap as in the code below:
int res = pmdGet3DCoordinates(hnd, dists, 3 * numPixels * sizeof(float));
xyzMap = cv::Mat(xyzMap.size(), xyzMap.type(), dists);
I have written the following code so far; I don't know if it makes any sense in terms of the Intel RealSense SDK. Please feel free to comment.
void SR300Camera::fillInZCoords()
{
    //int res = pmdGet3DCoordinates(hnd, dists, 3 * numPixels * sizeof(float)); //store x,y,z coordinates dists (type: float*)
    ////float * zCoords = new float[1]; //store z-Coordinates of dists in zCoords
    //xyzMap = cv::Mat(xyzMap.size(), xyzMap.type(), dists);
    Projection *projection = pp->QueryCaptureManager()->QueryDevice()->CreateProjection();
    std::vector<Point3DF32> vertices;
    vertices.resize(bufferSize.width * bufferSize.height);
    projection->QueryVertices(sample->depth, &vertices[0]);
    xyzBuffer.clear();
    for (int i = 0; i < bufferSize.width*bufferSize.height; i++) {
        //cv::Point3f p;
        //p.x = vertices[i].x;
        //p.y = vertices[i].y;
        //p.z = vertices[i].z;
        //xyzBuffer.push_back(p);
        xyzBuffer.push_back(cv::Point3f(vertices[i].x, vertices[i].y, vertices[i].z));
    }
    xyzMap = cv::Mat(xyzBuffer);
    projection->Release();
}
sts = projection->QueryVertices(depthMap, &pos3D[0]); is an almost exact equivalent and does the job of converting the depth UV map to a real-world xyz map.
Status sts = sm->AcquireFrame(true);
if (sts < STATUS_NO_ERROR) {
    if (sts == Status::STATUS_STREAM_CONFIG_CHANGED) {
        wprintf_s(L"Stream configuration was changed, re-initializing\n");
        sm->Close();
    }
}
sample = sm->QuerySample();
PXCImage *depthMap = sample->depth;
renderd.RenderFrame(sample->depth);
PXCImage::ImageData depthImage;
depthMap->AcquireAccess(PXCImage::ACCESS_READ, &depthImage);
PXCImage::ImageInfo imgInfo = depthMap->QueryInfo();
int depth_stride = depthImage.pitches[0] / sizeof(pxcU16);
PXCProjection * projection = device->CreateProjection();
pxcU16 *dpixels = (pxcU16*)depthImage.planes[0];
unsigned int dpitch = depthImage.pitches[0] / sizeof(pxcU16);
PXCPoint3DF32 *pos3D = new PXCPoint3DF32[num_pixels];
sts = projection->QueryVertices(depthMap, &pos3D[0]);
if (sts < Status::STATUS_NO_ERROR) {
    wprintf_s(L"Projection was unsuccessful! \n");
    sm->Close();
}
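To get from that vertex array to the cv::Mat xyz map used in the first snippet, something along these lines could work (a sketch, assuming num_pixels == imgInfo.width * imgInfo.height and that PXCPoint3DF32 is three tightly packed floats):

// Wrap the QueryVertices output in a 3-channel float Mat, then clone so
// the map owns its memory and pos3D can be freed.
cv::Mat xyzWrap(imgInfo.height, imgInfo.width, CV_32FC3, pos3D);
xyzMap = xyzWrap.clone();
delete[] pos3D;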

Camera matrix invalid when aligned with up vector

I'm currently implementing cameras in my engine and I'm having an issue when the camera looks straight down at the floor (for example, eye position (0, 0, 50) and target (0, 0, 0)) while my up vector is (0, 0, 1).
Then when I do the maths, the cross product of the up vector and the view direction gives (0, 0, 0), the view is broken, and nothing is rendered. If I move the camera, everything works as expected.
How can I solve this?
if (node==NULL || target==NULL) return;
node->Update();
eye=node->GetWorldMatrix()*GRPVECTOR(0.0,0.0,0.0); //converts from matrix to vector my vector transformation
target->Update();
obj=target->GetWorldMatrix()*GRPVECTOR(0.0,0.0,0.0);
GRPVECTOR ev;
GRPVECTOR z;
GRPVECTOR x_tmp;
GRPVECTOR x;
GRPVECTOR y;
ev = eye - obj;
ev.Normalize();
z=ev;
x_tmp.CrossProduct(&up,&z);
if (x_tmp.GetLengthf()==0.0f)
    return; //my view is screwed, I return
x_tmp.Normalize();
x=x_tmp;
y.CrossProduct(&z,&x);
this->viewmatrix.matrix[0][0] = x.vector[0];
this->viewmatrix.matrix[0][1] = y.vector[0];
this->viewmatrix.matrix[0][2] = z.vector[0];
this->viewmatrix.matrix[0][3] = 0.0f;
this->viewmatrix.matrix[1][0] = x.vector[1];
this->viewmatrix.matrix[1][1] = y.vector[1];
this->viewmatrix.matrix[1][2] = z.vector[1];
this->viewmatrix.matrix[1][3] = 0.0f;
this->viewmatrix.matrix[2][0] = x.vector[2];
this->viewmatrix.matrix[2][1] = y.vector[2];
this->viewmatrix.matrix[2][2] = z.vector[2];
this->viewmatrix.matrix[2][3] = 0.0f;
this->viewmatrix.matrix[3][0] = -x.vector[0] * eye.vector[0] + -x.vector[1] * eye.vector[1] + -x.vector[2] * eye.vector[2];
this->viewmatrix.matrix[3][1] = -y.vector[0] * eye.vector[0] + -y.vector[1] * eye.vector[1] + -y.vector[2] * eye.vector[2];
this->viewmatrix.matrix[3][2] = -z.vector[0] * eye.vector[0] + -z.vector[1] * eye.vector[1] + -z.vector[2] * eye.vector[2];
this->viewmatrix.matrix[3][3] = 1.0f;
GRPMATRIX Translate;
Translate.BuildTranslationMatrix(-obj.vector[0],-obj.vector[1],-obj.vector[2]);
this->viewmatrix.GetMulplicationMatrix(&this->viewmatrix,&Translate);
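One common way out, sketched below with a small stand-in Vec3 type rather than the engine's GRPVECTOR (the threshold and the fallback axis are arbitrary choices of mine, not from the question): when the view direction is nearly parallel to the preferred up vector, substitute another axis instead of aborting the view-matrix build.

#include <cmath>

// Minimal stand-in vector type, just for illustration.
struct Vec3 { float x, y, z; };

static Vec3 cross(const Vec3& a, const Vec3& b) {
    return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x };
}
static float length(const Vec3& v) { return std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z); }

// If the view direction is (nearly) parallel to the preferred up vector,
// the cross product degenerates; fall back to another axis instead of
// returning early from the view-matrix build.
Vec3 chooseUp(const Vec3& viewDir, const Vec3& preferredUp) {
    if (length(cross(preferredUp, viewDir)) > 1e-4f)
        return preferredUp;          // normal case: keep the configured up
    return Vec3{ 0.0f, 1.0f, 0.0f }; // degenerate case: pick any non-parallel axis
}

With the camera at (0, 0, 50) looking at the origin and up = (0, 0, 1), chooseUp would return (0, 1, 0), and the cross products in the code above would no longer collapse to zero.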

Build Tangent using Lengyel’s Method but not correct

Screenshot: http://1drv.ms/1C9fgyl
Please note that the normal buffer looks very strange. I use the method from this link: http://www.terathon.com/code/tangent.html
The fabric's tangents are very different between the left half and the right half.
I don't know what is happening.
I tested another Sponza scene and gave it to other people who use the same method to calculate the TBN, and they got the same issue.
What's the bug in my algorithm?
bool OBJModel_Loader::BuildTangent(std::vector<OBJModel* >& Objects)
{
    for (OBJModel* actor : Objects)
    {
        PracEngLogf("BuildTangent For Model: %s! \n", actor->Name.c_str());
        int cnt = actor->VertexBuffer.size();
        PEVector3* tan1 = new PEVector3[cnt * 2];
        PEVector3* tan2 = tan1 + cnt;
        ZeroMemory(tan1, cnt*sizeof(PEVector3) * 2);
        for (size_t i = 0; i < actor->FaceInfos.size(); i+=3)
        {
            int Idx1 = actor->FaceInfos[i].VertexIdx;
            int Idx2 = actor->FaceInfos[i + 1].VertexIdx;
            int Idx3 = actor->FaceInfos[i + 2].VertexIdx;
            const PEFloat3& v1 = actor->VertexBuffer[Idx1];
            const PEFloat3& v2 = actor->VertexBuffer[Idx2];
            const PEFloat3& v3 = actor->VertexBuffer[Idx3];
            const PEFloat2& w1 = actor->UVs[actor->FaceInfos[i].UVIdx];
            const PEFloat2& w2 = actor->UVs[actor->FaceInfos[i+1].UVIdx];
            const PEFloat2& w3 = actor->UVs[actor->FaceInfos[i+2].UVIdx];
            PEVector3 E1(v2.X - v1.X, v2.Y - v1.Y, v2.Z - v1.Z);
            PEVector3 E2(v3.X - v1.X, v3.Y - v1.Y, v3.Z - v1.Z);
            PEFloat2 St1 ( w2.X - w1.X, w2.Y - w1.Y );
            PEFloat2 St2 ( w3.X - w1.X, w3.Y - w1.Y );
            PEVector3 sDir;
            PEVector3 tDir;
            float r = St1.X*St2.Y - St2.X*St1.Y;
            if (fabs(r) < 1e-6f)
            {
                sDir = PEVector3(1.0f, .0f, .0f);
                tDir = PEVector3(.0f, 1.0f, .0f);
            }
            else
            {
                r = 1.0f / r;
                sDir = PEVector3((St2.Y*E1.x - St1.Y*E2.x)*r, (St2.Y*E1.y - St1.Y*E2.y)*r, (St2.Y*E1.z - St1.Y*E2.z)*r);
                tDir = PEVector3((St1.X*E2.x - St2.X*E1.x)*r, (St1.X*E2.y - St2.X*E1.y)*r, (St1.X*E2.z - St2.X*E1.z)*r);
            }
            tan1[Idx1] += sDir;
            tan1[Idx2] += sDir;
            tan1[Idx3] += sDir;
            tan2[Idx1] += tDir;
            tan2[Idx2] += tDir;
            tan2[Idx3] += tDir;
        }
        for (size_t i = 0; i < actor->FaceInfos.size(); i++)
        {
            int Idx = actor->FaceInfos[i].VertexIdx;
            const PEVector3& N = actor->VertexNormal[actor->FaceInfos[i].NormalIdx];
            const PEVector3& T = tan1[Idx];
            PEVector3 adjTangent = T - N * PEVector3::Dot(N, T);
            adjTangent.Normalize();
            actor->VertexTangent[Idx] = adjTangent;
            actor->VertexTangent[Idx].w = PEVector3::Dot(N.Cross(T), tan2[Idx]) < .0f ? -1.0f : 1.0f;
        }
        delete[] tan1;
    }
    return true;
}

3D picking in LWJGL

I have written some code to perform 3D picking that for some reason doesn't work entirely correctly. (I'm using LWJGL, just so you know.)
This is what the code looks like:
if(Mouse.getEventButton() == 1) {
    if (!Mouse.getEventButtonState()) {
        Camera.get().generateViewMatrix();
        float screenSpaceX = ((Mouse.getX()/800f/2f)-1.0f)*Camera.get().getAspectRatio();
        float screenSpaceY = 1.0f-(2*((600-Mouse.getY())/600f));
        float displacementRate = (float)Math.tan(Camera.get().getFovy()/2);
        screenSpaceX *= displacementRate;
        screenSpaceY *= displacementRate;
        Vector4f cameraSpaceNear = new Vector4f((float) (screenSpaceX * Camera.get().getNear()), (float) (screenSpaceY * Camera.get().getNear()), (float) (-Camera.get().getNear()), 1);
        Vector4f cameraSpaceFar = new Vector4f((float) (screenSpaceX * Camera.get().getFar()), (float) (screenSpaceY * Camera.get().getFar()), (float) (-Camera.get().getFar()), 1);
        Matrix4f tmpView = new Matrix4f();
        Camera.get().getViewMatrix().transpose(tmpView);
        Matrix4f invertedViewMatrix = (Matrix4f)tmpView.invert();
        Vector4f worldSpaceNear = new Vector4f();
        Matrix4f.transform(invertedViewMatrix, cameraSpaceNear, worldSpaceNear);
        Vector4f worldSpaceFar = new Vector4f();
        Matrix4f.transform(invertedViewMatrix, cameraSpaceFar, worldSpaceFar);
        Vector3f rayPosition = new Vector3f(worldSpaceNear.x, worldSpaceNear.y, worldSpaceNear.z);
        Vector3f rayDirection = new Vector3f(worldSpaceFar.x - worldSpaceNear.x, worldSpaceFar.y - worldSpaceNear.y, worldSpaceFar.z - worldSpaceNear.z);
        rayDirection.normalise();
        Ray clickRay = new Ray(rayPosition, rayDirection);
        Vector tMin = new Vector(), tMax = new Vector(), tempPoint;
        float largestEnteringValue, smallestExitingValue, temp, closestEnteringValue = Camera.get().getFar()+0.1f;
        Drawable closestDrawableHit = null;
        for(Drawable d : this.worldModel.getDrawableThings()) {
            // Calculate AABB for each object... needs to be moved later...
            firstVertex = true;
            for(Surface surface : d.getSurfaces()) {
                for(Vertex v : surface.getVertices()) {
                    worldPosition.x = (v.x+d.getPosition().x)*d.getScale().x;
                    worldPosition.y = (v.y+d.getPosition().y)*d.getScale().y;
                    worldPosition.z = (v.z+d.getPosition().z)*d.getScale().z;
                    worldPosition = worldPosition.rotate(d.getRotation());
                    if (firstVertex) {
                        maxX = worldPosition.x; maxY = worldPosition.y; maxZ = worldPosition.z;
                        minX = worldPosition.x; minY = worldPosition.y; minZ = worldPosition.z;
                        firstVertex = false;
                    } else {
                        if (worldPosition.x > maxX) {
                            maxX = worldPosition.x;
                        }
                        if (worldPosition.x < minX) {
                            minX = worldPosition.x;
                        }
                        if (worldPosition.y > maxY) {
                            maxY = worldPosition.y;
                        }
                        if (worldPosition.y < minY) {
                            minY = worldPosition.y;
                        }
                        if (worldPosition.z > maxZ) {
                            maxZ = worldPosition.z;
                        }
                        if (worldPosition.z < minZ) {
                            minZ = worldPosition.z;
                        }
                    }
                }
            }
            // ray/slabs intersection test...
            // clickRay.getOrigin().x + clickRay.getDirection().x * f = minX
            // clickRay.getOrigin().x - minX = -clickRay.getDirection().x * f
            // clickRay.getOrigin().x/-clickRay.getDirection().x - minX/-clickRay.getDirection().x = f
            // -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x = f
            largestEnteringValue = -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x;
            temp = -clickRay.getOrigin().y/clickRay.getDirection().y + minY/clickRay.getDirection().y;
            if(largestEnteringValue < temp) {
                largestEnteringValue = temp;
            }
            temp = -clickRay.getOrigin().z/clickRay.getDirection().z + minZ/clickRay.getDirection().z;
            if(largestEnteringValue < temp) {
                largestEnteringValue = temp;
            }
            smallestExitingValue = -clickRay.getOrigin().x/clickRay.getDirection().x + maxX/clickRay.getDirection().x;
            temp = -clickRay.getOrigin().y/clickRay.getDirection().y + maxY/clickRay.getDirection().y;
            if(smallestExitingValue > temp) {
                smallestExitingValue = temp;
            }
            temp = -clickRay.getOrigin().z/clickRay.getDirection().z + maxZ/clickRay.getDirection().z;
            if(smallestExitingValue < temp) {
                smallestExitingValue = temp;
            }
            if(largestEnteringValue > smallestExitingValue) {
                //System.out.println("Miss!");
            } else {
                if (largestEnteringValue < closestEnteringValue) {
                    closestEnteringValue = largestEnteringValue;
                    closestDrawableHit = d;
                }
            }
        }
        if(closestDrawableHit != null) {
            System.out.println("Hit at: (" + clickRay.setDistance(closestEnteringValue).x + ", " + clickRay.getCurrentPosition().y + ", " + clickRay.getCurrentPosition().z);
            this.worldModel.removeDrawableThing(closestDrawableHit);
        }
    }
}
I just don't understand what's wrong. The ray is shooting and I do hit stuff that gets removed, but the result of the ray is very strange: it sometimes removes the thing I'm clicking at, sometimes it removes things that aren't even close to what I'm clicking at, and sometimes it removes nothing at all.
Edit:
Okay, so I have continued searching for errors, and by debugging the ray (by painting small dots where it travels) I can now see that there is something obviously wrong with the ray that I'm sending out: it has its origin near the world center and always shoots to the same position no matter where I direct my camera.
My initial thought is that there might be some error in the way I calculate my viewMatrix (since it's not possible to get the view matrix from the gluLookAt method in LWJGL, I have to build it myself, and I guess that's where the problem is).
Edit 2:
This is how I calculate it currently:
private double[][] viewMatrixDouble = {{0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,1}};

public Vector getCameraDirectionVector() {
    Vector actualEye = this.getActualEyePosition();
    return new Vector(lookAt.x-actualEye.x, lookAt.y-actualEye.y, lookAt.z-actualEye.z);
}

public Vector getActualEyePosition() {
    return eye.rotate(this.getRotation());
}

public void generateViewMatrix() {
    Vector cameraDirectionVector = getCameraDirectionVector().normalize();
    Vector side = Vector.cross(cameraDirectionVector, this.upVector).normalize();
    Vector up = Vector.cross(side, cameraDirectionVector);
    viewMatrixDouble[0][0] = side.x; viewMatrixDouble[0][1] = up.x; viewMatrixDouble[0][2] = -cameraDirectionVector.x;
    viewMatrixDouble[1][0] = side.y; viewMatrixDouble[1][1] = up.y; viewMatrixDouble[1][2] = -cameraDirectionVector.y;
    viewMatrixDouble[2][0] = side.z; viewMatrixDouble[2][1] = up.z; viewMatrixDouble[2][2] = -cameraDirectionVector.z;
    /*
    Vector actualEyePosition = this.getActualEyePosition();
    Vector zaxis = new Vector(this.lookAt.x - actualEyePosition.x, this.lookAt.y - actualEyePosition.y, this.lookAt.z - actualEyePosition.z).normalize();
    Vector xaxis = Vector.cross(upVector, zaxis).normalize();
    Vector yaxis = Vector.cross(zaxis, xaxis);
    viewMatrixDouble[0][0] = xaxis.x; viewMatrixDouble[0][1] = yaxis.x; viewMatrixDouble[0][2] = zaxis.x;
    viewMatrixDouble[1][0] = xaxis.y; viewMatrixDouble[1][1] = yaxis.y; viewMatrixDouble[1][2] = zaxis.y;
    viewMatrixDouble[2][0] = xaxis.z; viewMatrixDouble[2][1] = yaxis.z; viewMatrixDouble[2][2] = zaxis.z;
    viewMatrixDouble[3][0] = -Vector.dot(xaxis, actualEyePosition); viewMatrixDouble[3][1] =-Vector.dot(yaxis, actualEyePosition); viewMatrixDouble[3][2] = -Vector.dot(zaxis, actualEyePosition);
    */
    viewMatrix = new Matrix4f();
    viewMatrix.load(getViewMatrixAsFloatBuffer());
}
I would be very grateful if anyone could verify whether this is right or wrong, and if it's wrong, supply me with the right way of doing it.
I have read a lot of threads and documentation about this but I can't seem to wrap my head around it.
I just don't understand what's wrong: the ray is shooting and I do hit stuff that gets removed, but things are not disappearing where I press on the screen.
OpenGL is not a scene graph; it's a drawing library. So after removing something from your internal representation you must redraw the scene, and your code is missing a call to a function that triggers a redraw.
Okay, so I finally solved it with help from the guys at gamedev and a friend; here is a link to the answer where I have posted the code!