vtkResliceImageViewer: get the bounds of the image in the view - C++

Situation:
I'm using vtkResliceImageViewer to display the three MPR views, based on this example, so I can rotate the cursors to generate a new view in all three screens.
Question:
I want to know the bounds of my image in the viewer, even after rotating some of the cursors.
Note:
I tried to get the values from the image actor and ask the renderer for the display positions, but this only works when there has been no interaction from the other cursors.
Example for axial:
void UpdatePointWordToViewer(vtkRenderer* rend, double p[4])
{
    rend->SetWorldPoint(p);
    rend->WorldToDisplay();
    rend->GetDisplayPoint(p);
}
void UpdateBoxAxial()
{
    auto bounds = pMPR->GetViewer(2)->GetImageActor()->GetBounds();
    auto ren = pMPR->GetViewer(2)->GetRenderer();
    auto size = pMPR->GetViewer(2)->GetInteractor()->GetSize();
    double pIni1[]{ 0,0,0,1 };
    double pIni2[]{ 0,0,0,1 };
    double pIni3[]{ 0,0,0,1 };
    double pIni4[]{ 0,0,0,1 };
    double pConvert1[]{ 0,0,0 };
    double pConvert2[]{ 0,0,0 };
    //check the bounds
    pIni1[0] = bounds[0];
    pIni1[1] = bounds[3];
    pIni1[2] = bounds[4];
    pIni2[0] = bounds[1];
    pIni2[1] = bounds[3];
    pIni2[2] = bounds[4];
    pIni3[0] = bounds[0];
    pIni3[1] = bounds[2];
    pIni3[2] = bounds[4];
    pIni4[0] = bounds[1];
    pIni4[1] = bounds[2];
    pIni4[2] = bounds[4];
    //convert the points to viewer coordinates
    UpdatePointWordToViewer(ren, pIni1);
    UpdatePointWordToViewer(ren, pIni2);
    UpdatePointWordToViewer(ren, pIni3);
    UpdatePointWordToViewer(ren, pIni4);
    //P1
    if (pIni1[0] < pIni3[0])
        pConvert1[0] = pIni1[0];
    else
        pConvert1[0] = pIni3[0];
    if (pIni3[1] < pIni4[1])
        pConvert1[1] = pIni3[1];
    else
        pConvert1[1] = pIni4[1];
    //P2
    if (pIni2[0] > pIni4[0])
        pConvert2[0] = pIni2[0];
    else
        pConvert2[0] = pIni4[0];
    if (pIni1[1] > pIni2[1])
        pConvert2[1] = pIni1[1];
    else
        pConvert2[1] = pIni2[1];
}
So I know the minimum point (bottom-left) and the maximum point (top-right).
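One way to keep this working after the reslice cursors have been rotated is to project every corner of the actor bounds, not just the four corners of one face, and take the min/max of the resulting display coordinates. Below is a minimal sketch of that idea (not tested against vtkResliceImageViewer specifically); it reuses the same SetWorldPoint/WorldToDisplay/GetDisplayPoint conversion, and UpdateBoxFromAllCorners is a name introduced here for illustration:

#include <algorithm>
#include <limits>

// Sketch: compute the display-space bounding box of an actor by projecting all
// eight corners of its world-space bounds through the renderer.
void UpdateBoxFromAllCorners(vtkRenderer* ren, const double bounds[6],
                             double pMin[2], double pMax[2])
{
    pMin[0] = pMin[1] = std::numeric_limits<double>::max();
    pMax[0] = pMax[1] = std::numeric_limits<double>::lowest();
    for (int ix = 0; ix < 2; ++ix)
        for (int iy = 0; iy < 2; ++iy)
            for (int iz = 0; iz < 2; ++iz)
            {
                double p[4] = { bounds[ix], bounds[2 + iy], bounds[4 + iz], 1.0 };
                ren->SetWorldPoint(p);
                ren->WorldToDisplay();
                ren->GetDisplayPoint(p);
                pMin[0] = std::min(pMin[0], p[0]);
                pMin[1] = std::min(pMin[1], p[1]);
                pMax[0] = std::max(pMax[0], p[0]);
                pMax[1] = std::max(pMax[1], p[1]);
            }
}

Calling it with pMPR->GetViewer(2)->GetImageActor()->GetBounds() and the viewer's renderer gives the bottom-left and top-right display points in one pass; if the actor bounds themselves no longer describe the visible image after an oblique reslice, the same projection can be applied to whatever world-space bounds you trust instead.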

Related

My C++ code is not detecting objects correctly (YOLOv5)

I have a YOLOv5 ONNX file that I trained on apples and bananas. I was using Python until today, but I decided to switch to C++ to gain some speed. I get correct results when I use YOLOv5's own ONNX files and image in the code I added below, but when I use my own ONNX file and my test image it gives me the wrong result. You can also find the attached image. What is the problem here?
// Include Libraries.
#include <opencv2/opencv.hpp>
#include <fstream>
// Namespaces.
using namespace cv;
using namespace std;
using namespace cv::dnn;
// Constants.
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.3;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.65;
// Text parameters.
const float FONT_SCALE = 0.7;
const int FONT_FACE = FONT_HERSHEY_SIMPLEX;
const int THICKNESS = 1;
// Colors.
Scalar BLACK = Scalar(0,0,0);
Scalar BLUE = Scalar(255, 178, 50);
Scalar YELLOW = Scalar(0, 255, 255);
Scalar RED = Scalar(0,0,255);
// Draw the predicted bounding box.
void draw_label(Mat& input_image, string label, int left, int top)
{
// Display the label at the top of the bounding box.
int baseLine;
Size label_size = getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS, &baseLine);
top = max(top, label_size.height);
// Top left corner.
Point tlc = Point(left, top);
// Bottom right corner.
Point brc = Point(left + label_size.width, top + label_size.height + baseLine);
// Draw black rectangle.
rectangle(input_image, tlc, brc, BLACK, FILLED);
// Put the label on the black rectangle.
putText(input_image, label, Point(left, top + label_size.height), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS);
}
vector<Mat> pre_process(Mat &input_image, Net &net)
{
// Convert to blob.
Mat blob;
blobFromImage(input_image, blob, 1./255., Size(INPUT_WIDTH, INPUT_HEIGHT), Scalar(), true, false);
net.setInput(blob);
// Forward propagate.
vector<Mat> outputs;
net.forward(outputs, net.getUnconnectedOutLayersNames());
return outputs;
}
Mat post_process(Mat &input_image, vector<Mat> &outputs, const vector<string> &class_name)
{
// Initialize vectors to hold respective outputs while unwrapping detections.
vector<int> class_ids;
vector<float> confidences;
vector<Rect> boxes;
// Resizing factor.
float x_factor = input_image.cols / INPUT_WIDTH;
float y_factor = input_image.rows / INPUT_HEIGHT;
float *data = (float *)outputs[0].data;
const int dimensions = 85;
const int rows = 25200;
// Iterate through 25200 detections.
for (int i = 0; i < rows; ++i)
{
float confidence = data[4];
// Discard bad detections and continue.
if (confidence >= CONFIDENCE_THRESHOLD)
{
float * classes_scores = data + 5;
// Create a 1x85 Mat and store class scores of 80 classes.
Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
// Perform minMaxLoc and acquire index of best class score.
Point class_id;
double max_class_score;
minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
// Continue if the class score is above the threshold.
if (max_class_score > SCORE_THRESHOLD)
{
// Store class ID and confidence in the pre-defined respective vectors.
confidences.push_back(confidence);
class_ids.push_back(class_id.x);
// Center.
float cx = data[0];
float cy = data[1];
// Box dimension.
float w = data[2];
float h = data[3];
// Bounding box coordinates.
int left = int((cx - 0.5 * w) * x_factor);
int top = int((cy - 0.5 * h) * y_factor);
int width = int(w * x_factor);
int height = int(h * y_factor);
// Store good detections in the boxes vector.
boxes.push_back(Rect(left, top, width, height));
}
}
// Jump to the next detection (the next row of 85 values).
data += 85;
}
// Perform Non Maximum Suppression and draw predictions.
vector<int> indices;
NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
for (int i = 0; i < indices.size(); i++)
{
int idx = indices[i];
Rect box = boxes[idx];
int left = box.x;
int top = box.y;
int width = box.width;
int height = box.height;
// Draw bounding box.
rectangle(input_image, Point(left, top), Point(left + width, top + height), BLUE, 3*THICKNESS);
// Get the label for the class name and its confidence.
string label = format("%.2f", confidences[idx]);
label = class_name[class_ids[idx]] + ":" + label;
// Draw class labels.
draw_label(input_image, label, left, top);
//cout<<"The Value is "<<label;
//cout<<endl;
}
return input_image;
}
int main()
{
vector<string> class_list;
ifstream ifs("/Users/admin/Documents/C++/First/obj.names");
string line;
while (getline(ifs, line))
{
class_list.push_back(line);
}
// Load image.
Mat frame;
frame = imread("/Users/admin/Documents/C++/First/test.jpg");
// Load model.
Net net;
net = readNet("/Users/admin/Documents/C++/First/my.onnx");
vector<Mat> detections;
detections = pre_process(frame, net);
Mat img = post_process(frame, detections, class_list);
//Mat img = post_process(frame.clone(), detections, class_list);
// Put efficiency information.
// The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
string label = format("Inference time : %.2f ms", t);
putText(img, label, Point(20, 40), FONT_FACE, FONT_SCALE, RED);
imshow("Output", img);
waitKey(0);
return 0;
}
The photos I use are 640x480. I played around with the size of the photo, thinking it might be related, but the same problem persisted.
The YOLOv5 output format is xyxy, as can be seen here:
https://github.com/ultralytics/yolov5/blob/bfa1f23045c7c4136a9b8ced9d6be8249ed72692/detect.py#L161
not xywh as you are assuming in your code.
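Independent of the coordinate format, one thing worth checking with a custom model is the hard-coded stride of 85 floats per detection: a standard YOLOv5 export emits 5 + num_classes values per row, so a two-class apple/banana model produces 7 columns, not 85, and walking the buffer with data += 85 would then read garbage. A small sketch (reusing frame, net and pre_process from the code above) of how you could inspect the output blob and derive the stride instead of assuming it:

// Sketch: inspect the DNN output blob instead of hard-coding 25200 x 85.
// For a standard YOLOv5 ONNX export the blob shape is 1 x rows x (5 + num_classes).
vector<Mat> outputs = pre_process(frame, net);
const Mat &out = outputs[0];
cout << "dims: " << out.dims << endl;      // expected: 3
cout << "rows: " << out.size[1] << endl;   // e.g. 25200 for a 640x640 input
cout << "cols: " << out.size[2] << endl;   // 5 + num_classes (7 for two classes)
const int dimensions = out.size[2];        // use this instead of the literal 85
const int rows = out.size[1];
// ...and advance the pointer with data += dimensions; inside the detection loop.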

Rotating an image causes loss of quality

I have this code that rotates an image, but the result is an image with a transparency effect (https://pixs.ru/image/G82BPR - like this) that accumulates with each call to this function. I want the output image in its original form, only rotated, and I have no idea what to do. Thanks if you have any advice.
bool rotate_sticker(GdkEventScroll* scroll, const unsigned int inx) {
    Gtk::Image* img = dynamic_cast<Gtk::Image*>(small_stickers[inx]->get_child());
    Glib::RefPtr<Gdk::Pixbuf> old_pixbuf = img->get_pixbuf();
    Glib::RefPtr<Gdk::Window> win = small_stickers[inx]->get_window();
    Cairo::RefPtr<Cairo::Region> region = Cairo::Region::create();
    Glib::RefPtr<Gdk::DrawingContext> draw_context = win->begin_draw_frame(region);
    const double degree = 5;
    const double w = 135;
    const double h = 135;
    Cairo::RefPtr<Cairo::ImageSurface> surface = Cairo::ImageSurface::create(Cairo::Format::FORMAT_ARGB32, w, h);
    Cairo::RefPtr<Cairo::Context> cr = Cairo::Context::create(surface);
    //rotate by central point
    cr->translate(w * 0.5, h * 0.5);
    cr->rotate_degrees(degree);
    cr->translate(-0.5 * w, -0.5 * h);
    Gdk::Cairo::set_source_pixbuf(cr, old_pixbuf);
    cr->paint();
    win->end_draw_frame(draw_context);
    img->set(Gdk::Pixbuf::create(cr->get_target(), 0, 0, w, h));
    return true;
}
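The accumulation most likely comes from rotating the already-rotated pixbuf each time: every call resamples the previous result, so the edges degrade and the transparent corners pile up. Below is a sketch of one way around that, assuming you keep the untouched source pixbuf and a running angle per sticker (original_pixbufs and total_degrees are hypothetical members added for the example):

// Sketch: re-render from the original pixbuf every time instead of rotating the
// result of the previous rotation, so no resampling error accumulates.
bool rotate_sticker(GdkEventScroll* scroll, const unsigned int inx) {
    Gtk::Image* img = dynamic_cast<Gtk::Image*>(small_stickers[inx]->get_child());
    total_degrees[inx] += 5.0;                    // hypothetical member: accumulated angle
    const int w = 135;
    const int h = 135;
    Cairo::RefPtr<Cairo::ImageSurface> surface =
        Cairo::ImageSurface::create(Cairo::Format::FORMAT_ARGB32, w, h);
    Cairo::RefPtr<Cairo::Context> cr = Cairo::Context::create(surface);
    // rotate around the centre, then paint the untouched source pixbuf
    cr->translate(w * 0.5, h * 0.5);
    cr->rotate_degrees(total_degrees[inx]);
    cr->translate(-0.5 * w, -0.5 * h);
    Gdk::Cairo::set_source_pixbuf(cr, original_pixbufs[inx]);  // hypothetical member: source image
    cr->paint();
    img->set(Gdk::Pixbuf::create(surface, 0, 0, w, h));
    return true;
}

The begin_draw_frame/end_draw_frame pair is dropped here because the pixbuf is rendered off-screen and handed to the Gtk::Image, which redraws itself.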

gluPartialDisk in osg/osgEarth

I've been trying to create an OpenGL gluDisk-like object in osgEarth. So far I've attempted the following (edited: this is the correct answer):
void ViewDriver::drawCircZone(double lat, double lon, double innerRadius, double outerRadius, QColor color, double beginAngle, double endAngle){
    GeometryFactory g;
    osg::ref_ptr<osgEarth::Geometry> outerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(outerRadius), beginAngle, endAngle);
    osg::ref_ptr<osgEarth::Geometry> innerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(innerRadius), beginAngle, endAngle);
    osg::Vec3dArray* outerCircArray = outerCircleGeom->createVec3dArray();
    osg::Vec3dArray* innerCircArray = innerCircleGeom->createVec3dArray();
    Vec3dVector* diskVec = new Vec3dVector;
    for(int i = 0; i < outerCircArray->size() - 1; i++){
        diskVec->push_back((*outerCircArray)[i]);
    }
    //This is important for closing the shape and not giving it a Pac-Man-like mouth
    diskVec->push_back((*outerCircArray)[0]);
    //This is how you make a "hole", by iterating backwards
    for(int i = innerCircArray->size() - 1; i >= 0; i--){
        diskVec->push_back((*innerCircArray)[i]);
    }
    osg::ref_ptr<osgEarth::Symbology::Ring> diskRing = new Ring(diskVec);
    diskRing->close();
    osg::ref_ptr<Feature> circFeature = new Feature(diskRing, view->getMapViewer()->geoSRS);
    Style circStyle;
    circStyle.getOrCreate<PolygonSymbol>()->outline() = true;
    circStyle.getOrCreate<PolygonSymbol>()->fill()->color() = Color(color.red()/255.0, color.green()/255.0, color.blue()/255.0, 1.0);
    circStyle.getOrCreate<AltitudeSymbol>()->clamping() = AltitudeSymbol::CLAMP_RELATIVE_TO_TERRAIN;
    circStyle.getOrCreate<AltitudeSymbol>()->technique() = AltitudeSymbol::TECHNIQUE_DRAPE;
    osg::ref_ptr<FeatureNode> circNode = new FeatureNode(circFeature, circStyle);
    circNode->setDynamic(true);
    view->getMapNode()->addChild(circNode);
}
What stumped me originally was that I don't have a lot of graphics knowledge. Somewhere I read that outlines should be drawn in a clockwise direction, and that outlines drawn in a counter-clockwise direction will "cut out" or create a "hole" when combined with clockwise-drawn points. When I originally tested that method I was filling the "hole" outline with the smaller circle's points in a clockwise direction, which is why it didn't work.

OpenCASCADE BRepFeat_MakeCylindricalHole() function

Currently I'm working on a project to drill cylindrical holes into a given shape.
To do that I used the BRepFeat_MakeCylindricalHole() function with Standard_EXPORT void PerformThruNext (const Standard_Real Radius, const Standard_Boolean WithControl = Standard_True);
because in this scenario I don't have the depth of the cylinder. Using PerformThruNext, it is drilled from the top face to the bottom face.
The user will provide the start and the end points, and holes will be drilled using those.
1. When the start and end points have a decimal fraction part (gp_Pnt StartPnt(328.547, -4.325, 97.412); gp_Pnt EndPnt(291.841, -7.586, 101.651);), it doesn't drill the face correctly.
2. When the diameter gets to a higher value, the same thing happens.
Following is my code; any help would be highly appreciated. Thanks!
void COCCAPPDoc::OnBox2()
{
ofstream out;
out.open("C:/Users/Dell/Desktop/Blade/areaInit.txt");
CString str;
TopoDS_Shape bladeWithCore,DrilledShape,DrilledShape2;
/* IMPORT bladeWithCore.brep FILE */
Standard_CString strFile = "C:/Users/Dell/Desktop/Blade/bladeWithCore.brep";
BRep_Builder brepS;
BRepTools::Read(bladeWithCore,strFile,brepS);
TopoDS_Face Face_52;
Standard_CString strFace = "C:/Users/Dell/Desktop/Blade/Face_52.brep";
BRep_Builder brepF;
BRepTools::Read(Face_52,strFace,brepF);
BRepClass3d_SolidClassifier classify;
classify.Load(bladeWithCore);
classify.PerformInfinitePoint(1.0e-6);
TopAbs_State state = classify.State();
if(state==TopAbs_IN) bladeWithCore.Reverse();
DrilledShape = bladeWithCore;
gp_Vec v1(gp_Pnt( 330,11,108),gp_Pnt(291,4,109)); //FINDING VECTOR FROM START TO THE END POINT
gp_Pnt point(330,11,108);
gp_Pnt startpoint (330,11,108);
TopoDS_Vertex aV1 = BRepBuilderAPI_MakeVertex (startpoint);
BRepTools::Write(aV1,"C:/Users/Dell/Desktop/Blade/startpoint.brep");
/* gp_Vec v1(gp_Pnt( 330.208114,11.203318,108.480255),gp_Pnt(291.103558,4.265285,109.432663));
gp_Pnt point(330.208114,11.203318,108.480255);*/
gp_Dir d1(v1); //GETTING DIRECTION USING FOUND VECTOR
gp_Vec UnitVector(d1); //FINDING UNITVECTOR
gp_Dir dir_line(0,0,1);
gp_Vec UnitVector_line(dir_line);
gp_Pnt minPoint;
Standard_Real diameter = 0.1;
int noOfInstances =3;
int gap = -10;
for(int i = 0; i < noOfInstances; i++) {
point.Translate(UnitVector);
gp_Pnt newPoint = point;
newPoint.Translate(UnitVector_line*10);
BRepBuilderAPI_MakeEdge LINE(newPoint, point);
BRepBuilderAPI_MakeWire lineWire(LINE);
TopoDS_Wire wire = TopoDS::Wire(lineWire);
TopoDS_Shape lineShape = TopoDS_Shape(wire);
BRepTools::Write(lineShape,"C:/Users/Dell/Desktop/Blade/linlineShapee.brep");
intersection( bladeWithCore,lineShape, point, minPoint);
TopoDS_Vertex checkPoint = BRepBuilderAPI_MakeVertex (minPoint);
BRepTools::Write(checkPoint,"C:/Users/Dell/Desktop/Blade/checkPoint.brep");
gp_Ax1 location =gp_Ax1(minPoint,gp_Dir(0,0,1));
BRepFeat_MakeCylindricalHole drilled;
drilled.Init(DrilledShape,location);
drilled.PerformThruNext(diameter/2.0);
BRepFeat_Status status = drilled.Status();
if(status==BRepFeat_NoError)
{
DrilledShape = drilled.Shape();
BRepTools::Write(DrilledShape,"C:/Users/Dell/Desktop/Blade/bladeWithCoreDrilled.brep");
}
}
Handle(AIS_InteractiveObject) obj3 = (new AIS_Shape(DrilledShape));
myAISContext->Display(obj3);
COCCAPPDoc:: Fit();
}
void COCCAPPDoc::intersection(TopoDS_Shape bladeWithCore,TopoDS_Shape lineShape,gp_Pnt point,gp_Pnt& minPoint)
{
BRepExtrema_DistShapeShape interSection( bladeWithCore, lineShape, Extrema_ExtFlag_MIN, Extrema_ExtAlgo_Grad);
interSection.Perform();
Standard_Integer Solutions = interSection.NbSolution();
CArray<double, double> disArray;
int minindex = 1;
for(int i = 1; i <= Solutions; i++) {
double distance = point.Distance((interSection.PointOnShape1(i)));
disArray.Add(distance);
}
for(int i = 1; i < Solutions; i++) {
if(disArray[0] > disArray[i])
{
disArray[0] = disArray[i];
minindex = i;
}
}
minPoint = interSection.PointOnShape1(minindex);
TopoDS_Vertex aV = BRepBuilderAPI_MakeVertex (minPoint);
BRepTools::Write(aV,"C:/Users/Dell/Desktop/Blade/minPoint.brep");
}

3D picking lwjgl

I have written some code to perform 3D picking that for some reason doesn't work entirely correctly! (I'm using LWJGL, just so you know.)
This is what the code looks like:
if(Mouse.getEventButton() == 1) {
if (!Mouse.getEventButtonState()) {
Camera.get().generateViewMatrix();
float screenSpaceX = ((Mouse.getX()/800f/2f)-1.0f)*Camera.get().getAspectRatio();
float screenSpaceY = 1.0f-(2*((600-Mouse.getY())/600f));
float displacementRate = (float)Math.tan(Camera.get().getFovy()/2);
screenSpaceX *= displacementRate;
screenSpaceY *= displacementRate;
Vector4f cameraSpaceNear = new Vector4f((float) (screenSpaceX * Camera.get().getNear()), (float) (screenSpaceY * Camera.get().getNear()), (float) (-Camera.get().getNear()), 1);
Vector4f cameraSpaceFar = new Vector4f((float) (screenSpaceX * Camera.get().getFar()), (float) (screenSpaceY * Camera.get().getFar()), (float) (-Camera.get().getFar()), 1);
Matrix4f tmpView = new Matrix4f();
Camera.get().getViewMatrix().transpose(tmpView);
Matrix4f invertedViewMatrix = (Matrix4f)tmpView.invert();
Vector4f worldSpaceNear = new Vector4f();
Matrix4f.transform(invertedViewMatrix, cameraSpaceNear, worldSpaceNear);
Vector4f worldSpaceFar = new Vector4f();
Matrix4f.transform(invertedViewMatrix, cameraSpaceFar, worldSpaceFar);
Vector3f rayPosition = new Vector3f(worldSpaceNear.x, worldSpaceNear.y, worldSpaceNear.z);
Vector3f rayDirection = new Vector3f(worldSpaceFar.x - worldSpaceNear.x, worldSpaceFar.y - worldSpaceNear.y, worldSpaceFar.z - worldSpaceNear.z);
rayDirection.normalise();
Ray clickRay = new Ray(rayPosition, rayDirection);
Vector tMin = new Vector(), tMax = new Vector(), tempPoint;
float largestEnteringValue, smallestExitingValue, temp, closestEnteringValue = Camera.get().getFar()+0.1f;
Drawable closestDrawableHit = null;
for(Drawable d : this.worldModel.getDrawableThings()) {
// Calculate AABB for each object... needs to be moved later...
firstVertex = true;
for(Surface surface : d.getSurfaces()) {
for(Vertex v : surface.getVertices()) {
worldPosition.x = (v.x+d.getPosition().x)*d.getScale().x;
worldPosition.y = (v.y+d.getPosition().y)*d.getScale().y;
worldPosition.z = (v.z+d.getPosition().z)*d.getScale().z;
worldPosition = worldPosition.rotate(d.getRotation());
if (firstVertex) {
maxX = worldPosition.x; maxY = worldPosition.y; maxZ = worldPosition.z;
minX = worldPosition.x; minY = worldPosition.y; minZ = worldPosition.z;
firstVertex = false;
} else {
if (worldPosition.x > maxX) {
maxX = worldPosition.x;
}
if (worldPosition.x < minX) {
minX = worldPosition.x;
}
if (worldPosition.y > maxY) {
maxY = worldPosition.y;
}
if (worldPosition.y < minY) {
minY = worldPosition.y;
}
if (worldPosition.z > maxZ) {
maxZ = worldPosition.z;
}
if (worldPosition.z < minZ) {
minZ = worldPosition.z;
}
}
}
}
// ray/slabs intersection test...
// clickRay.getOrigin().x + clickRay.getDirection().x * f = minX
// clickRay.getOrigin().x - minX = -clickRay.getDirection().x * f
// clickRay.getOrigin().x/-clickRay.getDirection().x - minX/-clickRay.getDirection().x = f
// -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x = f
largestEnteringValue = -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x;
temp = -clickRay.getOrigin().y/clickRay.getDirection().y + minY/clickRay.getDirection().y;
if(largestEnteringValue < temp) {
largestEnteringValue = temp;
}
temp = -clickRay.getOrigin().z/clickRay.getDirection().z + minZ/clickRay.getDirection().z;
if(largestEnteringValue < temp) {
largestEnteringValue = temp;
}
smallestExitingValue = -clickRay.getOrigin().x/clickRay.getDirection().x + maxX/clickRay.getDirection().x;
temp = -clickRay.getOrigin().y/clickRay.getDirection().y + maxY/clickRay.getDirection().y;
if(smallestExitingValue > temp) {
smallestExitingValue = temp;
}
temp = -clickRay.getOrigin().z/clickRay.getDirection().z + maxZ/clickRay.getDirection().z;
if(smallestExitingValue < temp) {
smallestExitingValue = temp;
}
if(largestEnteringValue > smallestExitingValue) {
//System.out.println("Miss!");
} else {
if (largestEnteringValue < closestEnteringValue) {
closestEnteringValue = largestEnteringValue;
closestDrawableHit = d;
}
}
}
if(closestDrawableHit != null) {
System.out.println("Hit at: (" + clickRay.setDistance(closestEnteringValue).x + ", " + clickRay.getCurrentPosition().y + ", " + clickRay.getCurrentPosition().z);
this.worldModel.removeDrawableThing(closestDrawableHit);
}
}
}
I just don't understand what's wrong. The ray is being shot and I do hit stuff that gets removed, but the result of the ray is very strange: sometimes it removes the thing I'm clicking at, sometimes it removes things that aren't even close to what I'm clicking at, and sometimes it removes nothing at all.
Edit:
Okay, so I have continued searching for errors, and by debugging the ray (by painting small dots where it travels) I can now see that there is something obviously wrong with the ray that I'm sending out... it has its origin near the world center and always shoots to the same position no matter where I direct my camera...
My initial thought is that there might be some error in the way I calculate my view matrix (since it's not possible to get the view matrix from the gluLookAt method in LWJGL, I have to build it myself, and I guess that's where the problem is)...
Edit 2:
This is how I calculate it currently:
private double[][] viewMatrixDouble = {{0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,1}};
public Vector getCameraDirectionVector() {
Vector actualEye = this.getActualEyePosition();
return new Vector(lookAt.x-actualEye.x, lookAt.y-actualEye.y, lookAt.z-actualEye.z);
}
public Vector getActualEyePosition() {
return eye.rotate(this.getRotation());
}
public void generateViewMatrix() {
Vector cameraDirectionVector = getCameraDirectionVector().normalize();
Vector side = Vector.cross(cameraDirectionVector, this.upVector).normalize();
Vector up = Vector.cross(side, cameraDirectionVector);
viewMatrixDouble[0][0] = side.x; viewMatrixDouble[0][1] = up.x; viewMatrixDouble[0][2] = -cameraDirectionVector.x;
viewMatrixDouble[1][0] = side.y; viewMatrixDouble[1][1] = up.y; viewMatrixDouble[1][2] = -cameraDirectionVector.y;
viewMatrixDouble[2][0] = side.z; viewMatrixDouble[2][1] = up.z; viewMatrixDouble[2][2] = -cameraDirectionVector.z;
/*
Vector actualEyePosition = this.getActualEyePosition();
Vector zaxis = new Vector(this.lookAt.x - actualEyePosition.x, this.lookAt.y - actualEyePosition.y, this.lookAt.z - actualEyePosition.z).normalize();
Vector xaxis = Vector.cross(upVector, zaxis).normalize();
Vector yaxis = Vector.cross(zaxis, xaxis);
viewMatrixDouble[0][0] = xaxis.x; viewMatrixDouble[0][1] = yaxis.x; viewMatrixDouble[0][2] = zaxis.x;
viewMatrixDouble[1][0] = xaxis.y; viewMatrixDouble[1][1] = yaxis.y; viewMatrixDouble[1][2] = zaxis.y;
viewMatrixDouble[2][0] = xaxis.z; viewMatrixDouble[2][1] = yaxis.z; viewMatrixDouble[2][2] = zaxis.z;
viewMatrixDouble[3][0] = -Vector.dot(xaxis, actualEyePosition); viewMatrixDouble[3][1] =-Vector.dot(yaxis, actualEyePosition); viewMatrixDouble[3][2] = -Vector.dot(zaxis, actualEyePosition);
*/
viewMatrix = new Matrix4f();
viewMatrix.load(getViewMatrixAsFloatBuffer());
}
I would be very grateful if anyone could verify whether this is wrong or right, and if it's wrong, supply me with the right way of doing it...
I have read a lot of threads and documentation about this but I can't seem to wrap my head around it...
I just don't understand what's wrong: the ray is being shot and I do hit stuff that gets removed, but things are not disappearing where I press on the screen.
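For reference (this is not taken from your code, just the standard construction documented for gluLookAt): with f = normalize(lookAt - eye), s = normalize(cross(f, up)) and u = cross(s, f), the full view matrix also carries the eye translation:

V = [  s.x   s.y   s.z  -dot(s, eye) ]
    [  u.x   u.y   u.z  -dot(u, eye) ]
    [ -f.x  -f.y  -f.z   dot(f, eye) ]
    [  0     0     0     1           ]

Whether those translation terms end up in the fourth column or the fourth row depends on whether Matrix4f.load expects row-major or column-major data, but the active generateViewMatrix above never fills them in at all (only the commented-out variant does). Without them, unprojecting the near and far points puts every ray origin near the world origin no matter where the camera is, which matches the symptom you describe.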
OpenGL is not a scene graph, it's a drawing library. So after removing something from your internal representation you must redraw the scene. And your code is missing some call to a function that triggers a redraw.
Okay, so I finally solved it with help from the guys at gamedev and a friend. Here is a link to the answer where I have posted the code!