I have a 3D .vtk model that I render and I extract the contour from the resulting image using a vtkContourFilter (using vtk version 7.0.0 on Ubuntu 16.04).
I would like to project it from different perspectives, but as I loop over the different camera positions (I checked that the camera positions are indeed changed) the interactive viewer that launches with each iteration always shows the contour from the first image.
When I output the first few coordinates of the found contour points (which I store as a vtkPolyData) I also noticed that the content in my set of contour points does not change.
I have tried some online suggestions that worked for others, such as adding:
ContFilter->Modified();
ContFilter->Update();
and
polyData->Modified(); // This is the 3D vtkPolyData that I project
and
ContFilter->SetValue(0, 10);
ContFilter->SetValue(0, 255);
As a wild guess I also tried adding:
polyData->Modified();
// Remove old links
renderWindow->RemoveRenderer(renderer);
mapper->RemoveAllInputs();
// Set new links
renderer->SetActiveCamera(camera);
renderWindow->AddRenderer(renderer);
renderer->Modified();
renderer->ResetCameraClippingRange();
renderWindow->Modified();
mapper->SetInputData(polyData);
renderWindow->Render();
within the for loop, before using the ContourFilter, but it still does not update. With that I tried everything I could think of and find online.
This is the relevant code:
// Prepare the rendering environment to project the 3D model to an image from different perspectives
vtkSmartPointer<vtkDataSetMapper> mapper = vtkSmartPointer<vtkDataSetMapper>::New();
mapper->SetInputData(polyData);
mapper->ScalarVisibilityOff();
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->GetProperty()->SetInterpolationToFlat();
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
renderer->SetBackground(1,1,1);
renderer->AddActor(actor);
vtkSmartPointer<vtkCamera> camera = vtkSmartPointer<vtkCamera>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->SetOffScreenRendering(1);
// NOTE(review): this filter is created once and reused across iterations.
// vtkWindowToImageFilter caches its output because a vtkWindow's MTime does
// not advance on re-render; without an explicit windowToImageFilter->Modified()
// per iteration it keeps returning the FIRST captured frame (root cause of the
// "contour never changes" symptom described above).
vtkSmartPointer<vtkWindowToImageFilter> windowToImageFilter = vtkSmartPointer<vtkWindowToImageFilter>::New();
vtkSmartPointer<vtkContourFilter> ContFilter = vtkSmartPointer<vtkContourFilter>::New();
vtkSmartPointer<vtkPolyData> contour = vtkSmartPointer<vtkPolyData>::New();
// Loop over the camera positions. At each iteration render/project,
// extract the contour and finally render the 3D model and the found
// contour
// NOTE(review): iPoint/contourStart alias VTK-internal buffers returned by
// GetPoint(); their contents are only valid until the next GetPoint() call.
double * iPoint;
// NOTE(review): camPos is never initialized, yet vtkCamera::GetPosition(double*)
// below WRITES through it — dereferencing a wild pointer (undefined behavior).
// It should be declared as `double camPos[3];` instead.
double * camPos;
double * contourStart;
int nContours;
for(int i=0; i<positions->GetNumberOfPoints(); i++){
// Print the camera position
iPoint = positions->GetPoint(i);
std::cout << iPoint[0] << " " << iPoint[1] << " " << iPoint[2] << std::endl;
//Move camera
camera->SetPosition(iPoint[0], iPoint[1], iPoint[2]);
camera->SetFocalPoint(focalPointOrig[0], focalPointOrig[1], focalPointOrig[2]);
camera->SetViewAngle(viewAngle);
camera->Modified();
camera->SetRoll(90);
// Does this help to update the view?
polyData->Modified();
// Remove old links and set them again
renderWindow->RemoveRenderer(renderer);
mapper->RemoveAllInputs();
renderer->SetActiveCamera(camera);
renderWindow->AddRenderer(renderer);
renderer->Modified();
renderer->ResetCameraClippingRange();
renderWindow->Modified();
// Render/project the data
mapper->SetInputData(polyData);
renderWindow->Render();
// Print camera position for debugging
camera->GetPosition(camPos);
std::cout << camPos[0] << " " << camPos[1] << " " << camPos[2] << std::endl;
// Get the image and apply a contourfilter
// NOTE(review): per the accepted answer below, windowToImageFilter->Modified()
// must be called here so the filter re-grabs the freshly rendered frame.
windowToImageFilter->SetInput(renderWindow);
windowToImageFilter->Update();
ContFilter->SetInputConnection(windowToImageFilter->GetOutputPort());
// Saw someone do this as a workaround for updating the view
// NOTE(review): both calls target contour index 0, so only the second value
// (255) survives — the first SetValue is immediately overwritten.
ContFilter->SetValue(0, 10);
ContFilter->SetValue(0, 255);
// Does this help to update the view?
ContFilter->Modified();
//Get the contour from the contourfilter
ContFilter->Update();
contour = ContFilter->GetOutput();
// Print the first points coordinates to see if they changed
// NOTE(review): only two coordinates are printed and there is a stray `<<`
// before std::endl; contourStart[2] is missing from the output line.
contourStart = contour->GetPoint(1);
std::cout << contourStart[0] << " " << contourStart[1] << " " << std::endl;
// Print the number of contours to see if it may be stored as an additional contour
nContours = ContFilter->GetNumberOfContours();
std::cout << nContours << std::endl;
// Render the 3D model and the found contour
actor->GetProperty()->SetColor(0.9,0.9,0.8);
// Create a mapper and actor of the silhouette
vtkSmartPointer<vtkPolyDataMapper> mapper_contour = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper_contour->SetInputData(contour);
// Try this again here
polyData->Modified();
vtkSmartPointer<vtkActor> actor_contour = vtkSmartPointer<vtkActor>::New();
actor_contour->SetMapper(mapper_contour);
actor_contour->GetProperty()->SetLineWidth(2.);
// 2 renderers and a render window
vtkSmartPointer<vtkRenderer> renderer1 = vtkSmartPointer<vtkRenderer>::New();
renderer1->AddActor(actor);
vtkSmartPointer<vtkRenderer> renderer2 = vtkSmartPointer<vtkRenderer>::New();
renderer2->AddActor(actor_contour);
// Set the 3D model renderer to the same perspective but don't change the camera perspective of the contour
renderer1->SetActiveCamera(camera);
// Setup the window
vtkSmartPointer<vtkRenderWindow> renderwindow = vtkSmartPointer<vtkRenderWindow>::New();
renderwindow->SetSize(1600, 800);
renderwindow->AddRenderer(renderer1);
renderer1->SetViewport(0., 0., 0.5, 1.);
renderwindow->AddRenderer(renderer2);
renderer2->SetViewport(0.5, 0., 1., 1.);
// Setup the interactor
vtkSmartPointer<vtkInteractorStyleTrackballCamera> style = vtkSmartPointer<vtkInteractorStyleTrackballCamera>::New();
vtkSmartPointer<vtkRenderWindowInteractor> iren = vtkSmartPointer<vtkRenderWindowInteractor>::New();
iren->SetRenderWindow( renderwindow);
iren->SetInteractorStyle(style);
// Display the coordinate system axes
vtkSmartPointer<vtkAxesActor> axes = vtkSmartPointer<vtkAxesActor>::New();
vtkSmartPointer<vtkOrientationMarkerWidget> widget = vtkSmartPointer<vtkOrientationMarkerWidget>::New();
widget->SetOutlineColor( 0.9300, 0.5700, 0.1300 );
widget->SetOrientationMarker( axes );
widget->SetInteractor( iren );
widget->SetViewport( 0.0, 0.0, 0.4, 0.4 );
widget->SetEnabled( 1 );
widget->InteractiveOn();
// Render the 3D model and the found contour
renderwindow->Render();
// Blocks until the interactive window for this iteration is closed.
iren->Start();
}
Just found the answer.
As mentioned in the warning in the detailed description of the vtkWindowToImageFilter class reference webpage (https://www.vtk.org/doc/nightly/html/classvtkWindowToImageFilter.html), vtkWindows generally do not rerender unless you call their Modified() function. Now my projected views are updated like I wanted.
So I changed
// Get the image and apply a contourfilter
windowToImageFilter->SetInput(renderWindow);
windowToImageFilter->Update();
to
// Get the image and apply a contourfilter
windowToImageFilter->Modified();
windowToImageFilter->SetInput(renderWindow);
windowToImageFilter->Update();
See here the warning text in case the link above ever stops working:
Warning:
A vtkWindow doesn't behave like other parts of the VTK pipeline: its modification time doesn't get updated when an image is rendered. As a result, naive use of vtkWindowToImageFilter will produce an image of the first image that the window rendered, but which is never updated on subsequent window updates. This behavior is unexpected and in general undesirable.
To force an update of the output image, call vtkWindowToImageFilter's Modified method after rendering to the window.
In VTK versions 4 and later, this filter is part of the canonical way to output an image of a window to a file (replacing the obsolete SaveImageAsPPM method for vtkRenderWindows that existed in 3.2 and earlier). Connect this filter to the output of the window, and filter's output to a writer such as vtkPNGWriter.
Reading back alpha planes is dependent on the correct operation of the render window's GetRGBACharPixelData method, which in turn is dependent on the configuration of the window's alpha planes. As of VTK 4.4+, machine-independent behavior is not automatically assured because of these dependencies.
Related
I have some confusing issues in my code, and these issues are:
1) As you can see in the code below, if I execute it I get a 3D preview of the model but without the sliderWidget. (Fixed)
2) If I change this line from mapper->SetInputConnection(surface->GetOutputPort()); to mapper->SetInputConnection(reader->GetOutputPort()); I get a sliderWidget, but I no longer see the 3D model. (Fixed)
3) If I try to set a custom key event, it works and changes the value, but after every change I have to wait until it reloads or rebuilds the 3D DICOM (it behaves as if it runs and creates the 3D model from scratch every time, not like a slider).
// Observer attached to the slider widget: on every InteractionEvent it reads
// the slider's current value and pushes it into the marching-cubes filter as
// a new iso-surface value.
class vtkSliderCallback : public vtkCommand
{
public:
// Next contour index to write; starts at 1 and grows with each interaction.
int counter = 1;
static vtkSliderCallback *New()
{
return new vtkSliderCallback;
}
virtual void Execute(vtkObject *caller, unsigned long, void*)
{
vtkSliderWidget *sliderWidget =
reinterpret_cast<vtkSliderWidget*>(caller);
cout << this->counter << endl;
// NOTE(review): counter++ means every slider event ADDS a new contour value
// at a fresh index instead of replacing the one at index 0, so the filter
// accumulates iso-values (and re-runs marching cubes over all of them).
// SetValue(0, ...) is almost certainly what was intended.
this->SphereSource->SetValue(this->counter++, static_cast<vtkSliderRepresentation *>(sliderWidget->GetRepresentation())->GetValue());
}
vtkSliderCallback():SphereSource(0) {}
// Non-owning pointer to the marching-cubes filter driven by the slider.
vtkMarchingCubes *SphereSource;
};
// Reads a DICOM series, extracts an iso-surface with marching cubes, and shows
// it with a 3D slider widget that re-targets the iso-value interactively.
int main(int argc, char* argv[])
{
// Verify input arguments
if ( argc < 4 )
{
std::cout << "Usage: " << argv[0]
<< " DicomSiresFolder" << " isoValueStep" << " OutputDirectory" << std::endl;
return EXIT_FAILURE;
}
std::string folder = argv[1];
// A sphere
vtkSmartPointer<vtkImageData> volume =
vtkSmartPointer<vtkImageData>::New();
vtkSmartPointer<vtkDICOMImageReader> reader =
vtkSmartPointer<vtkDICOMImageReader>::New();
reader->SetDirectoryName(folder.c_str());
reader->Update();
// DeepCopy decouples the volume from the reader's pipeline, so later
// pipeline updates never re-trigger the (slow) DICOM read.
volume->DeepCopy(reader->GetOutput());
vtkSmartPointer<vtkMarchingCubes> surface =
vtkSmartPointer<vtkMarchingCubes>::New();
surface->SetInputData(volume);
surface->ComputeNormalsOn();
// Initial iso-value 400 at contour index 0.
surface->SetValue(0, 400);
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(surface->GetOutputPort());
mapper->ScalarVisibilityOff();
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->GetProperty()->SetInterpolationToFlat();
// A renderer and render window
vtkSmartPointer<vtkRenderer> renderer =
vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow =
vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
// An interactor
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
// Add the actors to the scene
renderer->AddActor(actor);
// Render an image (lights and cameras are created automatically)
renderWindow->Render();
// Slider representation placed in world coordinates above the model.
vtkSmartPointer<vtkSliderRepresentation3D> sliderRep =
vtkSmartPointer<vtkSliderRepresentation3D>::New();
sliderRep->SetMinimumValue(-800.0);
sliderRep->SetMaximumValue(800.0);
sliderRep->SetTitleText("Iso Resolution");
sliderRep->GetPoint1Coordinate()->SetCoordinateSystemToWorld();
sliderRep->GetPoint1Coordinate()->SetValue(-4,6,0);
sliderRep->GetPoint2Coordinate()->SetCoordinateSystemToWorld();
sliderRep->GetPoint2Coordinate()->SetValue(4,6,0);
sliderRep->SetSliderLength(0.075);
sliderRep->SetSliderWidth(0.05);
sliderRep->SetEndCapLength(0.05);
vtkSmartPointer<vtkSliderWidget> sliderWidget =
vtkSmartPointer<vtkSliderWidget>::New();
sliderWidget->SetInteractor(renderWindowInteractor);
sliderWidget->SetRepresentation(sliderRep);
sliderWidget->SetAnimationModeToAnimate();
sliderWidget->EnabledOn();
// NOTE(review): each slider interaction re-runs marching cubes on the whole
// volume (see the answer below) — vtkFlyingEdges3D is a faster drop-in.
vtkSmartPointer<vtkSliderCallback> callback =
vtkSmartPointer<vtkSliderCallback>::New();
callback->SphereSource = surface;
sliderWidget->AddObserver(vtkCommand::InteractionEvent,callback);
renderWindowInteractor->Initialize();
renderWindow->Render();
// Hands control to the interactor's event loop; returns when the window closes.
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
UPDATE:
I fixed the issues in points 1 and 2, but issue 3 still needs to be fixed.
Your callback is changing the value for vtkMarchingCubes, so you're running marching cubes every time — so yes, it's rebuilding a mesh every time. As far as I understand what you need to do, one solution can be to precompute the vtkMarchingCubes output for all possible values: not elegant (but it could work if you have enough memory).
In any case, use vtkFlyingEdges3D instead of vtkMarchingCubes, it's much faster (it could be fast enough to solve your problem without any other modification).
I’m trying to obtain a polyData with only the visible part of a 3D model.
To that extent, I’m passing the original data through a vtkSelectVisiblePoints filter.
I’m using a mock renderer, mapper and actor because I want to post-process the visible points before displaying them.
However, the output of the vtkSelectVisiblePoints filter contains “0” points for some reason…
Using the following example:
http://www.vtk.org/Wiki/VTK/Examples/Cxx/PolyData/SelectVisiblePoints
I came up with the following piece of code:
// Render window and interactor
vtkSmartPointer<vtkRenderWindow> renderWindow =
vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->SetSize(800, 800);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
// Mock renderer, mapper and actor
vtkSmartPointer<vtkRenderer> mockRenderer =
vtkSmartPointer<vtkRenderer>::New();
renderWindow->AddRenderer(mockRenderer);
vtkSmartPointer<vtkPolyDataMapper> mockMapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
// NOTE(review): SetInput() is the pre-VTK6 API; newer VTK uses SetInputData().
mockMapper->SetInput(reader->GetOutput());
vtkSmartPointer<vtkActor> mockActor = vtkSmartPointer<vtkActor>::New();
mockActor->SetMapper(mockMapper);
mockRenderer->AddActor(mockActor);
// Set camera to the correct position
mockRenderer->GetActiveCamera()->SetPosition(0, -1, 0);
mockRenderer->GetActiveCamera()->SetFocalPoint(0, 0, 0);
mockRenderer->GetActiveCamera()->SetViewUp(0, 1, 0);
mockRenderer->ResetCamera();
vtkSmartPointer<vtkSelectVisiblePoints> selectVisiblePoints =
vtkSmartPointer<vtkSelectVisiblePoints>::New();
selectVisiblePoints->SetInput(reader->GetOutput());
selectVisiblePoints->SetRenderer(mockRenderer);
// NOTE(review): vtkSelectVisiblePoints tests visibility against the renderer's
// Z-buffer, which only exists after a Render() call — this is why Update()
// here yields 0 points (the accepted answer below adds a Render() first).
selectVisiblePoints->Update();
std::cout << "Visible nr of points = " << selectVisiblePoints->GetOutput()->GetNumberOfPoints() << std::endl;
renderWindow->RemoveRenderer(mockRenderer);
… prints 0.
However, if I call renderWindow->Render(), the visible part of the model is correctly displayed...
Am I missing anything...?
The answer was right there, in the documentation of the filter:
Warning You must carefully synchronize the execution of this filter.
The filter refers to a renderer, which is modified every time a render
occurs. Therefore, the filter is always out of date, and always
executes. You may have to perform two rendering passes
It worked as expected if I added the //new lines, like Arnas also suggested in his comment:
vtkSmartPointer<vtkSelectVisiblePoints> selectVisiblePoints =
vtkSmartPointer<vtkSelectVisiblePoints>::New();
selectVisiblePoints->SetInput(originalData);
selectVisiblePoints->SetRenderer(renderer);
selectVisiblePoints->Update();
// The render pass fills the Z-buffer the filter needs; the second Update()
// then re-executes the filter against valid depth data.
renderWindow->Render(); // new
selectVisiblePoints->Update(); // new
std::cout << "Visible nr of points = " << selectVisiblePoints->GetOutput()->GetNumberOfPoints() << std::endl;
I'm trying to use VTK to change points, i.e. I am changing coordinates (x,y,z) by a (+1/-1) value. I am reading an .OBJ file and then accessing the points of a model and when I am trying to visualise the changes in the model the RenderWindow doesnt show anything. Below is my code:-
// Read the OBJ mesh and shift every vertex by (+1,+1,+1), then display it.
vtkSmartPointer<vtkOBJReader> reader = vtkSmartPointer<vtkOBJReader>::New();
reader->SetFileName(filename.c_str());
reader->Update();
vtkSmartPointer<vtkPolyData> polyData = reader->GetOutput();
// NOTE(review): vtkPolyData::Update() only exists in VTK 5 and earlier;
// it was removed with the VTK 6 pipeline changes.
polyData->Update();
Point3d point;
std::vector<Point3d> vertices;
double p[3];
// NOTE(review): raw New() without a matching Delete() (or a smart pointer)
// leaks both objects.
vtkPoints* points = vtkPoints::New();
vtkDoubleArray* pcoord = vtkDoubleArray::New();
pcoord->SetNumberOfComponents(3);
pcoord->SetNumberOfTuples(polyData->GetNumberOfPoints());
for(vtkIdType i = 0; i < polyData->GetNumberOfPoints(); i++)
{
polyData->GetPoint(i,p);
p[0] +=1;
p[1] +=1;
p[2] +=1;
pcoord->SetTuple(i, p);
}
points->SetData(pcoord);
polyData->SetPoints(points);
polyData->Modified();
//Visualize Code
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
// NOTE(review): this wires the mapper to the reader's (unmodified) output,
// so the edited polyData is never rendered — the answer below replaces this
// line with mapper->SetInputData(polyData).
mapper->SetInputConnection(reader->GetOutputPort());
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
renderer->AddActor(actor);
// NOTE(review): raw New() render window is never Delete()d — another leak.
vtkRenderWindow* renderWindow = vtkRenderWindow::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
renderWindowInteractor->Initialize();
renderWindowInteractor->Start();
I am new to this 3D and VTK stuff, Please check where I am missing since I want to change the coordinates of each vertex (x,y,z). Do I need to further change something else as well?
Many Thanks.
First thing, you have alignment issues in the code that you included in your question. I believe you are missing a for loop too. (where is "i" defined?)
The major issue is that you are not rendering polyData at all. The only thing you are rendering is what is in the .obj file. Evident from this line:
mapper->SetInputConnection(reader->GetOutputPort());
In order to render polyData replace the above line with:
mapper->SetInputData(polyData);
This assumes polyData was created correctly. I can't easily tell this from the code you provided.
Here is an example that may help.
I am trying to render views of a 3D mesh in VTK, I am doing the following:
// Minimal setup: render the mesh (via an externally-created `mapper`) from a
// fixed camera pose in a 640x480 window.
vtkSmartPointer<vtkRenderWindow> render_win = vtkSmartPointer<vtkRenderWindow>::New();
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
render_win->AddRenderer(renderer);
render_win->SetSize(640, 480);
vtkSmartPointer<vtkCamera> cam = vtkSmartPointer<vtkCamera>::New();
// Camera looks from (50,50,50) toward the origin, +Y up.
cam->SetPosition(50, 50, 50);
cam->SetFocalPoint(0, 0, 0);
cam->SetViewUp(0, 1, 0);
cam->Modified();
vtkSmartPointer<vtkActor> actor_view = vtkSmartPointer<vtkActor>::New();
actor_view->SetMapper(mapper);
renderer->SetActiveCamera(cam);
renderer->AddActor(actor_view);
render_win->Render();
I am trying to simulate a rendering from a calibrated Kinect, for which I know the intrinsic parameters. How can I set the intrinsic parameters (focal length and principle point) to the vtkCamera.
I wish to do this so that the 2d pixel - 3d camera coordinate would be the same as if the image were taken from a kinect.
Hopefully this will help others trying to convert standard pinhole camera parameters to a vtkCamera: I created a gist showing how to do the full conversion. I verified that the world points project to the correct location in the rendered image. The key code from the gist is pasted below.
gist: https://gist.github.com/decrispell/fc4b69f6bedf07a3425b
// Maps pinhole-camera intrinsics/extrinsics onto a vtkCamera: the extrinsic
// [R|t] goes into the model transform, and (principal point, focal length)
// become VTK's window center and vertical view angle.
// apply the transform to scene objects
camera->SetModelTransformMatrix( camera_RT );
// the camera can stay at the origin because we are transforming the scene objects
camera->SetPosition(0, 0, 0);
// look in the +Z direction of the camera coordinate system
camera->SetFocalPoint(0, 0, 1);
// the camera Y axis points down
camera->SetViewUp(0,-1,0);
// ensure the relevant range of depths are rendered
camera->SetClippingRange(depth_min, depth_max);
// convert the principal point to window center (normalized coordinate system) and set it
// nx/ny are the image width/height in pixels; window center is in [-1,1].
double wcx = -2*(principal_pt.x() - double(nx)/2) / nx;
double wcy = 2*(principal_pt.y() - double(ny)/2) / ny;
camera->SetWindowCenter(wcx, wcy);
// convert the focal length to view angle and set it
// Vertical FOV in degrees: 2*atan((ny/2)/f), converted from radians.
double view_angle = vnl_math::deg_per_rad * (2.0 * std::atan2( ny/2.0, focal_len ));
std::cout << "view_angle = " << view_angle << std::endl;
camera->SetViewAngle( view_angle );
I too am using VTK to simulate the view from a kinect sensor. I am using VTK 6.1.0. I know this question is old, but hopefully my answer may help someone else.
The question is how can we set a projection matrix to map world coordinates to clip coordinates. For more info on that see this OpenGL explanation.
I use a Perspective Projection Matrix to simulate the kinect sensor. To control the intrinsic parameters you can use the following member functions of vtkCamera.
// Intrinsic-style controls on a vtkCamera: vertical FOV plus near/far planes.
double fov = 60.0, np = 0.5, fp = 10; // the values I use
cam->SetViewAngle( fov ); // vertical field of view angle
cam->SetClippingRange( np, fp ); // near and far clipping planes
In order to give you a sense of what that may look like. I have an old project that I did completely in C++ and OpenGL in which I set the Perspective Projection Matrix similar to how I described, grabbed the z-buffer, and then reprojected the points out onto a scene that I viewed from a different camera. (The visualized point cloud looks noisy because I also simulated noise).
If you need your own custom Projection Matrix that isn't the Perspective flavor. I believe it is:
cam->SetUserTransform( transform ); // transform is a pointer to type vtkHomogeneousTransform
However, I have not used the SetUserTransform method.
This thread was super useful to me for setting camera intrinsics in VTK, especially decrispell's answer. To be complete, however, one case is missing: if the focal length in the x and y directions are not equal. This can easily be added to the code by using the SetUserTransform method. Below is a sample code in python :
# Handle unequal focal lengths (fx != fy): VTK's view angle encodes only one
# focal length, so scale the camera's x-axis by fx/fy via a user transform.
cam = self.renderer.GetActiveCamera()
m = np.eye(4)
# Anisotropic scale in x; the rest of the projection stays as configured.
m[0,0] = 1.0*fx/fy
t = vtk.vtkTransform()
# vtkTransform expects the 16 matrix entries in row-major order.
t.SetMatrix(m.flatten())
cam.SetUserTransform(t)
where fx and fy are the x and y focal lengths in pixels, i.e. the first two diagonal elements of the intrinsic camera matrix. np is an alias for the numpy import.
Here is a gist showing the full solution in python (without extrinsics for simplicity). It places a sphere at a given 3D position, renders the scene into an image after setting the camera intrinsics, and then displays a red circle at the projection of the sphere center on the image plane: https://gist.github.com/benoitrosa/ffdb96eae376503dba5ee56f28fa0943
I have a mesh model (.ply file) and I have rendered it with VTK and changed the camera viewpoint.
From the new camera viewpoint how can I render an RGB and a depth image? I think this might be raytracing, but not sure
EDIT
I want to generate a real depth map, as opposed to just a visualization of the depth. The visualization can be achieved by using values in Z buffer and scaling between 0-255 but this does not provide real depth information.
To get the real world depth from the camera I have done the following:
// Real metric depth for pixel (x,y): read the normalized Z-buffer value, let
// the world picker convert (x, y, z) back to world coordinates, then take the
// Euclidean distance from the camera position.
double z = render_win->GetZbufferDataAtPoint(x,y);
worldPicker->Pick(x, y, z, renderer);
worldPicker->GetPickPosition(coords);
double euclidean_distance = sqrt(pow((coords[0] - cam_pos[0]), 2) + pow(coords[1] - cam_pos[1], 2) + pow(coords[2] - cam_pos[2], 2));
where cam_pos is the camera position in real world coordinates. This seems to do the trick
Here is a short few lines if you already have the application set up and rendering, this has to be after the mesh has rendered at least once
// Screenshot
// Grab the current frame of `renderWindow` and write it out as a PNG.
vtkSmartPointer<vtkWindowToImageFilter> windowToImageFilter =
vtkSmartPointer<vtkWindowToImageFilter>::New();
windowToImageFilter->SetInput(renderWindow);
// NOTE(review): SetMagnification was deprecated in later VTK releases in
// favor of SetScale — confirm against the VTK version in use.
windowToImageFilter->SetMagnification(3); //set the resolution of the output image (3 times the current resolution of vtk render window)
windowToImageFilter->SetInputBufferTypeToRGBA(); //also record the alpha (transparency) channel
windowToImageFilter->Update();
vtkSmartPointer<vtkPNGWriter> writer =
vtkSmartPointer<vtkPNGWriter>::New();
writer->SetFileName("screenshot2.png");
writer->SetInputConnection(windowToImageFilter->GetOutputPort());
writer->Write();
This is from the VTK Public Wiki
To retrieve RGB image you can use vtkWindowToImageFilter class, allowing to read data from the vtkWindow.
For the Depth Image, i tried a solution which is to modify color on the mesh using vtkDepthSortPolyData , and again, read from the window.
See the example below:
# define VTK_CREATE(type, nom) vtkSmartPointer<type> nom = vtkSmartPointer<type>::New()
// Renders a PLY mesh colored by depth-sort order (vtkDepthSortPolyData) and
// saves the window as a PNG. Uses the pre-VTK6 SetInput() API (tested on 5.10).
// NOTE(review): the cell scalars encode back-to-front SORT ORDER, i.e. a
// visualization of relative depth — not metric depth values.
int main ()
{
VTK_CREATE(vtkPLYReader, reader);
reader->SetFileName ("mesh.ply");
reader->Update ();
// create depth sort algo
VTK_CREATE(vtkDepthSortPolyData, sort);
sort->SetInput(reader->GetOutput ());
// init stuff for rendering
VTK_CREATE(vtkPolyDataMapper, mapper);
VTK_CREATE(vtkActor, actor);
VTK_CREATE(vtkRenderer, rend);
VTK_CREATE(vtkRenderWindow, rw);
VTK_CREATE(vtkRenderWindowInteractor, inte);
VTK_CREATE(vtkCamera, cam);
sort->SetDirectionToBackToFront (); // camera direction
sort->SetCamera (cam); // set camera or runtime warning
sort->SortScalarsOn ();
sort->Update ();
mapper->SetScalarVisibility(true);
// limit max scalar (nb Color)
// Scalar range spans the cell indices produced by the sort.
mapper->SetScalarRange (0, sort->GetOutput ()->GetNumberOfCells ());
mapper->SetInputConnection(sort->GetOutputPort ());
mapper->Update ();
actor->SetMapper(mapper);
actor->RotateY (59); // transform with a rotation to see depth
actor->GetProperty()->SetColor(1, 0, 0);
sort->SetProp3D (actor); // set the actor to the algo
rend->SetActiveCamera (cam);
rw->AddRenderer(rend);
rend->AddActor(actor);
inte->SetRenderWindow (rw);
inte->Initialize ();
// read depth image from vtk Window
VTK_CREATE (vtkWindowToImageFilter, screen);
screen->SetInput (rw);
screen->Update ();
// start rendering for visualization
rw->Render ();
// save the depth img as a png
VTK_CREATE(vtkPNGWriter, writer);
writer->SetFileName ("output.png");
writer->SetInputConnection (screen->GetOutputPort ());
writer->Write ();
// Enter the interactive event loop after the image has been written.
inte->Start ();
return 0;
}
Tested on VTK 5.10.