My task: calculate the pixel coordinates (e.g., by taking a snapshot) of a 3D mesh, to find the 2D shape of this mesh from a specific camera angle.
I'm currently using Qt3D with a QGeometryRenderer to render a scene containing a mesh to a QWidget which works fine.
I tried to render the content of the QWidget into a pixmap with QWidget::render(), as proposed in this post: How to create screenshot of QWidget?. Saving the pixmap to a .jpg results in a blank image with the default background color, which makes sense because the QWidget is only a container for the Qt3DWindow and does not hold the rendered content itself.
Here is how the scene is set up in my mainwindow.cpp:
// sets the scene objects, camera, lights,...
void MainWindow::setScene() {
scene = custommesh->createScene(mesh->getVertices(),
mesh->getVerticesNormals(),
mesh->getFaceNormals(),
mesh->getVerticesIndex(),
mesh->getFacesIndex()); // QEntity*
custommesh->setMaterial(scene); // CustomMeshRenderer object
camera = custommesh->setCamera(view);
custommesh->setLight(scene, camera);
custommesh->setCamController(scene, camera);
view->setRootEntity(scene); // Qt3DExtras::Qt3DWindow object
// Setting up a QWidget working as a container for the view
QWidget *container = QWidget::createWindowContainer(view);
container->setMinimumSize(QSize(500, 500));
QSizePolicy policy(QSizePolicy::Preferred, QSizePolicy::Preferred); // Policy(5) == Preferred
policy.setHorizontalStretch(1);
policy.setVerticalStretch(1);
container->setSizePolicy(policy);
container->setObjectName("meshWidget");
this->ui->meshLayout->insertWidget(0, container);
}
As for the rendering, here is the custommeshrenderer class, where the QGeometryRenderer is defined and a QEntity* is returned when the mesh is initialized.
#include "custommeshrenderer.h"
#include <Qt3DRender/QAttribute>
#include <Qt3DExtras>
#include <Qt3DRender/QGeometryRenderer>
CustommeshRenderer::CustommeshRenderer()
{
rootEntity = new Qt3DCore::QEntity;
customMeshEntity = new Qt3DCore::QEntity(rootEntity);
transform = new Qt3DCore::QTransform;
customMeshRenderer = new Qt3DRender::QGeometryRenderer;
customGeometry = new Qt3DRender::QGeometry(customMeshRenderer);
m_pVertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
m_pNormalDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
m_pColorDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
m_pIndexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::IndexBuffer, customGeometry);
}
/**
 Set vertices and their normals for the scene
 @param vertices List with all vertices of the mesh
 @param vertices_normals List with all vertex normals
 @param face_normals List with all face normals
 @param vertices_idx List with the indices for the vertices
 @param faces_idx List with all indices for the faces
 @return Entity to which the components were added
*/
Qt3DCore::QEntity *CustommeshRenderer::createScene(QList<QVector3D> vertices, QList<QVector3D> vertices_normals, QList<QVector3D> face_normals, QList<int> vertices_idx, QList<QVector3D> faces_idx) {
// Setting scale to 8.0
transform->setScale(8.0f);
// Setting all the colors to (200, 0, 0)
QList<QVector3D> color_list;
for(int i = 0; i < vertices.length(); i++) {
color_list.append(QVector3D(200.0f, 0.0f, 0.0f));
}
// Fill vertexBuffer with data which hold the vertices, normals and colors
// Buffer layout: size of vertices list * 3 (x,y,z) * 4 (since x,y,z are floats, which need 4 bytes each)
vertexBufferData.resize(vertices.length() * 3 * (int)sizeof(float));
float *rawVertexArray = reinterpret_cast<float *>(vertexBufferData.data());
normalBufferData.resize(vertices_normals.length() * 3 * (int)sizeof(float));
float *rawNormalArray = reinterpret_cast<float *>(normalBufferData.data());
colorBufferData.resize(color_list.length() * 3 * (int)sizeof(float));
float *rawColorArray = reinterpret_cast<float *>(colorBufferData.data());
setRawVertexArray(rawVertexArray, vertices);
setRawNormalArray(rawNormalArray, vertices_normals);
setRawColorArray(rawColorArray, color_list);
//Fill indexBufferData with data which holds the triangulation information (patches/tris/lines)
indexBufferData.resize(faces_idx.length() * 3 * (int)sizeof(uint));
uint *rawIndexArray = reinterpret_cast<uint *>(indexBufferData.data());
setRawIndexArray(rawIndexArray, faces_idx);
//Set data to buffers
m_pVertexDataBuffer->setData(vertexBufferData);
m_pNormalDataBuffer->setData(normalBufferData);
m_pColorDataBuffer->setData(colorBufferData);
m_pIndexDataBuffer->setData(indexBufferData);
// Attributes
Qt3DRender::QAttribute *positionAttribute = new Qt3DRender::QAttribute();
positionAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
positionAttribute->setBuffer(m_pVertexDataBuffer);
// positionAttribute->setBuffer(m_pVertexDataBuffer.data());
positionAttribute->setDataType(Qt3DRender::QAttribute::Float);
positionAttribute->setDataSize(3);
positionAttribute->setByteOffset(0);
positionAttribute->setByteStride(3 * sizeof(float));
positionAttribute->setCount(vertices.length());
positionAttribute->setName(Qt3DRender::QAttribute::defaultPositionAttributeName());
Qt3DRender::QAttribute *normalAttribute = new Qt3DRender::QAttribute();
normalAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
normalAttribute->setBuffer(m_pNormalDataBuffer);
//normalAttribute->setBuffer(m_pNormalDataBuffer.data());
normalAttribute->setDataType(Qt3DRender::QAttribute::Float);
normalAttribute->setDataSize(3);
normalAttribute->setByteOffset(0);
normalAttribute->setByteStride(3 * sizeof(float));
normalAttribute->setCount(vertices.length());
normalAttribute->setName(Qt3DRender::QAttribute::defaultNormalAttributeName());
Qt3DRender::QAttribute* colorAttribute = new Qt3DRender::QAttribute();
colorAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
colorAttribute->setBuffer(m_pColorDataBuffer);
//colorAttribute->setBuffer(m_pColorDataBuffer.data());
colorAttribute->setDataType(Qt3DRender::QAttribute::Float);
colorAttribute->setDataSize(3);
colorAttribute->setByteOffset(0);
colorAttribute->setByteStride(3 * sizeof(float));
colorAttribute->setCount(vertices.length());
colorAttribute->setName(Qt3DRender::QAttribute::defaultColorAttributeName());
Qt3DRender::QAttribute *indexAttribute = new Qt3DRender::QAttribute();
indexAttribute->setAttributeType(Qt3DRender::QAttribute::IndexAttribute);
indexAttribute->setBuffer(m_pIndexDataBuffer);
//indexAttribute->setBuffer(m_pIndexDataBuffer.data());
indexAttribute->setDataType(Qt3DRender::QAttribute::UnsignedInt);
indexAttribute->setDataSize(3);
indexAttribute->setByteOffset(0);
indexAttribute->setByteStride(3 * sizeof(uint));
indexAttribute->setCount(face_normals.length());
customGeometry->addAttribute(positionAttribute);
customGeometry->addAttribute(normalAttribute);
/*customGeometry->addAttribute(colorAttribute);*/
customGeometry->addAttribute(indexAttribute);
//Set the final geometry and primitive type
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Triangles);
customMeshRenderer->setVerticesPerPatch(3);
customMeshRenderer->setGeometry(customGeometry);
customMeshRenderer->setVertexCount(faces_idx.length()*3);
customMeshEntity->addComponent(customMeshRenderer);
customMeshEntity->addComponent(transform);
setMaterial(customMeshEntity);
return rootEntity;
}
What is the best way to access the framebuffer, or is there any other method to take a snapshot of the mesh?
My last hope would be to implement the rendering pipeline (at least from projected coords to pixel coords) myself, but I would prefer another solution. Unfortunately I have to rely on Qt3D and can't switch to other classes like QOpenGLWidget; at least I haven't found a way to integrate it yet.
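For reference, the projection step of that fallback would look roughly like this. It is just a sketch, assuming the Qt3DRender::QCamera set up above and a known viewport size; toPixel is a hypothetical helper, and the mesh's own transform (the scale of 8.0 above) would have to be applied to the vertex first:
#include <Qt3DRender/QCamera>
#include <QMatrix4x4>
#include <QVector3D>
// Hypothetical helper: project a world-space vertex to pixel coordinates.
QVector3D toPixel(const QVector3D &vertex, Qt3DRender::QCamera *camera,
                  float viewportWidth, float viewportHeight)
{
    // QMatrix4x4::map() performs the perspective divide when mapping a
    // QVector3D, so ndc holds normalized device coordinates in [-1, 1].
    const QMatrix4x4 mvp = camera->projectionMatrix() * camera->viewMatrix();
    const QVector3D ndc = mvp.map(vertex);
    // NDC to pixels; y is flipped because screen y grows downwards.
    return QVector3D((ndc.x() * 0.5f + 0.5f) * viewportWidth,
                     (1.0f - (ndc.y() * 0.5f + 0.5f)) * viewportHeight,
                     ndc.z()); // keep depth for visibility checks
}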
I'm pretty new to Qt3D and the lack of detailed documentation doesn't make it easier.
You can use QRenderCapture for this. This essentially does a glReadPixels for you. The documentation is a bit sparse on this one, but there is an example online.
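A minimal sketch of how that can be wired up (assuming the Qt3DExtras::Qt3DWindow named view from the question; QRenderCapture requires Qt 5.8 or later):
#include <Qt3DRender/QRenderCapture>
// Insert a QRenderCapture node above the existing framegraph.
Qt3DRender::QRenderCapture *capture = new Qt3DRender::QRenderCapture;
view->activeFrameGraph()->setParent(capture);
view->setActiveFrameGraph(capture);
// Later, once at least one frame has been rendered, request a capture.
Qt3DRender::QRenderCaptureReply *reply = capture->requestCapture();
QObject::connect(reply, &Qt3DRender::QRenderCaptureReply::completed, [reply]() {
    reply->image().save("snapshot.png"); // QImage of the rendered frame
    reply->deleteLater();
});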
Alternatively, I implemented an offline renderer, which could help you in case that you don't want a whole 3D window.
I'm not sure what you mean by
Calculate the pixel coordinates (e.g. make a snapshot) of a 3D mesh to find the 2D shape of this mesh from a specific camera angle
but if you e.g. want to render the whole mesh in only one color (without highlights), you could try QPerVertexColorMaterial, which gave me exactly that result.
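For completeness, that is only a few lines (a sketch based on the entities in your createScene; it requires the commented-out colorAttribute to actually be added to the geometry, and note that color components are floats in [0, 1], so (200, 0, 0) would be clamped):
#include <Qt3DExtras/QPerVertexColorMaterial>
// Flat shading driven by the geometry's color attribute
// (defaultColorAttributeName), without specular highlights.
Qt3DExtras::QPerVertexColorMaterial *material =
        new Qt3DExtras::QPerVertexColorMaterial(rootEntity);
customMeshEntity->addComponent(material);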
Related
I have a problem with particles, so I'm asking a question here. The problem is that when I create several particle systems, every one of them shows the texture of the system created last. This is my code.
m_pParticleBuffer = new CStructuredBuffer;
m_pParticleBuffer->Create(sizeof(tParticle), m_iMaxParticle, nullptr);
m_pSharedBuffer = new CStructuredBuffer;
m_pSharedBuffer->Create(sizeof(tParticleShared), 1, nullptr);
m_pMesh = CResMgr::GetInst()->FindRes<CMesh>(L"PointMesh");
m_pMtrl = CResMgr::GetInst()->FindRes<CMaterial>(L"ParticleMtrl");
Ptr<CTexture> pParticle = _pTexture;
m_pMtrl->SetData(SHADER_PARAM::TEX_0, pParticle.GetPointer());
m_pUpdateMtrl = CResMgr::GetInst()->FindRes<CMaterial>(L"ParticleUpdateMtrl");
This is where the particles are initialized.
// interpolate between start color g_vec4_0 and end color g_vec4_1,
// based on the particle's normalized age
float fRatio = tData[_in.iInstID].m_fCurTime / tData[_in.iInstID].m_fLifeTime;
float4 vCurColor = (g_vec4_1 - g_vec4_0) * fRatio + g_vec4_0;
// modulate the interpolated color with the particle texture
return vCurColor * g_tex_0.Sample(g_sam_0, _in.vUV);
This is the HLSL pixel shader for the particles.
I'd like to display two squares on the screen using instanced rendering.
The problem is:
When I draw two squares, they are connected; I can't draw multiple separate squares.
I know it has to do with the PrimitiveType: I'm using Qt3DRender::QGeometryRenderer::LineLoop or Qt3DRender::QGeometryRenderer::TriangleFan, but is there a way to tell Qt to split my data buffer into multiple instances, so that the PrimitiveType applies to each instance and not to all the vertices?
I have a function that creates shapes:
Qt3DCore::QEntity* SceneBuilder::createShapeInstancing(const QList<QVector3D> & listVertices, int nbPointGeometry, const QColor color, Qt3DCore::QEntity * rootEntity)
{
// Material
Qt3DExtras::QPhongMaterial *material = new Qt3DExtras::QPhongMaterial(rootEntity);
material->setAmbient(color);
// Custom entity
Qt3DCore::QEntity *customMeshEntity = new Qt3DCore::QEntity(rootEntity);
// Custom Mesh
Qt3DRender::QGeometryRenderer *customMeshRenderer = new Qt3DRender::QGeometryRenderer(rootEntity);
Qt3DRender::QGeometry *customGeometry = new Qt3DRender::QGeometry(customMeshRenderer);
Qt3DRender::QBuffer *vertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
QByteArray vertexBufferData;
vertexBufferData.resize(listVertices.size() * sizeof(QVector3D)); // one QVector3D per entry in listVertices
QVector3D *posData = reinterpret_cast<QVector3D *>(vertexBufferData.data());
QList<QVector3D>::const_iterator it;
for (it = listVertices.begin(); it != listVertices.end(); it++)
{
*posData = *it;
++posData;
}
vertexDataBuffer->setData(vertexBufferData);
//Attributes
Qt3DRender::QAttribute *positionAttribute = new Qt3DRender::QAttribute();
positionAttribute->setName(Qt3DRender::QAttribute::defaultPositionAttributeName());
positionAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
positionAttribute->setBuffer(vertexDataBuffer);
positionAttribute->setVertexBaseType(Qt3DRender::QAttribute::Float);
positionAttribute->setVertexSize(3);
positionAttribute->setByteOffset(0);
positionAttribute->setByteStride(3 * sizeof(float));
positionAttribute->setCount(listVertices.size());
//positionAttribute->setDivisor(1);
customGeometry->addAttribute(positionAttribute);
customMeshRenderer->setGeometry(customGeometry);
//customMeshRenderer->setInstanceCount(listVertices.size()/nbPoint);
if (nbPointGeometry == 1)
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Points);
else if (nbPointGeometry == 2)
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Lines);
else if (nbPointGeometry == 3)
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Triangles);
else
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::LineLoop);
customMeshEntity->addComponent(customMeshRenderer);
customMeshEntity->addComponent(material);
return customMeshEntity;
}
When I call this function, I put both squares into the listVertices variable, so that all my squares are displayed in one draw call.
But the shapes end up connected to each other. Is there a way to avoid that? I'm not using InstanceCount or Divisor because I don't know how they work; I made some tests with them but nothing worked.
Actually, I just found an example of instanced rendering here.
They subclassed QSphereGeometry and performed the instanced rendering in a shader. You could create a RectangleGeometry which holds the vertices for one rectangle and equip it with the same functionality as the InstancedGeometry in the example.
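If you go that route, the Qt3D side boils down to a per-instance attribute plus an instance count. A rough sketch reusing the names from your function, where offsetData and instanceCount are assumptions (a QByteArray holding one QVector3D translation per square, and the number of squares):
// One square's vertices stay in vertexDataBuffer; each instance is moved
// by a per-instance "offset" attribute instead of duplicating vertices.
Qt3DRender::QBuffer *offsetBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
offsetBuffer->setData(offsetData);
Qt3DRender::QAttribute *offsetAttribute = new Qt3DRender::QAttribute();
offsetAttribute->setName("offset"); // must match the attribute name in your shader
offsetAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
offsetAttribute->setBuffer(offsetBuffer);
offsetAttribute->setVertexBaseType(Qt3DRender::QAttribute::Float);
offsetAttribute->setVertexSize(3);
offsetAttribute->setByteStride(3 * sizeof(float));
offsetAttribute->setDivisor(1); // advance once per instance, not per vertex
offsetAttribute->setCount(instanceCount);
customGeometry->addAttribute(offsetAttribute);
customMeshRenderer->setInstanceCount(instanceCount); // e.g. 2 for your two squares
Note that stock materials such as QPhongMaterial ignore custom attributes, so this needs a QMaterial with your own shader that applies "offset", as in the linked example.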
I am trying to display a point cloud, consisting of vertices and colors, with OSG. Displaying a static point cloud is rather easy with this guide.
But I am not able to update such a point cloud. My intention is to create a geometry and attach it to my viewer class once.
This is the mentioned method, which is called once at the beginning.
The OSGWidget strongly depends on this OpenGLWidget-based approach.
void OSGWidget::attachGeometry(osg::ref_ptr<osg::Geometry> geom)
{
osg::Geode* geode = new osg::Geode;
geom->setDataVariance(osg::Object::DYNAMIC);
geom->setUseDisplayList(false);
geom->setUseVertexBufferObjects(true);
bool addDrawSuccess = geode->addDrawable(geom.get()); // Adding Drawable Shape to the geometry node
if (!addDrawSuccess)
{
throw "Adding Drawable failed!";
}
{
osg::StateSet* stateSet = geode->getOrCreateStateSet();
stateSet->setMode(GL_LIGHTING, osg::StateAttribute::OFF);
}
float aspectRatio = static_cast<float>(this->width()) / static_cast<float>(this->height());
// Setting up the camera
osg::Camera* camera = new osg::Camera;
camera->setViewport(0, 0, this->width(), this->height());
camera->setClearColor(osg::Vec4(0.f, 0.f, 0.f, 1.f)); // Kind of Backgroundcolor, clears the buffer and sets the default color (RGBA)
camera->setProjectionMatrixAsPerspective(30.f, aspectRatio, 1.f, 1000.f); // Create perspective projection
camera->setGraphicsContext(graphicsWindow_); // embed
osgViewer::View* view = new osgViewer::View;
view->setCamera(camera); // Set the defined camera
view->setSceneData(geode); // Set the geometry
view->addEventHandler(new osgViewer::StatsHandler);
osgGA::TrackballManipulator* manipulator = new osgGA::TrackballManipulator;
manipulator->setAllowThrow(false);
view->setCameraManipulator(manipulator);
///////////////////////////////////////////////////
// Set the viewer
//////////////////////////////////////////////////
viewer_->addView(view);
viewer_->setThreadingModel(osgViewer::CompositeViewer::SingleThreaded);
viewer_->realize();
this->setFocusPolicy(Qt::StrongFocus);
this->setMinimumSize(100, 100);
this->setMouseTracking(true);
}
After I have 'attached' the geometry, I am trying to update it like this:
void PointCloudViewOSG::processData(DepthDataSet depthData)
{
if (depthData.points()->empty())
{
return; // empty cloud, cannot do anything
}
const DepthDataSet::IndexPtr::element_type& index = *depthData.index();
const size_t nPixel = depthData.points().get()->points.size();
if (depthData.intensity().isValid() && !index.empty() )
{
for (int i = 0; i < nPixel; i++)
{
float x = depthData.points().get()->points[i].x;
float y = depthData.points().get()->points[i].y;
float z = depthData.points().get()->points[i].z;
m_vertices->push_back(osg::Vec3(x, y, z));
// 32 bit integer variable containing the rgb (8 bit per channel) value
uint32_t rgb_val_;
memcpy(&rgb_val_, &(depthData.points().get()->points[i].rgb), sizeof(uint32_t));
uint32_t red, green, blue;
blue = rgb_val_ & 0x000000ff;
rgb_val_ = rgb_val_ >> 8;
green = rgb_val_ & 0x000000ff;
rgb_val_ = rgb_val_ >> 8;
red = rgb_val_ & 0x000000ff;
m_colors->push_back(
osg::Vec4f((float)red / 255.0f,
(float)green / 255.0f,
(float)blue / 255.0f,
1.0f)
);
}
m_geometry->setVertexArray(m_vertices.get());
m_geometry->setColorArray(m_colors.get());
m_geometry->setColorBinding(osg::Geometry::BIND_PER_VERTEX);
m_geometry->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::POINTS, 0, m_vertices->size()));
}
}
My guess is that addPrimitiveSet(...) should not be called every time I update the geometry.
Or could it be the attachment of the geometry, so that I have to reattach it every time?
The Point Cloud Library (PCL) is unfortunately not an alternative because of some incompatibilities with my application.
Update: When I am reattaching the geometry to the OSGWidget class,
calling
this->attachGeometry(m_geometry)
after
m_geometry->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::POINTS, 0, m_vertices->size()));
I get my point cloud visible, but this procedure is definitely wrong since I am losing way too much performance and the display driver crashes.
You need to set the array and add the primitive set only once, after that you can update the vertices like this:
osg::Vec3Array* vx = static_cast<osg::Vec3Array*>(m_vertices.get());
for (int i = 0; i < nPixel; i++)
{
float x, y, z;
// fill with your data...
(*vx)[i].set(x, y, z);
}
m_vertices->dirty();
The same goes for colors and other arrays.
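For example, for the color array (assuming m_colors is an osg::ref_ptr<osg::Vec4Array> and this runs inside the same pixel loop):
// Same pattern as the vertices: mutate in place, then mark dirty.
osg::Vec4Array* cl = static_cast<osg::Vec4Array*>(m_colors.get());
(*cl)[i].set(red / 255.0f, green / 255.0f, blue / 255.0f, 1.0f);
m_colors->dirty();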
As you're using VBO, you don't need to call dirtyDisplayList()
If instead you need to recompute the bounding box of the geometry, call
m_geometry->dirtyBound()
In case the number of points changes between updates, you can push new vertices into the array if its size is too small, and update the PrimitiveSet count like this:
osg::DrawArrays* drawArrays = static_cast<osg::DrawArrays*>(m_geometry->getPrimitiveSet(0));
drawArrays->setCount(nPixel);
drawArrays->dirty();
rickvikings solution works; I only had one issue (OSG 3.6.1 on OSX):
I had to modify the m_vertices array directly, because OSG would crash if I used the static_cast method above to modify the vertices array:
osg::Vec3Array* vx = static_cast<osg::Vec3Array*>(m_vertices.get());
For some reason OSG would not create a buffer object in the vertices array class if using the static_cast approach.
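In other words, what worked for me was writing through the member directly (assuming m_vertices is an osg::ref_ptr<osg::Vec3Array>):
// No intermediate raw-pointer cast; index the array through the ref_ptr.
(*m_vertices)[i].set(x, y, z);
m_vertices->dirty();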
I'm sure there is an answer to this on the web but I can't find it.
I'm importing a scene from Blender that has multiple meshes, into OpenTK.
The library I'm using to import is Assimp-net, and the file format is Collada (.dae).
I have created a spaceship with multiple parts, each part being a mesh.
Now when I import and draw, the geometry of the objects looks fine and the materials work as expected. However, the different parts are not rotated, scaled, or translated as they appear in Blender: the parts are not connected, and some appear larger or smaller than they should, in the wrong place, etc.
Is there a setting I'm missing when I export from Blender, or is there some Assimp member/function I can use to transform the meshes before I render them?
Importing the file:
string filename = @"C:\Path\ship.dae";
Scene ship;
//Create a new importer
AssimpImporter importer = new AssimpImporter();
//This is how we add a configuration (each config is its own class)
NormalSmoothingAngleConfig config = new NormalSmoothingAngleConfig(66.0f);
importer.SetConfig(config);
//Import the model
ship = importer.ImportFile(filename, PostProcessPreset.TargetRealTimeMaximumQuality);
//End of example
importer.Dispose();
Drawing the meshes (entire "RenderFrame" event handler in OpenTK):
// Clear color/depth buffers
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
// Define world space
GL.MatrixMode(MatrixMode.Projection);
GL.LoadIdentity();
GL.Ortho(-15.0, 15.0, -15.0, 15.0, 15.0, -15.0);
// Rotate around X and Y axes for better viewing
rotateX(xrot);
rotateY(yrot);
GL.Enable(EnableCap.ColorMaterial);
var rootnode = wes10.RootNode; // wes10 is the imported Scene (cf. "ship" in the import snippet)
foreach (Node node in rootnode.Children)
{
//for each node, do
GL.MatrixMode(MatrixMode.Modelview); //ensure your current matrix is the model matrix.
GL.PushMatrix(); //save current model matrix so you can undo next transformations;
var meshIndices = node.MeshIndices;
if (meshIndices == null)
continue;
else
{
Matrix4d convertedTransform = new Matrix4d();
getConvertedMatrix(node.Transform, ref convertedTransform);
GL.MultMatrix(ref convertedTransform);
GL.Begin(BeginMode.Triangles);
foreach (uint i in meshIndices)
{
Mesh mesh = wes10.Meshes[i];
Material mat = wes10.Materials[mesh.MaterialIndex];
// Material setup
var spec_color = mat.ColorSpecular;
var amb_color = mat.ColorAmbient;
var diff_color = mat.ColorDiffuse;
float[] mat_specular = { spec_color.R, spec_color.G, spec_color.B, spec_color.A };
float[] mat_ambient = { amb_color.R, amb_color.G, amb_color.B, amb_color.A };
float[] mat_diffuse = { diff_color.R, diff_color.G, diff_color.B, diff_color.A };
float[] mat_shininess = { 0.0f };
GL.Material(MaterialFace.FrontAndBack, MaterialParameter.Specular, mat_specular);
GL.Material(MaterialFace.FrontAndBack, MaterialParameter.Ambient, mat_ambient);
GL.Material(MaterialFace.FrontAndBack, MaterialParameter.Diffuse, mat_diffuse);
GL.Material(MaterialFace.FrontAndBack, MaterialParameter.Shininess, mat_shininess);
foreach (Face face in mesh.Faces)
{
foreach (uint indice in face.Indices)
{
var normal = mesh.Normals[indice];
var pos = mesh.Vertices[indice];
//var tex = mesh.GetTextureCoords(0)[v];
//GL.TexCoord2(tex.X, tex.Y);
GL.Normal3(normal.X, normal.Y, normal.Z);
GL.Vertex3(pos.X, pos.Y, pos.Z);
}
}
}
GL.End(); // close the Triangles batch before the matrix pop
}
GL.PopMatrix();
}
game.SwapBuffers();
Updated to use suggestions.
In the C example, there is a transformation matrix per node:
aiMultiplyMatrix4(trafo,&nd->mTransformation);
Check this: Data structure, scene graph.
If you don't know what to do with that matrix, check this to learn about the matrix stack. (Be aware that modern OpenGL recommends implementing your own transformation matrices.)
Globally, you need the following steps for rendering (read the C example for details):
//for each node, do
glMatrixMode (GL_MODELVIEW); //ensure your current matrix is the model matrix.
glPushMatrix (); //save current model matrix so you can undo next transformations;
glMultMatrixf(Transformation);//apply your node matrix
//render your node, in your example it's surely a mesh
glPopMatrix (); //restore model matrix
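Also be aware that nodes form a tree: in the C example, the render function applies the node's matrix and then recurses into nd->mChildren, so children inherit their parent's transformation. A sketch of that recursion (C++-flavoured, mirroring the C example; the actual drawing is elided):
// Apply this node's transform, draw its meshes, then recurse so that
// children inherit the accumulated matrix.
void renderNode(const aiScene *scene, const aiNode *node)
{
    glPushMatrix();
    aiMatrix4x4 m = node->mTransformation;
    m.Transpose(); // OpenGL expects column-major order
    glMultMatrixf(reinterpret_cast<float *>(&m));
    // ... draw node->mMeshes here, as in the steps above ...
    for (unsigned int i = 0; i < node->mNumChildren; ++i)
        renderNode(scene, node->mChildren[i]);
    glPopMatrix();
}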
I'm trying to create a 2D background for my ogre scene that renders the camera frames for the QCAR SDK. This is on an iPad with iOS 6.
At the moment I'm retrieving the pixel data like so in renderFrameQCAR:
const QCAR::Image *image = camFrame.getImage(1);
if(image) {
pixels = (unsigned char *)image->getPixels();
}
This returns pixels in the RGB888 format; I then pass them to my Ogre scene in the renderOgre() function:
if(isUpdated)
scene.setCameraFrame(pixels);
scene.m_pRoot->renderOneFrame();
The setCameraFrame(pixels) function consists of:
void CarScene::setCameraFrame(const unsigned char *pixels)
{
HardwarePixelBufferSharedPtr pBuffer = m_pBackgroundTexture->getBuffer();
pBuffer->lock(HardwareBuffer::HBL_DISCARD);
const PixelBox& pBox = pBuffer->getCurrentLock();
PixelBox *tmp = new PixelBox(screenWidth, screenHeight, 0, PF_R8G8B8, &pixels);
pBuffer->blit(pBuffer, *tmp, pBox);
pBuffer->unlock();
delete tmp;
}
In this function I'm attempting to create a new PixelBox, copy the pixels into it, and then copy that over to the pixel buffer.
When I first create my Ogre3D scene, I set up the m_pBackgroundTexture & background rect2d like so:
void CarScene::createBackground()
{
m_pBackgroundTexture = TextureManager::getSingleton().createManual("DynamicTexture", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, TEX_TYPE_2D, m_pViewport->getActualWidth(), m_pViewport->getActualHeight(), 0, PF_R8G8B8, TU_DYNAMIC_WRITE_ONLY_DISCARDABLE);
m_pBackgroundMaterial = MaterialManager::getSingleton().create("Background", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);
m_pBackgroundMaterial->getTechnique(0)->getPass(0)->createTextureUnitState("DynamicTexture");
m_pBackgroundMaterial->getTechnique(0)->getPass(0)->setSceneBlending(SBT_TRANSPARENT_ALPHA);
m_pBackgroundMaterial->getTechnique(0)->getPass(0)->setDepthCheckEnabled(false);
m_pBackgroundMaterial->getTechnique(0)->getPass(0)->setDepthWriteEnabled(false);
m_pBackgroundMaterial->getTechnique(0)->getPass(0)->setLightingEnabled(false);
m_pBackgroundRect = new Rectangle2D(true);
m_pBackgroundRect->setCorners(-1.0, 1.0, 1.0, -1.0);
m_pBackgroundRect->setMaterial("Background");
m_pBackgroundRect->setRenderQueueGroup(RENDER_QUEUE_BACKGROUND);
AxisAlignedBox aabInf;
aabInf.setInfinite();
m_pBackgroundRect->setBoundingBox(aabInf);
SceneNode* node = m_pSceneManager->getRootSceneNode()->createChildSceneNode();
node->attachObject(m_pBackgroundRect);
}
After this, all I get is a white background with no texture, and I have no idea why it is not displaying the output! My goal is just to have the camera feed rendering in the background so I can project my 3D model onto it.
Thanks,
Harry.