CImg image is colorless - C++

I am currently refining a function in my basic level editor program that lets me save the maps I create. It spits out a .bmp image of the map, using a library I've just discovered called CImg, which I know next to nothing about. Everything seems to work, but the resulting .bmp is not colored; it comes out in shades of gray. Since I know basically nothing about the library, I would appreciate some help figuring out what the problem could be.
Here's the save function:
void Map::Save() {
Vertex top_left_most, top_right_most, bottom_left_most;
int img_w = 0, img_h = 0;
std::vector<std::pair<GLuint, GLuint>>::iterator tl = bufferIDs.begin(); //This little block gives the _most variables valid starting vals
glBindBuffer(GL_ARRAY_BUFFER, tl->second);
glGetBufferSubData(GL_ARRAY_BUFFER, sizeof(TextureCoord), sizeof(Vertex), &top_left_most);
top_right_most = bottom_left_most = top_left_most;
for (auto i = bufferIDs.begin(); i != bufferIDs.end(); ++i) { //SEEKS TOP LEFT MOST TILE ON MAP
Vertex current_coord;
glBindBuffer(GL_ARRAY_BUFFER, i->second);
glGetBufferSubData(GL_ARRAY_BUFFER, sizeof(TextureCoord), sizeof(Vertex), &current_coord);
if ((current_coord.x < top_left_most.x && current_coord.y < top_left_most.y) ||
(current_coord.x == top_left_most.x && current_coord.y < top_left_most.y) ||
(current_coord.x < top_left_most.x && current_coord.y == top_left_most.y)) {
top_left_most = current_coord;
}
}
for (auto i = bufferIDs.begin(); i != bufferIDs.end(); ++i) { //SEEKS TOP RIGHT MOST TILE ON MAP
Vertex current_coord;
glBindBuffer(GL_ARRAY_BUFFER, i->second);
glGetBufferSubData(GL_ARRAY_BUFFER, sizeof(TextureCoord), sizeof(Vertex), &current_coord);
if ((current_coord.x > top_right_most.x && current_coord.y < top_right_most.y) ||
(current_coord.x == top_right_most.x && current_coord.y < top_right_most.y) ||
(current_coord.x > top_right_most.x && current_coord.y == top_right_most.y)) {
top_right_most = current_coord;
}
}
for (auto i = bufferIDs.begin(); i != bufferIDs.end(); ++i) { //SEEKS BOTTOM LEFT MOST TILE ON MAP
Vertex current_coord;
glBindBuffer(GL_ARRAY_BUFFER, i->second);
glGetBufferSubData(GL_ARRAY_BUFFER, sizeof(TextureCoord), sizeof(Vertex), &current_coord);
if ((current_coord.x < bottom_left_most.x && current_coord.y > bottom_left_most.y) ||
(current_coord.x == bottom_left_most.x && current_coord.y > bottom_left_most.y) ||
(current_coord.x < bottom_left_most.x && current_coord.y == bottom_left_most.y)) {
bottom_left_most = current_coord;
}
}
img_w = (top_right_most.x + 64) - top_left_most.x; //Calculating image dimensions for the buffer
img_h = (bottom_left_most.y + 64) - top_left_most.y;
GLuint *image = new GLuint[img_w * img_h]; //Creating the image buffer
int int_start_x = 0; //start_x and y that will be used in buffer pointer positioning computations
int int_start_y = 0;
//these nested fors fill the buffer
for (GLfloat start_y = top_left_most.y; start_y != bottom_left_most.y + 64; start_y += 64) {
for (GLfloat start_x = top_left_most.x; start_x != top_right_most.x + 64; start_x += 64) {
bool in_map = false;
std::vector<std::pair<GLuint, GLuint>>::iterator valid_tile;
for (auto i = bufferIDs.begin(); i != bufferIDs.end(); ++i) { //This for checks to see if tile corresponding to start_x & y is present in map
Vertex current_tile_pos;
glBindBuffer(GL_ARRAY_BUFFER, i->second);
glGetBufferSubData(GL_ARRAY_BUFFER, sizeof(TextureCoord), sizeof(Vertex), &current_tile_pos);
if (current_tile_pos.x == start_x && current_tile_pos.y == start_y) {
in_map = true;
valid_tile = i;
break;
}
}
GLuint *imagepos = image; //Repositioning the pointer into the final image's buffer
imagepos += int_start_x + (int_start_y * img_w);
if (in_map) { //if in map, that tile's texture is used to fill the corresponding part of the image buffer
GLuint *texture = new GLuint[64 * 64];
glBindTexture(GL_TEXTURE_2D, valid_tile->first);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, texture);
GLuint *texturepos = texture;
for (GLuint ypos = 0; ypos != 64; ++ypos) {
std::memcpy(imagepos, texturepos, 64 * 4);
texturepos += 64;
imagepos += img_w;
}
if (texture)
delete[] texture;
}
else { //otherwise, a default all-black array is used to fill the corresponding untiled part of the image buffer
GLuint *black_buffer = new GLuint[64 * 64];
GLuint *blackpos = black_buffer;
GLuint solid_black;
char *p = (char *)&solid_black;
p[0] = 0;
p[1] = 0;
p[2] = 0;
p[3] = 255;
for (GLuint i = 0; i != 64 * 64; ++i) {
black_buffer[i] = solid_black;
}
for (GLuint ypos = 0; ypos != 64; ++ypos) {
std::memcpy(imagepos, blackpos, 64 * 4);
blackpos += 64;
imagepos += img_w;
}
if (black_buffer)
delete[] black_buffer;
}
int_start_x += 64;
}
int_start_x = 0;
int_start_y += 64;
}
cimg_library::CImg<GLuint> final_image(image, img_w, img_h); //no color!!
final_image.save_bmp("map.bmp");
if (image)
delete[] image;
}
In case some explanation would be helpful, Vertex is a simple struct of two GLfloats (as is TextureCoord), bufferIDs is an std::vector of std::pairs of GLuints, the first representing a texture ID, and the second representing a VBO ID.
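For reference, a minimal sketch of what those declarations might look like (the exact field names are my assumption; the save code above only relies on x and y):
struct Vertex { GLfloat x, y; };
struct TextureCoord { GLfloat u, v; };
std::vector<std::pair<GLuint, GLuint>> bufferIDs; // first = texture ID, second = VBO ID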
Here are the requested sample images:
what the image should look like (this is in monochrome)
Same exact image as above, but created using the reinterpret_cast method

Your line
cimg_library::CImg<GLuint> final_image(image, img_w, img_h);
is wrong if you are expecting a colour image, because it creates a single-channel image. You need to pass a channel count of 3 (after a depth of 1) so the image has three channels - one for Red, one for Green and one for Blue.
Also, your data is stored in GLuints, which means that a 4x2 pixel image will be stored like this, i.e. band-interleaved-by-pixel:
RGBA RGBA RGBA RGBA
RGBA RGBA RGBA RGBA
whereas CImg wants to store that in a band-interleaved-by-plane fashion:
RRRRRRRR
GGGGGGGG
BBBBBBBB
AAAAAAAA
This link explains the layout of CImg memory buffers.
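One possible way (a sketch, not your original Save() code) to get a colour BMP from the buffer you already have is to reinterpret the packed GLuint pixels as bytes and let CImg deinterleave them; image, img_w and img_h below are assumed to be the buffer and dimensions from your function:
#include "CImg.h"
void save_rgba_as_bmp(const GLuint *image, int img_w, int img_h)
{
    // View the packed 32-bit RGBA pixels as raw bytes: R,G,B,A, R,G,B,A, ...
    const unsigned char *bytes = reinterpret_cast<const unsigned char*>(image);
    // Build a CImg whose x-axis runs over the 4 interleaved channels,
    // then permute the axes so the channels become CImg's colour planes.
    cimg_library::CImg<unsigned char> planar(bytes, 4, img_w, img_h, 1);
    planar.permute_axes("yzcx"); // now img_w x img_h x 1 x 4, i.e. planar RGBA
    planar.channels(0, 2).save_bmp("map.bmp"); // BMP has no alpha, keep only R, G, B
}
permute_axes("yzcx") is the usual CImg idiom for converting an interleaved buffer to CImg's planar layout; the alternative is to allocate a CImg<unsigned char>(img_w, img_h, 1, 3) and copy the channels pixel by pixel.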

Related

Can a KTX image file be a cubemap array?

Is it valid for a KTX image to be a cubemap array, or is that not a thing?
I have some code that I'm currently using for uploading the data from a KTX file to the GPU. Currently, the code works for a regular 2D image, a cubemap, and a texture array. However, it would not support a KTX image that is a cubemap array, if that is a thing.
If it is possible, what is the code below missing to accomplish that?
uint32_t offset = 0;
for (uint32_t layer = 0; layer < layers; layer++) {
for (uint32_t face = 0; face < faces; face++) {
for (uint32_t level = 0; level < mipLevels; level++) {
offset = tex->GetImageOffset(layer, face, level);
vk::BufferImageCopy bufferCopyRegion = {};
bufferCopyRegion.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
bufferCopyRegion.imageSubresource.mipLevel = level;
bufferCopyRegion.imageSubresource.baseArrayLayer = (faces == 6 ? face : layer); // TexArray or Cubemap, not both.
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.width = width >> level;
bufferCopyRegion.imageExtent.height = height >> level;
bufferCopyRegion.imageExtent.depth = 1;
bufferCopyRegion.bufferOffset = offset;
bufferCopyRegions.push_back(bufferCopyRegion);
}
}
}
The Vulkan command to transfer the image:
// std::vector<vk::BufferImageCopy> regions;
cmdBuf->copyBufferToImage(srcBufferHandle, destImageHandle,
vk::ImageLayout::eTransferDstOptimal, uint32_t(regions.size()), regions.data());
Yes, KTX also supports cube map arrays (see the KTX specification). Those are stored using layers.
The Vulkan spec states the following on how cube maps are stored in a cube map array:
For cube arrays, each set of six sequential layers is a single cube, so the number of cube maps in a cube map array view is layerCount / 6, and image array layer (baseArrayLayer + i) is face index (i mod 6) of cube i / 6.
So you need to change the baseArrayLayer of your buffer copy region accordingly.
Sample code:
// Setup buffer copy regions to get the data from the ktx file to your own image
for (uint32_t layer = 0; layer < ktxTexture->numLayers; layer++) {
for (uint32_t face = 0; face < 6; face++) {
for (uint32_t level = 0; level < ktxTexture->numLevels; level++) {
ktx_size_t offset;
KTX_error_code ret = ktxTexture_GetImageOffset(ktxTexture, level, layer, face, &offset);
VkBufferImageCopy bufferCopyRegion = {};
bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
bufferCopyRegion.imageSubresource.mipLevel = level;
bufferCopyRegion.imageSubresource.baseArrayLayer = layer * 6 + face;
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.width = ktxTexture->baseWidth >> level;
bufferCopyRegion.imageExtent.height = ktxTexture->baseHeight >> level;
bufferCopyRegion.imageExtent.depth = 1;
bufferCopyRegion.bufferOffset = offset;
bufferCopyRegions.push_back(bufferCopyRegion);
}
}
}
// Create the image view for a cube map array
VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo();
view.viewType = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
view.format = format;
view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
view.subresourceRange.layerCount = 6 * cubeMap.layerCount;
view.subresourceRange.levelCount = cubeMap.mipLevels;
view.image = cubeMap.image;
vkCreateImageView(device, &view, nullptr, &cubeMap.view);

Ray Tracing using Nvidia Optix with Open Asset Import Library (assimp) - rendering multiple meshes

I'm trying to combine the versatility of Open Asset Import Library (reading in a variety of 3D model filetypes) with NVidia Optix ray tracing to render the models.
So far, it is working whenever the model I'm rendering is made up of a single mesh. When I try to render a file with more than one mesh, I get only partial results. I can't narrow down where the issue is, looking for some insight. Relevant code here:
Loading a file using assimp importer and creating the optix buffers:
int loadAsset(const char* path)
{
Assimp::Importer importer;
scene = importer.ReadFile(
path,
aiProcess_Triangulate
//| aiProcess_JoinIdenticalVertices
| aiProcess_SortByPType
| aiProcess_ValidateDataStructure
| aiProcess_SplitLargeMeshes
| aiProcess_FixInfacingNormals
);
if (scene) {
getBoundingBox(&scene_min, &scene_max);
scene_center.x = (scene_min.x + scene_max.x) / 2.0f;
scene_center.y = (scene_min.y + scene_max.y) / 2.0f;
scene_center.z = (scene_min.z + scene_max.z) / 2.0f;
float3 optixMin = { scene_min.x, scene_min.y, scene_min.z };
float3 optixMax = { scene_max.x, scene_max.y, scene_max.z };
aabb.set(optixMin, optixMax);
unsigned int numVerts = 0;
unsigned int numFaces = 0;
if (scene->mNumMeshes > 0) {
printf("Number of meshes: %d\n", scene->mNumMeshes);
// get the running total number of vertices & faces for all meshes
for (unsigned int i = 0; i < scene->mNumMeshes; i++) {
numVerts += scene->mMeshes[i]->mNumVertices;
numFaces += scene->mMeshes[i]->mNumFaces;
}
printf("Found %d Vertices and %d Faces\n", numVerts, numFaces);
// set up buffers
optix::Buffer vertices = context->createBuffer(RT_BUFFER_INPUT, RT_FORMAT_FLOAT3, numVerts);
optix::Buffer normals = context->createBuffer(RT_BUFFER_INPUT, RT_FORMAT_FLOAT3, numVerts);
optix::Buffer faces = context->createBuffer(RT_BUFFER_INPUT, RT_FORMAT_UNSIGNED_INT3, numFaces);
optix::Buffer materials = context->createBuffer(RT_BUFFER_INPUT, RT_FORMAT_UNSIGNED_INT, numVerts);
// unused buffer
Buffer tbuffer = context->createBuffer(RT_BUFFER_INPUT, RT_FORMAT_FLOAT2, 0);
// create material
std::string defaultPtxPath = "C:\\ProgramData\\NVIDIA Corporation\\OptiX SDK 4.1.0\\SDK\\build\\lib\\ptx\\";
Program phong_ch = context->createProgramFromPTXFile(defaultPtxPath + "optixPrimitiveIndexOffsets_generated_phong.cu.ptx", "closest_hit_radiance");
Program phong_ah = context->createProgramFromPTXFile(defaultPtxPath + "optixPrimitiveIndexOffsets_generated_phong.cu.ptx", "any_hit_shadow");
Material matl = context->createMaterial();
matl->setClosestHitProgram(0, phong_ch);
matl->setAnyHitProgram(1, phong_ah);
matl["Kd"]->setFloat(0.7f, 0.7f, 0.7f);
matl["Ka"]->setFloat(1.0f, 1.0f, 1.0f);
matl["Kr"]->setFloat(0.0f, 0.0f, 0.0f);
matl["phong_exp"]->setFloat(1.0f);
std::string triangle_mesh_ptx_path(ptxPath("triangle_mesh.cu"));
Program meshIntersectProgram = context->createProgramFromPTXFile(triangle_mesh_ptx_path, "mesh_intersect");
Program meshBboxProgram = context->createProgramFromPTXFile(triangle_mesh_ptx_path, "mesh_bounds");
optix::float3 *vertexMap = reinterpret_cast<optix::float3*>(vertices->map());
optix::float3 *normalMap = reinterpret_cast<optix::float3*>(normals->map());
optix::uint3 *faceMap = reinterpret_cast<optix::uint3*>(faces->map());
unsigned int *materialsMap = static_cast<unsigned int*>(materials->map());
context["vertex_buffer"]->setBuffer(vertices);
context["normal_buffer"]->setBuffer(normals);
context["index_buffer"]->setBuffer(faces);
context["texcoord_buffer"]->setBuffer(tbuffer);
context["material_buffer"]->setBuffer(materials);
Group group = createSingleGeometryGroup(meshIntersectProgram, meshBboxProgram, vertexMap,
normalMap, faceMap, materialsMap, matl);
context["top_object"]->set(group);
context["top_shadower"]->set(group);
vertices->unmap();
normals->unmap();
faces->unmap();
materials->unmap();
}
return 0;
}
return 1;
}
And the relevant function for creating the geometries and filling the buffers:
Group createSingleGeometryGroup(Program meshIntersectProgram, Program meshBboxProgram, optix::float3 *vertexMap,
optix::float3 *normalMap, optix::uint3 *faceMap, unsigned int *materialsMap, Material matl) {
Group group = context->createGroup();
optix::Acceleration accel = context->createAcceleration("Trbvh");
group->setAcceleration(accel);
std::vector<GeometryInstance> gis;
unsigned int vertexOffset = 0u;
unsigned int faceOffset = 0u;
for (unsigned int m = 0; m < scene->mNumMeshes; m++) {
aiMesh *mesh = scene->mMeshes[m];
if (!mesh->HasPositions()) {
throw std::runtime_error("Mesh contains zero vertex positions");
}
if (!mesh->HasNormals()) {
throw std::runtime_error("Mesh contains zero vertex normals");
}
printf("Mesh #%d\n\tNumVertices: %d\n\tNumFaces: %d\n", m, mesh->mNumVertices, mesh->mNumFaces);
// add points
for (unsigned int i = 0u; i < mesh->mNumVertices; i++) {
aiVector3D pos = mesh->mVertices[i];
aiVector3D norm = mesh->mNormals[i];
vertexMap[i + vertexOffset] = optix::make_float3(pos.x, pos.y, pos.z) + aabb.center();
normalMap[i + vertexOffset] = optix::normalize(optix::make_float3(norm.x, norm.y, norm.z));
materialsMap[i + vertexOffset] = 0u;
}
// add faces
for (unsigned int i = 0u; i < mesh->mNumFaces; i++) {
aiFace face = mesh->mFaces[i];
// add triangles
if (face.mNumIndices == 3) {
faceMap[i + faceOffset] = optix::make_uint3(face.mIndices[0], face.mIndices[1], face.mIndices[2]);
}
else {
printf("face indices != 3\n");
faceMap[i + faceOffset] = optix::make_uint3(-1);
}
}
// create geometry
optix::Geometry geometry = context->createGeometry();
geometry->setPrimitiveCount(mesh->mNumFaces);
geometry->setIntersectionProgram(meshIntersectProgram);
geometry->setBoundingBoxProgram(meshBboxProgram);
geometry->setPrimitiveIndexOffset(faceOffset);
optix::GeometryInstance gi = context->createGeometryInstance(geometry, &matl, &matl + 1);
gis.push_back(gi);
vertexOffset += mesh->mNumVertices;
faceOffset += mesh->mNumFaces;
}
printf("VertexOffset: %d\nFaceOffset: %d\n", vertexOffset, faceOffset);
// add all geometry instances to a geometry group
GeometryGroup gg = context->createGeometryGroup();
gg->setChildCount(static_cast<unsigned int>(gis.size()));
for (unsigned i = 0u; i < gis.size(); i++) {
gg->setChild(i, gis[i]);
}
Acceleration a = context->createAcceleration("Trbvh");
gg->setAcceleration(a);
group->setChildCount(1);
group->setChild(0, gg);
return group;
}
Running the above code on a sample file from assimp (dwarf.x, which contains 2 meshes) yields this result:
You can see that only part of the second mesh (the dwarf's body) is rendered. I tried rendering each mesh separately, one at a time, and they render in full; but when I put them together I get this.
I'm thinking the issue is either with creating the geometry - perhaps I have these lines wrong:
geometry->setPrimitiveCount(mesh->mNumFaces);
geometry->setPrimitiveIndexOffset(faceOffset);
or with the assimp post-processing flags:
scene = importer.ReadFile(
path,
aiProcess_Triangulate
//| aiProcess_JoinIdenticalVertices
| aiProcess_SortByPType
| aiProcess_ValidateDataStructure
| aiProcess_SplitLargeMeshes
| aiProcess_FixInfacingNormals
);
(Note that above I had to comment out JoinIdenticalVertices, because it gave me the horribly wrong result shown below.)
Has anyone been able to successfully combine nvidia optix with open asset import library for rendering files with multiple meshes?
I found a solution, although I'm not sure how optimal it is.
Each mesh still gets its own geometry; however, instead of creating single vertex, index, and normal buffers shared among all geometries, I create separate buffers for each geometry.
Then, instead of
context["vertex_buffer"]->setBuffer(vertices);
context["normal_buffer"]->setBuffer(normals);
context["index_buffer"]->setBuffer(faces);
context["texcoord_buffer"]->setBuffer(tbuffer);
context["material_buffer"]->setBuffer(materials);
I use
geometry["vertex_buffer"]->setBuffer(vertices);
geometry["normal_buffer"]->setBuffer(normals);
geometry["index_buffer"]->setBuffer(faces);
geometry["texcoord_buffer"]->setBuffer(tbuffer);
geometry["material_buffer"]->setBuffer(materials);
The result:

OpenGL drawElements - one extra triangle when using an index array?

I'm generating a terrain from a .bmp file, as a very early precursor for a strategy game. In my code I load the BMP file as an OpenGL texture, then use a double loop to generate coordinates (x, y, redChannel). Then I create indices with another double loop, generating the two triangles of each square between (x, y) and (x+1, y+1). However, when I run the code, I end up with an extra triangle going from the end of one line to the beginning of the next line, which I cannot seem to get rid of. This only happens when I use varied heights and a sufficiently large map, or at least it is not visible otherwise.
This is the code:
void Map::setupVertices(GLsizei* &sizeP, GLint * &vertexArray, GLubyte* &colorArray){
//textureNum is the identifier generated by glGenTextures
GLuint textureNum = loadMap("heightmap.bmp");
//Bind the texture again, and extract the needed data
glBindTexture(GL_TEXTURE_2D, textureNum);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
GLint i = height*width;
GLubyte * imageData = new GLubyte[i+1];
glGetTexImage(GL_TEXTURE_2D,0,GL_RED, GL_UNSIGNED_BYTE, &imageData[0]);
//Setup variables: counter (used for counting vertices)
//VertexArray: pointer to address for storing the vertices. Size: 3 ints per point, width*height points total
//ColorArray: pointer to address for storing the color data. 3 bytes per point.
int counter = 0;
vertexArray = new GLint[height*width*3];
colorArray = new GLubyte[height*width*3];
srand(time(NULL));
//Loop through rows
for (int y = 0; y < height; y++){
//Loop along the line
for (int x=0; x < width; x++){
//Add vertices: x, y, redChannel
//Add colordata: the common-color.
colorArray[counter] = imageData[x+y*width];
vertexArray[counter++] = x;
colorArray[counter] = imageData[x+y*width];
vertexArray[counter++] = y;
colorArray[counter] = imageData[x+y*width];//(float) (rand() % 255);
vertexArray[counter++] = (float)imageData[x+y*width] /255 * maxHeight;
}
}
//"Return" total vertice amount
sizeP = new GLsizei(counter);
}
void Map::setupIndices(GLsizei* &sizeP, GLuint* &indexArray){
//Pointer to location for storing indices. Size: 2 triangles per square, 3 points per triangle, width*height triangles
indexArray = new GLuint[width*height*2*3];
int counter = 0;
//Loop through rows, don't go to top row (because those triangles are to the row below)
for (int y = 0; y < height-1; y++){
//Loop along the line, don't go to last point (those are connected to second last point)
for (int x=0; x < width-1; x++){
//
// TL___TR
// | / |
// LL___LR
int lowerLeft = x + width*y;
int lowerRight = lowerLeft+1;
int topLeft = lowerLeft + width+1;
int topRight = topLeft + 1;
indexArray[counter++] = lowerLeft;
indexArray[counter++] = lowerRight;
indexArray[counter++] = topLeft;
indexArray[counter++] = topLeft;
indexArray[counter++] = lowerRight;
indexArray[counter++] = topRight;
}
}
//"Return" the amount of indices
sizeP = new GLsizei(counter);
}
I eventually draw this with this code:
void drawGL(){
glPushMatrix();
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3,GL_INT,0,mapHeight);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(3,GL_UNSIGNED_BYTE,0,mapcolor);
if (totalIndices != 0x00000000){
glDrawElements(GL_TRIANGLES, *totalIndices, GL_UNSIGNED_INT, indices);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glPopMatrix();
}
Here's a picture of the result:
http://s22.postimg.org/k2qoru3kx/open_GLtriangles.gif
And with only blue lines and black background.
http://s21.postimg.org/5yw8sz5mv/triangle_Error_Blue_Line.gif
There also appears to be one of these going in the other direction as well, at the very edge right, but I'm supposing for now that it may be related to the same issue.
I'd simplify this part:
int lowerLeft = x + width * y;
int lowerRight = (x + 1) + width * y;
int topLeft = x + width * (y + 1);
int topRight = (x + 1) + width * (y + 1);
The problem looks like topLeft has an extra + 1 when it should only have the + width.
This causes the "top" vertices to both be shifted along by one column. You might not notice the offsets within the grid and, as you pointed out, they're not visible until the height changes.
Also, returning new GLsizei(counter) seems a bit roundabout. Why not just pass in a GLsizei& counter?
These might be worth a look too. You can save a fair bit of data using strip primitives for many procedural objects:
Generate a plane with triangle strips
Triangle strip for grids - a construction
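For what it's worth, here is a minimal sketch (my own, not taken from those links) of building a single GL_TRIANGLE_STRIP index list for the same width x height grid, stitching the rows together with degenerate triangles:
#include <vector>
std::vector<GLuint> buildStripIndices(int width, int height){
    std::vector<GLuint> indices;
    for (int y = 0; y < height - 1; y++){
        if (y > 0) // repeat the first index of this row to form degenerate stitching triangles
            indices.push_back(y * width);
        for (int x = 0; x < width; x++){
            indices.push_back(y * width + x);       // bottom vertex
            indices.push_back((y + 1) * width + x); // top vertex
        }
        if (y < height - 2) // repeat the last index of this row
            indices.push_back((y + 1) * width + (width - 1));
    }
    // draw with glDrawElements(GL_TRIANGLE_STRIP, indices.size(), GL_UNSIGNED_INT, indices.data())
    return indices;
}
For a large grid this is roughly a third as many indices as the GL_TRIANGLES version (about 2 per vertex plus 2 per stitched row, versus 6 per quad).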

Texture problems with librocket, Ogre & iOS

We are trying to use librocket http://librocket.com/ together with Ogre http://www.ogre3d.org/. They're both part of gamekit http://code.google.com/p/gamekit/ which I use for this project.
This all works fine together as long as I don't load an image with librocket. As soon as I do that the viewport on the iPad is not fullscreen anymore but small in the lower corner. Like this: http://uploads.undef.ch/machine/ipad.png
I can't make a connection between loading/rendering a texture and resizing of the viewport. And I can't find anything wrong with the RenderInterface. http://uploads.undef.ch/machine/RenderInterfaceOgre3D.cpp
Is there any OpenGL ES command that could have an effect on the active viewport size?
This is the relevant code that loads an image and displays it:
// Called by Rocket when a texture is required by the library.
bool RenderInterfaceOgre3D::LoadTexture(Rocket::Core::TextureHandle& texture_handle, Rocket::Core::Vector2i& texture_dimensions, const Rocket::Core::String& source)
{
Ogre::TextureManager* texture_manager = Ogre::TextureManager::getSingletonPtr();
Ogre::TexturePtr ogre_texture = texture_manager->getByName(Ogre::String(source.CString()));
if (ogre_texture.isNull())
{
ogre_texture = texture_manager->load(Ogre::String(source.CString()),
DEFAULT_ROCKET_RESOURCE_GROUP,
Ogre::TEX_TYPE_2D,
0);
}
if (ogre_texture.isNull())
return false;
texture_dimensions.x = ogre_texture->getWidth();
texture_dimensions.y = ogre_texture->getHeight();
texture_handle = reinterpret_cast<Rocket::Core::TextureHandle>(new RocketOgre3DTexture(ogre_texture));
return true;
}
// Called by Rocket when it wants to render geometry that it does not wish to optimise.
void RenderInterfaceOgre3D::RenderGeometry(Rocket::Core::Vertex* vertices, int num_vertices, int* indices, int num_indices, Rocket::Core::TextureHandle texture, const Rocket::Core::Vector2f& translation)
{
// We've chosen to not support non-compiled geometry in the Ogre3D renderer.
// But if you want, you can uncomment this code, so borders will be shown.
/*
Rocket::Core::CompiledGeometryHandle gh = CompileGeometry(vertices, num_vertices, indices, num_indices, texture);
RenderCompiledGeometry(gh, translation);
ReleaseCompiledGeometry(gh);
*/
}
// Called by Rocket when it wants to compile geometry it believes will be static for the foreseeable future.
Rocket::Core::CompiledGeometryHandle RenderInterfaceOgre3D::CompileGeometry(Rocket::Core::Vertex* vertices, int num_vertices, int* indices, int num_indices, Rocket::Core::TextureHandle texture)
{
RocketOgre3DCompiledGeometry* geometry = new RocketOgre3DCompiledGeometry();
geometry->texture = texture == NULL ? NULL : (RocketOgre3DTexture*) texture;
geometry->render_operation.vertexData = new Ogre::VertexData();
geometry->render_operation.vertexData->vertexStart = 0;
geometry->render_operation.vertexData->vertexCount = num_vertices;
geometry->render_operation.indexData = new Ogre::IndexData();
geometry->render_operation.indexData->indexStart = 0;
geometry->render_operation.indexData->indexCount = num_indices;
geometry->render_operation.operationType = Ogre::RenderOperation::OT_TRIANGLE_LIST;
// Set up the vertex declaration.
Ogre::VertexDeclaration* vertex_declaration = geometry->render_operation.vertexData->vertexDeclaration;
size_t element_offset = 0;
vertex_declaration->addElement(0, element_offset, Ogre::VET_FLOAT3, Ogre::VES_POSITION);
element_offset += Ogre::VertexElement::getTypeSize(Ogre::VET_FLOAT3);
vertex_declaration->addElement(0, element_offset, Ogre::VET_COLOUR, Ogre::VES_DIFFUSE);
element_offset += Ogre::VertexElement::getTypeSize(Ogre::VET_COLOUR);
vertex_declaration->addElement(0, element_offset, Ogre::VET_FLOAT2, Ogre::VES_TEXTURE_COORDINATES);
#if GK_PLATFORM == GK_PLATFORM_APPLE_IOS
// Create the vertex buffer.
Ogre::HardwareVertexBufferSharedPtr vertex_buffer = Ogre::HardwareBufferManager::getSingleton().createVertexBuffer(vertex_declaration->getVertexSize(0), num_vertices, Ogre::HardwareBuffer::HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE,true);
geometry->render_operation.vertexData->vertexBufferBinding->setBinding(0, vertex_buffer);
// Fill the vertex buffer.
RocketOgre3DVertex* ogre_vertices = (RocketOgre3DVertex*) vertex_buffer->lock(0, vertex_buffer->getSizeInBytes(), Ogre::HardwareBuffer::HBL_DISCARD);
#else
Ogre::HardwareVertexBufferSharedPtr vertex_buffer = Ogre::HardwareBufferManager::getSingleton().createVertexBuffer(vertex_declaration->getVertexSize(0), num_vertices, Ogre::HardwareBuffer::HBU_STATIC_WRITE_ONLY);
geometry->render_operation.vertexData->vertexBufferBinding->setBinding(0, vertex_buffer);
// Fill the vertex buffer.
RocketOgre3DVertex* ogre_vertices = (RocketOgre3DVertex*) vertex_buffer->lock(0, vertex_buffer->getSizeInBytes(), Ogre::HardwareBuffer::HBL_NORMAL);
#endif
for (int i = 0; i < num_vertices; ++i)
{
ogre_vertices[i].x = vertices[i].position.x;
ogre_vertices[i].y = vertices[i].position.y;
ogre_vertices[i].z = 0;
Ogre::ColourValue diffuse(vertices[i].colour.red / 255.0f, vertices[i].colour.green / 255.0f, vertices[i].colour.blue / 255.0f, vertices[i].colour.alpha / 255.0f);
render_system->convertColourValue(diffuse, &ogre_vertices[i].diffuse);
ogre_vertices[i].u = vertices[i].tex_coord[0];
ogre_vertices[i].v = vertices[i].tex_coord[1];
}
vertex_buffer->unlock();
#if GK_PLATFORM == GK_PLATFORM_APPLE_IOS
// Create the index buffer.
Ogre::HardwareIndexBufferSharedPtr index_buffer = Ogre::HardwareBufferManager::getSingleton().createIndexBuffer(Ogre::HardwareIndexBuffer::IT_16BIT, num_indices, Ogre::HardwareBuffer::HBU_STATIC_WRITE_ONLY);
geometry->render_operation.indexData->indexBuffer = index_buffer;
geometry->render_operation.useIndexes = true;
#else
Ogre::HardwareIndexBufferSharedPtr index_buffer = Ogre::HardwareBufferManager::getSingleton().createIndexBuffer(Ogre::HardwareIndexBuffer::IT_32BIT, num_indices, Ogre::HardwareBuffer::HBU_STATIC_WRITE_ONLY);
geometry->render_operation.indexData->indexBuffer = index_buffer;
geometry->render_operation.useIndexes = true;
#endif
// Fill the index buffer.
unsigned short * ogre_indices = (unsigned short*)index_buffer->lock(0, index_buffer->getSizeInBytes(), Ogre::HardwareBuffer::HBL_NORMAL);
#if GK_PLATFORM == GK_PLATFORM_APPLE_IOS
//unsigned short short_indices[num_indices];
for(int i=0;i<num_indices;i++)
ogre_indices[i] = indices[i];
//memcpy(ogre_indices, short_indices, sizeof(unsigned short) * num_indices);
#else
memcpy(ogre_indices, indices, sizeof(unsigned int) * num_indices);
#endif
index_buffer->unlock();
return reinterpret_cast<Rocket::Core::CompiledGeometryHandle>(geometry);
}
// Called by Rocket when it wants to render application-compiled geometry.
void RenderInterfaceOgre3D::RenderCompiledGeometry(Rocket::Core::CompiledGeometryHandle geometry, const Rocket::Core::Vector2f& translation)
{
Ogre::Matrix4 transform;
transform.makeTrans(translation.x, translation.y, 0);
render_system->_setWorldMatrix(transform);
render_system = Ogre::Root::getSingleton().getRenderSystem();
RocketOgre3DCompiledGeometry* ogre3d_geometry = (RocketOgre3DCompiledGeometry*) geometry;
if (ogre3d_geometry->texture != NULL)
{
render_system->_setTexture(0, true, ogre3d_geometry->texture->texture);
// Ogre can change the blending modes when textures are disabled - so in case the last render had no texture,
// we need to re-specify them.
render_system->_setTextureBlendMode(0, colour_blend_mode);
render_system->_setTextureBlendMode(0, alpha_blend_mode);
}
else
render_system->_disableTextureUnit(0);
render_system->_render(ogre3d_geometry->render_operation);
}

Trouble fitting depth image to RGB image using Kinect 1.0 SDK

I'm trying to get the Kinect depth camera pixels to overlay onto the RGB camera. I am using the C++ Kinect 1.0 SDK with an Xbox Kinect, OpenCV and trying to use the new "NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution" method.
I have watched the image render itself in slow motion, and it looks as if pixels are being drawn multiple times in one frame. It first draws itself from the top and left borders, then it gets to a point (you can see a 45 degree angle in there) where it starts drawing strangely.
I have been trying to base my code on the C# code written by Adam Smith on the MSDN forums, but no dice. I have stripped out the overlay stuff and just want to draw the normalized depth pixels where they "should" be in the RGB image.
The image on the left is what I'm getting when trying to fit the depth image to RGB space, and the image on the right is the "raw" depth image as I like to see it. I was hoping my method would create an image similar to the one on the right, with slight distortions.
This is the code and object definitions that I have at the moment:
// From initialization
INuiSensor *m_pNuiInstance;
NUI_IMAGE_RESOLUTION m_nuiResolution = NUI_IMAGE_RESOLUTION_640x480;
HANDLE m_pDepthStreamHandle;
IplImage *m_pIplDepthFrame;
IplImage *m_pIplFittedDepthFrame;
m_pIplDepthFrame = cvCreateImage(cvSize(640, 480), 8, 1);
m_pIplFittedDepthFrame = cvCreateImage(cvSize(640, 480), 8, 1);
// Method
IplImage *Kinect::GetRGBFittedDepthFrame() {
static long *pMappedBits = NULL;
if (!pMappedBits) {
pMappedBits = new long[640*480*2];
}
NUI_IMAGE_FRAME pNuiFrame;
NUI_LOCKED_RECT lockedRect;
HRESULT hr = m_pNuiInstance->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &pNuiFrame);
if (FAILED(hr)) {
// return the older frame
return m_pIplFittedDepthFrame;
}
bool hasPlayerData = HasSkeletalEngine(m_pNuiInstance);
INuiFrameTexture *pTexture = pNuiFrame.pFrameTexture;
pTexture->LockRect(0, &lockedRect, NULL, 0);
if (lockedRect.Pitch != 0) {
cvZero(m_pIplFittedDepthFrame);
hr = m_pNuiInstance->NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution(
m_nuiResolution,
NUI_IMAGE_RESOLUTION_640x480,
640 * 480, /* size is previous */ (unsigned short*) lockedRect.pBits,
(640 * 480) * 2, /* size is previous */ pMappedBits);
if (FAILED(hr)) {
return m_pIplFittedDepthFrame;
}
for (int i = 0; i < lockedRect.size; i++) {
unsigned char* pBuf = (unsigned char*) lockedRect.pBits + i;
unsigned short* pBufS = (unsigned short*) pBuf;
unsigned short depth = hasPlayerData ? ((*pBufS) & 0xfff8) >> 3 : ((*pBufS) & 0xffff);
unsigned char intensity = depth > 0 ? 255 - (unsigned char) (256 * depth / 0x0fff) : 0;
long
x = pMappedBits[i], // tried with *(pMappedBits + (i * 2)),
y = pMappedBits[i + 1]; // tried with *(pMappedBits + (i * 2) + 1);
if (x >= 0 && x < m_pIplFittedDepthFrame->width && y >= 0 && y < m_pIplFittedDepthFrame->height) {
m_pIplFittedDepthFrame->imageData[x + y * m_pIplFittedDepthFrame->widthStep] = intensity;
}
}
}
pTexture->UnlockRect(0);
m_pNuiInstance->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &pNuiFrame);
return(m_pIplFittedDepthFrame);
}
Thanks
I found that the problem was that the loop
for (int i = 0; i < lockedRect.size; i++) {
// code
}
was iterating on a per-byte basis, not on a per-short (2-byte) basis. Since lockedRect.size returns the number of bytes, the fix was simply changing the increment to i += 2; even better is changing it to sizeof(short), like so:
for (int i = 0; i < lockedRect.size; i += sizeof(short)) {
// code
}
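To spell that out, here is a sketch of the corrected loop body, reusing the variables from GetRGBFittedDepthFrame() above. With a 2-byte stride, i / 2 is the depth-pixel index, and since the coordinate array stores two longs (x, then y) per depth pixel, pMappedBits[i] and pMappedBits[i + 1] are exactly that pixel's colour-space coordinates:
for (int i = 0; i < lockedRect.size; i += sizeof(short)) {
    // Read this depth pixel (2 bytes) straight out of the locked depth frame
    unsigned short *pBufS = (unsigned short*) ((unsigned char*) lockedRect.pBits + i);
    unsigned short depth = hasPlayerData ? ((*pBufS) & 0xfff8) >> 3 : ((*pBufS) & 0xffff);
    unsigned char intensity = depth > 0 ? 255 - (unsigned char) (256 * depth / 0x0fff) : 0;
    long x = pMappedBits[i];     // colour-space x for pixel i / 2
    long y = pMappedBits[i + 1]; // colour-space y for pixel i / 2
    if (x >= 0 && x < m_pIplFittedDepthFrame->width && y >= 0 && y < m_pIplFittedDepthFrame->height) {
        m_pIplFittedDepthFrame->imageData[x + y * m_pIplFittedDepthFrame->widthStep] = intensity;
    }
}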