Extracting raw data from template for use in CUDA - C++

The following code is a snippet from the PCL (Point Cloud Library). It calculates the integral sum of an image.
template <class DataType, unsigned Dimension> class IntegralImage2D
{
public:
    static const unsigned dim_fst = Dimension;
    typedef cv::Vec<typename TypeTraits<DataType>::IntegralType, dim_fst> FirstType;
    std::vector<FirstType> img_fst;

    // ... lots of methods missing here that actually calculate the integral sum

    /** \brief Compute the first order sum within a given rectangle
      * \param[in] start_x x position of rectangle
      * \param[in] start_y y position of rectangle
      * \param[in] width width of rectangle
      * \param[in] height height of rectangle
      */
    inline FirstType getFirstOrderSum(unsigned start_x, unsigned start_y, unsigned width, unsigned height) const
    {
        // wdt is the image width (a member defined in the omitted code)
        const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
        const unsigned upper_right_idx = upper_left_idx + width;
        const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
        const unsigned lower_right_idx = lower_left_idx + width;
        return (img_fst[lower_right_idx] + img_fst[upper_left_idx]
              - img_fst[upper_right_idx] - img_fst[lower_left_idx]);
    }
};
Currently the results are obtained using the following code:
IntegralImage2D<float,3> iim_xyz;
IntegralImage2D<float, 3>::FirstType fo_elements;
IntegralImage2D<float, 3>::SecondType so_elements;
fo_elements = iim_xyz.getFirstOrderSum(pos_x - rec_wdt_2, pos_y - rec_hgt_2, rec_wdt, rec_hgt);
so_elements = iim_xyz.getSecondOrderSum(pos_x - rec_wdt_2, pos_y - rec_hgt_2, rec_wdt, rec_hgt);
However, I'm trying to parallelise the code (i.e. write getFirstOrderSum as a CUDA device function). Since CUDA doesn't recognise these FirstType and SecondType objects (or any OpenCV objects, for that matter), I'm struggling (I'm new to C++) to extract the raw data from the template.
If possible, I would like to cast the img_fst object to some kind of vector or array that I can allocate and pass to the CUDA kernel.
It seems img_fst is of type std::vector<cv::Matx<double, 3, 1>> (cv::Vec<double, 3> derives from cv::Matx<double, 3, 1>).

As it turns out, you can pass the raw data just as you would with a normal vector.
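A quick sanity check (a sketch of mine; it relies on std::vector's contiguous storage and on cv::Vec being a plain wrapper around a fixed-size val array, with no virtual functions):

// Sketch: compile-time check that cv::Vec<double, 3> is exactly three packed
// doubles, so the vector's buffer can be copied to the device byte-for-byte.
static_assert(sizeof(cv::Vec<double, 3>) == 3 * sizeof(double),
              "cv::Vec<double,3> is expected to be three packed doubles");

The copy itself then looks like this: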
void computation(ps::IntegralImage2D<float, 3> iim_xyz)
{
    cv::Vec<double, 3>* d_img_fst = 0;
    cudaErrorCheck(cudaMalloc((void**)&d_img_fst,
                              sizeof(cv::Vec<double, 3>) * iim_xyz.img_fst.size()));
    cudaErrorCheck(cudaMemcpy(d_img_fst, &iim_xyz.img_fst[0],
                              sizeof(cv::Vec<double, 3>) * iim_xyz.img_fst.size(),
                              cudaMemcpyHostToDevice));
    // ...
}
__device__ double* getFirstOrderSum(unsigned start_x, unsigned start_y,
                                    unsigned width, unsigned height,
                                    int wdt, cv::Vec<double, 3>* img_fst)
{
    const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
    const unsigned upper_right_idx = upper_left_idx + width;
    const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
    const unsigned lower_right_idx = lower_left_idx + width;

    double* result = new double[3]; // device-side heap allocation
    result[0] = img_fst[lower_right_idx].val[0] + img_fst[upper_left_idx].val[0]
              - img_fst[upper_right_idx].val[0] - img_fst[lower_left_idx].val[0];
    result[1] = img_fst[lower_right_idx].val[1] + img_fst[upper_left_idx].val[1]
              - img_fst[upper_right_idx].val[1] - img_fst[lower_left_idx].val[1];
    result[2] = img_fst[lower_right_idx].val[2] + img_fst[upper_left_idx].val[2]
              - img_fst[upper_right_idx].val[2] - img_fst[lower_left_idx].val[2];
    return result; // the caller must delete[] this pointer, or it leaks device heap memory
}
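Note that the device-side new above allocates from the device heap once per call and must be matched with a delete[] in device code. A minimal sketch (mine, untested) of an alternative that avoids per-thread heap traffic is to have the caller provide the 3-element output slot:

__device__ void getFirstOrderSumInto(unsigned start_x, unsigned start_y,
                                     unsigned width, unsigned height, int wdt,
                                     const cv::Vec<double, 3>* img_fst,
                                     double* out) // points at 3 doubles owned by the caller
{
    const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
    const unsigned upper_right_idx = upper_left_idx + width;
    const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
    const unsigned lower_right_idx = lower_left_idx + width;
    for (int c = 0; c < 3; ++c)
        out[c] = img_fst[lower_right_idx].val[c] + img_fst[upper_left_idx].val[c]
               - img_fst[upper_right_idx].val[c] - img_fst[lower_left_idx].val[c];
}

The kernel can then allocate one 3-double slot per thread up front (or keep the result in registers), and no per-thread delete[] is needed.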

Related

Image subtraction with CUDA and textures

My goal is to use C++ with CUDA to subtract a dark frame from a raw image. I want to use textures for acceleration. The input images are cv::Mat with the type CV_8UC4 (I use the pointer to the data of the cv::Mat). This is the kernel I came up with, but I have no idea how to eventually subtract the textures from each other:
__global__ void DarkFrameSubtractionKernel(unsigned char* outputImage, size_t pitchOutputImage,
                                           cudaTextureObject_t inputImage, cudaTextureObject_t darkImage,
                                           int width, int height)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const float tx = (x + 0.5f);
    const float ty = (y + 0.5f);

    if (x >= width || y >= height) return;

    uchar4 inputImageTemp = tex2D<uchar4>(inputImage, tx, ty);
    uchar4 darkImageTemp = tex2D<uchar4>(darkImage, tx, ty);
    outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp; // this line will throw an error
}
This is the function that calls the kernel (you can see that I create the textures from unsigned char):
void subtractDarkImage(unsigned char* inputImage, size_t pitchInputImage, unsigned char* outputImage,
                       size_t pitchOutputImage, unsigned char* darkImage, size_t pitchDarkImage,
                       int width, int height, cudaStream_t stream)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.devPtr = inputImage;
    resDesc.res.pitch2D.pitchInBytes = pitchInputImage;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;
    texDesc.addressMode[0] = cudaAddressModeBorder;
    texDesc.addressMode[1] = cudaAddressModeBorder;

    cudaTextureObject_t imageInputTex, imageDarkTex;
    CUDA_CHECK(cudaCreateTextureObject(&imageInputTex, &resDesc, &texDesc, 0));

    resDesc.res.pitch2D.devPtr = darkImage;
    resDesc.res.pitch2D.pitchInBytes = pitchDarkImage;
    CUDA_CHECK(cudaCreateTextureObject(&imageDarkTex, &resDesc, &texDesc, 0));

    dim3 block(32, 8);
    dim3 grid = paddedGrid(block.x, block.y, width, height);

    DarkFrameSubtractionKernel<<<grid, block, 0, stream>>>(reinterpret_cast<uchar4*>(outputImage),
                                                           pitchOutputImage / sizeof(uchar4),
                                                           imageInputTex, imageDarkTex, width, height);

    CUDA_CHECK(cudaDestroyTextureObject(imageInputTex));
    CUDA_CHECK(cudaDestroyTextureObject(imageDarkTex));
}
The code does not compile, as I cannot subtract a uchar4 from another one (in the kernel). Is there an easy way to do the subtraction here?
Help is very much appreciated.

Is there an easy way to do the subtraction here?
There are no arithmetic operators defined for CUDA built-in vector types. If you replace
outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp;
with
uchar4 val;
val.x = inputImageTemp.x - darkImageTemp.x;
val.y = inputImageTemp.y - darkImageTemp.y;
val.z = inputImageTemp.z - darkImageTemp.z;
val.w = inputImageTemp.w - darkImageTemp.w;
outputImage[y * pitchOutputImage + x] = val;
things will work. If this offends you, I suggest writing a small library of helper functions to hide the mess.
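For example, a minimal sketch of such a helper (my own; note that unsigned underflow wraps modulo 256, exactly as the per-field version above does):

__host__ __device__ inline uchar4 operator-(uchar4 a, uchar4 b)
{
    // component-wise subtraction; each field wraps on underflow
    return make_uchar4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}

With this defined, the original outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp; line compiles as written. If you want clamping at zero instead of wrap-around, subtract via int and clamp each component before narrowing back to unsigned char.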

cuda multiple image erosion not work

I'm trying to implement erosion of multiple black (0) and white (255) images with CUDA, using a square (5x5) structuring element. The kernel I implemented takes an unsigned char array buffer in which nImg images of 200x200 px are stored. To allow erosion of multiple images simultaneously, I use a grid with a 3D structure:
each block has the dimensions of the strel (5x5)
the grid has height = image_height / blockDim.y, width = image_width / blockDim.x, z = nImg
I've tried to implement it by extending that sample.
The problem is that if I store the pixels that a block of threads considers in a shared buffer (shared between the threads of the block to allow fast memory access), the algorithm doesn't work properly. I tried to change the bindex, which I think is where I make the mistake, but I cannot find a solution.
Any suggestions?
Here's my code:
// strel size
#define STREL_W 5
#define STREL_H 5
// distance from the center of the strel to its edge
#define R (STREL_H/2)
// size of the 2D region that each block considers, i.e. all the neighbors that each thread in a block considers
#define BLOCK_W (STREL_W+(2*R))
#define BLOCK_H (STREL_H+(2*R))

__global__ void erode_multiple_img_SM(unsigned char * buffer_in,
                                      unsigned char * buffer_out,
                                      int w,
                                      int h)
{
    // array stored in shared memory, containing all the pixel neighbors that each thread in a block considers
    __shared__ unsigned char fast_acc_arr[BLOCK_W*BLOCK_H];

    // map thread in a 3D structure
    int col   = blockIdx.x * STREL_W + threadIdx.x - R;
    int row   = blockIdx.y * STREL_H + threadIdx.y - R;
    int plane = blockIdx.z * blockDim.z + threadIdx.z;

    // check whether a foreground px of the strel is not contained in a strel-sized region of the image
    // (if even one px is not contained, the pixel is eroded)
    bool is_contain = true;

    // clamp to edge of image
    col = max(0, col);
    col = min(col, w-1);
    row = max(0, row);
    row = min(row, h-1);

    // map each thread to a 1D coord, to map the 3D structure (grid) onto the image buffer (1D)
    unsigned int index  = (plane * h * w) + (row * w) + col;
    unsigned int bindex = threadIdx.y * blockDim.y + threadIdx.x;

    // each thread copies its pixel of the block to shared memory (shared with the threads of the block)
    fast_acc_arr[bindex] = buffer_in[index];
    __syncthreads();

    // the strel must be contained in the image; threadIdx.x and threadIdx.y are the coords of the center
    // of the mask that corresponds to the strel in the image, and it must be contained in the image
    if ((threadIdx.x >= R) && (threadIdx.x < BLOCK_W-R) && (threadIdx.y >= R) && (threadIdx.y < BLOCK_H-R)) {
        for (int dy = -R; dy <= R; dy++) {
            if (is_contain == false)
                break;
            for (int dx = -R; dx <= R; dx++) {
                // if even one element under the mask differs from the strel value, the strel is not
                // contained in the mask, so the center of the mask is eroded (and it is unnecessary to
                // consider the other elements of the mask; hence the break)
                if (fast_acc_arr[bindex + (dy * blockDim.x) + dx] != 255) {
                    buffer_out[index] = 0;
                    is_contain = false;
                    break;
                }
            }
        }
        // if the strel is contained in the image, then the center is not eroded
        if (is_contain == true)
            buffer_out[index] = 255;
    }
}
These are my kernel settings:
dim3 block(5,5,1);
dim3 grid(200/(block.x),200/(block.y),nImg);
My kernel call:
erode_multiple_img_SM<<<grid,block>>>(dimage_src,dimage_dst,200,200);
My input and output images (shown for the 150th buffer element) are not reproduced here.
Code without shared memory (lower speed):
__global__ void erode_multiple_img(unsigned char * buffer_in,
                                   unsigned char * buffer_out,
                                   int w, int h)
{
    int col   = blockIdx.x * blockDim.x + threadIdx.x;
    int row   = blockIdx.y * blockDim.y + threadIdx.y;
    int plane = blockIdx.z * blockDim.z + threadIdx.z;
    bool is_contain = true;
    col = max(0, col);
    col = min(col, w-1);
    row = max(0, row);
    row = min(row, h-1);
    for (int dy = -STREL_H/2; dy <= STREL_H/2; dy++) {
        if (is_contain == false)
            break;
        for (int dx = -STREL_W/2; dx <= STREL_W/2; dx++) {
            if (buffer_in[(plane * h * w) + (row + dy) * w + (col + dx)] != 255) {
                buffer_out[(plane * h * w) + row * w + col] = 0;
                is_contain = false;
                break;
            }
        }
    }
    if (is_contain == true)
        buffer_out[(plane * h * w) + row * w + col] = 255;
}
UPDATED ALGORITHM
I tried to follow that sample to do the convolution. I changed the input image, which now has size 512x512, and I wrote this algorithm:
#define STREL_SIZE 5
#define TILE_W 16
#define TILE_H 16
#define R (STREL_H/2)
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))

__global__ void erode_multiple_img_SM_v2(unsigned char * buffer_in,
                                         unsigned char * buffer_out,
                                         int w, int h)
{
    // Data cache: threadIdx.x, threadIdx.y
    __shared__ unsigned char data[TILE_W + STREL_SIZE][TILE_W + STREL_SIZE];

    // global mem address of this thread
    int col   = blockIdx.x * blockDim.x + threadIdx.x;
    int row   = blockIdx.y * blockDim.y + threadIdx.y;
    int plane = blockIdx.z * blockDim.z + threadIdx.z;
    int gLoc  = (plane*h/w) + row*w + col;
    bool is_contain = true;

    // load cache (32x32 shared memory, 16x16 thread blocks)
    // each thread loads four values from global memory into shared mem
    int x, y;   // image-based coordinate
    if ((col < w) && (row < h)) {
        data[threadIdx.x][threadIdx.y] = buffer_in[gLoc];
        if (threadIdx.y > (h - STREL_SIZE))
            data[threadIdx.x][threadIdx.y + STREL_SIZE] = buffer_in[gLoc + STREL_SIZE];
        if (threadIdx.x > (w - STREL_SIZE))
            data[threadIdx.x + STREL_SIZE][threadIdx.y] = buffer_in[gLoc + STREL_SIZE];
        if ((threadIdx.x > (w - STREL_SIZE)) && (threadIdx.y > (h - STREL_SIZE)))
            data[threadIdx.x + STREL_SIZE][threadIdx.y + STREL_SIZE] = buffer_in[gLoc + 2*STREL_SIZE];

        // wait for all threads to finish reading
        __syncthreads();

        //buffer_out[gLoc] = data[threadIdx.x][threadIdx.y];

        unsigned char min_value = 255;
        for (x = 0; x < STREL_SIZE; x++) {
            for (y = 0; y < STREL_SIZE; y++) {
                min_value = min(data[threadIdx.x + x][threadIdx.y + y], min_value);
            }
        }
        buffer_out[gLoc] = min_value;
    }
}
My kernel settings are now:
dim3 block(16,16);
dim3 grid(512/(block.x),512/(block.y),nImg);
The input and output images (not reproduced here) suggest that the pixels of the apron are not copied into the output buffer.
You may want to read the following links for a more detailed description and better example code on how to implement an image convolution CUDA kernel function.
http://igm.univ-mlv.fr/~biri/Enseignement/MII2/Donnees/convolutionSeparable.pdf
https://www.evl.uic.edu/sjames/cs525/final.html
Basically using a convolution filter of the size (5 x 5) does not mean setting the size of the thread block to be (5 x 5).
Typically, for a non-separable convolution, you could use a thread block of the size (16 x 16), to calculate a block of (16 x 16) pixels on the output image. To achieve this you need to read a block of ((2+16+2) x (2+16+2)) pixels from the input image to the shared memory, using the (16 x 16) threads collaboratively.
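To make that concrete, here is a minimal sketch (mine, untested) of the layout just described: a 16x16 thread block computes a 16x16 output tile, and the block cooperatively loads the (16+2*RADIUS) x (16+2*RADIUS) input region, apron included, into shared memory:

#define RADIUS 2                    // half-size of the 5x5 structuring element
#define TILE_W 16
#define TILE_H 16
#define SM_W (TILE_W + 2 * RADIUS)  // shared tile width, apron included
#define SM_H (TILE_H + 2 * RADIUS)

__global__ void erode_tiled(const unsigned char* in, unsigned char* out,
                            int w, int h)
{
    __shared__ unsigned char tile[SM_H][SM_W];

    const int plane = blockIdx.z;
    const int out_x = blockIdx.x * TILE_W + threadIdx.x;
    const int out_y = blockIdx.y * TILE_H + threadIdx.y;
    const unsigned char* img = in + plane * w * h;

    // Cooperative load: SM_W*SM_H > TILE_W*TILE_H, so each thread may load
    // more than one element; out-of-range reads are clamped to the edge.
    for (int y = threadIdx.y; y < SM_H; y += TILE_H)
        for (int x = threadIdx.x; x < SM_W; x += TILE_W) {
            int gx = (int)blockIdx.x * TILE_W + x - RADIUS;
            int gy = (int)blockIdx.y * TILE_H + y - RADIUS;
            gx = min(max(gx, 0), w - 1);
            gy = min(max(gy, 0), h - 1);
            tile[y][x] = img[gy * w + gx];
        }
    __syncthreads();

    if (out_x >= w || out_y >= h) return;

    // Erosion with a 5x5 square strel: the output is 255 only if the whole
    // neighborhood is 255, i.e. the output is the neighborhood minimum.
    int v = 255;
    for (int dy = 0; dy <= 2 * RADIUS; ++dy)
        for (int dx = 0; dx <= 2 * RADIUS; ++dx)
            v = min(v, (int)tile[threadIdx.y + dy][threadIdx.x + dx]);

    out[plane * w * h + out_y * w + out_x] = (unsigned char)v;
}

It would be launched with dim3 block(TILE_W, TILE_H) and dim3 grid((w + TILE_W - 1) / TILE_W, (h + TILE_H - 1) / TILE_H, nImg), so the block size is decoupled from the structuring element size.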

CUDA, "illegal memory access was encountered" in Memcpy

I have this cuda file:
#include "cuda.h"
#include "../../HandleError.h"
#include "Sphere.hpp"
#include <stdlib.h>
#include <CImg.h>
#define WIDTH 1280
#define HEIGHT 720
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES_COUNT 5
using namespace cimg_library;
__global__
void kernel(unsigned char* bitmap, Sphere* s)
{
// Map threadIdx/blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = x - blockDim.x * gridDim.x / 2;
float oy = y - blockDim.y * gridDim.y / 2;
float r = 0.2, g = 0.2, b = 0.5;
float maxz = -INF;
for (int i = 0; i < SPHERES_COUNT; i++) {
float n, t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
bitmap[offset*3] = (int)(r * 255);
bitmap[offset*3 + 1] = (int)(g * 255);
bitmap[offset*3 + 2] = (int)(b * 255);
}
__constant__ Sphere s[SPHERES_COUNT];

int main()
{
    // Capture start time
    cudaEvent_t start, stop;
    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));
    HANDLE_ERROR(cudaEventRecord(start, 0));

    // Create host bitmap
    CImg<unsigned char> image(WIDTH, HEIGHT, 1, 3);
    image.permute_axes("cxyz");

    // Allocate device bitmap data
    unsigned char* dev_bitmap;
    HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, image.size()*sizeof(unsigned char)));

    // Generate spheres and copy them to the GPU one by one
    Sphere* temp_s = (Sphere*)malloc(SPHERES_COUNT*sizeof(Sphere));
    for (int i = 0; i < SPHERES_COUNT; i++) {
        temp_s[i].r = rnd(1.0f);
        temp_s[i].g = rnd(1.0f);
        temp_s[i].b = rnd(1.0f);
        temp_s[i].x = rnd(1000.0f) - 500;
        temp_s[i].y = rnd(1000.0f) - 500;
        temp_s[i].z = rnd(1000.0f) - 500;
        temp_s[i].radius = rnd(100.0f) + 20;
    }
    HANDLE_ERROR(cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere)*SPHERES_COUNT));
    free(temp_s);

    // Generate a bitmap from sphere data
    dim3 grids(WIDTH/16, HEIGHT/16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(dev_bitmap, s);

    // Copy the bitmap back from the GPU for display
    HANDLE_ERROR(cudaMemcpy(image.data(), dev_bitmap,
                            image.size()*sizeof(unsigned char),
                            cudaMemcpyDeviceToHost));
    cudaFree(dev_bitmap);

    image.permute_axes("yzcx");
    image.save("render.bmp");
}
It compiles fine, but when executed I get this error:
an illegal memory access was encountered in main.cu at line 82
that is, here:
//Copy the bitmap back from the GPU for display
HANDLE_ERROR(cudaMemcpy(image.data(), dev_bitmap,
image.size()*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
I cannot understand why...
I know that if I remove this:
bitmap[offset*3] = (int)(r * 255);
bitmap[offset*3 + 1] = (int)(g * 255);
bitmap[offset*3 + 2] = (int)(b * 255);
the error is not reported, so I thought it might be an out-of-bounds index error reported later. But I have an identical version of this program that makes no use of constant memory, and it works fine with the very same version of the kernel function...
There are two things at issue here. The first is this:
__constant__ Sphere s[SPHERES_COUNT];

int main()
{
    ......
    kernel<<<grids, threads>>>(dev_bitmap, s);
    ......
In host code, s is a host memory variable which provides a handle for the CUDA runtime to hook up with the device constant memory symbol. It doesn't contain a valid device pointer and can't be passed to kernel calls. The result is an invalid memory access error.
You could do this:
__constant__ Sphere s[SPHERES_COUNT];

int main()
{
    ......
    Sphere *d_s;
    cudaGetSymbolAddress((void **)&d_s, s);
    kernel<<<grids, threads>>>(dev_bitmap, d_s);
    ......
which would cause a symbol lookup to get the device address of s, and it would be valid to pass that to the kernel. However, the GPU relies on the compiler emitting specific instructions to access memory through the constant cache. The device compiler will only emit these instructions when it can detect that a __constant__ variable is being accessed within a kernel, which is not possible when using a pointer. You can see more about how the compiler will generate code for constant variable access in this Stack Overflow question and answer.
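Given that caveat, the more idiomatic alternative (a sketch) is to drop the kernel parameter entirely and let the kernel reference the __constant__ symbol directly, which lets the compiler emit constant-cache loads:

__constant__ Sphere s[SPHERES_COUNT]; // file-scope device symbol

__global__ void kernel(unsigned char* bitmap) // note: no Sphere* parameter
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // ... same body as before; s[i] now names the __constant__ array
    // directly, so its reads go through the constant cache ...
}

// host side, unchanged except for the launch:
// HANDLE_ERROR(cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES_COUNT));
// kernel<<<grids, threads>>>(dev_bitmap);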

Several arithmetic operations parallelized in C++Amp

I am trying to parallelize a convolution filter using C++ AMP. I would like the following function to start working (I don't know how to do it properly):
float* pixel_color = new float[16];
concurrency::array_view<float, 2> pixels(4, 4, pixel_array), taps(4, 4, myTap4Kernel_array);
concurrency::array_view<float, 1> pixel(16, pixel_color); // I don't know which data structure to use here

parallel_for_each(
    pixels.extent, [=](concurrency::index<2> idx) restrict(amp)
    {
        int row = idx[0];
        int col = idx[1];
        pixels(row, col) = taps(row, col) * pixels(row, col);
        pixel[0] += pixels(row, col);
    });

pixel.synchronize();
pixels_.at<Pixel>(j, i) = pixel_color;
}
The main problem is that I don't know how to use the pixel structure properly (which concurrent data structure to use here, as I don't need all 16 elements), and I don't know whether I can safely add the values this way.
The code above doesn't work; it does not add the appropriate values to pixel[0].
I also would like to define
concurrency::array_view<float, 2> pixels(4, 4, pixel_array), taps(4, 4, myTap4Kernel_array);
outside the method (for example in the header file) and initialize it in the constructor or another function (as this is a bottleneck and takes a lot of time copying the data between CPU and GPU). Does anybody know how to do this?
You're on the right track, but doing in-place manipulations of arrays on a GPU is tricky, as you cannot guarantee the order in which different elements are updated.
Here's an example of something very similar. The ApplyColorSimplifierTiledHelper method contains an AMP-restricted parallel_for_each that calls SimplifyIndexTiled for each index in the 2D array. SimplifyIndexTiled calculates a new value for each pixel in destFrame based on the value of the pixels surrounding the corresponding pixel in srcFrame. This solves the race condition issue present in your code.
This code comes from the CodePlex site for the C++ AMP book. The Cartoonizer case study includes several examples of these sorts of image processing problems implemented in C++ AMP using arrays, textures, tiled/untiled kernels and multiple GPUs. The C++ AMP book discusses the implementation in some detail.
void ApplyColorSimplifierTiledHelper(const array<ArgbPackedPixel, 2>& srcFrame,
                                     array<ArgbPackedPixel, 2>& destFrame, UINT neighborWindow)
{
    const float_3 W(ImageUtils::W);

    assert(neighborWindow <= FrameProcessorAmp::MaxNeighborWindow);

    tiled_extent<FrameProcessorAmp::TileSize, FrameProcessorAmp::TileSize>
        computeDomain = GetTiledExtent(srcFrame.extent);
    parallel_for_each(computeDomain, [=, &srcFrame, &destFrame]
        (tiled_index<FrameProcessorAmp::TileSize, FrameProcessorAmp::TileSize> idx)
        restrict(amp)
    {
        SimplifyIndexTiled(srcFrame, destFrame, idx, neighborWindow, W);
    });
}

void SimplifyIndex(const array<ArgbPackedPixel, 2>& srcFrame,
                   array<ArgbPackedPixel, 2>& destFrame, index<2> idx,
                   UINT neighborWindow, const float_3& W) restrict(amp)
{
    const int shift = neighborWindow / 2;
    float sum = 0;
    float_3 partialSum;
    const float standardDeviation = 0.025f;
    const float k = -0.5f / (standardDeviation * standardDeviation);

    const int idxY = idx[0] + shift; // Corrected index for border offset.
    const int idxX = idx[1] + shift;
    const int y_start = idxY - shift;
    const int y_end = idxY + shift;
    const int x_start = idxX - shift;
    const int x_end = idxX + shift;

    RgbPixel orgClr = UnpackPixel(srcFrame(idxY, idxX));

    for (int y = y_start; y <= y_end; ++y)
        for (int x = x_start; x <= x_end; ++x)
        {
            if (x != idxX || y != idxY) // don't apply filter to the requested index, only to the neighbors
            {
                RgbPixel clr = UnpackPixel(srcFrame(y, x));
                float distance = ImageUtils::GetDistance(orgClr, clr, W);
                float value = concurrency::fast_math::pow(float(M_E), k * distance * distance);
                sum += value;
                partialSum.r += clr.r * value;
                partialSum.g += clr.g * value;
                partialSum.b += clr.b * value;
            }
        }

    RgbPixel newClr;
    newClr.r = static_cast<UINT>(clamp(partialSum.r / sum, 0.0f, 255.0f));
    newClr.g = static_cast<UINT>(clamp(partialSum.g / sum, 0.0f, 255.0f));
    newClr.b = static_cast<UINT>(clamp(partialSum.b / sum, 0.0f, 255.0f));
    destFrame(idxY, idxX) = PackPixel(newClr);
}
The code uses ArgbPackedPixel, which is simply a mechanism for packing 8-bit RGB values into an unsigned long, as C++ AMP does not support char. If your problem is small enough to fit into a texture then you may want to look at using one instead of an array, as the pack/unpack is implemented in hardware on the GPU and is effectively "free"; here you have to pay for it with additional compute. There is also an example of this implementation on CodePlex.
typedef unsigned long ArgbPackedPixel;

struct RgbPixel
{
    unsigned int r;
    unsigned int g;
    unsigned int b;
};

const int fixedAlpha = 0xFF;

inline ArgbPackedPixel PackPixel(const RgbPixel& rgb) restrict(amp)
{
    return (rgb.b | (rgb.g << 8) | (rgb.r << 16) | (fixedAlpha << 24));
}

inline RgbPixel UnpackPixel(const ArgbPackedPixel& packedArgb) restrict(amp)
{
    RgbPixel rgb;
    rgb.b = packedArgb & 0xFF;
    rgb.g = (packedArgb & 0xFF00) >> 8;
    rgb.r = (packedArgb & 0xFF0000) >> 16;
    return rgb;
}
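As a quick illustration of the round trip (a sketch of mine; BrightenRed and its pixels parameter are hypothetical), the helpers are used inside an AMP kernel like this:

#include <amp.h>
using namespace concurrency;

// Brighten the red channel of every pixel, unpacking and repacking each one.
void BrightenRed(array_view<ArgbPackedPixel, 2> pixels)
{
    parallel_for_each(pixels.extent, [=](index<2> idx) restrict(amp)
    {
        RgbPixel clr = UnpackPixel(pixels[idx]);         // unpack to 8-bit channels
        clr.r = (clr.r + 16 > 255u) ? 255u : clr.r + 16; // saturate at 255
        pixels[idx] = PackPixel(clr);                    // repack into unsigned long
    });
}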

Flipping the 2D texture on a sphere with Ray-Tracing

I am working on my ray-tracer and I think I've made some significant progress. I am currently trying to place texture images onto objects, but they don't map quite right: the texture appears flipped on the sphere (the rendered image is not reproduced here).
Here is the relevant code:
-Image Class for opening image
class Image
{
public:
    Image() {}

    void read_bmp_file(char* filename)
    {
        int i;
        FILE* f = fopen(filename, "rb");
        unsigned char info[54];
        fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header

        // extract image height and width from header
        width  = *(int*)&info[18];
        height = *(int*)&info[22];

        // note: this assumes 3 * width is a multiple of 4, i.e. no BMP row padding
        int size = 3 * width * height;
        data = new unsigned char[size];              // allocate 3 bytes per pixel
        fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once
        fclose(f);

        for (i = 0; i < size; i += 3)
        {
            unsigned char tmp = data[i];
            data[i]   = data[i+2];
            data[i+2] = tmp;
        }
        /* Now data should contain the (R, G, B) values of the pixels. The color of pixel (i, j) is stored at
           data[j * 3 * width + 3 * i], data[j * 3 * width + 3 * i + 1] and data[j * 3 * width + 3 * i + 2].
           In the last part, the swap between every first and third value is done because Windows stores the
           color values as (B, G, R) triples, not (R, G, B). */
    }

public:
    int width;
    int height;
    unsigned char* data;
};
-Texture class
class Texture : public Material
{
public:
    Texture(char* filename) : Material()
    {
        image_ptr = new Image;
        image_ptr->read_bmp_file(filename);
    }

    virtual ~Texture() {}

    virtual void set_mapping(Mapping* mapping)
    { mapping_ptr = mapping; }

    virtual Vec get_color(const ShadeRec& sr)
    {
        int row, col;
        if (mapping_ptr)
            mapping_ptr->get_texel_coordinates(sr.local_hit_point, image_ptr->width, image_ptr->height, row, col);
        return Vec(image_ptr->data[row * 3 * image_ptr->width + 3*col    ] / 255.0,
                   image_ptr->data[row * 3 * image_ptr->width + 3*col + 1] / 255.0,
                   image_ptr->data[row * 3 * image_ptr->width + 3*col + 2] / 255.0);
    }

public:
    Image* image_ptr;
    Mapping* mapping_ptr;
};
-Mapping class
class SphericalMap : public Mapping
{
public:
    SphericalMap() : Mapping() {}
    virtual ~SphericalMap() {}

    virtual void get_texel_coordinates(const Vec& local_hit_point,
                                       const int hres,
                                       const int vres,
                                       int& row,
                                       int& column) const
    {
        float theta = acos(local_hit_point.y);
        float phi   = atan2(local_hit_point.z, local_hit_point.x);
        if (phi < 0.0)
            phi += 2*PI;
        float u = phi/(2*PI);
        float v = (PI - theta)/PI;
        column = (int)((hres - 1) * u);
        row    = (int)((vres - 1) * v);
    }
};
-Local hit points:
void Sphere::set_local_hit_point(ShadeRec& sr)
{
    sr.local_hit_point.x = sr.hit_point.x - c.x;
    sr.local_hit_point.y = (sr.hit_point.y - c.y)/R;
    sr.local_hit_point.z = sr.hit_point.z - c.z;
}
-This is how I constructed the sphere in main:
Texture* t1 = new Texture("Texture\\earthmap2.bmp");
SphericalMap* sm = new SphericalMap();
t1->set_mapping(sm);
t1->set_ka(0.55);
t1->set_ks(0.0);
Sphere *s1 = new Sphere(Vec(-60,0,50), 149);
s1->set_material(t1);
w.add_object(s1);
Sorry for the long code, but if I had any idea where the problem might occur, I'd have posted only that part. Finally, this is how I call the get_color() function from main:
xShaded += sr.material_ptr->get_color(sr).x * in.x * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.x * pow((max(0.0, sr.normal.dot(h))), 1);
yShaded += sr.material_ptr->get_color(sr).y * in.y * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.y * pow((max(0.0, sr.normal.dot(h))), 1);
zShaded += sr.material_ptr->get_color(sr).z * in.z * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.z * pow((max(0.0, sr.normal.dot(h))), 1);
Shot in the dark: if memory serves, BMPs are stored from the bottom up, while many other image formats are top-down. Could that possibly be the problem? Perhaps your file reader just needs to reverse the rows?
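If it is the BMP row order, a minimal sketch of the fix inside read_bmp_file (after the existing B/R channel swap) would be to reverse the rows in place:

// Sketch: reverse the row order in place, since BMP stores rows bottom-up;
// data, width and height are the members read above, and each row occupies
// 3 * width bytes (row padding is ignored, as in the rest of the reader).
int row_size = 3 * width;
for (int top = 0, bot = height - 1; top < bot; ++top, --bot)
    for (int k = 0; k < row_size; ++k)
    {
        unsigned char tmp = data[top * row_size + k];
        data[top * row_size + k] = data[bot * row_size + k];
        data[bot * row_size + k] = tmp;
    }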
Changing float phi = atan2(local_hit_point.z, local_hit_point.x); to float phi = atan2(local_hit_point.x, local_hit_point.z); solved the problem.