Flipped 2D texture on a sphere with ray tracing - C++

I am working on my ray tracer and I think I've made some significant progress. I am currently trying to map texture images onto objects, but they don't map quite right: they appear flipped on the sphere. Here is the final image from my current code:
Here is the relevant code:
- Image class for opening the image:
class Image
{
public:
    Image() {}

    void read_bmp_file(char* filename)
    {
        int i;
        FILE* f = fopen(filename, "rb");
        unsigned char info[54];
        fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header

        // extract image height and width from the header
        width = *(int*)&info[18];
        height = *(int*)&info[22];

        int size = 3 * width * height;
        data = new unsigned char[size]; // allocate 3 bytes per pixel
        fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once
        fclose(f);

        for(i = 0; i < size; i += 3)
        {
            unsigned char tmp = data[i];
            data[i] = data[i+2];
            data[i+2] = tmp;
        }
        /* Now data should contain the (R, G, B) values of the pixels. The color of pixel (i, j) is stored at
           data[j * 3 * width + 3 * i], data[j * 3 * width + 3 * i + 1] and data[j * 3 * width + 3 * i + 2].
           The swap between the first and third byte of every pixel is done because Windows stores the
           color values as (B, G, R) triples, not (R, G, B). */
    }

public:
    int width;
    int height;
    unsigned char* data;
};
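For clarity, sampling pixel (i, j) as a normalized RGB triple with this layout looks like the following (a minimal sketch; i and j here stand for the pixel's column and row, and this is exactly the indexing the Texture class below uses):

int idx = j * 3 * width + 3 * i;      // start of pixel (i, j) in the data array
float r = data[idx]     / 255.0f;
float g = data[idx + 1] / 255.0f;
float b = data[idx + 2] / 255.0f;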
- Texture class:
class Texture: public Material
{
public:
    Texture(char* filename): Material() {
        image_ptr = new Image;
        image_ptr->read_bmp_file(filename);
    }

    virtual ~Texture() {}

    virtual void set_mapping(Mapping* mapping)
    { mapping_ptr = mapping; }

    virtual Vec get_color(const ShadeRec& sr) {
        int row = 0, col = 0; // initialized in case mapping_ptr is null
        if(mapping_ptr)
            mapping_ptr->get_texel_coordinates(sr.local_hit_point, image_ptr->width, image_ptr->height, row, col);
        return Vec(image_ptr->data[row * 3 * image_ptr->width + 3 * col    ] / 255.0,
                   image_ptr->data[row * 3 * image_ptr->width + 3 * col + 1] / 255.0,
                   image_ptr->data[row * 3 * image_ptr->width + 3 * col + 2] / 255.0);
    }

public:
    Image* image_ptr;
    Mapping* mapping_ptr;
};
- Mapping class:
class SphericalMap: public Mapping
{
public:
    SphericalMap(): Mapping() {}
    virtual ~SphericalMap() {}

    virtual void get_texel_coordinates(const Vec& local_hit_point,
                                       const int hres,
                                       const int vres,
                                       int& row,
                                       int& column) const
    {
        float theta = acos(local_hit_point.y);
        float phi = atan2(local_hit_point.z, local_hit_point.x);
        if(phi < 0.0)
            phi += 2 * PI;

        float u = phi / (2 * PI);
        float v = (PI - theta) / PI;

        column = (int)((hres - 1) * u);
        row = (int)((vres - 1) * v);
    }
};
- Local hit point:
void Sphere::set_local_hit_point(ShadeRec& sr)
{
    sr.local_hit_point.x = sr.hit_point.x - c.x;
    sr.local_hit_point.y = (sr.hit_point.y - c.y) / R;
    sr.local_hit_point.z = sr.hit_point.z - c.z;
}
- This is how I constructed the sphere in main:
Texture* t1 = new Texture("Texture\\earthmap2.bmp");
SphericalMap* sm = new SphericalMap();
t1->set_mapping(sm);
t1->set_ka(0.55);
t1->set_ks(0.0);
Sphere *s1 = new Sphere(Vec(-60,0,50), 149);
s1->set_material(t1);
w.add_object(s1);
Sorry for the long code, but if I had any idea where the problem might be, I'd have posted only that part. Finally, this is how I call the get_color() function from main:
xShaded += sr.material_ptr->get_color(sr).x * in.x * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.x * pow((max(0.0, sr.normal.dot(h))), 1);
yShaded += sr.material_ptr->get_color(sr).y * in.y * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.y * pow((max(0.0, sr.normal.dot(h))), 1);
zShaded += sr.material_ptr->get_color(sr).z * in.z * max(0.0, sr.normal.dot(l)) +
           sr.material_ptr->ks * in.z * pow((max(0.0, sr.normal.dot(h))), 1);

Shot in the dark: if memory serves, BMP files are stored bottom-up, while many other image formats are top-down. Could that be the problem? Perhaps your file reader just needs to reverse the rows.
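A minimal sketch of what that could look like at the end of read_bmp_file, assuming 3 bytes per pixel and no row padding (the same assumption the reader above already makes):

// Hypothetical fix: flip the image vertically after reading.
int row_size = 3 * width;
unsigned char* tmp_row = new unsigned char[row_size];
for(int top = 0, bottom = height - 1; top < bottom; ++top, --bottom)
{
    memcpy(tmp_row, data + top * row_size, row_size);                // needs <cstring>
    memcpy(data + top * row_size, data + bottom * row_size, row_size);
    memcpy(data + bottom * row_size, tmp_row, row_size);
}
delete[] tmp_row;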

Changing float phi = atan2(local_hit_point.z, local_hit_point.x); to float phi = atan2(local_hit_point.x, local_hit_point.z); solved the problem. (Swapping the two arguments mirrors and shifts the azimuth angle, which un-flips the texture horizontally.)
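For reference, the mapping function with that one-line change applied (everything else unchanged):

virtual void get_texel_coordinates(const Vec& local_hit_point,
                                   const int hres, const int vres,
                                   int& row, int& column) const
{
    float theta = acos(local_hit_point.y);
    float phi = atan2(local_hit_point.x, local_hit_point.z); // arguments swapped
    if(phi < 0.0)
        phi += 2 * PI;
    float u = phi / (2 * PI);
    float v = (PI - theta) / PI;
    column = (int)((hres - 1) * u);
    row = (int)((vres - 1) * v);
}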

Related

Image subtraction with CUDA and textures

My goal is to use C++ with CUDA to subtract a dark frame from a raw image. I want to use textures for acceleration. The input images are cv::Mat with the type CV_8UC4 (I use the pointer to the data of the cv::Mat). This is the kernel I came up with, but I have no idea how to eventually subtract the textures from each other:
__global__ void DarkFrameSubtractionKernel(uchar4* outputImage, size_t pitchOutputImage,
    cudaTextureObject_t inputImage, cudaTextureObject_t darkImage, int width, int height)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const float tx = (x + 0.5f);
    const float ty = (y + 0.5f);
    if (x >= width || y >= height) return;

    uchar4 inputImageTemp = tex2D<uchar4>(inputImage, tx, ty);
    uchar4 darkImageTemp = tex2D<uchar4>(darkImage, tx, ty);
    outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp; // this line does not compile
}
This is the function that calls the kernel (you can see that I create the textures from unsigned char):
void subtractDarkImage(unsigned char* inputImage, size_t pitchInputImage, unsigned char* outputImage,
    size_t pitchOutputImage, unsigned char* darkImage, size_t pitchDarkImage, int width, int height,
    cudaStream_t stream)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.devPtr = inputImage;
    resDesc.res.pitch2D.pitchInBytes = pitchInputImage;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;
    texDesc.addressMode[0] = cudaAddressModeBorder;
    texDesc.addressMode[1] = cudaAddressModeBorder;

    cudaTextureObject_t imageInputTex, imageDarkTex;
    CUDA_CHECK(cudaCreateTextureObject(&imageInputTex, &resDesc, &texDesc, 0));

    resDesc.res.pitch2D.devPtr = darkImage;
    resDesc.res.pitch2D.pitchInBytes = pitchDarkImage;
    CUDA_CHECK(cudaCreateTextureObject(&imageDarkTex, &resDesc, &texDesc, 0));

    dim3 block(32, 8);
    dim3 grid = paddedGrid(block.x, block.y, width, height);

    DarkFrameSubtractionKernel<<<grid, block, 0, stream>>>(reinterpret_cast<uchar4*>(outputImage), pitchOutputImage / sizeof(uchar4),
        imageInputTex, imageDarkTex, width, height);

    CUDA_CHECK(cudaDestroyTextureObject(imageInputTex));
    CUDA_CHECK(cudaDestroyTextureObject(imageDarkTex));
}
The code does not compile, as I cannot subtract one uchar4 from another (in the kernel). Is there an easy way to do the subtraction here?
Help is very much appreciated.
Is there an easy way to do the subtraction here?
There are no arithmetic operators defined for CUDA built-in vector types. If you replace
outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp;
with
uchar4 val;
val.x = inputImageTemp.x - darkImageTemp.x;
val.y = inputImageTemp.y - darkImageTemp.y;
val.z = inputImageTemp.z - darkImageTemp.z;
val.w = inputImageTemp.w - darkImageTemp.w;
outputImage[y * pitchOutputImage + x] = val;
things will work. If this offends you, I suggest writing a small library of helper functions to hide the mess.
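For example, a minimal sketch of one such helper. Note that the clamping at zero is an assumption on my part: plain unsigned subtraction wraps around, and for dark-frame subtraction you usually want to saturate instead:

// Hypothetical helper: per-channel subtraction for uchar4, clamped at zero
// so a dark-frame value larger than the pixel value cannot wrap to ~255.
__host__ __device__ inline uchar4 operator-(uchar4 a, uchar4 b)
{
    uchar4 r;
    r.x = a.x > b.x ? a.x - b.x : 0;
    r.y = a.y > b.y ? a.y - b.y : 0;
    r.z = a.z > b.z ? a.z - b.z : 0;
    r.w = a.w > b.w ? a.w - b.w : 0;
    return r;
}

With that defined, the original line outputImage[y * pitchOutputImage + x] = inputImageTemp - darkImageTemp; compiles as written.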

OpenCL path tracer creates strange noise patterns

I've made a path tracer using OpenCL and C++, following the basic structure in this tutorial: http://raytracey.blogspot.com/2016/11/opencl-path-tracing-tutorial-2-path.html. As far as I can tell, nothing is wrong with the path tracing algorithm itself, but I get strange stripe patterns in the image that don't match the regular noise of path tracing: striped image
There are distinct vertical stripes and narrower horizontal ones that make the image look granular regardless of how many samples I take per pixel. Again, pixel by pixel, the path tracer seems to be working (the outlines of objects are correct even where they appear mid-stripe), as seen here: close-up.
The only difference between my code and the one in the tutorial I linked is that Sam Lapere appears to be using the C++ wrapper for OpenCL, and I've added a couple of features like movement. There are also a few differences in how I'm handling light bounces.
I'm new to OpenCL. What could be causing this? It seems like the problem isn't in the ray tracer itself, but somewhere in the way I'm using OpenCL. I'm also using an SDL texture and renderer to show the image on the screen.
Here is the tracer code, if it helps:
Kernel:
__kernel void render_kernel
(__constant struct Sphere* spheres, const int width, const int height,
 const int sphere_count, __global int* output, __global float3* pixel_buckets,
 __global int* counter, __constant struct Ray* camera, __global bool* reset){

    int gid = get_global_id(0);

    //for movement
    if (*reset){
        pixel_buckets[gid] = (float3)(0,0,0);
        counter[gid] = 0;
    }

    int xcoord = gid % width;
    int ycoord = gid / width;

    struct Ray camray = createCamRay(xcoord, ycoord, width, height, counter[gid], camera);
    float3 final_color = trace(spheres, &camray, sphere_count, xcoord, ycoord);
    counter[gid]++;

    //average colors
    pixel_buckets[gid] += final_color;
    output[gid] = colorInt(clampColor(pixel_buckets[gid] / counter[gid]));
}
Trace:
float3 trace(__constant struct Sphere* spheres, struct Ray* camray, const int sphere_count,
    unsigned int seed0, unsigned int seed1){

    struct Ray ray = *camray;

    struct Sphere sphere1;
    sphere1.center = (float3)(0, 0, 3);
    sphere1.radius = 0.7;
    sphere1.color = (float3)(1,1,0);

    const int bounce_count = 8;
    float3 colors[20];
    float3 emiss[20];

    for (int bounce = 0; bounce < bounce_count; bounce++){
        int sphere_id = 0;
        float hit_distance = intersectScene(spheres, &ray, &sphere_id, sphere_count);
        struct Sphere hit_sphere = spheres[sphere_id];
        float3 hit_point = ray.origin + (ray.direction * hit_distance);
        float3 normal = normalize(hit_point - hit_sphere.center);
        if (dot(normal, -ray.direction) < 0){
            normal = -normal;
        }

        //random bounce angles
        float rand_theta = get_random(&seed0, &seed1);
        float theta = acos(sqrt(rand_theta));
        float rand_phi = get_random(&seed0, &seed1);
        float phi = 2 * PI * rand_phi;

        //scales the tnb vectors
        float x = sin(theta) * sin(phi);
        float y = sin(theta) * cos(phi);
        float n = cos(theta);

        float3 hemx = normalize(cross(ray.direction, normal)) * x;
        float3 hemy = normalize(cross(hemx, normal)) * y;
        normal = normal * n;
        float3 new_ray = normalize(hemx + hemy + normal);

        ray.origin = hit_point + (normal * EPSILON);
        ray.direction = new_ray;

        colors[bounce] = hit_sphere.color;
        emiss[bounce] = hit_sphere.emmissive;
    }

    colors[bounce_count] = (float3)(0,0,0);
    emiss[bounce_count] = (float3)(0,0,0);

    for (int i = bounce_count - 1; i >= 0; i--){
        colors[i] = (colors[i] * emiss[i]) + (colors[i] * colors[i + 1]);
    }

    return colors[0];
}
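The final loop folds the bounce history into a single estimate: writing c_i for the surface color and e_i for the emissive term recorded at bounce i, it evaluates the recurrence

L_i = c_i * (e_i + L_{i+1}),   with L_{bounce_count} = 0

so colors[0] ends up holding the emission picked up along the path, each term attenuated by the product of the surface colors encountered before it.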
Random number generator:
float get_random(unsigned int *seed0, unsigned int *seed1) {
    /* hash the seeds using bitwise AND operations and bitshifts */
    *seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16);
    *seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16);
    unsigned int ires = ((*seed0) << 16) + (*seed1);

    /* use a union to reinterpret the int bits as a float: keeping 23 mantissa
       bits and forcing the exponent field to 0x40000000 yields a float in
       [2.0, 4.0), so (res.f - 2.0f) / 2.0f lies in [0.0, 1.0) */
    union {
        float f;
        unsigned int ui;
    } res;
    res.ui = (ires & 0x007fffff) | 0x40000000;
    return (res.f - 2.0f) / 2.0f;
}
Thanks.

Halide JIT compilation

I'm trying to JIT-compile my Halide program so I can use it later in the code a few times on different images. But I think I'm doing something wrong; can anyone correct me?
First I create the Halide function to run:
void m_gammaFunctionTMOGenerate()
{
    Halide::ImageParam img(Halide::type_of<float>(), 3);
    img.set_stride(0, 4);
    img.set_stride(2, 1);
    Halide::Var x, y, c;
    Halide::Param<float> key, sat, clampMax, clampMin;
    Halide::Param<bool> cS;
    Halide::Func gamma;

    // algorithm
    //img.width(), img.height();
    if (cS.get())
    {
        float k1 = 1.6774;
        float k2 = 0.9925;
        sat.set((1 + k1) * pow(key.get(), k2) / (1 + k1 * pow(key.get(), k2)));
    }
    Halide::Expr luminance = img(x, y, 0) * 0.072186f + img(x, y, 1) * 0.715158f + img(x, y, 2) * 0.212656f;
    Halide::Expr ldr_lum = (luminance - clampMin) / (clampMax - clampMin);
    Halide::clamp(ldr_lum, 0.f, 1.f);
    ldr_lum = Halide::pow(ldr_lum, key);
    Halide::Expr imLum = img(x, y, c) / luminance;
    imLum = Halide::pow(imLum, sat) * ldr_lum;
    Halide::clamp(imLum, 0.f, 1.f);
    gamma(x, y, c) = imLum;

    // schedule
    gamma.vectorize(x, 16).parallel(y);

    // compilation
    auto & obuff = gamma.output_buffer();
    obuff.set_stride(0, 4);
    obuff.set_stride(2, 1);
    obuff.set_extent(2, 3);
    std::vector<Halide::Argument> arguments = { img, key, sat, clampMax, clampMin, cS };
    m_gammaFunction = (gammafunction)(gamma.compile_jit());
}
Then I store it in a pointer:
typedef int(*gammafunction)(buffer_t*, float, float, float, float, bool, buffer_t*);
gammafunction m_gammaFunction;
Then I try to run it:
buffer_t output_buf = { 0 };

// The host pointers point to the start of the image data:
buffer_t buf = { 0 };
buf.host = (uint8_t *)data; // Might also need const_cast
float * output = new float[width * height * 4];
output_buf.host = (uint8_t*)(output);

// If the buffer doesn't start at (0, 0), then assign mins
output_buf.extent[0] = buf.extent[0] = width;  // In elements, not bytes
output_buf.extent[1] = buf.extent[1] = height; // In elements, not bytes
output_buf.extent[2] = buf.extent[2] = 4;      // Assuming RGBA
// No need to assign additional extents as they were init'ed to zero above
output_buf.stride[0] = buf.stride[0] = 4;          // RGBA interleaved
output_buf.stride[1] = buf.stride[1] = width * 4;  // Assuming no line padding
output_buf.stride[2] = buf.stride[2] = 1;          // Channel interleaved
output_buf.elem_size = buf.elem_size = sizeof(float);

// Run the pipeline
int error = m_photoFunction(&buf, params[0], &output_buf);
But it doesn't work...
Error:
Exception thrown at 0x000002974F552DE0 in Viewer.exe: 0xC0000005: Access violation executing location 0x000002974F552DE0.
If there is a handler for this exception, the program may be safely continued.
Edit:
Here is my code for running the function:
buffer_t output_buf = { 0 };

// The host pointers point to the start of the image data:
buffer_t buf = { 0 };
buf.host = (uint8_t *)data; // Might also need const_cast
float * output = new float[width * height * 4];
output_buf.host = (uint8_t*)(output);

// If the buffer doesn't start at (0, 0), then assign mins
output_buf.extent[0] = buf.extent[0] = width;  // In elements, not bytes
output_buf.extent[1] = buf.extent[1] = height; // In elements, not bytes
output_buf.extent[2] = buf.extent[2] = 3;      // Assuming RGBA
// No need to assign additional extents as they were init'ed to zero above
output_buf.stride[0] = buf.stride[0] = 4;          // RGBA interleaved
output_buf.stride[1] = buf.stride[1] = width * 4;  // Assuming no line padding
output_buf.stride[2] = buf.stride[2] = 1;          // Channel interleaved
output_buf.elem_size = buf.elem_size = sizeof(float);

// Run the pipeline
int error = m_gammaFunction(&buf, params[0], params[1], params[2], params[3], params[4] > 0.5 ? true : false, &output_buf);
if (error) {
    printf("Halide returned an error: %d\n", error);
    return -1;
}
memcpy(output, data, size * sizeof(float));
Can anyone help me with it?
Edit:
Thanks to @KhouriGiordano I found out what I was doing wrong. Indeed, I had switched from AOT compiling to this code. So now my code looks like this:
class GammaOperator
{
public:
    GammaOperator();
    int realize(buffer_t * input, float params[], buffer_t * output, int width);

private:
    HalideFloat m_key;
    HalideFloat m_sat;
    HalideFloat m_clampMax;
    HalideFloat m_clampMin;
    HalideBool  m_cS;
    Halide::ImageParam m_img;
    Halide::Var x, y, c;
    Halide::Func m_gamma;
};

GammaOperator::GammaOperator()
    : m_img(Halide::type_of<float>(), 3)
{
    Halide::Expr w = (1.f + 1.6774f) * pow(m_key.get(), 0.9925f) / (1.f + 1.6774f * pow(m_key.get(), 0.9925f));
    Halide::Expr sat = Halide::select(m_cS, m_sat, w);

    Halide::Expr luminance = m_img(x, y, 0) * 0.072186f + m_img(x, y, 1) * 0.715158f + m_img(x, y, 2) * 0.212656f;
    Halide::Expr ldr_lum = (luminance - m_clampMin) / (m_clampMax - m_clampMin);
    ldr_lum = Halide::clamp(ldr_lum, 0.f, 1.f);
    ldr_lum = Halide::pow(ldr_lum, m_key);
    Halide::Expr imLum = m_img(x, y, c) / luminance;
    imLum = Halide::pow(imLum, sat) * ldr_lum;
    imLum = Halide::clamp(imLum, 0.f, 1.f);
    m_gamma(x, y, c) = imLum;
}

int GammaOperator::realize(buffer_t * input, float params[], buffer_t * output, int width)
{
    m_img.set(Halide::Buffer(Halide::type_of<float>(), input));
    m_img.set_stride(0, 4);
    m_img.set_stride(1, width * 4);
    m_img.set_stride(2, 4);

    // algorithm
    m_gamma.vectorize(x, 16).parallel(y);

    //params[0], params[1], params[2], params[3], params[4] > 0.5 ? true : false
    //{ img, key, sat, clampMax, clampMin, cS };
    m_key.set(params[0]);
    m_sat.set(params[1]);
    m_clampMax.set(params[2]);
    m_clampMin.set(params[3]);
    m_cS.set(params[4] > 0.5f ? true : false);

    // compilation
    m_gamma.realize(Halide::Buffer(Halide::type_of<float>(), output));
    return 0;
}
And I use it like this:
buffer_t output_buf = { 0 };

// The host pointers point to the start of the image data:
buffer_t buf = { 0 };
buf.host = (uint8_t *)data; // Might also need const_cast
float * output = new float[width * height * 4];
output_buf.host = (uint8_t*)(output);

// If the buffer doesn't start at (0, 0), then assign mins
output_buf.extent[0] = buf.extent[0] = width;  // In elements, not bytes
output_buf.extent[1] = buf.extent[1] = height; // In elements, not bytes
output_buf.extent[2] = buf.extent[2] = 4;      // Assuming RGBA
// No need to assign additional extents as they were init'ed to zero above
output_buf.stride[0] = buf.stride[0] = 4;          // RGBA interleaved
output_buf.stride[1] = buf.stride[1] = width * 4;  // Assuming no line padding
output_buf.stride[2] = buf.stride[2] = 1;          // Channel interleaved
output_buf.elem_size = buf.elem_size = sizeof(float);

// Run the pipeline
int error = s_gamma->realize(&buf, params, &output_buf, width);
But it still crashes in the m_gamma.realize function, with this in the console:
Error: Constraint violated: f0.stride.0 (4) == 1 (1)
By using Halide::Param::get(), you are extracting the value (default 0) from the Param object at the time you call get(). If you want to use the parameter value given at the time you call the generated function, just use the Param without calling get() and it will be implicitly converted to an Expr.
Since a Param is not convertible to a boolean, the Halide way of doing an if is Halide::select().
You aren't using the clamped return value of Halide::clamp(); it returns a new Expr rather than modifying its argument.
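For instance, with the names from your code (the result must be assigned, since clamp does not modify in place):

ldr_lum = Halide::clamp(ldr_lum, 0.f, 1.f); // keep the result
imLum   = Halide::clamp(imLum, 0.f, 1.f);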
I don't see cS being used by the Halide code, only the C++ code.
Now to your JIT problem: it looks like you started doing AOT compilation and switched to JIT.
You make a std::vector<Halide::Argument> but don't pass it anywhere. How can Halide know which Params you want to use? It looks at the Func and finds references to the ImageParam and Param objects.
How can you know what order it expects the Params in? You have no control over this. I was able to dump the bitcode by defining HL_GENBITCODE=1 and then viewing it with llvm-dis to see your function:
int gamma
( buffer_t *img
, float clampMax
, float key
, float clampMin
, float sat
, void *user_context
, buffer_t *result
);
Use gamma.realize(Halide::Buffer(Halide::type_of<float>(), &output_buf)) instead of gamma.compile_jit(), so you aren't left guessing at the generated function's signature.
For one-time use:
Use Image instead of ImageParam.
Use Expr instead of Param.
For repeated use with a single JIT compile:
Keep the ImageParam and Param objects around and set them before realizing the Func.

Extracting raw data from template for use in CUDA

The following code is a snippet from the PCL (point cloud) library. It calculates the integral sum of an image.
template <class DataType, unsigned Dimension> class IntegralImage2D
{
public:
    static const unsigned dim_fst = Dimension;
    typedef cv::Vec<typename TypeTraits<DataType>::IntegralType, dim_fst> FirstType;
    std::vector<FirstType> img_fst;

    //.... lots of methods missing here that actually calculate the integral sum

    /** \brief Compute the first order sum within a given rectangle
      * \param[in] start_x x position of rectangle
      * \param[in] start_y y position of rectangle
      * \param[in] width width of rectangle
      * \param[in] height height of rectangle
      */
    inline FirstType getFirstOrderSum(unsigned start_x, unsigned start_y, unsigned width, unsigned height) const
    {
        const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
        const unsigned upper_right_idx = upper_left_idx + width;
        const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
        const unsigned lower_right_idx = lower_left_idx + width;
        return (img_fst[lower_right_idx] + img_fst[upper_left_idx] - img_fst[upper_right_idx] - img_fst[lower_left_idx]);
    }
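This is the standard summed-area-table identity: the sum over the rectangle is

sum = I(lower_right) + I(upper_left) - I(upper_right) - I(lower_left)

where I is the integral image; the (wdt + 1) row stride suggests the integral image stores an extra leading row and column, so the rectangle sums need no boundary special-casing.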
Currently the results are obtained using the following code:
IntegralImage2D<float,3> iim_xyz;
IntegralImage2D<float, 3>::FirstType fo_elements;
IntegralImage2D<float, 3>::SecondType so_elements;
fo_elements = iim_xyz.getFirstOrderSum(pos_x - rec_wdt_2, pos_y - rec_hgt_2, rec_wdt, rec_hgt);
so_elements = iim_xyz.getSecondOrderSum(pos_x - rec_wdt_2, pos_y - rec_hgt_2, rec_wdt, rec_hgt);
However, I'm trying to parallelise the code (write getFirstOrderSum as a CUDA device function). Since CUDA doesn't recognise these FirstType and SecondType objects (or any OpenCV objects, for that matter), I'm struggling (I'm new to C++) to extract the raw data from the template.
If possible I would like to cast the img_fst object to some kind of vector or array that I can allocate on the cuda kernel.
It seems img_fst is of type std::vector<cv::Matx<double, 3, 1>>.
As it turns out, you can pass the raw data just as you would with a normal vector.
void computation(ps::IntegralImage2D<float, 3> iim_xyz){
    cv::Vec<double, 3>* d_img_fst = 0;
    cudaErrorCheck(cudaMalloc((void**)&d_img_fst, sizeof(cv::Vec<double, 3>) * (iim_xyz.img_fst.size())));
    cudaErrorCheck(cudaMemcpy(d_img_fst, &iim_xyz.img_fst[0], sizeof(cv::Vec<double, 3>) * (iim_xyz.img_fst.size()), cudaMemcpyHostToDevice));
    //..
}

__device__ double* getFirstOrderSum(unsigned start_x, unsigned start_y, unsigned width, unsigned height, int wdt, cv::Vec<double, 3>* img_fst)
{
    const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
    const unsigned upper_right_idx = upper_left_idx + width;
    const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
    const unsigned lower_right_idx = lower_left_idx + width;

    double* result = new double[3];
    result[0] = img_fst[lower_right_idx].val[0] + img_fst[upper_left_idx].val[0] - img_fst[upper_right_idx].val[0] - img_fst[lower_left_idx].val[0];
    result[1] = img_fst[lower_right_idx].val[1] + img_fst[upper_left_idx].val[1] - img_fst[upper_right_idx].val[1] - img_fst[lower_left_idx].val[1];
    result[2] = img_fst[lower_right_idx].val[2] + img_fst[upper_left_idx].val[2] - img_fst[upper_right_idx].val[2] - img_fst[lower_left_idx].val[2];
    return result; // the caller has to delete this pointer, otherwise it leaks
}
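A variant worth considering (a sketch, not from the original answer): writing into a caller-provided array avoids per-thread heap allocation and the delete obligation entirely:

// Sketch: same computation, but the caller supplies the 3-element output,
// so no device-side new/delete is needed.
__device__ void getFirstOrderSum(unsigned start_x, unsigned start_y, unsigned width, unsigned height,
                                 int wdt, const cv::Vec<double, 3>* img_fst, double result[3])
{
    const unsigned upper_left_idx  = start_y * (wdt + 1) + start_x;
    const unsigned upper_right_idx = upper_left_idx + width;
    const unsigned lower_left_idx  = (start_y + height) * (wdt + 1) + start_x;
    const unsigned lower_right_idx = lower_left_idx + width;
    for (int ch = 0; ch < 3; ++ch)
        result[ch] = img_fst[lower_right_idx].val[ch] + img_fst[upper_left_idx].val[ch]
                   - img_fst[upper_right_idx].val[ch] - img_fst[lower_left_idx].val[ch];
}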

Scaling and rotating texture onto another texture by raw buffer data

I submitted this to gamedev, but they seem rather slow, so I hope I can find an answer here.
I've been messing with C++ AMP and OGRE in an attempt to make writing to/altering textures easier for myself. In doing so I've been trying to draw a texture onto my "dynamic" texture, with strange results: a solid 3/4 of the image is cropped off, and it's driving me mad, as I cannot seem to find the fix.
Here's a video of the problem: http://www.youtube.com/watch?v=uFWxHtHtqAI
And here's all of the necessary code for the sake of understanding, even though the kernel is really where the issue rests:
DynamicTexture.h
#define ValidTexCoord(x, y, width, height) ((x) >= 0 && (x) < (width) && (y) >= 0 && (y) < (height))

void TextureKernel(array<uint32, 2> &buffer, array_view<uint32, 2> texture, uint32 x, uint32 y, Real rot, Real scale, bool alpha)
{
    Real
        c = cos(-rot) / scale,
        s = sin(-rot) / scale;
    int32
        //e = int32(sqrt((texture.extent[1] * texture.extent[1]) + (texture.extent[0] * texture.extent[0])) * scale * 0.5F),
        dx = texture.extent[1] / 2,
        dy = texture.extent[0] / 2;

    parallel_for_each(buffer.extent, [=, &buffer](index<2> idx) restrict(amp)
    {
        int32
            tex_x = int32((Real(idx[1] - x) * c) - (Real(idx[0] - y) * s)) + dx,
            tex_y = int32((Real(idx[1] - x) * s) + (Real(idx[0] - y) * c)) + dy;

        if(ValidTexCoord(tex_x, tex_y, texture.extent[1], texture.extent[0]))
        {
            if(!alpha || (alpha && texture(tex_y, tex_x) != 0))
            {
                buffer(idx) = texture(tex_y, tex_x);
            }
        }
        else
        {
            buffer(idx) = 0x336699FF;
        }
    });
}

template<typename T, int32 Rank>
void SetKernel(array<T, Rank> &arr, T val)
{
    parallel_for_each(arr.extent, [&arr, val](index<Rank> idx) restrict(amp)
    {
        arr(idx) = val;
    });
}

class DynamicTexture
{
    static int32
        id;
    array<uint32, 2>
        buffer;

public:
    const int32
        width,
        height;
    TexturePtr
        textureptr;

    DynamicTexture(const int32 width, const int32 height, uint32 color = 0) :
        width(width),
        height(height),
        buffer(extent<2>(height, width))
    {
        SetKernel(buffer, color);
        textureptr = TextureManager::getSingleton().createManual("DynamicTexture" + StringConverter::toString(++id), ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, TextureType::TEX_TYPE_2D, width, height, 0, PixelFormat::PF_A8R8G8B8);
    }

    ~DynamicTexture()
    {
    }

    void Texture(TexturePtr texture, uint32 x, uint32 y, Real rot = 0.F, Real scale = 1.F, bool alpha = false)
    {
        HardwarePixelBufferSharedPtr
            pixelbuffer = texture->getBuffer();

        TextureKernel(buffer, array_view<uint32, 2>(texture->getHeight(), texture->getWidth(), (uint32 *)pixelbuffer->lock(HardwareBuffer::HBL_READ_ONLY)), x, y, rot, scale, alpha);
        pixelbuffer->unlock();
    }

    void CopyToBuffer()
    {
        HardwarePixelBufferSharedPtr
            pixelbuffer = textureptr->getBuffer();

        copy(buffer, stdext::make_checked_array_iterator<uint32 *>((uint32 *)pixelbuffer->lock(HardwareBuffer::HBL_DISCARD), width * height));
        pixelbuffer->unlock();
    }

    void Reset(uint32 color)
    {
        SetKernel(buffer, color);
    }
};

int32
    DynamicTexture::id = 0;
main.cpp
void initScene()
{
    dynamictexture = new DynamicTexture(window->getWidth(), window->getHeight());
    TextureManager::getSingleton().load("minotaur.jpg", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TextureType::TEX_TYPE_2D, 0);
}

bool frameStarted(const FrameEvent &evt)
{
    static Real
        ang = 0.F;

    ang += 0.05F;
    if(ang > Math::TWO_PI)
    {
        ang = 0.F;
    }

    dynamictexture->Reset(0);
    dynamictexture->Texture(TextureManager::getSingleton().getByName("minotaur.jpg"), dynamictexture->width / 2, dynamictexture->height / 2, ang, 4.F, true);
    dynamictexture->CopyToBuffer();

    return true;
}
As you can see, the dynamic texture is the size of the window (which in this case is 800x600) and the minotaur.jpg is 84x84. I'm simply placing it at half the width and height (center), rotating it by ang (radians), and scaling it to 4x.
In the kernel itself, I simply followed a 2D rotation matrix (where x and y are offset by the parameters 'x' and 'y'):
x' = x cosθ - y sinθ
y' = x sinθ + y cosθ
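Note that the kernel above actually applies the inverse of this rotation (hence the cos(-rot) and sin(-rot)): for each destination pixel it computes which source texel lands there,

tex_x = ((idx[1] - x) cos(-θ) - (idx[0] - y) sin(-θ)) / scale + dx
tex_y = ((idx[1] - x) sin(-θ) + (idx[0] - y) cos(-θ)) / scale + dy

which is the standard way to avoid holes when rotating and scaling an image.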
Also note that idx[1] represents the x value in the array and idx[0] represents the y, because the buffer is arranged so that value = buffer[(y * width) + x] (or something along those lines, but just know it's in the correct format).
Thanks for any and all help!
Regards,
Tannz0rz
I found the solution thanks to this example: https://sites.google.com/site/ofauckland/examples/rotating-pixels. The fix does the same inverse mapping in polar form: each destination pixel's offset from (x, y) is converted to a distance and an angle, the distance is divided by the scale and the rotation is subtracted from the angle, and the result is converted back into source texture coordinates:
const Real
    HALF_PI = Math::HALF_PI;
const int32
    cx = texture.extent[1] / 2,
    cy = texture.extent[0] / 2;

parallel_for_each(buffer.extent, [=, &buffer](index<2> idx) restrict(amp)
{
    int32
        tex_x = idx[1] - x,
        tex_y = idx[0] - y;

    Real
        dist = sqrt(Real((tex_x * tex_x) + (tex_y * tex_y))) / scale,
        theta = atan2(Real(tex_y), Real(tex_x)) - angle - HALF_PI;

    tex_x = int32(dist * sin(theta)) + cx;
    tex_y = int32(dist * cos(theta)) + cy;

    if(ValidTexCoord(tex_x, tex_y, texture.extent[1], texture.extent[0]))
    {
        buffer(idx) = texture(tex_y, tex_x);
    }
});