Panorama to Tiny Planet in OpenCV C++

I tried to convert a panorama to a tiny planet using C++ and OpenCV, but the resulting image is noisy. I am not really sure which part I did wrong; I think it has something to do with color.
Tutorial I referred to
http://codeofthedamned.com/index.php/the-little-planet-effect
Panorama source
Tiny image result
#import <opencv2/opencv.hpp>
#import <opencv2/imgcodecs/ios.h>
#import "OpenCVWrapper.h"
using namespace cv;
@implementation OpenCVWrapper
+ (UIImage*)createTinyPlanetFromImage: (UIImage*)image {
Mat pano;
UIImageToMat(image, pano);
Mat grayMat;
RenderProjection(pano, 1000.0, grayMat);
return MatToUIImage(grayMat);
}
void RenderProjection(Mat &pano, long len, Mat &output) {
const double k_pi = 3.1415926535897932384626433832795;
const double k_pi_inverse = 0.31830988618379067153776752674503;
output.create(len, len, CV_16UC3);
long half_len = len / 2;
cv::Size sz = pano.size();
for (long indexX = 0; indexX < len; ++indexX) {
for (long indexY = 0; indexY < len; ++indexY) {
double sphereX = (indexX - half_len) * 10.0 / len;
double sphereY = (indexY - half_len) * 10.0 / len;
double Qx, Qy, Qz;
if (GetIntersection(sphereX, sphereY, Qx, Qy, Qz)) {
double theta = std::acos(Qz);
double phi = std::atan2(Qy, Qx) + k_pi;
theta = theta * k_pi_inverse;
phi = phi * (0.5 * k_pi_inverse);
double Sx = min(sz.width -2.0, sz.width * phi);
double Sy = min(sz.height-2.0, sz.height * theta);
output.at<Vec3s>(int(indexY), int(indexX)) = BilinearSample(pano, Sx, Sy);
}
}
}
}
bool GetIntersection(double u, double v, double &x, double &y, double &z) {
double Nx = 0.0;
double Ny = 0.0;
double Nz = 1.0;
double dir_x = u - Nx;
double dir_y = v - Ny;
double dir_z = -1.0 - Nz;
double a = (dir_x * dir_x) + (dir_y * dir_y) + (dir_z * dir_z);
double b = (dir_x * Nx) + (dir_y * Ny) + (dir_z * Nz);
b *= 2;
double d = b * b;
double q = -0.5 * (b - std::sqrt(d));
double t = q / a;
x = (dir_x * t) + Nx;
y = (dir_y * t) + Ny;
z = (dir_z * t) + Nz;
return true;
}
Vec3s BilinearSample(Mat &image, double x, double y) {
Vec3s c00 = image.at<Vec3s>(int(y), int(x));
Vec3s c01 = image.at<Vec3s>(int(y), int(x) + 1);
Vec3s c10 = image.at<Vec3s>(int(y) + 1, int(x));
Vec3s c11 = image.at<Vec3s>(int(y) + 1, int(x) + 1);
double X0 = x - floor(x);
double X1 = 1.0 - X0;
double Y0 = y - floor(y);
double Y1 = 1.0 - Y0;
double w00 = X0 * Y0;
double w01 = X1 * Y0;
double w10 = X0 * Y1;
double w11 = X1 * Y1;
short r = short(c00[2] * w00 + c01[2] * w01
+ c10[2] * w10 + c11[2] * w11);
short g = short(c00[1] * w00 + c01[1] * w01
+ c10[1] * w10 + c11[1] * w11);
short b = short(c00[0] * w00 + c01[0] * w01
+ c10[0] * w10 + c11[0] * w11);
return make_BGR(b, g, r);
}
Vec3s make_BGR(short blue, short green, short red) {
Vec3s result;
result[0] = blue;
result[1] = green;
result[2] = red;
return result;
}
@end

The problem was solved when I replaced UIImageToMat(image, pano); and MatToUIImage(grayMat); with the code below, which also lets us remove the header #import <opencv2/imgcodecs/ios.h>:
static void UIImageToMat2(UIImage *image, cv::Mat &mat) {
assert(image.size.width > 0 && image.size.height > 0);
assert(image.CGImage != nil || image.CIImage != nil);
// Create a pixel buffer.
NSInteger width = image.size.width;
NSInteger height = image.size.height;
cv::Mat mat8uc4 = cv::Mat((int)height, (int)width, CV_8UC4);
// Draw all pixels to the buffer.
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
if (image.CGImage) {
// Render with using Core Graphics.
CGContextRef contextRef = CGBitmapContextCreate(mat8uc4.data, mat8uc4.cols, mat8uc4.rows, 8, mat8uc4.step, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, width, height), image.CGImage);
CGContextRelease(contextRef);
} else {
// Render with using Core Image.
static CIContext* context = nil; // I do not like this declaration contains 'static'. But it is for performance.
if (!context) {
context = [CIContext contextWithOptions:@{ kCIContextUseSoftwareRenderer: @NO }];
}
CGRect bounds = CGRectMake(0, 0, width, height);
[context render:image.CIImage toBitmap:mat8uc4.data rowBytes:mat8uc4.step bounds:bounds format:kCIFormatRGBA8 colorSpace:colorSpace];
}
CGColorSpaceRelease(colorSpace);
// Adjust byte order of pixel.
cv::Mat mat8uc3 = cv::Mat((int)height, (int)width, CV_8UC3); // note: cv::Mat takes (rows, cols)
cv::cvtColor(mat8uc4, mat8uc3, cv::COLOR_RGBA2BGR);
mat = mat8uc3;
}
and
static UIImage *MatToUIImage2(cv::Mat &mat) {
// Create a pixel buffer.
assert(mat.elemSize() == 1 || mat.elemSize() == 3);
cv::Mat matrgb;
if (mat.elemSize() == 1) {
cv::cvtColor(mat, matrgb, cv::COLOR_GRAY2RGB);
} else if (mat.elemSize() == 3) {
cv::cvtColor(mat, matrgb, cv::COLOR_BGR2RGB);
}
// Change a image format.
NSData *data = [NSData dataWithBytes:matrgb.data length:(matrgb.elemSize() * matrgb.total())];
CGColorSpaceRef colorSpace;
if (matrgb.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGImageRef imageRef = CGImageCreate(matrgb.cols, matrgb.rows, 8, 8 * matrgb.elemSize(), matrgb.step.p[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault);
UIImage *image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return image;
}
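One more thing worth checking (this is only my guess at a contributing factor, not something confirmed above): the render code creates its output as CV_16UC3 and samples the panorama with Vec3s, while both UIImageToMat and UIImageToMat2 hand back an 8-bit Mat, so Mat::at reads the pixel data with the wrong element size. A minimal sketch of a type-consistent 8-bit sampler, assuming the panorama is CV_8UC3 BGR after conversion:
// Sketch only: a standard 8-bit bilinear sample kept consistently CV_8UC3 / Vec3b,
// so the element type matches what UIImageToMat2 produces.
#include <opencv2/core.hpp>
static cv::Vec3b BilinearSample8U(const cv::Mat &image, double x, double y) {
    const int x0 = static_cast<int>(x), y0 = static_cast<int>(y);
    const double fx = x - x0, fy = y - y0;
    const cv::Vec3b c00 = image.at<cv::Vec3b>(y0,     x0);
    const cv::Vec3b c01 = image.at<cv::Vec3b>(y0,     x0 + 1);
    const cv::Vec3b c10 = image.at<cv::Vec3b>(y0 + 1, x0);
    const cv::Vec3b c11 = image.at<cv::Vec3b>(y0 + 1, x0 + 1);
    cv::Vec3b out;
    for (int c = 0; c < 3; ++c) {
        const double top    = c00[c] * (1.0 - fx) + c01[c] * fx;
        const double bottom = c10[c] * (1.0 - fx) + c11[c] * fx;
        out[c] = cv::saturate_cast<uchar>(top * (1.0 - fy) + bottom * fy);
    }
    return out;
}
The output Mat would then be created as CV_8UC3 and written with Vec3b as well, which also matches the assert in MatToUIImage2 above (elemSize() of 1 or 3).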

Related

Convert a fisheye image to an equirectangular image with opencv4

I want to transform a single round fisheye image into an equirectangular image with a C++ algorithm and OpenCV 4.
The idea is to start from an input image loaded on my computer, like this:
and to obtain an output image like this:
I'm using the method described on this blog:
http://paulbourke.net/dome/dualfish2sphere/
The method can be described by this picture:
Unfortunately, when I run my code I obtain something like this:
I'm working on macOS with Xcode, and I use the iTerm2 terminal to build and execute my code.
The code is the following:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
const double PI = 3.141592653589793;
const string PATH_IMAGE = "/Users/Kenza/Desktop/Xcode_cpp_opencv/PaulBourke2/PaulBourke2/Images/img1.jpg";
const int ESC = 27;
Point2f findCorrespondingFisheyePoint(int Xe, int Ye, double He, double We, double Hf, double Wf, double FOV){
Point2f fisheyePoint;
double Xfn, Yfn; //Normalized Cartesian Coordinates
double longitude, latitude, Px, Py, Pz; //Spherical Coordinates
double r, theta; //Polar coordinates
double Xpn, Ypn; //Normalized Polar coordinates
//Normalize Coordinates
Xfn = ( ( 2.0 * (double)Xe ) - We) / Wf;//Between -1 and 1
Yfn = ( ( 2.0 * (double)Ye ) - He) / Hf;//Between -1 and 1
//Normalize Coordinates to Spherical Coordinates
longitude = Xfn*PI; //Between -PI and PI (2*PI interval)
latitude = Yfn*(PI/2.0); //Between -PI/2 and PI/2 (PI interval)
Px = cos(latitude)*cos(longitude);
Py = cos(latitude)*sin(longitude);
Pz = sin(latitude);
//Spherical Coordinates to Polar Coordinates
r = 2.0 * atan2(sqrt(pow(Px,2)+pow(Pz,2)),Py)/FOV;
theta = atan2(Pz,-Px);
Xpn = r * cos(theta);
Ypn = r * sin(theta);
//Normalize Coordinates to CartesianImage Coordinates
fisheyePoint.x = (int)(((Xpn+1.0)*Wf)/2.0);
fisheyePoint.y = (int)(((Ypn+1.0)*Hf)/2.0);
return fisheyePoint;
}
int main(int argc, char** argv){
Mat fisheyeImage, equirectangularImage;
fisheyeImage = imread(PATH_IMAGE, CV_32FC1);
namedWindow("Fisheye Image", WINDOW_AUTOSIZE);
imshow("Fisheye Image", fisheyeImage);
while(waitKey(0) != ESC) {
//wait until the key ESC is pressed
}
//destroyWindow("Fisheye Image");
int Hf, Wf; //Height and width of the input image (=fisheyeImage)
double FOV;
int He, We; //Height and width of the output image (=equirectangularImage)
Hf = fisheyeImage.size().height;
Wf = fisheyeImage.size().width;
FOV = PI; //FOV in radian
//We keep the same ratio for the image input and the image output
We = Wf;
He = Hf;
equirectangularImage.create(Hf, Wf, fisheyeImage.type()); //We create the output image (=equirectangularImage)
//For each pixel of the output equirectangular image
for (int Xe = 0; Xe <equirectangularImage.size().width; Xe++){
for (int Ye = 0; Ye <equirectangularImage.size().height; Ye++){
equirectangularImage.at<Vec3b>(Point(Xe,Ye)) = fisheyeImage.at<Vec3b>(findCorrespondingFisheyePoint(Xe, Ye, He, We, Hf, Wf, FOV)); //We find the corresponding point in the fisheyeImage
}
}
namedWindow("Equirectangular Image", WINDOW_AUTOSIZE);
imshow("Equirectangular Image",equirectangularImage);
while(waitKey(0) != ESC) {
//wait until the key ESC is pressed
}
destroyWindow("Fisheye Image");
imwrite("equirectangularImage.jpg", equirectangularImage);
return 0;
}
With this code, I get the expected result:
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
const string PATH_IMAGE = "/Users/Kenza/Desktop/Xcode_cpp_opencv/Sos/Sos/Images/img1.jpg";
const int ESC = 27;
Point2f findCorrespondingFisheyePoint(int Xe, int Ye, int We, int He, float FOV){
Point2f fisheyePoint;
float theta, phi, r;
Point3f sphericalPoint;
theta = CV_PI * (Xe / ( (float) We ) - 0.5);
phi = CV_PI * (Ye / ( (float) He ) - 0.5);
sphericalPoint.x = cos(phi) * sin(theta);
sphericalPoint.y = cos(phi) * cos(theta);
sphericalPoint.z = sin(phi);
theta = atan2(sphericalPoint.z, sphericalPoint.x);
phi = atan2(sqrt(pow(sphericalPoint.x,2) + pow(sphericalPoint.z,2)), sphericalPoint.y);
r = ( (float) We ) * phi / FOV;
fisheyePoint.x = (int) ( 0.5 * ( (float) We ) + r * cos(theta) );
fisheyePoint.y = (int) ( 0.5 * ( (float) He ) + r * sin(theta) );
return fisheyePoint;
}
int main(int argc, char** argv){
Mat fisheyeImage, equirectangularImage;
int Wf, Hf;
float FOV;
int We, He;
fisheyeImage = imread(PATH_IMAGE, IMREAD_COLOR);
namedWindow("Fisheye Image");
imshow("fisheye Image", fisheyeImage);
Wf = fisheyeImage.size().width;
Hf = fisheyeImage.size().height;
FOV = (180 * CV_PI ) / 180;
We = Wf;
He = Hf;
while (waitKey(0) != ESC){
}
equirectangularImage.create(He, We, CV_8UC3);
for (int Xe = 0; Xe < We; Xe++){
for (int Ye = 0; Ye < He; Ye++){
Point2f fisheyePoint = findCorrespondingFisheyePoint(Xe, Ye, We, He, FOV);
if (fisheyePoint.x >= We || fisheyePoint.y >= He)
continue;
if (fisheyePoint.x < 0 || fisheyePoint.y < 0)
continue;
equirectangularImage.at<Vec3b>(Point(Xe, Ye)) = fisheyeImage.at<Vec3b>(fisheyePoint);
}
}
namedWindow("Equirectangular Image");
imshow("Equirectangular Image", equirectangularImage);
while (waitKey(0) != ESC){
}
imwrite("im2.jpg", equirectangularImage);
}
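As a possible refinement (just a sketch, reusing We, He, FOV, fisheyeImage, equirectangularImage and findCorrespondingFisheyePoint from the code above, and assuming <opencv2/imgproc.hpp> is included for cv::remap): the per-pixel double loop can be replaced by precomputing the mapping once and letting cv::remap do the lookup with bilinear interpolation and proper border handling.
// Precompute the equirectangular -> fisheye mapping into two float maps,
// then let cv::remap interpolate and fill out-of-range pixels with black.
cv::Mat mapX(He, We, CV_32FC1), mapY(He, We, CV_32FC1);
for (int Xe = 0; Xe < We; Xe++) {
    for (int Ye = 0; Ye < He; Ye++) {
        cv::Point2f p = findCorrespondingFisheyePoint(Xe, Ye, We, He, FOV);
        mapX.at<float>(Ye, Xe) = p.x;
        mapY.at<float>(Ye, Xe) = p.y;
    }
}
cv::remap(fisheyeImage, equirectangularImage, mapX, mapY,
          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));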

C++ Kinect v2 & freenect2: how to convert depth data to real world coordinates

I am trying to compute real-world xyz coordinates using a Kinect v2 camera (on Linux), but my computation gives me wrong results.
Here is the code:
cv::Point3f xyzWorld={0.0f};
xyzWorld.z = pointDepth;
xyzWorld.x = (float) ((float)x -(depthcx)) * xyzWorld.z / depthfx;
xyzWorld.y = (float) ((float)y - (depthcy)) * xyzWorld.z / depthfy;
xyzWorld.z = pointDepth;
return xyzWorld;
I think the problem is due to the depth camera's intrinsic values fx, fy, cx and cy.
Can someone help me?
I am using freenect2.
Why not just use the OpenNI implementation?
OniStatus VideoStream::convertDepthToWorldCoordinates(float depthX, float depthY, float depthZ, float* pWorldX, float* pWorldY, float* pWorldZ)
{
if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
{
m_errorLogger.Append("convertDepthToWorldCoordinates: Stream is not from DEPTH\n");
return ONI_STATUS_NOT_SUPPORTED;
}
float normalizedX = depthX / m_worldConvertCache.resolutionX - .5f;
float normalizedY = .5f - depthY / m_worldConvertCache.resolutionY;
OniVideoMode videoMode;
int size = sizeof(videoMode);
getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &size);
float const convertToMillimeters = (videoMode.pixelFormat == ONI_PIXEL_FORMAT_DEPTH_100_UM) ? 10.f : 1.f;
*pWorldX = (normalizedX * depthZ * m_worldConvertCache.xzFactor) / convertToMillimeters;
*pWorldY = (normalizedY * depthZ * m_worldConvertCache.yzFactor) / convertToMillimeters;
*pWorldZ = depthZ / convertToMillimeters;
return ONI_STATUS_OK;
}
and
OniStatus VideoStream::convertWorldToDepthCoordinates(float worldX, float worldY, float worldZ, float* pDepthX, float* pDepthY, float* pDepthZ)
{
if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
{
m_errorLogger.Append("convertWorldToDepthCoordinates: Stream is not from DEPTH\n");
return ONI_STATUS_NOT_SUPPORTED;
}
*pDepthX = m_worldConvertCache.coeffX * worldX / worldZ + m_worldConvertCache.halfResX;
*pDepthY = m_worldConvertCache.halfResY - m_worldConvertCache.coeffY * worldY / worldZ;
*pDepthZ = worldZ;
return ONI_STATUS_OK;
}
and the world conversion cache :
void VideoStream::refreshWorldConversionCache()
{
if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
{
return;
}
OniVideoMode videoMode;
int size = sizeof(videoMode);
getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &size);
size = sizeof(float);
float horizontalFov;
float verticalFov;
getProperty(ONI_STREAM_PROPERTY_HORIZONTAL_FOV, &horizontalFov, &size);
getProperty(ONI_STREAM_PROPERTY_VERTICAL_FOV, &verticalFov, &size);
m_worldConvertCache.xzFactor = tan(horizontalFov / 2) * 2;
m_worldConvertCache.yzFactor = tan(verticalFov / 2) * 2;
m_worldConvertCache.resolutionX = videoMode.resolutionX;
m_worldConvertCache.resolutionY = videoMode.resolutionY;
m_worldConvertCache.halfResX = m_worldConvertCache.resolutionX / 2;
m_worldConvertCache.halfResY = m_worldConvertCache.resolutionY / 2;
m_worldConvertCache.coeffX = m_worldConvertCache.resolutionX / m_worldConvertCache.xzFactor;
m_worldConvertCache.coeffY = m_worldConvertCache.resolutionY / m_worldConvertCache.yzFactor;
}
struct WorldConversionCache
{
float xzFactor;
float yzFactor;
float coeffX;
float coeffY;
int resolutionX;
int resolutionY;
int halfResX;
int halfResY;
} m_worldConvertCache;
all taken from
OpenNI GitHub repository
You can get the horizontal and vertical FOV directly from the description of each frame.
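If you do not want to pull in the whole OpenNI stream machinery, the depth-to-world part above boils down to a few lines. Here is a condensed sketch of the same maths (the FOV values and resolution are parameters you would fill in from your own frame description; nothing freenect2-specific is assumed):
#include <cmath>

struct WorldPoint { float x, y, z; };

// Same conversion as convertDepthToWorldCoordinates above, without the stream
// plumbing. horizontalFov/verticalFov are in radians, depthZ in millimetres,
// (depthX, depthY) is the pixel position in the depth image.
WorldPoint depthToWorld(float depthX, float depthY, float depthZ,
                        int resolutionX, int resolutionY,
                        float horizontalFov, float verticalFov)
{
    const float xzFactor = std::tan(horizontalFov / 2.0f) * 2.0f;
    const float yzFactor = std::tan(verticalFov / 2.0f) * 2.0f;
    const float normalizedX = depthX / resolutionX - 0.5f;
    const float normalizedY = 0.5f - depthY / resolutionY;
    return { normalizedX * depthZ * xzFactor,
             normalizedY * depthZ * yzFactor,
             depthZ };
}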

Scaling and rotating texture onto another texture by raw buffer data

I submitted this to gamedev, but they seem rather slow, so I hope I can find an answer here.
I've been messing with C++ AMP and OGRE in an attempt to make writing to and altering textures easier for myself. In doing so I've been trying to draw a texture onto my "dynamic" texture, with strange results: a solid 3/4 of the image appears to be cropped off, and it's driving me mad because I cannot seem to find the fix.
Here's a video of the problem: http://www.youtube.com/watch?v=uFWxHtHtqAI
And here's all of the necessary code for context, even though the kernel is really where the issue rests:
DynamicTexture.h
#define ValidTexCoord(x, y, width, height) ((x) >= 0 && (x) < (width) && (y) >= 0 && (y) < (height))
void TextureKernel(array<uint32, 2> &buffer, array_view<uint32, 2> texture, uint32 x, uint32 y, Real rot, Real scale, bool alpha)
{
Real
c = cos(-rot) / scale,
s = sin(-rot) / scale;
int32
//e = int32(sqrt((texture.extent[1] * texture.extent[1]) + (texture.extent[0] * texture.extent[0])) * scale * 0.5F),
dx = texture.extent[1] / 2,
dy = texture.extent[0] / 2;
parallel_for_each(buffer.extent, [=, &buffer](index<2> idx) restrict(amp)
{
int32
tex_x = int32((Real(idx[1] - x) * c) - (Real(idx[0] - y) * s)) + dx,
tex_y = int32((Real(idx[1] - x) * s) + (Real(idx[0] - y) * c)) + dy;
if(ValidTexCoord(tex_x, tex_y, texture.extent[1], texture.extent[0]))
{
if(!alpha || (alpha && texture(tex_y, tex_x) != 0))
{
buffer(idx) = texture(tex_y, tex_x);
}
}
else
{
buffer(idx) = 0x336699FF;
}
});
}
template<typename T, int32 Rank>
void SetKernel(array<T, Rank> &arr, T val)
{
parallel_for_each(arr.extent, [&arr, val](index<Rank> idx) restrict(amp)
{
arr(idx) = val;
});
}
class DynamicTexture
{
static int32
id;
array<uint32, 2>
buffer;
public:
const int32
width,
height;
TexturePtr
textureptr;
DynamicTexture(const int32 width, const int32 height, uint32 color = 0) :
width(width),
height(height),
buffer(extent<2>(height, width))
{
SetKernel(buffer, color);
textureptr = TextureManager::getSingleton().createManual("DynamicTexture" + StringConverter::toString(++id), ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, TextureType::TEX_TYPE_2D, width, height, 0, PixelFormat::PF_A8R8G8B8);
}
~DynamicTexture()
{
}
void Texture(TexturePtr texture, uint32 x, uint32 y, Real rot = 0.F, Real scale = 1.F, bool alpha = false)
{
HardwarePixelBufferSharedPtr
pixelbuffer = texture->getBuffer();
TextureKernel(buffer, array_view<uint32, 2>(texture->getHeight(), texture->getWidth(), (uint32 *)pixelbuffer->lock(HardwareBuffer::HBL_READ_ONLY)), x, y, rot, scale, alpha);
pixelbuffer->unlock();
}
void CopyToBuffer()
{
HardwarePixelBufferSharedPtr
pixelbuffer = textureptr->getBuffer();
copy(buffer, stdext::make_checked_array_iterator<uint32 *>((uint32 *)pixelbuffer->lock(HardwareBuffer::HBL_DISCARD), width * height));
pixelbuffer->unlock();
}
void Reset(uint32 color)
{
SetKernel(buffer, color);
}
};
int32
DynamicTexture::id = 0;
main.cpp
void initScene()
{
dynamictexture = new DynamicTexture(window->getWidth(), window->getHeight());
TextureManager::getSingleton().load("minotaur.jpg", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TextureType::TEX_TYPE_2D, 0);
}
bool frameStarted(const FrameEvent &evt)
{
static Real
ang = 0.F;
ang += 0.05F;
if(ang > Math::TWO_PI)
{
ang = 0.F;
}
dynamictexture->Reset(0);
dynamictexture->Texture(TextureManager::getSingleton().getByName("minotaur.jpg"), dynamictexture->width / 2, dynamictexture->height / 2, ang, 4.F, true);
dynamictexture->CopyToBuffer();
return true;
}
As you can see, the dynamic texture is the size of the window (which in this case is 800x600) and the minotaur.jpg is 84x84. I'm simply placing it at half the width and height (center), rotating it by ang (radians), and scaling it to 4x.
In the kernel itself, I simply followed a 2D rotation matrix (where x and y are offset by the parameters 'x' and 'y'):
x' = x cosθ - y sinθ
y' = x sinθ + y cosθ
Also note that idx[1] represents the x value in the array and idx[0] represents the y, because the buffer is arranged such that value = buffer[y + (x * height)] (or something along those lines; just know it's in the correct format).
Thanks for any and all help!
Regards,
Tannz0rz
I found the solution thanks to this guy: https://sites.google.com/site/ofauckland/examples/rotating-pixels
const Real
HALF_PI = Math::HALF_PI;
const int32
cx = texture.extent[1] / 2,
cy = texture.extent[0] / 2;
parallel_for_each(buffer.extent, [=, &buffer](index<2> idx) restrict(amp)
{
int32
tex_x = idx[1] - x,
tex_y = idx[0] - y;
Real
dist = sqrt(Real((tex_x * tex_x) + (tex_y * tex_y))) / scale,
theta = atan2(Real(tex_y), Real(tex_x)) - angle - HALF_PI;
tex_x = int32(dist * sin(theta)) + cx;
tex_y = int32(dist * cos(theta)) + cy;
if(ValidTexCoord(tex_x, tex_y, texture.extent[1], texture.extent[0]))
{
buffer(idx) = texture(tex_y, tex_x);
}
});
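For reference, here is the same polar-coordinate lookup written as a plain CPU loop, independent of AMP and OGRE; it is only a sketch to illustrate the inverse mapping (destination pixel back to source texel), not a drop-in replacement:
#include <cmath>
#include <cstdint>
#include <vector>

// dst and src are tightly packed row-major pixel buffers.
// (x, y) is where the centre of src should land in dst; angle is in radians.
void drawRotatedScaled(std::vector<uint32_t> &dst, int dstW, int dstH,
                       const std::vector<uint32_t> &src, int srcW, int srcH,
                       int x, int y, float angle, float scale)
{
    const float HALF_PI = 1.5707963f;
    const int cx = srcW / 2, cy = srcH / 2;
    for (int iy = 0; iy < dstH; ++iy) {
        for (int ix = 0; ix < dstW; ++ix) {
            // Destination offset from the placement point, in polar form.
            const float ox = float(ix - x), oy = float(iy - y);
            const float dist  = std::sqrt(ox * ox + oy * oy) / scale;
            const float theta = std::atan2(oy, ox) - angle - HALF_PI;
            const int tx = int(dist * std::sin(theta)) + cx;
            const int ty = int(dist * std::cos(theta)) + cy;
            if (tx >= 0 && tx < srcW && ty >= 0 && ty < srcH)
                dst[iy * dstW + ix] = src[ty * srcW + tx];
        }
    }
}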

Bilinear Interpolation, something wrong with my implementation

I am trying to implement a bilinear interpolation function, but for some reason I am getting bad output. I can't seem to figure out what's wrong; any help getting on the right track will be appreciated.
double lerp(double c1, double c2, double v1, double v2, double x)
{
if( (v1==v2) ) return c1;
double inc = ((c2-c1)/(v2 - v1)) * (x - v1);
double val = c1 + inc;
return val;
};
void bilinearInterpolate(int width, int height)
{
// if the current size is the same, do nothing
if(width == GetWidth() && height == GetHeight())
return;
//Create a new image
std::unique_ptr<Image2D> image(new Image2D(width, height));
// x and y ratios
double rx = (double)(GetWidth()) / (double)(image->GetWidth()); // oldWidth / newWidth
double ry = (double)(GetHeight()) / (double)(image->GetHeight()); // oldHeight / newHeight
// loop through destination image
for(int y=0; y<height; ++y)
{
for(int x=0; x<width; ++x)
{
double sx = x * rx;
double sy = y * ry;
uint xl = std::floor(sx);
uint xr = std::floor(sx + 1);
uint yt = std::floor(sy);
uint yb = std::floor(sy + 1);
for (uint d = 0; d < image->GetDepth(); ++d)
{
uchar tl = GetData(xl, yt, d);
uchar tr = GetData(xr, yt, d);
uchar bl = GetData(xl, yb, d);
uchar br = GetData(xr, yb, d);
double t = lerp(tl, tr, xl, xr, sx);
double b = lerp(bl, br, xl, xr, sx);
double m = lerp(t, b, yt, yb, sy);
uchar val = std::floor(m + 0.5);
image->SetData(x,y,d,val);
}
}
}
//Cleanup
mWidth = width; mHeight = height;
std::swap(image->mData, mData);
}
Input Image (4 pixels wide and high)
My Output
Expected Output (Photoshop's Bilinear Interpolation)
Photoshop's algorithm assumes that each source pixel's color is in the center of the pixel, while your algorithm assumes that it is in the pixel's top-left corner. This causes your results to be shifted half a pixel up and to the left compared to Photoshop.
Another way to look at it is that your algorithm maps the x coordinate range (0, srcWidth) to (0, dstWidth), while Photoshop maps (-0.5, srcWidth-0.5) to (-0.5, dstWidth-0.5), and the same in y coordinate.
Instead of:
double sx = x * rx;
double sy = y * ry;
You can use:
double sx = (x + 0.5) * rx - 0.5;
double sy = (y + 0.5) * ry - 0.5;
to get similar results. Note that this can give you a negative value for sx and sy.
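Since the adjusted sx and sy can land slightly outside the source image (negative on the first row/column, past the last pixel on the other side), one simple way to handle it, sketched here against the uint/GetWidth()/GetHeight() helpers from your code, is to clamp the sample coordinates so all four taps stay inside:
// Needs <algorithm> and <cmath>. Clamping replicates the edge pixels,
// which is what most resamplers do at the border.
double sx = (x + 0.5) * rx - 0.5;
double sy = (y + 0.5) * ry - 0.5;
sx = std::max(0.0, std::min(sx, (double)GetWidth()  - 1.0));
sy = std::max(0.0, std::min(sy, (double)GetHeight() - 1.0));
uint xl = (uint)std::floor(sx);
uint xr = std::min(xl + 1, (uint)GetWidth()  - 1);
uint yt = (uint)std::floor(sy);
uint yb = std::min(yt + 1, (uint)GetHeight() - 1);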

Separating Axis Theorem is driving me nuts!

I am working on an implementation of the Separating Axis Theorem for use in 2D games. It kind of works, but only kind of.
I use it like this:
bool penetration = sat(c1, c2) && sat(c2, c1);
Where c1 and c2 are of type Convex, defined as:
class Convex
{
public:
float tx, ty;
public:
std::vector<Point> p;
void translate(float x, float y) {
tx = x;
ty = y;
}
};
(Point is a structure of float x, float y)
The points are typed in clockwise.
My current code (ignore Qt debug):
bool sat(Convex c1, Convex c2, QPainter *debug)
{
//Debug
QColor col[] = {QColor(255, 0, 0), QColor(0, 255, 0), QColor(0, 0, 255), QColor(0, 0, 0)};
bool ret = true;
int c1_faces = c1.p.size();
int c2_faces = c2.p.size();
//For every face in c1
for(int i = 0; i < c1_faces; i++)
{
//Grab a face (face x, face y)
float fx = c1.p[i].x - c1.p[(i + 1) % c1_faces].x;
float fy = c1.p[i].y - c1.p[(i + 1) % c1_faces].y;
//Create a perpendicular axis to project on (axis x, axis y)
float ax = -fy, ay = fx;
//Normalize the axis
float len_v = sqrt(ax * ax + ay * ay);
ax /= len_v;
ay /= len_v;
//Debug graphics (ignore)
debug->setPen(col[i]);
//Draw the face
debug->drawLine(QLineF(c1.tx + c1.p[i].x, c1.ty + c1.p[i].y, c1.p[(i + 1) % c1_faces].x + c1.tx, c1.p[(i + 1) % c1_faces].y + c1.ty));
//Draw the axis
debug->save();
debug->translate(c1.p[i].x, c1.p[i].y);
debug->drawLine(QLineF(c1.tx, c1.ty, ax * 100 + c1.tx, ay * 100 + c1.ty));
debug->drawEllipse(QPointF(ax * 100 + c1.tx, ay * 100 + c1.ty), 10, 10);
debug->restore();
//Carve out the min and max values
float c1_min = FLT_MAX, c1_max = FLT_MIN;
float c2_min = FLT_MAX, c2_max = FLT_MIN;
//Project every point in c1 on the axis and store min and max
for(int j = 0; j < c1_faces; j++)
{
float c1_proj = (ax * (c1.p[j].x + c1.tx) + ay * (c1.p[j].y + c1.ty)) / (ax * ax + ay * ay);
c1_min = min(c1_proj, c1_min);
c1_max = max(c1_proj, c1_max);
}
//Project every point in c2 on the axis and store min and max
for(int j = 0; j < c2_faces; j++)
{
float c2_proj = (ax * (c2.p[j].x + c2.tx) + ay * (c2.p[j].y + c2.ty)) / (ax * ax + ay * ay);
c2_min = min(c2_proj, c2_min);
c2_max = max(c2_proj, c2_max);
}
//Return if the projections do not overlap
if(!(c1_max >= c2_min && c1_min <= c2_max))
ret = false; //return false;
}
return ret; //return true;
}
What am I doing wrong? It registers collisions perfectly but is over-sensitive on one edge (in my test using a triangle and a diamond):
//Triangle
push_back(Point(0, -150));
push_back(Point(0, 50));
push_back(Point(-100, 100));
//Diamond
push_back(Point(0, -100));
push_back(Point(100, 0));
push_back(Point(0, 100));
push_back(Point(-100, 0));
I am getting mega-ADHD over this, please help me out :)
http://u8999827.fsdata.se/sat.png
OK, I was wrong the first time. Looking at your picture of a failure case, it is obvious that a separating axis exists and that it is one of the normals (the normal to the long edge of the triangle). The projection is correct; however, your bounds are not.
I think the error is here:
float c1_min = FLT_MAX, c1_max = FLT_MIN;
float c2_min = FLT_MAX, c2_max = FLT_MIN;
FLT_MIN is the smallest normal positive number representable by a float, not the most negative number. In fact you need:
float c1_min = FLT_MAX, c1_max = -FLT_MAX;
float c2_min = FLT_MAX, c2_max = -FLT_MAX;
or even better for C++
float c1_min = std::numeric_limits<float>::max(), c1_max = -c1_min;
float c2_min = std::numeric_limits<float>::max(), c2_max = -c2_min;
because you're probably seeing negative projections onto the axis.
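A tiny sketch of the difference (and of std::numeric_limits<float>::lowest(), which C++11 added for exactly this purpose):
#include <cfloat>
#include <iostream>
#include <limits>

int main() {
    // FLT_MIN is the smallest positive normalized float (~1.18e-38), so every
    // negative projection compares below it and the max tracking never updates.
    std::cout << "FLT_MIN  = " << FLT_MIN << '\n';
    std::cout << "-FLT_MAX = " << -FLT_MAX << '\n';
    std::cout << "lowest   = " << std::numeric_limits<float>::lowest() << '\n'; // same as -FLT_MAX
}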