I have created a stitching algorithm that can stitch many images together. It doesn't use bundle adjustment yet; that is a later addition to be made.
After finding the homography between the two images I am stitching, I do a series of translations, perspectiveTransforms and warpPerspectives to achieve my end result. The following two results are two-image stitches: the first with the images passed left to right, the second with the images passed right to left. Each includes information about the translation matrix created to stitch the images. The result image is the new padded size of the blank image that will contain the stitched images (what you see pictured).
Stitch 2:
Image 1: Min x 478.711 Min y 3.16 Max x 1853.66 Max y 1449.42
Image 2: Min x 0 Min y 0 Max x 1200 Max y 900
Result Image: Min x 0 Min y 0 Max x 1853.66 Max y 1449.42
[1, 0, 0;
 0, 1, 0;
 0, 0, 1]

Stitch2/Rev:
Image 1: Min x -671.462 Min y -461.816 Max x 652.656 Max y 655.324
Image 2: Min x 0 Min y 0 Max x 1200 Max y 900
Result Image: Min x -671.462 Min y -461.816 Max x 1200 Max y 900
[1, 0, 672;
 0, 1, 462;
 0, 0, 1]
Looks like it works great, which is awesome. However, when I run the code piping in 3 images I get weird lines where the black background should be, and then my padding begins to fail. Again, reference the images and translations.
---------------------
Image 1: Min x 390.279 Min y 72.4343 Max x 1658.93 Max y 1110.17
Image 2: Min x 0 Min y 0 Max x 1200 Max y 900
Result Image: Min x 0 Min y 0 Max x 1658.93 Max y 1110.17
[1, 0, 0;
0, 1, 0;
0, 0, 1]
Image 1: Min x 439.817 Min y -20.4711 Max x 2205.2 Max y 1288.93
Image 2: Min x 0 Min y 0 Max x 1200 Max y 900
Result Image: Min x 0 Min y -20.4711 Max x 2205.2 Max y 1288.93
[1, 0, 0;
0, 1, 21;
0, 0, 1]
---------------------
Code for this section follows. Any help is appreciated.
Mat result, descriptors_updated;
vector<Point2f> fourPointImage1, fourPointImage2, image1dst, image2dst;
vector<KeyPoint> keypoints_updated;
float min_x, min_y, max_x, max_y, result_x, result_y, img1_min_x, img1_min_y, img2_min_x, img2_min_y,img1_max_x, img1_max_y, img2_max_x, img2_max_y, result_min_x, result_max_x, result_min_y, result_max_y;
//get four corners of image MATs
fourPointImage1 = fourCorners(image1);
fourPointImage2 = fourCorners(image2);
//get the min and max corners
tie(min_x, min_y, max_x, max_y) = minMaxCorners(fourPointImage1);
//- Htr is used to map image one into the result, in line with the already warped image two
Mat Htr = Mat::eye(3,3,CV_64F);
if (min_x < 0){
max_x = image2.size().width - min_x;
Htr.at<double>(0,2)= -min_x;
}
if (min_y < 0){
max_y = image2.size().height - min_y;
Htr.at<double>(1,2)= -min_y;
}
//- Need to create a padded blank image to accommodate stitched images.
//- Must first determine where the translations of the two images will end up to determine if shift is needed and how much to pad blank image
perspectiveTransform(fourPointImage1, image1dst, Htr*homography);
perspectiveTransform(fourPointImage2, image2dst, Htr);
//- Now determine what is out of bounds
//New coordinates for image 1
tie(img1_min_x, img1_min_y,img1_max_x,img1_max_y) = minMaxCorners(image1dst);
cout << "Image 1: Min x " << img1_min_x << " Min y " << img1_min_y << " Max x " << img1_max_x << " Max y " << img1_max_y << endl;
//New coordinates for image 2
tie(img2_min_x, img2_min_y,img2_max_x,img2_max_y) = minMaxCorners(image2dst);
cout << "Image 2: Min x " << img2_min_x << " Min y " << img2_min_y << " Max x " << img2_max_x << " Max y " << img2_max_y << endl;
//determine bounding area for resulting images
result_min_x = min(img1_min_x, img2_min_x);
result_max_x = max(img1_max_x, img2_max_x);
result_min_y = min(img1_min_y, img2_min_y);
result_max_y = max(img1_max_y, img2_max_y);
cout << "Result Image: Min x " << result_min_x << " Min y " << result_min_y << " Max x " << result_max_x << " Max y " << result_max_y << endl;
//Determine size of padded result (blank image) to be used for stitching
result_x = (abs(floor(result_min_x)) + abs(ceil(result_max_x)));
result_y = (abs(floor(result_min_y)) + abs(ceil(result_max_y)));
result = Mat(Size(result_x,result_y), CV_32FC1,Scalar(0,0,0));
//-Move the images to the positive by creating a matrix to represent the translation
int anchor_x = 0, anchor_y = 0;
Mat translation = Mat::eye(3,3,CV_64F);
if(result_min_x < 0){
anchor_x = (int) floor(result_min_x);
translation.at<double>(0,2) -= anchor_x;
}
if(result_min_y < 0){
anchor_y = (int) floor(result_min_y);
translation.at<double>(1,2) -= anchor_y;
}
cout << translation << endl;
//Warp each image accordingly to the new padded result image
warpPerspective(image2, result, translation, result.size(), INTER_LINEAR, BORDER_TRANSPARENT, 0);
warpPerspective(image1, result, (translation*homography), result.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);
vector<Point2f> fourCorners(Mat image){
vector<Point2f> corners;
corners.push_back(Point2f (0,0));
corners.push_back(Point2f (image.size().width,0));
corners.push_back(Point2f (0, image.size().height));
corners.push_back(Point2f (image.size().width, image.size().height));
return(corners);
}
tuple<float, float, float, float> minMaxCorners(vector<Point2f> imageMatrix){
float min_x1, min_x2, min_y1, min_y2, max_x1, max_x2, max_y1, max_y2,img_min_x, img_min_y, img_max_x, img_max_y;
min_x1 = min(imageMatrix.at(0).x, imageMatrix.at(1).x);
min_x2 = min(imageMatrix.at(2).x, imageMatrix.at(3).x);
min_y1 = min(imageMatrix.at(0).y, imageMatrix.at(1).y);
min_y2 = min(imageMatrix.at(2).y, imageMatrix.at(3).y);
max_x1 = max(imageMatrix.at(0).x, imageMatrix.at(1).x);
max_x2 = max(imageMatrix.at(2).x, imageMatrix.at(3).x);
max_y1 = max(imageMatrix.at(0).y, imageMatrix.at(1).y);
max_y2 = max(imageMatrix.at(2).y, imageMatrix.at(3).y);
img_min_x = min(min_x1, min_x2);
img_min_y = min(min_y1, min_y2);
img_max_x = max(max_x1, max_x2);
img_max_y = max(max_y1, max_y2);
return{img_min_x,img_min_y, img_max_x, img_max_y };
}
EDIT: I am not sure why, but it only happens sometimes, as you can see in the following image.
I want to see the distribution of a color with respect to image width. That is, if a (black and white) image has a width of 720 px, then I want to conclude that a specific range (e.g. pixels [500, 720]) contains more white than the rest of the image. What I thought is that I need a slice of the image of 720x1 px, and then I need to check the values and distribute them with respect to the width of 720 px. But I don't know how to apply this in a suitable way.
edit: I use OpenCV 4.0.0 with C++.
Example case: in the first image, it is obvious that the right-hand-side pixels are white. I want to get the estimated coordinates of this dense line or zone. The light pink zone is what I am interested in, and the red borders are the range where I want to find it.
If you want to get the minimum continuous range of image columns which contains more white than the rest of the image, then you first need to calculate the number of white pixels in each column. Let's assume we have an image 720x500 (500 pixels high and 720 pixels wide). Then you will get an array Arr of 720 elements, each equal to the number of white pixels in the corresponding column (1x500).
const int Width = img.cols;
int* Arr = new int[Width]; // Arr[x] = number of white pixels in column x
for( int x = 0; x < Width; x++ ) {
Arr[x] = 0;
for( int y = 0; y < img.rows; y++ ) {
if ( img.at<cv::Vec3b>(y,x) == cv::Vec3b(255,255,255) ) {
Arr[x]++;
}
}
}
You need to find a minimum range [A;B] in this array that satisfies the condition Sum(Arr[0 to A-1]) + Sum(Arr[B+1 to Width-1]) < Sum(Arr[A to B]).
// minimum range width is guaranteed to be less or equal to (Width/2 + 1)
int bestA = 0, minimumWidth = Width/2 + 1;
int total = RangeSum(Arr, 0, Width-1);
for (int i = 0; i < Width; i++) {
for (int j = i; j < Width && j < i + minimumWidth; j++) {
int rangeSum = RangeSum(Arr, i, j);
if (rangeSum > total - rangeSum) {
bestA = i;
minimumWidth = j - i + 1;
break;
}
}
}
std::cout << "Most white minimum range - [" << bestA << ";" << bestA + minimumWidth - 1 << "]\n";
You can optimize the code if you precalculate the sums of all ranges [0; i], for i from 0 to Width - 1. Then you can calculate RangeSum(Arr, A, B) as PrecalculatedSums[B] - PrecalculatedSums[A - 1] (treating PrecalculatedSums[-1] as 0), in O(1) complexity.
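For example, one possible way to write this (the RangeSum helper is not shown above, so this is only a sketch that keeps the same call signature):
#include <vector>
// Built once, right after Arr is filled: PrecalculatedSums[i] = Arr[0] + ... + Arr[i].
std::vector<int> PrecalculatedSums;
void BuildPrecalculatedSums(const int* Arr, int Width) {
    PrecalculatedSums.assign(Width, 0);
    int running = 0;
    for (int x = 0; x < Width; x++) {
        running += Arr[x];
        PrecalculatedSums[x] = running;
    }
}
// O(1) inclusive sum of Arr[A..B]; drop-in replacement for the RangeSum used above.
int RangeSum(const int* /*Arr*/, int A, int B) {
    return PrecalculatedSums[B] - (A > 0 ? PrecalculatedSums[A - 1] : 0);
}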
I have a matrix with values filled in every field. The size is e.g. 15x15 (225 fields). Now I want to calculate the weight of every field based on the centre field of the matrix: the greater the distance from the centre, the less that pixel's value will be weighted in the calculation. This should look like a circle around the centre field. Here is an example image:
The small rectangle is the centre field. The weighting should be a Gaussian-weighted circular window with a sigma of 1.5. How could I get this done? My thought was something like the following, where every weight is filled into a matrix of the same size for the calculation afterwards.
expf = 1.f/(2.f * 1.5 * 1.5);
[...]
W[k] = (i*i + j*j) * expf;
Where i and j are the distance from the centre pixel (e.g. for the first iteration i = -7, j = -7).
For me this solution seemed to be fine, but the values I get are always very small, e.g.:
W[0]: 3.48362e-10
W[1]: 6.26123e-09
W[2]: 7.21553e-08
W[3]: 5.3316e-07
W[4]: 2.52596e-06
W[5]: 7.67319e-06
W[6]: 1.49453e-05
[...]
W[40]: 0.000523195
W[41]: 0.000110432
W[42]: 1.49453e-05
W[43]: 1.29687e-06
W[44]: 7.21553e-08
W[45]: 5.3316e-07
W[46]: 9.58266e-06
W[47]: 0.000110432
W[48]: 0.000815988
[...]
W[85]: 0.055638
W[86]: 0.0117436
W[87]: 0.00158933
W[88]: 0.000137913
[...]
W[149]: 7.67319e-06
W[150]: 2.52596e-06
W[151]: 4.53999e-05
W[152]: 0.000523195
W[153]: 0.00386592
Could it be that the calculation of the weights is wrong?
The PDF of a multivariate normal distribution is
$(2\pi)^{-k/2}\,|\Sigma|^{-1/2}\exp\left(-\tfrac{1}{2}(x-\mu)^\top \Sigma^{-1}(x-\mu)\right)$
For your case, this translates to
double weight(int i, int j, double var) {
return 1 / (2 * M_PI) * std::exp(-0.5 * (i * i + j * j) / var / var);
}
where i and j are the offsets from the centre (0, 0), and var controls the spread (the exponent divides by var twice, so var plays the role of the standard deviation).
Note:
This is the PDF. If you want the value to be 1 at the center, use weight(i, j, var) / weight(0, 0, var). Otherwise, you will indeed get small numbers.
The decay is specified by var - lower values will show larger decay.
The following code prints
$ g++ --std=c++11 gs.cpp && ./a.out
1
0.884706
1
4.78512e-06
for example
#include <cmath>
#include <iostream>
double weight(int i, int j, double var) {
return 1 / (2 * M_PI) * std::exp(-0.5 * (i * i + j * j) / var / var);
}
int main() {
{
const double f = weight(0, 0, 20);
std::cout << weight(0, 0, 20) / f << std::endl;
std::cout << weight(-7, -7, 20) / f << std::endl;
}
{
const double f = weight(0, 0, 2);
std::cout << weight(0, 0, 2) / f << std::endl;
std::cout << weight(-7, -7, 2) / f << std::endl;
}
}
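Applied to the 15x15 window with sigma 1.5 from the question, a minimal sketch could look like this (normalised so the centre weight is 1, as in the note above; the window size and sigma are the only values taken from the question):
#include <cmath>
#include <iostream>
// Same weight() as above: unnormalised Gaussian centred on (0, 0).
double weight(int i, int j, double var) {
    return 1 / (2 * M_PI) * std::exp(-0.5 * (i * i + j * j) / var / var);
}
int main() {
    const int half = 7;           // 15x15 window -> offsets -7..7 from the centre
    const double sigma = 1.5;
    const double centre = weight(0, 0, sigma);
    double W[15][15];
    for (int i = -half; i <= half; i++) {
        for (int j = -half; j <= half; j++) {
            W[i + half][j + half] = weight(i, j, sigma) / centre;  // centre field = 1
        }
    }
    std::cout << "centre weight: " << W[half][half]
              << ", corner weight: " << W[0][0] << std::endl;
}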
I am currently working on a project which partly relies on OpenCV's Support Vector Machines (SVMs).
In this project, I consider a training database of 454 images, equally divided into two classes: blurred or sharp. My aim is to determine whether an input image is blurred, according to a set of 16 descriptors adapted from the academic literature.
These descriptors are computed for all the images of the database. Then, I train an SVM with the obtained data to predict the blurriness of an image, using OpenCV's train_auto function. In addition, in order to check the relevance of the classification, I perform cross-validation on the training database: 80% of the images are randomly picked to train the SVM, while the remaining 20% are used to test it. Again, both the training and test images are equally split into blurred and non-blurred images.
Now, it happens that this SVM is not correctly trained. Indeed, the same label is attributed to all the images, leading to a misclassification rate of 50%. I am now trying to find and understand the cause of this phenomenon, which is all the more surprising given that, if I use the fitcsvm function with the same dataset in Matlab, the misclassification rate drops to 10.05%. So, a priori, the data can actually be separated by an SVM: but, for some reason, OpenCV cannot find the right parameters to tune and train it.
My implementation is below:
int main() {
// Reads descriptors
// A [0 .. 226] : examples of descriptors for images of class 0 (blurred)
// A [227 .. 453] : examples of descriptors for images of class 1 (sharp)
Mat A = readData("data_train_16.txt");
// Shuffles the indices
std::vector<int> t;
t.reserve(227);
for (int i = 0 ; i < 227 ; i++) t.push_back(i);
FisherYatesShuffle(t);
// Selects 80 % of the indices to train a SVM
int n = 0.8 * 227;
Mat trainData(2 * n, 16, CV_32F);
for (int i = 0 ; i < n ; i++) {
A.row(t[i]).copyTo(trainData.row(i));
A.row(227 + t[i]).copyTo(trainData.row(i + n));
}
// So we have :
// trainData [0 .. n - 1] : examples of descriptors for images of class 0
// trainData [n .. 2 * n - 1] : examples of descriptors for images of class 1
Mat labels(2 * n, 1, CV_32S, Scalar::all(0));
labels.rowRange(0, n).setTo(0);
labels.rowRange(n, 2 * n).setTo(1);
// Trains the SVM
CvSVM svm;
svm.train_auto(trainData, labels, Mat(), Mat(), CvSVMParams());
// Tests the SVM on the remaining 20% images
int ok_0 = 0, ok_1 = 0;
int terms = t.size() - n;
for (int i = n ; i < (int) t.size() ; i++) {
Mat D;
int answer;
// Tests if a blurred image is marked as 'blurred' by the SVM
A.row(t[i]).copyTo(D);
answer = int(svm.predict(D));
if (answer == 0) ok_0++;
// Tests if a sharp image is marked as 'sharp' by the SVM
A.row(227 + t[i]).copyTo(D);
answer = int(svm.predict(D));
if (answer == 1) ok_1++;
}
// Outputs some info
CvSVMParams params = svm.get_params();
std::cout << "SVM type : " << params.svm_type << ", kernel : " << params.kernel_type << std::endl;
std::cout << "C : " << params.C << ", nu : " << params.nu << ", deg : " << params.degree << ", gamma : " << params.gamma << ", coef0 : " << params.coef0 << std::endl;
std::cout << "Class 0 : " << ok_0 << "/" << terms << " (" << 100.0f * float(ok_0) / float(terms) << " %), ";
std::cout << "Class 1 : " << ok_1 << "/" << terms << " (" << 100.0f * float(ok_1) / float(terms) << " %)" << std::endl;
std::cout << "Total : " << 100.0f * float(ok_0 + ok_1) / float(2 * terms) << " %" << std::endl;
return 0;
}
Output :
SVM type : 100 [i.e. C_SVC], kernel : 2 [i.e. RBF]
C : 2.5, nu : 0, deg : 0, gamma : 0.50625, coef0 : 0
Class 0 : 0/46 (0 %), Class 1 : 46/46 (100 %)
Such a ratio leads me to think that, to be labelled as 0, a point must be very close to the existing points already marked as 0. So, instead of having a well-trained SVM like this, I suspect the current situation is similar to that one. The two figures represent 2D SVMs trained with the same dataset (approximately; I just outputted them at different times). Blue dots represent points of class 0 and red dots points of class 1. In the second figure, you can see that, due to bad parameters for the SVM's kernel, we get the following situation: even if a point of class 0 (i.e. a blue point) falls near many other blue points, say in the bottom left corner of the figure, that point will be misclassified anyway, since it falls in the red zone corresponding to class 1. This is not the case in the first figure, where the margins are correctly computed.
I think it's the same problem here. And it doesn't tell me why Matlab's fitcsvm succeeds in classifying the data, when OpenCV's train_auto doesn't.
I tried to :
check the contents of the trainData and labels matrices : everything is normal
normalize the data for each variable : no changes
use CvParamGrid structures in the train_auto function, and set some parameters to fixed values by setting the grid step to 0 (see the sketch after this list): no changes
change params.term_crit in train_auto and increase the number of iterations for OpenCV's optimization problem solver : no changes
decrease the dimensionality of the data and use only 2 components : the algorithm works with a misclassification rate of approximately 16 or 17 % (I obtained a similar value with Matlab).
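For reference, the CvParamGrid attempt mentioned in the list above looked roughly like this, replacing the train_auto call in the code (only a sketch; the fixed gamma value is a placeholder, not the exact value I used):
CvSVMParams params;
params.svm_type    = CvSVM::C_SVC;
params.kernel_type = CvSVM::RBF;
params.gamma       = 1.0 / 16.0;   // placeholder: e.g. 1 / (number of descriptors)
// A grid whose step is <= 1 is not searched, so gamma keeps the value set in params.
CvParamGrid fixedGammaGrid(params.gamma, params.gamma, 0);
svm.train_auto(trainData, labels, Mat(), Mat(), params, 10,
               CvSVM::get_default_grid(CvSVM::C),  // C is still optimised
               fixedGammaGrid);                    // gamma is held fixed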
Do you have any advice, or any leads that could explain this strange behavior of my code? Thank you in advance!
[Edit] Minor changes (fixed some grammatical errors, added some extra explanations).
I have image A and I want to get the bird's-eye view of image A, so I used the getPerspectiveTransform method to get the transform matrix. The output is a 3x3 matrix; see my code. In my case I want to know the scale factor of this 3x3 matrix. I have looked at the OpenCV documentation, but I cannot find details of the transform matrix and I don't know how to get the scale. I have also read a paper which said we can get scaling, shearing and rotation from a11, a12, a21, a22 (see the picture). So how can I get the scale factor? Can you give me some advice, and can you explain the getPerspectiveTransform output matrix? Thank you!
Point2f Points[4], dst[4];
Points[0] = Point2f(..., ...);
Points[1] = Point2f(..., ...);
Points[2] = Point2f(..., ...);
Points[3] = Point2f(..., ...);
dst[0] = Point2f(..., ...);
dst[1] = Point2f(..., ...);
dst[2] = Point2f(..., ...);
dst[3] = Point2f(..., ...);
Mat trans = getPerspectiveTransform(Points, dst); // I want to know the scale of trans
warpPerspective(A, B, trans, img.size());
When I change the camera position, the trapezium size and position will change. Right now we map it into a rectangle whose width/height are known. But I think that with the camera at a different height the rectangle size should change, because if we map to a rectangle of the same size, the rectangle may show a different level of detail. That's why I want to know the scale from the 3x3 transform matrix. For example, if trapezium 1 and trapezium 2 have transform scales s1 and s2, then we can set rectangle1(width, height) = s2/s1 * rectangle2(width, height).
Ok, here you go:
H is the homography
H = T*R*S*L with
T = [1,0,tx; 0,1,ty; 0,0,1]
R = [cos(a),sin(a),0; -sin(a),cos(a),0; 0,0,1]
S = [sx,shear,0; 0,sy,0; 0,0,1]
L = [1,0,0; 0,1,0; lx,ly,1]
where tx/ty is translation; a is rotation angle; sx/sy is scale; shear is shearing factor; lx/ly are perspective foreshortening parameters.
You want to compute sx and sy if I understood right.
Now, if lx and ly were both 0, it would be easy to compute sx and sy: decompose the upper-left 2x2 part of H with a QR decomposition, giving Q*R, where Q is an orthogonal matrix (= rotation matrix) and R is an upper triangular matrix ([sx, shear; 0, sy]).
h1 h2 h3
h4 h5 h6
0 0 1
=> Q*R = [h1,h2; h4,h5]
But lx and ly destroy this easy way, so you have to find out how the upper-left part of the matrix would look without the influence of lx and ly.
If your whole homography is:
h1 h2 h3
h4 h5 h6
h7 h8 1
then you'll have:
Q*R =
h1-(h7*h3) h2-(h8*h3)
h4-(h7*h6) h5-(h8*h6)
So if you compute Q and R from this matrix, you can compute rotation, scale and shear easily.
I've tested this with a small C++ program:
double scaleX = (rand()%200) / 100.0;
double scaleY = (rand()%200) / 100.0;
double shear = (rand()%100) / 100.0;
double rotation = CV_PI*(rand()%360)/180.0;
double transX = rand()%100 - 50;
double transY = rand()%100 - 50;
double perspectiveX = (rand()%100) / 1000.0;
double perspectiveY = (rand()%100) / 1000.0;
std::cout << "scale: " << "(" << scaleX << "," << scaleY << ")" << "\n";
std::cout << "shear: " << shear << "\n";
std::cout << "rotation: " << rotation*180/CV_PI << " degrees" << "\n";
std::cout << "translation: " << "(" << transX << "," << transY << ")" << std::endl;
cv::Mat ScaleShearMat = (cv::Mat_<double>(3,3) << scaleX, shear, 0, 0, scaleY, 0, 0, 0, 1);
cv::Mat RotationMat = (cv::Mat_<double>(3,3) << cos(rotation), sin(rotation), 0, -sin(rotation), cos(rotation), 0, 0, 0, 1);
cv::Mat TranslationMat = (cv::Mat_<double>(3,3) << 1, 0, transX, 0, 1, transY, 0, 0, 1);
cv::Mat PerspectiveMat = (cv::Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, perspectiveX, perspectiveY, 1);
cv::Mat HomographyMatWithoutPerspective = TranslationMat * RotationMat * ScaleShearMat;
cv::Mat HomographyMat = HomographyMatWithoutPerspective * PerspectiveMat;
std::cout << "Homography:\n" << HomographyMat << std::endl;
cv::Mat DecomposedRotaScaleShear(2,2,CV_64FC1);
DecomposedRotaScaleShear.at<double>(0,0) = HomographyMat.at<double>(0,0) - (HomographyMat.at<double>(2,0)*HomographyMat.at<double>(0,2));
DecomposedRotaScaleShear.at<double>(0,1) = HomographyMat.at<double>(0,1) - (HomographyMat.at<double>(2,1)*HomographyMat.at<double>(0,2));
DecomposedRotaScaleShear.at<double>(1,0) = HomographyMat.at<double>(1,0) - (HomographyMat.at<double>(2,0)*HomographyMat.at<double>(1,2));
DecomposedRotaScaleShear.at<double>(1,1) = HomographyMat.at<double>(1,1) - (HomographyMat.at<double>(2,1)*HomographyMat.at<double>(1,2));
std::cout << "Decomposed submat: \n" << DecomposedRotaScaleShear << std::endl;
Now you can test the result by using the QR matrix decomposition of http://www.bluebit.gr/matrix-calculator/
First you can try to set perspectiveX and perspectiveY to zero. You'll see that you can use the upper left part of the matrix to decompose to the input values of rotation angle, shear and scale.
But if you don't set perspectiveX and perspectiveY to zero, you can use the "DecomposedRotaScaleShear" and decompose it to QR.
You'll get a result page with
Q:
 cos(a)  sin(a)
-sin(a)  cos(a)
here you can compute acos of the top-left entry to get the rotation angle a
R:
sx shear
0 sy
here you can read sx and sy directly.
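The same 2x2 QR step can also be done directly in code instead of on that website; here is a small sketch (my own variable names, following the Q = [cos,sin; -sin,cos] and R = [sx,shear; 0,sy] convention from above, and assuming sx > 0):
// QR of the 2x2 "DecomposedRotaScaleShear" matrix via a single Givens rotation.
double m00 = DecomposedRotaScaleShear.at<double>(0,0);
double m01 = DecomposedRotaScaleShear.at<double>(0,1);
double m10 = DecomposedRotaScaleShear.at<double>(1,0);
double m11 = DecomposedRotaScaleShear.at<double>(1,1);
double sx = std::sqrt(m00*m00 + m10*m10);  // length of the first column = sx
double c  =  m00 / sx;                     // cos(a)
double s  = -m10 / sx;                     // sin(a)
double angle = std::atan2(s, c);           // rotation angle in radians
double shear = c*m01 - s*m11;
double sy    = s*m01 + c*m11;
std::cout << "recovered scale: (" << sx << "," << sy << ")"
          << ", shear: " << shear
          << ", rotation: " << angle*180.0/CV_PI << " degrees" << std::endl;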
Hope this helps and I hope there is no error ;)
I'm now trying to generate straight sync points on my 2D map path.
In other words, I want to split the distance between point A on the map (for example X: 301, Y: 679) and point B (X: 360, Y: 630), emitting a point every time 8 units of distance are passed.
Each 8 units of distance are measured with sqrt(pow(a_x - b_x, 2) + pow(a_y - b_y, 2)). I want to obtain the next coordinates on the map, something like a_x + distance and a_y + distance.
I tried to do it, but it didn't work: the x axis doesn't change properly.
Here is my code :
float base_x = active->getX();
float base_y = active->getY();
float destx = incoming_packet.get()->getFloat(4);
float desty = incoming_packet.get()->getFloat(8);
float distance = active->distance(destx, desty); // calculated as sqrt(pow(current character x pos - destx, 2) + pow(current character y pos - desty, 2))
float delta_X = active->vDistance(base_x, destx); // calculated as sqrt(pow(base_x - destx, 2)), i.e. fabs(base_x - destx)
float delta_Y = active->vDistance(base_y, desty); // calculated as sqrt(pow(base_y - desty, 2)), i.e. fabs(base_y - desty)
float cosa = delta_X / distance;
float sina = delta_Y / distance;
int mapx = 1;
int mapy = 1;
if(distance > 8)///active sync
{
for(float dist = 8; dist < distance;dist+=8)
{
base_x += mapx * (cosa * 8);
base_y += mapy * (sina * 8);
BOOST_LOG_TRIVIAL(debug) << "[ACTIVESYNC]NEXT SYNC ACK X : " << base_x << " Y : " << base_y;
}
}
What am I doing wrong here?
"cosa" (and "sina") are apparently dimensionless (i.e. meters / meters).
"mapx" (and "mapy") are also dimensionless.
Note that in your for loop, base_x and base_y describe a point on the map.
And the 2 interesting computations in that loop
base_x += mapx * (cosa * 8);
base_y += mapy * (sina * 8);
become meaningless by attempting to add dimensionless numbers to a point. It might be ok to multiply by dimensionless numbers, but it is unreasonable to add dimensionless numbers to map points.
I suggest cosa and sina be changed to represent the distance (i.e. meters) of each step.
float cosa = delta_X / 8; // size of steps in x direction
float sina = delta_Y / 8; // size of steps in y direction
Now the for loop can add 8 steps of cosa and sina appropriately to describe the path way points, and cosa and sina both have appropriate dimensions for the next computation.
The for loop can simplify to :
for(int step = 0; step < 8; step += 1)
{
base_x += (mapx * cosa);
base_y += (mapy * sina);
// remove or adapt the following
std::cout << std::setw(10) << std::left << (step+1) << std::setw(10)
<< base_x << std::setw(10) << base_y << std::endl;
// you were using:
//BOOST_LOG_TRIVIAL(debug) << "[ACTIVESYNC]NEXT SYNC ACK X : "
// << base_x << " Y : " << base_y;
}
My dummy code outputs:
base x/y 301 679
dest x/y 360 630
delta x/y 59 -49
step_x = 7.375
step_y = -6.125
step base_x base_y
0 301 679
1 308.375 672.875
2 315.75 666.75
3 323.125 660.625
4 330.5 654.5
5 337.875 648.375
6 345.25 642.25
7 352.625 636.125
8 360 630
Do these waypoints look more like what you are looking for?
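If you really do need a sync point every 8 distance units (as in the original question) rather than a fixed number of steps, the same idea works with a unit direction vector. A small sketch using the question's example coordinates (standalone, my own variable names):
#include <cmath>
#include <iostream>
int main() {
    float base_x = 301.0f, base_y = 679.0f;   // point A from the question
    float dest_x = 360.0f, dest_y = 630.0f;   // point B from the question
    float dx = dest_x - base_x;               // keep the sign; vDistance() loses it
    float dy = dest_y - base_y;
    float distance = std::sqrt(dx * dx + dy * dy);
    float ux = dx / distance;                 // unit direction vector from A to B
    float uy = dy / distance;
    // Emit a sync point every 8 distance units travelled.
    for (float travelled = 8.0f; travelled < distance; travelled += 8.0f) {
        std::cout << "sync X: " << base_x + ux * travelled
                  << " Y: " << base_y + uy * travelled << '\n';
    }
}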