opencv kmeans returns wrong result - c++

I'm trying to cluster ten two-dimensional points with OpenCV's kmeans. The code looks like this:
vector<Point2f>data( 10 );
data[0] = {118.90323, 1088.7419};
data[1] = {143.5, 1064.5};
data[2] = {110, 1054};
data[3] = {662, 645};
data[4] = {650, 625.5};
data[5] = {94, 363};
data[6] = {60, 360};
data[7] = {103.97369, 315.71054};
data[8] = {70.5, 313};
data[9] = {1466, 278.55554};
cout << "data:" << data << endl;
Mat labels;
int cluster_number = 4;
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.1 );
Mat centers;
kmeans(data, cluster_number, labels, criteria, 1, KMEANS_RANDOM_CENTERS, centers);
cout << "centers:" << centers << endl;
cout << "labels:" << labels << endl;
The output is:
data:
[118.90323, 1088.7419; 143.5, 1064.5; 110, 1054; 662, 645; 650, 625.5; 94, 363; 60, 360; 103.97369, 315.71054; 70.5, 313; 1466, 278.55554]
centers:
[1.9113665e-38, 2.4802983e-43; 0, 0; 118.90323, 1088.7419; 0, 0]
labels:
[2; 0; 0; 0; 0; 0; 0; 0; 3; 1]
This is obviously wrong. I would like to have the labels
[0,0,0,1,1,2,2,2,2,3]
Thank you very much for your help!!

This test works for me:
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main()
{
float data[10][2]=
{{118.90323, 1088.7419},
{143.5, 1064.5},
{110, 1054},
{662, 645},
{650, 625.5},
{94, 363},
{60, 360},
{103.97369, 315.71054},
{70.5, 313},
{1466, 278.55554}};
Mat points(10, 2, CV_32FC1, *data); // wrap the raw float array in a Mat header (no copy)
cout << points << endl;
Mat labels, centers;
int k =4;
cv::kmeans(points,k, labels,TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0),3, KMEANS_PP_CENTERS, centers);
std::cout << "labels: " << labels << endl;
std::cout << "centers " << centers << endl;
getchar(); // keep the console open
return 0;
}
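If you want to keep the vector<Point2f> input from the question, here is a minimal sketch (assuming OpenCV 3.x or later and C++11) that uses kmeans++ seeding and several attempts, which typically gives much more stable results than a single run with purely random centers:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
    vector<Point2f> data = {
        {118.90323f, 1088.7419f}, {143.5f, 1064.5f}, {110.f, 1054.f},
        {662.f, 645.f}, {650.f, 625.5f}, {94.f, 363.f}, {60.f, 360.f},
        {103.97369f, 315.71054f}, {70.5f, 313.f}, {1466.f, 278.55554f}};
    Mat labels, centers;
    kmeans(data, 4, labels,
           TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.1),
           3, KMEANS_PP_CENTERS, centers); // 3 attempts, kmeans++ initialization
    cout << "labels: " << labels << endl;
    cout << "centers: " << centers << endl;
    return 0;
}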

Related

How to find robot TCP in image with CalibrateHandEye

I'm currently developing a program for a robot with a camera attached to the end effector.
The goal is to calculate where the robot's TCP appears in the camera frame. I'm using OpenCV in C++.
The robot is a UR5 from Universal Robots.
My plan:
- collect multiple (8) data sets:
  - robot pose (XYZ in meters, directly from the robot controller)
  - robot rotation (rx ry rz in radians, directly from the robot controller)
  - a picture of the calibration pattern for each step
- run calibrateCamera over the set of pictures to get tvec and rvec for every step
- run calibrateHandEye:
  - for t_gripper2base I use the robot pose
  - for R_gripper2base I use the robot rotation
  - for t_target2cam I use tvec from calibrateCamera
  - for R_target2cam I use rvec from calibrateCamera
I seem to get correct values (I measured the distance from camera to TCP, and t_cam2gripper seems to be correct).
Translation vector target to cam:
[-0.0001052803107026547;
-0.0780872727019615;
-0.1243323507069755]
Rotation matrix target to cam:
[0.9999922523048892, 0.002655868335207422, -0.002905459271957709;
-0.001229768871633805, 0.9119334002787367, 0.4103363999508009;
0.003739384804660116, -0.4103296477461107, 0.9119296010009958]
The formula to transform the point from TCP coordinates to the image should look like this:
(u,v,w) = C * T * (X,Y,Z,1)
But after the division by w my values are still way off (they should be around (600, 600)).
Actual image position vector homogenized:
[1778.542462313536;
-1626.72483032188;
1]
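For reference, a minimal sketch of evaluating that formula with cv::Mat, assuming C is the 3x3 camera matrix and T the 4x4 homogeneous transform from TCP to camera coordinates, and ignoring lens distortion:
// Hypothetical helper: project a TCP-frame point into the image via (u,v,w) = C * T * (X,Y,Z,1).
cv::Point2d projectTcpPoint(const cv::Mat& C, const cv::Mat& T, const cv::Point3d& p)
{
    cv::Mat X = (cv::Mat_<double>(4, 1) << p.x, p.y, p.z, 1.0);
    cv::Mat P = C * T(cv::Range(0, 3), cv::Range(0, 4)); // 3x4 projection matrix
    cv::Mat uvw = P * X;
    double w = uvw.at<double>(2);
    return cv::Point2d(uvw.at<double>(0) / w, uvw.at<double>(1) / w);
}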
#include <QCoreApplication>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
QCoreApplication a(argc, argv);
Mat defaultCameraMatrix = (Mat_<double>(3, 3));
Mat defaultDistortion = (Mat_<double>(1, 5));
defaultCameraMatrix.at<double>(0, 0) = 1739.3749; // default values from previous intrinsic camera calibration
defaultCameraMatrix.at<double>(0, 1) = 0;
defaultCameraMatrix.at<double>(0, 2) = 639.5;
defaultCameraMatrix.at<double>(1, 0) = 0;
defaultCameraMatrix.at<double>(1, 1) = 1739.3749;
defaultCameraMatrix.at<double>(1, 2) = 479.5;
defaultCameraMatrix.at<double>(2, 0) = 0;
defaultCameraMatrix.at<double>(2, 1) = 0;
defaultCameraMatrix.at<double>(2, 2) = 1;
defaultDistortion.at<double>(0, 0) = -0.165909;
defaultDistortion.at<double>(0, 1) = 0.303675;
defaultDistortion.at<double>(0, 2) = 0.0;
defaultDistortion.at<double>(0, 3) = 0.0;
defaultDistortion.at<double>(0, 4) = 0.0;
vector<Mat> R_gripper2base, t_gripper2base, R_target2cam, t_target2cam;
Mat actualRobotPos1 = (Mat_<double>(3, 1)),
actualRobotPos2 = (Mat_<double>(3, 1)),
actualRobotPos3 = (Mat_<double>(3, 1)),
actualRobotPos4 = (Mat_<double>(3, 1)),
actualRobotPos5 = (Mat_<double>(3, 1)),
actualRobotPos6 = (Mat_<double>(3, 1)),
actualRobotPos7 = (Mat_<double>(3, 1)),
actualRobotPos8 = (Mat_<double>(3, 1));
actualRobotPos1.at<double>(0,0) = -0.193139;
actualRobotPos1.at<double>(1,0) = 0.463823;
actualRobotPos1.at<double>(2,0) = -0.025;
t_gripper2base.push_back(actualRobotPos1);
actualRobotPos2.at<double>(0,0) = -0.193139;
actualRobotPos2.at<double>(1,0) = 0.463823;
actualRobotPos2.at<double>(2,0) = -0.025;
t_gripper2base.push_back(actualRobotPos2);
actualRobotPos3.at<double>(0,0) = -0.21273;
actualRobotPos3.at<double>(1,0) = 0.4426;
actualRobotPos3.at<double>(2,0) = -0.0288;
t_gripper2base.push_back(actualRobotPos3);
actualRobotPos4.at<double>(0,0) = -0.17213;
actualRobotPos4.at<double>(1,0) = 0.4103;
actualRobotPos4.at<double>(2,0) = 0.014;
t_gripper2base.push_back(actualRobotPos4);
actualRobotPos5.at<double>(0,0) = -0.13724;
actualRobotPos5.at<double>(1,0) = 0.45;
actualRobotPos5.at<double>(2,0) = 0.02978;
t_gripper2base.push_back(actualRobotPos5);
actualRobotPos6.at<double>(0,0) = -0.1655;
actualRobotPos6.at<double>(1,0) = 0.478;
actualRobotPos6.at<double>(2,0) = -0.0211;
t_gripper2base.push_back(actualRobotPos6);
actualRobotPos7.at<double>(0,0) = -0.17018;
actualRobotPos7.at<double>(1,0) = 0.46458;
actualRobotPos7.at<double>(2,0) = -0.03761;
t_gripper2base.push_back(actualRobotPos7);
actualRobotPos8.at<double>(0,0) = -0.193139;
actualRobotPos8.at<double>(1,0) = 0.463823;
actualRobotPos8.at<double>(2,0) = 0.025;
t_gripper2base.push_back(actualRobotPos8);
Mat actualRobotRotVec1 = (Mat_<double>(3, 1)),
actualRobotRotVec2 = (Mat_<double>(3, 1)),
actualRobotRotVec3 = (Mat_<double>(3, 1)),
actualRobotRotVec4 = (Mat_<double>(3, 1)),
actualRobotRotVec5 = (Mat_<double>(3, 1)),
actualRobotRotVec6 = (Mat_<double>(3, 1)),
actualRobotRotVec7 = (Mat_<double>(3, 1)),
actualRobotRotVec8 = (Mat_<double>(3, 1));
actualRobotRotVec1.at<double>(0,0) = -3.14159;
actualRobotRotVec1.at<double>(1,0) = 0.00;
actualRobotRotVec1.at<double>(2,0) = 0.00719124;
R_gripper2base.push_back(actualRobotRotVec1);
actualRobotRotVec2.at<double>(0,0) = -2.06;
actualRobotRotVec2.at<double>(1,0) = -2.36;
actualRobotRotVec2.at<double>(2,0) = 0.03;
R_gripper2base.push_back(actualRobotRotVec2);
actualRobotRotVec3.at<double>(0,0) = 2.39;
actualRobotRotVec3.at<double>(1,0) = 1.86;
actualRobotRotVec3.at<double>(2,0) = 0.49;
R_gripper2base.push_back(actualRobotRotVec3);
actualRobotRotVec4.at<double>(0,0) = -2.66;
actualRobotRotVec4.at<double>(1,0) = 0.08;
actualRobotRotVec4.at<double>(2,0) = 0.09;
R_gripper2base.push_back(actualRobotRotVec4);
actualRobotRotVec5.at<double>(0,0) = -2.84;
actualRobotRotVec5.at<double>(1,0) = 0.19;
actualRobotRotVec5.at<double>(2,0) = 0.69;
R_gripper2base.push_back(actualRobotRotVec5);
actualRobotRotVec6.at<double>(0,0) = 2.1;
actualRobotRotVec6.at<double>(1,0) = -2.34;
actualRobotRotVec6.at<double>(2,0) = -0.02;
R_gripper2base.push_back(actualRobotRotVec6);
actualRobotRotVec7.at<double>(0,0) = 1.66;
actualRobotRotVec7.at<double>(1,0) = -2.53;
actualRobotRotVec7.at<double>(2,0) = -0.23;
R_gripper2base.push_back(actualRobotRotVec7);
actualRobotRotVec8.at<double>(0,0) = -3.14159;
actualRobotRotVec8.at<double>(1,0) = 0.00;
actualRobotRotVec8.at<double>(2,0) = 0.00719124;
R_gripper2base.push_back(actualRobotRotVec8);
// for(int i = 0; i < t_gripper2base.size(); i++)
// {
// cout << t_gripper2base[i] << endl << endl;
// }
// for(int i = 0; i < R_gripper2base.size(); i++)
// {
// cout << R_gripper2base[i] << endl << endl;
// }
vector<String> fileNames;
glob("PATH*.png", fileNames, false); // directory of images
vector<vector<Point2f>> corners(fileNames.size());
Mat chessboardImg, chessboardImgGray;
vector<Point3f> objp;
vector<vector<Point3f>> worldCoordinates;
int checkerBoard[2] = {9,6};
double fieldSize = 0.008;
Mat cameraMatrixHandEye, distortionHandEye;
vector<Mat> rvecs, tvecs;
for(int i = 1; i < checkerBoard[1]; i++){
for(int j = 1; j < checkerBoard[0]; j++){
objp.push_back(Point3f(j*fieldSize, i*fieldSize, 0));
}
}
for(int i = 0; i < 8; i++)
{
chessboardImg = imread(fileNames[i]);
cvtColor(chessboardImg, chessboardImgGray, COLOR_BGR2GRAY);
bool patternFound = findChessboardCorners(chessboardImgGray, Size(8,5), corners[i], CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);
if(patternFound)
{
cornerSubPix(chessboardImgGray, corners[i],Size(11,11), Size(-1,-1), TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
drawChessboardCorners(chessboardImg, Size(8,5), corners[i], patternFound);
worldCoordinates.push_back(objp);
}
//***** Check loaded images and detected chessboard *****
//imshow("source", chessboardImgGray);
//imshow("chess", chessboardImg);
//waitKey(0);
//*******************************************************
}
float reprojectionError = calibrateCamera(worldCoordinates, corners, Size(1280,960), cameraMatrixHandEye, distortionHandEye, rvecs, tvecs, CALIB_FIX_ASPECT_RATIO + CALIB_FIX_K3 +
CALIB_ZERO_TANGENT_DIST + CALIB_FIX_PRINCIPAL_POINT);
//***** Check camera calibration results *****
//cout << "Reprojection Error CHE: " << endl << reprojectionError << endl << endl;
//cout << "Camera Matrix CHE: " << endl << cameraMatrixHandEye << endl << endl;
//cout << "Distortion CHE: " << endl << distortionHandEye << endl << endl;
//for(int i = 0; i < numberOfPoses; i++)
//{
// cout << "No. " << i+1 << " Target translation: " << endl << tvecs[i] << endl << endl;
// cout << "No. " << i+1 << " Target rotation: " << endl << rvecs[i] << endl << endl;
//}
//********************************************/
for(int i = 0; i < rvecs.size(); i++)
{
t_target2cam.emplace_back(tvecs[i]);
R_target2cam.emplace_back(rvecs[i]);
}
// for(int i = 0; i < t_target2cam.size(); i++)
// {
// cout << t_target2cam[i] << endl << endl;
// }
// for(int i = 0; i < R_target2cam.size(); i++)
// {
// cout << R_target2cam[i] << endl << endl;
// }
Mat R_cam2gripper;
Mat t_cam2gripper = (Mat_<double>(3, 1));
calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper);
cout << t_cam2gripper << endl << endl;
cout << R_cam2gripper << endl << endl;
Mat transformationMat4x4 = (Mat_<double>(4, 4));
Mat transformationMatInv4x4 = (Mat_<double>(4, 4));
Mat R_cam2gripperInv = (Mat_<double>(3, 3));
Mat t_cam2gripperInv = (Mat_<double>(3, 1));
transformationMat4x4.at<double>(0, 0) = R_cam2gripper.at<double>(0, 0);
transformationMat4x4.at<double>(0, 1) = R_cam2gripper.at<double>(0, 1);
transformationMat4x4.at<double>(0, 2) = R_cam2gripper.at<double>(0, 2);
transformationMat4x4.at<double>(0, 3) = t_cam2gripper.at<double>(0, 0);
transformationMat4x4.at<double>(1, 0) = R_cam2gripper.at<double>(1, 0);
transformationMat4x4.at<double>(1, 1) = R_cam2gripper.at<double>(1, 1);
transformationMat4x4.at<double>(1, 2) = R_cam2gripper.at<double>(1, 2);
transformationMat4x4.at<double>(1, 3) = t_cam2gripper.at<double>(1, 0);
transformationMat4x4.at<double>(2, 0) = R_cam2gripper.at<double>(2, 0);
transformationMat4x4.at<double>(2, 1) = R_cam2gripper.at<double>(2, 1);
transformationMat4x4.at<double>(2, 2) = R_cam2gripper.at<double>(2, 2);
transformationMat4x4.at<double>(2, 3) = t_cam2gripper.at<double>(2, 0);
transformationMat4x4.at<double>(3, 0) = 0;
transformationMat4x4.at<double>(3, 1) = 0;
transformationMat4x4.at<double>(3, 2) = 0;
transformationMat4x4.at<double>(3, 3) = 1;
transformationMatInv4x4 = transformationMat4x4.inv();
R_cam2gripperInv.at<double>(0,0) = transformationMatInv4x4.at<double>(0,0);
R_cam2gripperInv.at<double>(0,1) = transformationMatInv4x4.at<double>(0,1);
R_cam2gripperInv.at<double>(0,2) = transformationMatInv4x4.at<double>(0,2);
R_cam2gripperInv.at<double>(1,0) = transformationMatInv4x4.at<double>(1,0);
R_cam2gripperInv.at<double>(1,1) = transformationMatInv4x4.at<double>(1,1);
R_cam2gripperInv.at<double>(1,2) = transformationMatInv4x4.at<double>(1,2);
R_cam2gripperInv.at<double>(2,0) = transformationMatInv4x4.at<double>(2,0);
R_cam2gripperInv.at<double>(2,1) = transformationMatInv4x4.at<double>(2,1);
R_cam2gripperInv.at<double>(2,2) = transformationMatInv4x4.at<double>(2,2);
t_cam2gripperInv.at<double>(0,0) = transformationMatInv4x4.at<double>(0,3);
t_cam2gripperInv.at<double>(1,0) = transformationMatInv4x4.at<double>(1,3);
t_cam2gripperInv.at<double>(2,0) = transformationMatInv4x4.at<double>(2,3);
cout << transformationMatInv4x4 << endl << endl;
cout << t_cam2gripperInv << endl << endl;
Point3f objectPoints1, objectPoints2;
vector<Point3f> objectPoints;
objectPoints1.x = 0; //TCP in TCP-Coordinates
objectPoints1.y = 0;
objectPoints1.z = 0;
objectPoints.push_back(objectPoints1);
vector<Point2f> imagePoints;
projectPoints(objectPoints, R_cam2gripperInv, t_cam2gripperInv, defaultCameraMatrix, defaultDistortion, imagePoints);
cout << imagePoints[0] << endl << endl;
return a.exec();
}
You need to use solvePnP to get rvec and tvec for each image separately, which gives you a list of rvecs and tvecs whose length equals the number of images. To get a matching list of rvec and tvec for the gripper-to-base transformation, you need to derive the R and t matrices from the robot kinematics, which take the rotation angles as input. Then, for each pose, feed the rotation angle data into those R and t matrices to get an rvec and tvec per pose, building a list of the same length. Finally, pass both lists to calibrateHandEye.
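A rough sketch of that per-image step (assuming objp holds the board's 3D corner coordinates as in the code above, cameraMatrix and distCoeffs come from a previous intrinsic calibration, and imageFiles is a hypothetical list of chessboard image paths) might look like this:
std::vector<cv::Mat> R_target2cam, t_target2cam;
for (const auto& file : imageFiles)
{
    cv::Mat img = cv::imread(file, cv::IMREAD_GRAYSCALE);
    std::vector<cv::Point2f> corners;
    if (!cv::findChessboardCorners(img, cv::Size(8, 5), corners))
        continue; // skip images where the board was not found
    cv::Mat rvec, tvec;
    cv::solvePnP(objp, corners, cameraMatrix, distCoeffs, rvec, tvec);
    cv::Mat R;
    cv::Rodrigues(rvec, R); // convert the Rodrigues vector to a 3x3 rotation matrix
    R_target2cam.push_back(R);
    t_target2cam.push_back(tvec);
}
The gripper-to-base rotations and translations would be built the same way, one entry per robot pose, before calling calibrateHandEye.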
I was finally able to resolve the problem.
The calibration was correct, but there were two mistakes in my approach:
To find my TCP I had used the TCP coordinates from the robot controller. Instead, I had to use (0, 0, 0), which is the TCP expressed in its own coordinate system.
The second mistake was using the transformation matrix built directly from the R and t returned by calibrateHandEye. Instead, I had to use the inverse transformation matrix.
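For illustration, a minimal sketch of how that corrected final step could look (reusing defaultCameraMatrix, defaultDistortion and the R_cam2gripper/t_cam2gripper returned by calibrateHandEye from the code above):
// Build the 4x4 cam->gripper transform and invert it to get gripper->cam.
cv::Mat T = cv::Mat::eye(4, 4, CV_64F);
R_cam2gripper.copyTo(T(cv::Rect(0, 0, 3, 3)));
t_cam2gripper.copyTo(T(cv::Rect(3, 0, 1, 3)));
cv::Mat Tinv = T.inv();
cv::Mat rvec;
cv::Rodrigues(Tinv(cv::Rect(0, 0, 3, 3)).clone(), rvec); // rotation of the inverse transform
cv::Mat tvec = Tinv(cv::Rect(3, 0, 1, 3)).clone();
// The TCP is the origin of its own frame, so the object point is (0, 0, 0).
std::vector<cv::Point3f> objectPoints{ cv::Point3f(0.f, 0.f, 0.f) };
std::vector<cv::Point2f> imagePoints;
cv::projectPoints(objectPoints, rvec, tvec, defaultCameraMatrix, defaultDistortion, imagePoints);
std::cout << "TCP in image: " << imagePoints[0] << std::endl;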

Extraction of each HSV value from an image using OpenCV and C++

I used the code below to extract each HSV value from any image and to print each value on the screen.
Mat image_HSV;
cvtColor(ori_image, image_HSV, CV_BGR2HSV);
Mat mask;
inRange(image_HSV, Scalar(100, 0, 0), Scalar(100, 255, 255), mask);
image_HSV.setTo(Scalar(0, 0, 0), mask);
int h = 0;
int s = 0;
int v = 0;
int col = image_HSV.cols;
int row = image_HSV.rows;
int corow = col * row; // image's full pixel number
for (int i = 0; i < image_HSV.cols; i++) { // image row pixel
for (int j = 0; j < image_HSV.rows; j++) { // image col pixel
Vec3b hsv = image_HSV.at<Vec3b>(i,j);
h += hsv.val[0];
s += hsv.val[1];
v += hsv.val[2];
if (hsv[0] != 100) {
hsv[0] = 0;
hsv[1] = 0;
hsv[2] = 0;
}
}
}
cout << "H: " << h / corow << "% \n";
cout << "S: " << s / corow << "% \n";
cout << "V: " << v / corow << "% \n";
waitKey(0);
return 0;
I used an all-red image this time, whose RGB values were (255, 0, 0).
However, I get some strange results from this code.
As far as I know, the H, S, and V ranges are 0-360, 0-100, and 0-100, respectively.
I also followed the post linked below, but I still have trouble getting the right values.
OpenCV (C++) - Set HSV values of a pixel
But, I still don't know how to fix it.
Any help would be greatly appreciated! Thanks!
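For reference, OpenCV stores 8-bit HSV with H in [0, 180) and S, V in [0, 256) rather than 0-360 / 0-100, so a pure red pixel comes out as roughly (0, 255, 255). A minimal sketch of averaging the HSV channels with cv::mean (assuming a hypothetical all-red test image red.png):
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    Mat ori_image = imread("red.png"); // hypothetical all-red test image
    if (ori_image.empty()) return -1;
    Mat image_HSV;
    cvtColor(ori_image, image_HSV, COLOR_BGR2HSV);
    // For CV_8U images, H is in [0, 180) and S, V are in [0, 256).
    Scalar meanHSV = mean(image_HSV);
    cout << "H: " << meanHSV[0] << " S: " << meanHSV[1] << " V: " << meanHSV[2] << endl;
    return 0;
}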

Understanding of planes in NAryMatIterator

I have 3-dimension matrix:
const int n_mat_size = 5;
const int n_mat_sz[] = { n_mat_size , n_mat_size, n_mat_size };
cv::Mat m1(3, n_mat_sz, CV_32FC1);
Now I'd like to iterate over its planes, expecting to get three two-dimensional matrices:
const cv::Mat* arrays[] = { &m1, 0 };
cv::Mat planes[3];
cv::NAryMatIterator it(arrays, planes);
std::cout << it.nplanes << ", " << it.planes[0].rows << ", " << it.planes[0].cols;
I expect to get the output "3, 5, 5", but instead I get "1, 1, 125". Where are the slices of the matrix?
Because the matrix m1 is continuous, there is only one plane (or slice).
Please refer to the documentation for NAryMatIterator:
It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays.
For example, the matrix m2 in the following code is not continuous:
const int n_mat_size = 5;
const int n_mat_sz[] = { n_mat_size , n_mat_size, n_mat_size };
cv::Mat m1(3, n_mat_sz, CV_32FC1);
// Get plane 2 and 3 of m1
// and row 2, row 3 and row 4 of every selected plane
// m2 is not continuous
cv::Mat m2 = m1(cv::Range(2,4), cv::Range(2,5));
const cv::Mat* arrays[] = { &m2, 0 };
cv::Mat planes[3];
cv::NAryMatIterator it(arrays, planes);
std::cout << it.nplanes << ", " << it.planes[0].rows << ", " << it.planes[0].cols << std::endl;
The output of the above code is: 2, 1, 15.
Note that the number of rows of every plane is always 1 and the number of columns is the number of elements contained in the plane.
There is a statement:
planes[i] = Mat(1, (int)size, A.type(), A.data);
in the function void NAryMatIterator::init, which can be found at https://github.com/opencv/opencv/blob/master/modules/core/src/matrix.cpp#L4596 .
The above statement sets the size of the plane.
To separate the matrix you gave into planes, you could use cv::InputArray::getMatVector.
The following code shows its usage.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main()
{
const int n_mat_size = 3;
const int n_mat_sz[] = { n_mat_size , n_mat_size, n_mat_size };
cv::Mat m1(3, n_mat_sz, CV_8U);
cv::MatIterator_<uchar> it = m1.begin<uchar>();
cv::MatIterator_<uchar> end = m1.end<uchar>();
for (uchar i = 0; it != end; ++it, ++i)
{
*it = i;
}
cv::InputArray arr(m1);
std::vector<cv::Mat> planes;
arr.getMatVector(planes);
for (size_t i = 0; i < planes.size(); ++i)
{
std::cout << "-------" << std::endl
<< planes[i] << std::endl << "******" << std::endl;
}
}
Its output is as follows:
-------
[ 0, 1, 2;
3, 4, 5;
6, 7, 8]
******
-------
[ 9, 10, 11;
12, 13, 14;
15, 16, 17]
******
-------
[ 18, 19, 20;
21, 22, 23;
24, 25, 26]
******
Maybe the easiest one is to use the method cv::Mat::row(int). The corresponding code is:
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
const int n_mat_size = 3;
const int n_mat_sz[] = { n_mat_size , n_mat_size, n_mat_size };
cv::Mat m1(3, n_mat_sz, CV_8U);
cv::MatIterator_<uchar> it = m1.begin<uchar>();
cv::MatIterator_<uchar> end = m1.end<uchar>();
for (uchar i = 0; it != end; ++it, ++i)
{
*it = i;
}
int n = m1.size[0];
for (int i = 0; i < n; ++i)
{
cv::Mat three_d_plane = m1.row(i);
// three_d_plane has a size 1x3x3
// std::cout supports only 2-d matrix. Therefore, we change it to 2-d here
cv::Mat two_d_plane(three_d_plane.size[1], three_d_plane.size[2], three_d_plane.type(), three_d_plane.data);
std::cout << two_d_plane << std::endl << "----" << std::endl;
}
}
The output is
[ 0, 1, 2;
3, 4, 5;
6, 7, 8]
----
[ 9, 10, 11;
12, 13, 14;
15, 16, 17]
----
[ 18, 19, 20;
21, 22, 23;
24, 25, 26]
----

Image processing: luminance weighted 2

I would like to weight luminance values onto a new image.
I have an image (5px.jpg) of 5 pixels with these luminance values: 50, 100, 150, 200, 250.
I have a vector of coefficients.
I created a new Mat Z which combines the luminance of 5px.jpg and the coefficients.
So my first luminance value is 50 (lum[0] = 50) and I want it to be applied to the first 5.1 pixels of my matrix (coef[0] = 5.1). To do that, I need to weight the 6th pixel with the first and the second luminance values. In my case, the luminance of my 6th pixel will be 95, because (0.1 * 50) + (0.9 * 100) = 95.
And so on...
But I do not know why my code does not work.
I had already asked a similar question for a vector, and now I'm trying to adapt it to an image.
My input picture:
My output:
#define MPI 3.14159265358979323846264338327950288419716939937510
#define RAD2DEG (180./MPI)
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cmath>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int main()
{
Mat image = imread("5px.jpg", 1);
if (image.empty())
{
cout << "Couldn't load " << image << endl;
}
else
{
cout << "Image upload, go" << endl;
}
namedWindow("ImageIn", CV_WINDOW_AUTOSIZE);
imshow("ImageIn", image);
Mat imgGrayScale;
cvtColor(image, imgGrayScale, CV_BGR2GRAY);
float *deltaP = new float[imgGrayScale.cols];
float *angle = new float[imgGrayScale.cols];
float *coeff = new float[imgGrayScale.cols];
int col;
for (col = 0; col < imgGrayScale.cols; ++col)
{
//cout << "position x = " << col << endl;
deltaP[col] = imgGrayScale.at<uchar>(0, col);
//cout << "luminance = " << deltaP[col] << endl;
angle[col] = acos(deltaP[col] / 255);
//cout << "angle =" << angle[col] << endl;
coeff[col] = (1 / cos(angle[col]));
cout << "coeff = " << coeff[col] << endl;
}
int width = imgGrayScale.size().width;
int height = imgGrayScale.size().height;
int width2 = width * 5;
int idx_coef = 0;
Mat Z = Mat::zeros(height, width2, CV_8UC1);
//for (int r = 0; r < imgGrayScale.rows; r++)
//{
//cout << "Saut de ligne " << endl << endl << endl;
for (int t = 0; t < imgGrayScale.cols; t++)
{
//cout << "Saut de colonne " << endl;
// Attribue le coeff à une variable
int c = int(coeff[idx_coef]);
//cout << "x" << t << endl;
for (int i = 0; i < c; ++i)
{
Z.at<uchar>(0, c) = imgGrayScale.at<uchar>(0, t);
}
float alpha = fmod(coeff[idx_coef], 1.f);
float beta = 1.f - alpha;
Z.at<uchar>(0, c + 1) = (alpha * imgGrayScale.at<uchar>(0, t) + beta * imgGrayScale.at<uchar>(0, t + 1));
idx_coef++;
coeff[idx_coef] = coeff[idx_coef] - beta;
if (idx_coef >= width - 1)
{
int cc = int(coeff[idx_coef]);
for (int i = 0; i < cc; ++i)
{
Z.at<uchar>(0, c) = imgGrayScale.at<uchar>(0, t);
}
idx_coef = 0;
break;
}
}
//}
namedWindow("m", CV_WINDOW_AUTOSIZE);
imshow("m", Z);
imwrite("lumianacetest.jpg", Z);
int t = waitKey();
if ((char)t == 27)
return 0;
}
You messed up the indices while accessing the matrix Z. You shouldn't access Z at column c; you need to access the current column (as a vector::push_back would do). So keep the current column index in a variable, here idx_z, and increment it every time you access Z.
Your Z is CV_8U, so you lose accuracy since your values are float. You can create Z as CV_32F and, if you need the values in CV_8U format to save the image, convert to CV_8U at the end.
The last columns of Z won't be set to any value (so I initialized them with 0). If you need them to hold the last value of imgGrayScale, just uncomment the relevant part of the code.
Here is the code:
#define MPI 3.14159265358979323846264338327950288419716939937510
#define RAD2DEG (180./MPI)
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
Mat1b imgGrayScale = (Mat1b(2, 5) << 50, 100, 150, 200, 250,
50, 100, 150, 200, 250);
vector<float> deltaP(imgGrayScale.cols);
vector<float> angle(imgGrayScale.cols);
vector<float> coeff(imgGrayScale.cols);
int col;
for (col = 0; col < imgGrayScale.cols; ++col)
{
//cout << "position x = " << col << endl;
deltaP[col] = imgGrayScale.at<uchar>(0, col);
//cout << "luminance = " << deltaP[col] << endl;
angle[col] = acos(deltaP[col] / 255);
//cout << "angle =" << angle[col] << endl;
coeff[col] = (1 / cos(angle[col]));
cout << "coeff = " << coeff[col] << endl;
}
int width = imgGrayScale.size().width;
int height = imgGrayScale.size().height;
int width2 = width * 5;
Mat1f Z(height, width2, 0.f);
for (int r = 0; r < imgGrayScale.rows; r++)
{
int idx_lum = 0;
int idx_coef = 0;
int idx_z = 0;
vector<float> coef = coeff;
// Uncomment to set the remaining values in Z to the last value in imgGrayScale:
//Z.row(r) = imgGrayScale(r, imgGrayScale.cols - 1);
while (true)
{
int c = int(coef[idx_coef]);
for (int i = 0; i < c; ++i)
{
Z(r, idx_z++) = imgGrayScale(r, idx_lum);
}
float alpha = fmod(coef[idx_coef], 1.f);
float beta = 1.f - alpha;
Z(r, idx_z++) = (alpha * imgGrayScale(r, idx_lum) + beta * imgGrayScale(r, idx_lum + 1));
idx_coef++;
idx_lum++;
coef[idx_coef] = coef[idx_coef] - beta;
if (idx_lum >= imgGrayScale.cols - 1 || idx_coef >= coef.size() - 1)
{
int cc = int(coef[idx_coef]);
for (int i = 0; i < cc; ++i)
{
Z(r, idx_z++) = imgGrayScale(r, idx_lum);
}
idx_coef = 0;
break;
}
}
}
Mat1b ZZ;
Z.convertTo(ZZ, CV_8U);
cout << "Float values:" << endl;
cout << Z << endl << endl;
cout << "Uchar values:" << endl;
cout << ZZ << endl << endl;
namedWindow("m", CV_WINDOW_AUTOSIZE);
imshow("m", Z);
imwrite("lumianacetest.png", ZZ);
waitKey();
return 0;
}

How to determine the values of the 3 channels in RGB or BGR

This is the image that I need to detect:
// cdst is the image
// With this code I want to check the values at a certain pixel, but it only detects the first channel; the 2nd and 3rd come out as 0
void dec()
{
Mat em;
cdst="path of the image";
//these are the BGR values of the colors I wish to check
Vec3f red(0, 0, 255);
Vec3f blue(255, 0, 0);
Vec3f green(0, 128, 0);
Vec3f yellow(0, 255, 255);
Vec3f marron(0, 0,128);
Vec3f pink(147, 20, 255);
Vec3f indigo(130, 0, 75);
Vec3f midblue(112, 25, 25);
Vec3f magenta(139, 0, 139);
//em will hold the cdst image
em=cdst;
//for loop to determine what the colors are
for (int i = 0; i < l.size(); i++)
{
int x = l[i][0];
int y = l[i][1];
cout << x << " " << y<<endl;
Vec3f px = em.at<uchar>(y,x);
//I'm trying to print the colors of all 3 channels
//But only the first one has a value; the second and third are 0
cout << px.val[0] << " " << px.val[1] << " "<<px.val[2]<<endl;
if (px == pink)
{
cout<<"A";
}
}
}
How about trying this code?
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(){
Mat em(100, 100, CV_8UC3, Scalar::all(0));
rectangle(em, Rect(30, 30, 10, 10), Scalar(147, 20, 255), -1);
imshow("em", em);
waitKey(1);
Vec3b pink(147,20,255);
for (int i = 0; i < em.rows; i++){
for (int j = 0; j < em.cols; j++){
Vec3b px = em.at<Vec3b>(i, j);
cout << px << endl;
if (px == pink){
cout << "A" ;
}
}
}
waitKey(0);
destroyAllWindows();
return 0;
}
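For what it's worth, in the loop from the question it should be enough to read the pixel as a Vec3b instead of a uchar, e.g. (a small sketch keeping the question's variable names):
Vec3b px = em.at<Vec3b>(y, x); // reads all three BGR channels of the pixel
cout << (int)px[0] << " " << (int)px[1] << " " << (int)px[2] << endl;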