Different results between self-made bgr2hsv and opencv bgr2hsv - c++

I implemented a bgr2hsv function in C++ by accessing pixels directly with OpenCV, following a bgr2hsv algorithm I found on the Internet.
I then compared the results of my bgr2hsv() to those of cvtColor().
The result images have slightly different colors even though the original image was the same. I tried to work out why they differ, but I couldn't find the cause.
Could you look at the source code and the result images?
Here's the code.
//self-made bgr2hsv
double b, g, r;
double bb, gg, rr;
double tmax, tmin;
double h = 0, s = 0, v = 0;
double del, delB, delG, delR;

Mat image = imread("lena.jpg", 1);
Mat clone1 = image.clone();
Mat img;
image.convertTo(img, CV_64F);

for (int y = 0; y < img.rows; y++)
{
    for (int x = 0; x < img.cols; x++)
    {
        b = image.at<Vec3b>(y, x)[0];
        g = image.at<Vec3b>(y, x)[1];
        r = image.at<Vec3b>(y, x)[2];

        bb = b / 255;
        gg = g / 255;
        rr = r / 255;

        tmax = _max(bb, gg, rr);
        tmin = _min(bb, gg, rr);

        v = tmax;
        del = tmax - tmin;

        if (del == 0) {
            h = 0;
            s = 0;
        }
        else {
            s = del / tmax;
            delB = ((tmax - b) / 6 + del / 2) / del;
            delG = ((tmax - g) / 6 + del / 2) / del;
            delR = ((tmax - r) / 6 + del / 2) / del;

            if (b == tmax) {
                h = (2 / 3) + delG - delR;
            }
            if (g == tmax) {
                h = (1 / 3) + delR - delB;
            }
            if (r == tmax) {
                h = delB - delG;
            }

            if (h < 0) h += 1;
            if (h > 1) h -= 1;
        }

        img.at<Vec3d>(y, x)[0] = h;
        img.at<Vec3d>(y, x)[1] = s;
        img.at<Vec3d>(y, x)[2] = v;
    }
}
//bgr2hsv with cvtColor
cvtColor(image,clone1,CV_BGR2HSV);
imwrite("implemented_hsv.jpg",clone1);
imwrite("bgr2hsv.jpg", img);
//show images
imshow("bgr2hsv", img);
imshow("implemented_hsv",clone1);
waitKey(0);
And results are here.

I wouldn't suggest grabbing something from the internet and expecting it to give you the correct result unless you understand what's going on. Instead of using this, why not just use the formula from the OpenCV docs?
For an example of this particular conversion, see my answer here. It uses the exact formula OpenCV mentions in the docs linked above for BGR to HSV conversion. It's in Python rather than C++, but Python is fairly easy to read anyway.
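For reference, here is a minimal C++ sketch of that docs formula (my own translation, so treat it as an illustration rather than OpenCV's exact implementation). Note the 8-bit packing that cvtColor applies at the end: V and S are scaled to 0..255 and H is halved to fit 0..180, which the code in the question never does:
#include <algorithm>
#include <opencv2/opencv.hpp>

// One pixel, 8-bit BGR -> 8-bit HSV, following the formula in the OpenCV docs.
cv::Vec3b bgrToHsv8u(const cv::Vec3b& bgr)
{
    double b = bgr[0] / 255.0, g = bgr[1] / 255.0, r = bgr[2] / 255.0;
    double v  = std::max({r, g, b});
    double mn = std::min({r, g, b});
    double s = (v != 0.0) ? (v - mn) / v : 0.0;
    double h = 0.0;
    if (v != mn)
    {
        if (v == r)      h = 60.0 * (g - b) / (v - mn);
        else if (v == g) h = 120.0 + 60.0 * (b - r) / (v - mn);
        else             h = 240.0 + 60.0 * (r - g) / (v - mn);
    }
    if (h < 0.0) h += 360.0;
    // 8-bit packing used by cvtColor: H/2 (0..180), S*255, V*255.
    return cv::Vec3b(cv::saturate_cast<uchar>(h / 2.0),
                     cv::saturate_cast<uchar>(s * 255.0),
                     cv::saturate_cast<uchar>(v * 255.0));
}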

Related

OpenCV undistortPoints not giving the exact inverse of distortion model

I was doing some tests using the distortion model of OpenCV. Basically what I did is implement the distortion equations and see whether the cv::undistortPoints function gives me the inverse of these equations. I realized that cv::undistortPoints does not exactly give you the inverse of the distortion equations. When I saw this, I went to the implementation of cv::undistortPoints and realized that in the termination condition of the iterative process that computes the inverse of the distortion model, OpenCV always does 5 iterations (if no distortion coefficients are provided to the function, it actually does 0 iterations) and does not use any error metric on the undistorted point to check whether it is precisely undistorted. With this in mind, I copied and modified the termination condition of the iterative process to take an error metric into account. This gave me the exact inverse of the distortion model. The code showing this is attached at the end of this post. My question is:
Does this happen because OpenCV prefers performance (spending a bit less time) over accuracy (spending a bit more time), or is this just a "bug"? (It is obvious that with the termination condition I propose the function will take more time to undistort each point.)
Thank you very much!
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cmath>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>

using namespace cv;
// This is a copy of the opencv implementation
void cvUndistortPoints_copy( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                             const CvMat* _distCoeffs,
                             const CvMat* matR, const CvMat* matP )
{
    double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR=cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n, iters = 1;

    CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));
    CV_Assert( CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );

    cvConvert( _cameraMatrix, &matA );

    if( _distCoeffs )
    {
        CV_Assert( CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8));
        _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
        cvConvert( _distCoeffs, &_Dk );
        iters = 5;
    }

    if( matR )
    {
        CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
        cvConvert( matR, &_RR );
    }
    else
        cvSetIdentity(&_RR);

    if( matP )
    {
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
        CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }

    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);

    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1./fx;
    ify = 1./fy;
    cx = A[0][2];
    cy = A[1][2];

    for( i = 0; i < n; i++ )
    {
        double x, y, x0, y0;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }

        x0 = x = (x - cx)*ifx;
        y0 = y = (y - cy)*ify;

        // compensate distortion iteratively
        int max_iters(500);
        double e(1);
        for( j = 0; j < max_iters && e > 0; j++ )
        {
            double r2 = x*x + y*y;
            double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
            double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
            double xant = x;
            double yant = y;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
            e = pow(xant - x,2) + pow(yant - y,2);
        }

        double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
        double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
        double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}
void undistortPoints_copy( InputArray _src, OutputArray _dst,
                           InputArray _cameraMatrix,
                           InputArray _distCoeffs,
                           InputArray _Rmat=noArray(),
                           InputArray _Pmat=noArray() )
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();

    CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
        ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));

    _dst.create(src.size(), src.type(), -1, true);
    Mat dst = _dst.getMat();

    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
    if( R.data )
        pR = &(matR = R);
    if( P.data )
        pP = &(matP = P);
    if( distCoeffs.data )
        pD = &(_cdistCoeffs = distCoeffs);

    cvUndistortPoints_copy(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}
// Distortion model implementation
cv::Point2d distortPoint(cv::Point2d undistorted_point, cv::Mat camera_matrix, std::vector<double> distort_coefficients){

    // Check that the camera matrix is double
    if (!(camera_matrix.type() == CV_64F || camera_matrix.type() == CV_64FC1)){
        std::ostringstream oss;
        oss << "distortPoint(): Camera matrix type is wrong. It has to be a double matrix (CV_64)";
        throw std::runtime_error(oss.str());
    }

    // Create distorted point
    cv::Point2d distortedPoint;
    distortedPoint.x = (undistorted_point.x - camera_matrix.at<double>(0,2))/camera_matrix.at<double>(0,0);
    distortedPoint.y = (undistorted_point.y - camera_matrix.at<double>(1,2))/camera_matrix.at<double>(1,1);

    // Get model
    if (distort_coefficients.size() < 4 || distort_coefficients.size() > 8 ){
        throw std::runtime_error("distortPoint(): Invalid number of distortion coefficients.");
    }
    double k1(distort_coefficients[0]);
    double k2(distort_coefficients[1]);
    double p1(distort_coefficients[2]); // first tangential distortion coefficient
    double p2(distort_coefficients[3]); // second tangential distortion coefficient
    double k3(0);
    double k4(0);
    double k5(0);
    double k6(0);
    if (distort_coefficients.size() > 4)
        k3 = distort_coefficients[4];
    if (distort_coefficients.size() > 5)
        k4 = distort_coefficients[5];
    if (distort_coefficients.size() > 6)
        k5 = distort_coefficients[6];
    if (distort_coefficients.size() > 7)
        k6 = distort_coefficients[7];

    // Distort
    double xcx = distortedPoint.x;
    double ycy = distortedPoint.y;
    double r2 = pow(xcx, 2) + pow(ycy, 2);
    double r4 = pow(r2, 2);
    double r6 = pow(r2, 3);
    // rational model: (1 + k1*r2 + k2*r4 + k3*r6) / (1 + k4*r2 + k5*r4 + k6*r6)
    double k = (1 + k1*r2 + k2*r4 + k3*r6)/(1 + k4*r2 + k5*r4 + k6*r6);
    distortedPoint.x = xcx*k + 2*p1*xcx*ycy + p2*(r2 + 2*pow(xcx,2));
    distortedPoint.y = ycy*k + p1*(r2 + 2*pow(ycy,2)) + 2*p2*xcx*ycy;
    distortedPoint.x = distortedPoint.x*camera_matrix.at<double>(0,0) + camera_matrix.at<double>(0,2);
    distortedPoint.y = distortedPoint.y*camera_matrix.at<double>(1,1) + camera_matrix.at<double>(1,2);

    // Exit
    return distortedPoint;
}
int main(int argc, char** argv){

    // Camera matrix
    double cam_mat_da[] = {1486.58092, 0, 1046.72507, 0, 1489.8659, 545.374244, 0, 0, 1};
    cv::Mat cam_mat(3, 3, CV_64FC1, cam_mat_da);

    // Distortion coefficients
    double dist_coefs_da[] = {-0.13827409, 0.29240721, -0.00088197, -0.00090189, 0};
    std::vector<double> dist_coefs(dist_coefs_da, dist_coefs_da + 5);

    // Distorted Point
    cv::Point2d p0(0, 0);
    std::vector<cv::Point2d> p0_v;
    p0_v.push_back(p0);

    // Undistort Point
    std::vector<cv::Point2d> ud_p_v;
    cv::undistortPoints(p0_v, ud_p_v, cam_mat, dist_coefs);
    cv::Point2d ud_p = ud_p_v[0];
    ud_p.x = ud_p.x*cam_mat.at<double>(0,0) + cam_mat.at<double>(0,2);
    ud_p.y = ud_p.y*cam_mat.at<double>(1,1) + cam_mat.at<double>(1,2);

    // Redistort Point
    cv::Point2d p = distortPoint(ud_p, cam_mat, dist_coefs);

    // Undistort Point using own termination of iterative process
    std::vector<cv::Point2d> ud_p_v_local;
    undistortPoints_copy(p0_v, ud_p_v_local, cam_mat, dist_coefs);
    cv::Point2d ud_p_local = ud_p_v_local[0];
    ud_p_local.x = ud_p_local.x*cam_mat.at<double>(0,0) + cam_mat.at<double>(0,2);
    ud_p_local.y = ud_p_local.y*cam_mat.at<double>(1,1) + cam_mat.at<double>(1,2);

    // Redistort Point
    cv::Point2d p_local = distortPoint(ud_p_local, cam_mat, dist_coefs);

    // Display results
    std::cout << "Distorted original point: " << p0 << std::endl;
    std::cout << "Undistorted point (CV): " << ud_p << std::endl;
    std::cout << "Distorted point (CV): " << p << std::endl;
    std::cout << "Error in the distorted point (CV): " << sqrt(pow(p.x - p0.x, 2) + pow(p.y - p0.y, 2)) << std::endl;
    std::cout << "Undistorted point (Local): " << ud_p_local << std::endl;
    std::cout << "Distorted point (Local): " << p_local << std::endl;
    std::cout << "Error in the distorted point (Local): " << sqrt(pow(p_local.x - p0.x, 2) + pow(p_local.y - p0.y, 2)) << std::endl;

    // Exit
    return 0;
}
As suggested, you could get the actual motivation from the OpenCV forums. Note however that historically OpenCV has been developed with real-time or near-real-time applications in mind (for example, the DARPA Grand Challenge), hence you'll easily find code that optimizes for speed over accuracy.
In most cases 5 iterations are good enough. What counts as "enough" can be argued about, but for cases such as finding the optimal camera matrix one can argue that 0.1 pixel does not change much for many applications.
An important thing to note is that in some cases the function does not converge in 5 iterations. I don't know if there can be a case where it will not converge at all. This happens, for example, when the distortion parameters do not fit the distortion well, and therefore there is no exact solution for some coordinates.
See Jensenb's answer here for a discussion.

How to realize Digital fill light in Opencv

I want to implement a fill-light function with OpenCV, but there are some problems: the black parts of the picture stay too dark, the photos become blurred, and I don't know how to optimize the code. Here is my code:
V: value, 0~100, the amplitude of the brightness increase.
S: scope, 0~255, everything darker than S counts as "dark".
The idea is to increase the exposure of dark pixels by an increment and leave the rest unchanged, so that more detail becomes visible in the dark areas.
m_imgOriginal: original image, type Mat
m_imgNew: new image, cloned from m_imgOriginal, type Mat
int OpenCVClass::AddExposure(int v, int s)
{
    int new_r = v*m_mean_val.val[0] / 150;
    int new_g = v*m_mean_val.val[1] / 150;
    int new_b = v*m_mean_val.val[2] / 150;

    for (int y = 0; y < m_imgOriginal.rows; y++)
    {
        auto ptr = m_imgOriginal.ptr<uchar>(y);
        auto qtr = m_imgNew.ptr<uchar>(y);
        for (int x = 0; x < m_imgOriginal.cols; x++)
        {
            int mean = (ptr[0] + ptr[1] + ptr[2]) / 3;
            if (mean <= s)
            {
                int r = ptr[0] + new_r;
                qtr[0] = r > 255 ? 255 : r;
                int g = ptr[1] + new_g;
                qtr[1] = g > 255 ? 255 : g;
                int b = ptr[2] + new_b;
                qtr[2] = b > 255 ? 255 : b;

                int newMean = (qtr[0] + qtr[1] + qtr[2]) / 3;
                if (newMean > s)
                {
                    int nr = ptr[0] + (s - mean);
                    int ng = ptr[1] + (s - mean);
                    int nb = ptr[2] + (s - mean);
                    qtr[0] = nr > 255 ? 255 : nr;
                    qtr[1] = ng > 255 ? 255 : ng;
                    qtr[2] = nb > 255 ? 255 : nb;
                }
            }
            else
            {
                qtr[0] = ptr[0];
                qtr[1] = ptr[1];
                qtr[2] = ptr[2];
            }
            ptr += 3;
            qtr += 3;
        }
        RenderBuffer(m_imgNew, m_displayBuffer);
    }
    return 0;
}
Before optimization:
After optimization:
First, I would suggest calculating a luminance value for each pixel when testing against 's'. I mean calculate 'mean' in a different way (see this link on how to calculate luminance):
http://www.niwa.nu/2013/05/math-behind-colorspace-conversions-rgb-hsl/
Second, you are dealing with an 8-bit-per-channel image; don't expect nearly-black or fully-black pixels to gain any extra detail when you make them "brighter", they will just become grey or whiter.
Third, when "adding" brightness, I suggest using the HSL representation of pixel color values and increasing the luminance. In pseudocode:
1) Convert pixel color from RGB to HSL.
2) Increase luminance (or 'lightness').
3) Convert back pixel color to RGB.
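A minimal OpenCV sketch of that pseudocode (my own, assuming an 8-bit BGR input; note that OpenCV's HLS channel order is H, L, S, and the threshold/boost parameters simply mirror the s and v arguments of AddExposure):
#include <opencv2/opencv.hpp>

cv::Mat brightenDark(const cv::Mat& bgr, int lumThreshold, int lumBoost)
{
    cv::Mat hls;
    cv::cvtColor(bgr, hls, cv::COLOR_BGR2HLS);   // CV_BGR2HLS on older OpenCV versions
    for (int y = 0; y < hls.rows; y++)
    {
        cv::Vec3b* row = hls.ptr<cv::Vec3b>(y);
        for (int x = 0; x < hls.cols; x++)
        {
            uchar& lum = row[x][1];              // lightness channel
            if (lum <= lumThreshold)
                lum = cv::saturate_cast<uchar>(lum + lumBoost);
        }
    }
    cv::Mat out;
    cv::cvtColor(hls, out, cv::COLOR_HLS2BGR);
    return out;
}
Only the lightness of dark pixels is changed, so hue and saturation are preserved.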

Skin Detection with Gaussian Mixture Models

I'm implementing a skin detection algorithm according to this article. There are two models on page 21: a Mixture of Gaussians skin color model and a non-skin color model.
The first model, for skin detection, works very well.
Here are examples:
1) Original image:
2) Skin mask:
But the non-skin model gives wrong results:
Here is my code:
ipl_image_wrapper NudityDetector::filterPixelsWithGMM(const float covarinceMatrix[][3], const float meanMatrix[][3], const float weightVector[], const float probValue) const
{
    ipl_image_wrapper mask = cvCreateImage(cvGetSize(m_image.get()), IPL_DEPTH_8U, 1);
    double probability = 0.0;
    float x[3] = { 0, 0, 0 };

    for(int i = 0; i < m_image.get()->height; ++i)
    {
        for(int j = 0; j < m_image.get()->width; ++j)
        {
            if (m_image.get()->nChannels == 3)
            {
                x[0] = (reinterpret_cast<uchar*>(m_image.get()->imageData + i * m_image.get()->widthStep))[j * 3 + 2];
                x[1] = (reinterpret_cast<uchar*>(m_image.get()->imageData + i * m_image.get()->widthStep))[j * 3 + 1];
                x[2] = (reinterpret_cast<uchar*>(m_image.get()->imageData + i * m_image.get()->widthStep))[j * 3];

                double cov_det = 0.0;
                double power = 0.0;
                double A1 = 0.0;
                double A2 = 0.0;
                double A3 = 0.0;
                probability = 0;

                for (int k = 0; k < 16; ++k)
                {
                    cov_det = covarinceMatrix[k][0] * covarinceMatrix[k][1] * covarinceMatrix[k][2];
                    A1 = covarinceMatrix[k][1] * covarinceMatrix[k][2];
                    A2 = covarinceMatrix[k][0] * covarinceMatrix[k][2];
                    A3 = covarinceMatrix[k][0] * covarinceMatrix[k][1];
                    power = (std::pow((x[0] - meanMatrix[k][0]), 2) * A1 +
                             std::pow((x[1] - meanMatrix[k][1]), 2) * A2 +
                             std::pow((x[2] - meanMatrix[k][2]), 2) * A3 ) / (2 * cov_det);
                    probability += 100 * weightVector[k] * std::exp(-power) / (std::pow(2 * M_PI, 3/2) * std::pow(cov_det, 1/2));
                }

                if ( probability < probValue)
                {
                    (reinterpret_cast<uchar*>(mask.get()->imageData + i * mask.get()->widthStep))[j] = 0;
                }
                else
                {
                    (reinterpret_cast<uchar*>(mask.get()->imageData + i * mask.get()->widthStep))[j] = 255;
                }
            }
        }
    }

    cvDilate(mask.get(), mask.get(), NULL, 2);
    cvErode(mask.get(), mask.get(), NULL, 1);

    return mask;
}

ipl_image_wrapper NudityDetector::detectSkinWithGMM(const float probValue) const
{
    //matrices are from article
    ipl_image_wrapper mask = filterPixelsWithGMM(COVARIANCE_SKIN_MATRIX, MEAN_SKIN_MATRIX, SKIN_WEIGHT_VECTOR, probValue);
    return mask;
}

ipl_image_wrapper NudityDetector::detectNonSkinWithGMM(const float probValue) const
{
    //matrices are from article
    ipl_image_wrapper mask = filterPixelsWithGMM(COVARIANCE_NON_SKIN_MATRIX, MEAN_NON_SKIN_MATRIX, NON_SKIN_WEIGHT_VECTOR, probValue);
    return mask;
}
What am I doing wrong? Did I misunderstand the meaning of the article, or did I translate the formula into code incorrectly?
Thanks in advance!
In fact, there seems to be nothing wrong with the results: the non-skin model correctly identifies non-skin regions as 255 and skin regions as 0. You may just need to tune the probValue parameter to a lower value to get rid of some false negatives (small non-skin regions).
GMM may not be an effective approach for skin detection; you may want to employ some edge intensity information as a regularization term so that the detected regions do not become fragmented.
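As a side note on the likelihood itself: a single weighted, diagonal-covariance Gaussian component of the mixture can be evaluated as below (a hedged sketch; the variable names are mine). In particular, the exponents have to be written as 1.5 and 0.5, because the integer expressions 3/2 and 1/2 used in the question's code truncate to 1 and 0 and so distort the normalization constant:
#include <cmath>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// Likelihood contribution of one weighted Gaussian with a diagonal covariance.
double gaussianComponent(const float x[3], const float mean[3],
                         const float cov[3], float weight)
{
    double covDet = cov[0] * cov[1] * cov[2];
    double power = 0.0;
    for (int c = 0; c < 3; ++c)
        power += std::pow(x[c] - mean[c], 2) / (2.0 * cov[c]);
    return weight * std::exp(-power) /
           (std::pow(2.0 * M_PI, 1.5) * std::sqrt(covDet));
}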

Pixels in YUV image

I am using OpenCV for object tracking. I read that a YUV image is a better option to use than an RGB image. My problem is that I fail to understand the YUV format even though I have spent much time reading notes. Y is the brightness, which I believe is calculated from a combination of the R, G and B components.
My main problem is how I can access and manipulate the pixels in a YUV image. In RGB format it is easy to access a component and change it with a simple operation like
src.at<Vec3b>(j,i).val[0] = 0; for example
But this is not the case in YUV. I need help accessing and changing pixel values in a YUV image. For example, if a pixel in RGB is red, then I want to keep only the corresponding pixel in YUV and remove the rest. Please help me with this.
I would suggest operating on your image in HSV or LAB rather than RGB.
The raw image from the camera will be in YCbCr (sometimes called YUV, which I think is incorrect, but I may be wrong), and laid out in a way that resembles something like YUYV (repeating), so if you can convert directly from that to HSV, you will avoid additional copy and conversion operations, which will save you some time. That may only matter, however, if you're processing video or batches of images.
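If the camera really does hand you a packed YUYV buffer, here is a hedged sketch of getting it into HSV with OpenCV's built-in conversions (this assumes a reasonably recent OpenCV that provides COLOR_YUV2BGR_YUY2; there is no direct YUYV-to-HSV constant, so it goes through BGR):
// `data`, `width` and `height` are assumed; the buffer holds width*height*2 bytes of packed YUYV.
cv::Mat yuyv(height, width, CV_8UC2, data);
cv::Mat bgr, hsv;
cv::cvtColor(yuyv, bgr, cv::COLOR_YUV2BGR_YUY2);
cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);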
Here's some C++ code for converting between YCbCr and RGB (one uses integer math, the other floating point):
Colour::bgr Colour::YCbCr::toBgrInt() const
{
    int c0 = 22987;
    int c1 = -11698;
    int c2 = -5636;
    int c3 = 29049;

    int y = this->y;
    int cb = this->cb - 128;
    int cr = this->cr - 128;

    int b = y + (((c3 * cb) + (1 << 13)) >> 14);
    int g = y + (((c2 * cb + c1 * cr) + (1 << 13)) >> 14);
    int r = y + (((c0 * cr) + (1 << 13)) >> 14);

    if (r < 0)
        r = 0;
    else if (r > 255)
        r = 255;

    if (g < 0)
        g = 0;
    else if (g > 255)
        g = 255;

    if (b < 0)
        b = 0;
    else if (b > 255)
        b = 255;

    return Colour::bgr(b, g, r);
}

Colour::bgr Colour::YCbCr::toBgrFloat() const
{
    float y = this->y;
    float cb = this->cb;
    float cr = this->cr;

    int r = y + 1.40200 * (cr - 0x80);
    int g = y - 0.34414 * (cb - 0x80) - 0.71414 * (cr - 0x80);
    int b = y + 1.77200 * (cb - 0x80);

    if (r < 0)
        r = 0;
    else if (r > 255)
        r = 255;

    if (g < 0)
        g = 0;
    else if (g > 255)
        g = 255;

    if (b < 0)
        b = 0;
    else if (b > 255)
        b = 255;

    return Colour::bgr(b, g, r);
}
And a conversion from BGR to HSV:
Colour::hsv Colour::bgr2hsv(bgr const& in)
{
    Colour::hsv out;
    int const hstep = 255 / 3; // Hue step size between red -> green -> blue

    int min = in.r < in.g ? in.r : in.g;
    min = min < in.b ? min : in.b;

    int max = in.r > in.g ? in.r : in.g;
    max = max > in.b ? max : in.b;

    out.v = max; // v
    int chroma = max - min;
    if (max > 0)
    {
        out.s = 255 * chroma / max; // s
    }
    else
    {
        // r = g = b = 0 // s = 0, v is undefined
        out.s = 0;
        out.h = 0;
        out.v = 0; // it's now undefined
        return out;
    }

    if (chroma == 0)
    {
        out.h = 0;
        return out;
    }

    const int chroma2 = chroma * 2;
    int offset;
    int diff;

    if (in.r == max)
    {
        offset = 3 * hstep;
        diff = in.g - in.b;
    }
    else if (in.g == max)
    {
        offset = hstep;
        diff = in.b - in.r;
    }
    else
    {
        offset = 2 * hstep;
        diff = in.r - in.g;
    }

    int h = offset + (diff * (hstep + 1)) / chroma2;

    // Rotate such that red has hue 0
    if (h >= 255)
        h -= 255;

    assert(h >= 0 && h < 256);

    out.h = h;
    return out;
}
Unfortunately I do not have code to do this in one step.
You can also use the built-in OpenCV functions for colour conversion.
cvtColor(img, img, CV_BGR2HSV);
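And likewise for the YCbCr/YCrCb space discussed above (a hedged example using the same C-style constants as the line above; the newer cv::COLOR_BGR2YCrCb names work as well):
cvtColor(img, img, CV_BGR2YCrCb);   // BGR -> YCrCb
cvtColor(img, img, CV_YCrCb2BGR);   // and back again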
Also, the U and V components are calculated as linear combinations of RGB values. This means that different intensities of red (R,0,0) are mapped to some (y*R + a, u*R + b, v*R + c), which in turn means that to detect "red" in YUV one can check whether the distance of the pixel to that line determined by y, u, v, a, b, c (some of which are redundant) is close to zero. That's achievable with a single dot product. Then set the remaining pixels to (0,128,128) in YUV space (I think that's R=0, G=0, B=0 in almost all varieties of YCrCb, YUV and such).
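To make that concrete, here is a hedged sketch of my own (not code from the answer above): it keeps pixels whose chroma points in roughly the same direction as pure red and neutralises everything else, working on an 8-bit YCrCb image as produced by cvtColor with CV_BGR2YCrCb (channel order Y, Cr, Cb); the cosine threshold is an arbitrary tuning parameter:
#include <cmath>
#include <opencv2/opencv.hpp>

void keepOnlyRed(cv::Mat& ycrcb, double cosThreshold = 0.9)
{
    // Chroma direction of pure red: for (B,G,R) = (0,0,255), (Cr-128, Cb-128) is roughly (127, -43).
    const double rx = 127.0, ry = -43.0;
    const double rn = std::sqrt(rx*rx + ry*ry);
    for (int y = 0; y < ycrcb.rows; y++)
    {
        cv::Vec3b* row = ycrcb.ptr<cv::Vec3b>(y);
        for (int x = 0; x < ycrcb.cols; x++)
        {
            double cx = row[x][1] - 128.0; // Cr
            double cy = row[x][2] - 128.0; // Cb
            double n = std::sqrt(cx*cx + cy*cy);
            double cosine = (n > 1e-6) ? (cx*rx + cy*ry) / (n*rn) : 0.0;
            if (cosine < cosThreshold)
                row[x] = cv::Vec3b(0, 128, 128); // neutral "black" in YCrCb
        }
    }
}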
There are several YUV formats, but the common ones keep Y at the same resolution as the original image, while U and V are half size and are saved as separate or interlaced planes/channels after the single-channel Y image buffer.
This allows you to efficiently access Y as a 1-channel 8-bit greyscale image.
Pixel access and manipulation does not care about the color format, so the same code applies to the Y, U and V components. If you need access in RGB mode, it is probably best to call cv::cvtColor on your region of interest first.
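For instance, with a planar YUV 4:2:0 (I420) buffer laid out as described (full-resolution Y followed by the quarter-size U and V planes), the Y plane can be viewed as a greyscale image without any copying; a hedged sketch, with `yuv420`, `width` and `height` assumed:
cv::Mat yPlane(height, width, CV_8UC1, yuv420);   // zero-copy view of the Y plane
cv::Mat bgr;
cv::cvtColor(cv::Mat(height * 3 / 2, width, CV_8UC1, yuv420),
             bgr, cv::COLOR_YUV2BGR_I420);        // full conversion when you do need BGR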

Accessing certain pixel RGB value in openCV

I have searched the internet and Stack Overflow thoroughly, but I haven't found an answer to my question:
How can I get and set the RGB value of a certain pixel (given by its x,y coordinates) in OpenCV? What's important: I'm writing in C++ and the image is stored in a cv::Mat variable. I know there is an IplImage() operator, but IplImage is not very comfortable to use; as far as I know it comes from the C API.
Yes, I'm aware that there was already this Pixel access in OpenCV 2.2 thread, but it was only about black and white bitmaps.
EDIT:
Thank you very much for all your answers. I see there are many ways to get/set the RGB value of a pixel. I got one more idea from a close friend - thanks, Benny! It's very simple and effective. I think it's a matter of taste which one you choose.
Mat image;
(...)
Point3_<uchar>* p = image.ptr<Point3_<uchar> >(y,x);
And then you can read/write RGB values with:
p->x //B
p->y //G
p->z //R
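For example (my own addition, assuming a 3-channel 8-bit BGR Mat), setting that pixel to pure red through the pointer:
p->x = 0;    // B
p->y = 0;    // G
p->z = 255;  // R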
Try the following:
cv::Mat image = ...do some stuff...;
image.at<cv::Vec3b>(y,x); gives you the RGB (it might be ordered as BGR) vector of type cv::Vec3b
image.at<cv::Vec3b>(y,x)[0] = newval[0];
image.at<cv::Vec3b>(y,x)[1] = newval[1];
image.at<cv::Vec3b>(y,x)[2] = newval[2];
The low-level way would be to access the matrix data directly. In an RGB image (which I believe OpenCV typically stores as BGR), and assuming your cv::Mat variable is called frame, you could get the blue value at location (x, y) (from the top left) this way:
frame.data[frame.channels()*(frame.cols*y + x)];
Likewise, to get B, G, and R:
uchar b = frame.data[frame.channels()*(frame.cols*y + x) + 0];
uchar g = frame.data[frame.channels()*(frame.cols*y + x) + 1];
uchar r = frame.data[frame.channels()*(frame.cols*y + x) + 2];
Note that this code assumes the stride is equal to the width of the image.
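A stride-aware variant (my own sketch) uses Mat::step instead, so it also works when rows are padded:
uchar b = frame.data[frame.step[0]*y + frame.channels()*x + 0];
uchar g = frame.data[frame.step[0]*y + frame.channels()*x + 1];
uchar r = frame.data[frame.step[0]*y + frame.channels()*x + 2];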
A piece of code is easier for people who have this problem, so I'll share mine; you can use it directly. Please note that OpenCV stores pixels as BGR.
cv::Mat vImage_;
if(src_)
{
    cv::Vec3f vec_;
    for(int i = 0; i < vHeight_; i++)
        for(int j = 0; j < vWidth_; j++)
        {
            vec_ = cv::Vec3f((*src_)[0]/255.0, (*src_)[1]/255.0, (*src_)[2]/255.0); // Please note that OpenCV stores pixels as BGR.
            vImage_.at<cv::Vec3f>(vHeight_-1-i, j) = vec_;
            ++src_;
        }
}

if(! vImage_.data ) // Check for invalid input
    printf("failed to read image by OpenCV.");
else
{
    cv::namedWindow( windowName_, CV_WINDOW_AUTOSIZE);
    cv::imshow( windowName_, vImage_); // Show the image.
}
The current version allows the cv::Mat::at function to handle 3 dimensions. So for a Mat object m, m.at<uchar>(0,0,0) should work.
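Note that the three-index at<> overload is meant for genuinely 3-dimensional matrices; a hedged example of constructing one (rows and cols assumed to be defined):
int sz[] = { rows, cols, 3 };
cv::Mat m(3, sz, CV_8U, cv::Scalar(0));   // 3-dimensional, single-channel
uchar value = m.at<uchar>(0, 0, 0);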
uchar* value = img2.data; // pointer to the first byte of pixel data (all values laid out in one array)
int r = 2;
for (size_t i = 0; i < img2.cols * (img2.rows * img2.channels()); i++)
{
    if (r > 2) r = 0;
    if (r == 0) value[i] = 0;    // B
    if (r == 1) value[i] = 0;    // G
    if (r == 2) value[i] = 255;  // R
    r++;
}
#include <boost/math/constants/constants.hpp>

const double pi = boost::math::constants::pi<double>();

// Marks (in green) every pixel that lies inside the given rotated ellipse.
cv::Mat distance2ellipse(cv::Mat image, cv::RotatedRect ellipse)
{
    float distance = 2.0f;
    float angle = ellipse.angle;
    cv::Point ellipse_center = ellipse.center;
    float major_axis = ellipse.size.width/2;
    float minor_axis = ellipse.size.height/2;
    cv::Point pixel;
    float a,b,c,d;

    for(int x = 0; x < image.cols; x++)
    {
        for(int y = 0; y < image.rows; y++)
        {
            auto u = cos(angle*pi/180)*(x-ellipse_center.x) + sin(angle*pi/180)*(y-ellipse_center.y);
            auto v = -sin(angle*pi/180)*(x-ellipse_center.x) + cos(angle*pi/180)*(y-ellipse_center.y);
            distance = (u/major_axis)*(u/major_axis) + (v/minor_axis)*(v/minor_axis);
            if(distance <= 1)
            {
                image.at<cv::Vec3b>(y,x)[1] = 255; // boost the green channel inside the ellipse
            }
        }
    }
    return image;
}