OpenCV cannot detect a specific color well - C++

(Screenshot: result without the mask applied)
(Screenshot: result with the mask applied)
The mask only vaguely detects the target color; I want to make the detection clearer.
void MainWindow::updatePicture(){
    Mat frame;
    Mat blurred;
    Mat grayBlurred;
    Mat hsvBlurred;
    Mat diff;
    Mat movingObjectMask;
    Mat colorMask;
    Mat result;

    this->cap.read(frame);
    blur(frame, blurred, Size(this->kernel, this->kernel)); // blur the frame
    cvtColor(blurred, grayBlurred, COLOR_BGR2GRAY);         // convert to gray

    /* make a mask that finds a moving object */
    // note: this->previous must hold a valid gray frame before the first call,
    // or absdiff will fail
    absdiff(this->previous, grayBlurred, diff); // compare with the previous frame (blurred and converted to gray)
    threshold(diff, movingObjectMask, this->thresholdVal, 255, THRESH_BINARY); // binarize it
    cvtColor(movingObjectMask, movingObjectMask, COLOR_GRAY2BGR);

    /* make a mask that finds a specific color */
    cvtColor(blurred, hsvBlurred, COLOR_BGR2HSV); // convert to HSV to track a color
    inRange(hsvBlurred, this->hsvLowerBound, this->hsvUpperBound, colorMask); // track the color
    cvtColor(colorMask, colorMask, COLOR_GRAY2BGR);

    /* apply the masks */
    bitwise_and(frame, movingObjectMask, result);
    bitwise_and(result, colorMask, result);
    cvtColor(result, result, COLOR_BGR2RGB);

    /* end */
    this->myLabel->setPixmap(mat2QPixmap(result, QImage::Format_RGB888));
    this->previous = grayBlurred;
}
As you can see in the code, I build two masks: one that detects a moving object and one that detects a specific color (technically, colors in a specific range).
The upper and lower HSV bounds are calculated as below.
void MainWindow::refreshRgb(){
    Scalar lowerBound = hsvMult(this->currentHsv, 1 - this->ratio);
    Scalar upperBound = hsvMult(this->currentHsv, 1 + this->ratio);
    this->hsvLowerBound = lowerBound;
    this->hsvUpperBound = upperBound;
}
Scalar hsvMult(const Scalar& scalar, double ratio){
    int s = static_cast<int>(scalar[1] * ratio);
    int v = static_cast<int>(scalar[2] * ratio);
    if(s > 255)
        s = 255;
    if(v > 255)
        v = 255;
    return Scalar(static_cast<int>(scalar[0]), s, v);
}
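Note that hsvMult scales only the S and V components and returns H unchanged, so hsvLowerBound and hsvUpperBound end up with the same hue, and inRange will only accept pixels whose hue exactly equals currentHsv[0]. A common alternative is a symmetric per-channel tolerance; here is a minimal sketch of that idea (the clampHsv helper and the hTol/sTol/vTol values are illustrative, not part of the original code):
#include <algorithm> // std::min, std::max

// Illustrative helper: clamp an HSV triple to OpenCV's 8-bit ranges.
static Scalar clampHsv(double h, double s, double v){
    return Scalar(std::max(0.0, std::min(179.0, h)), // 8-bit hue range is [0, 179]
                  std::max(0.0, std::min(255.0, s)),
                  std::max(0.0, std::min(255.0, v)));
}

void MainWindow::refreshRgb(){
    const double hTol = 10, sTol = 60, vTol = 60; // illustrative tolerances to tune
    this->hsvLowerBound = clampHsv(this->currentHsv[0] - hTol,
                                   this->currentHsv[1] - sTol,
                                   this->currentHsv[2] - vTol);
    this->hsvUpperBound = clampHsv(this->currentHsv[0] + hTol,
                                   this->currentHsv[1] + sTol,
                                   this->currentHsv[2] + vTol);
    // caveat: hue wraps around at 180 in OpenCV, so colors near red may need
    // two inRange calls (one near 0, one near 179) OR-ed together
}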
How can I make the detection clearer?

Related

Change color of H value

I created my mask and converted it with BGR2HSV. I have this image:
(Screenshot: the mask)
How can I change the white color in the mask? I want to replace the white parts with other colors.
Mat mask;
mask = imread("C:\\Users\\...\\Desktop\\...\\mask.png");
if (!mask.data) // was !img.data, but the variable is called mask
{
    cout << "Could not find the image";
    return -1;
}
cvtColor(mask, mask, COLOR_BGR2HSV);
cvtColor(mask, mask, COLOR_HSV2BGR);
imshow("Ergebnis", mask);
waitKey(0);
Between the two cvtColor calls, you need to split the image into its 3 channels with split. Looking at the conversion between RGB and HSV, set the S channel to 0 and choose an H value in [0, 180]. Then merge the channels back.
cv::Mat hsv = mask.clone(); // from your code
std::vector<cv::Mat> hsv_vec;
cv::split(hsv, hsv_vec);
cv::Mat &H = hsv_vec[0];
cv::Mat &S = hsv_vec[1];
cv::Mat &V = hsv_vec[2];
S = 0;
mask = (V > 10); // non-zero pixels in the original image
H.setTo(your_H_value_here, mask); // H is between 0-180 in OpenCV
cv::merge(hsv_vec, hsv);
mask = hsv; // according to your code
As a side note, I suggest using more descriptive variable names.

How do you make a face symmetric using OpenCV and C++?

I was looking at this tutorial, which said, "You can make a symmetric face by averaging a face and its mirror reflection," and showed an example of Obama's face made symmetrical. I tried doing the same with OpenCV and C++, but these are the results I am getting using the following code:
Mat3b getMean(const vector<Mat3b>& images) {
    Mat m(images[0].rows, images[0].cols, CV_64FC3); // create a 0-initialized image to use as an accumulator
    m.setTo(Scalar(0, 0, 0)); // set all image elements to 0
    Mat temp; // temp image to hold the conversion of each input image to CV_64FC3
    for (size_t i = 0; i < images.size(); ++i) { // loop through the images
        images[i].convertTo(temp, CV_64FC3); // convert the input image to CV_64FC3...
        m += temp; // ...so you can accumulate
    }
    m.convertTo(m, CV_8U, 1. / images.size()); // convert back to 8-bit, applying the division to get the actual mean
    return m;
}
int main() {
    Mat img1 = imread("E:/barack-obama.jpg"), img2, img4;
    resize(img1, img1, Size(0.4 * img1.cols, 0.4 * img1.rows), 1, 1, INTER_LINEAR);
    flip(img1, img2, +1); // horizontal mirror
    vector<Mat3b> imgs;
    imgs.push_back(img1);
    imgs.push_back(img2);
    Mat3b img3 = getMean(imgs); // compute the mean
    //img3 = (img1 + img2)*0.5;
    double alpha = 0.5, beta;
    beta = (1.0 - alpha);
    addWeighted(img1, alpha, img2, beta, 0.0, img4);
    imshow("Original", img1);
    imshow("getMean", img3);
    imshow("AddWeighted", img4);
    waitKey(0);
}

Why is the disparity output distorted in OpenCV?

First, I am Korean and my English is poor; I used Google Translate, so please bear with me.
I want to obtain disparity via OpenCV. As a preliminary step, I found corners using the findChessboardCorners function and calibrated the stereo camera using those coordinate values.
After finishing the rectification, the disparity shows only noise. Why?
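For reference, here is a minimal sketch of the rectification step the question describes, assuming stereoCalibrate has already produced cameraMatrix1/2, distCoeffs1/2, and the inter-camera rotation R and translation T; these variable names are assumptions standing in for the question's calibration output, not code from the question:
// Sketch of stereo rectification before computing disparity.
Mat R1, R2, P1, P2, Q;
stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
              imageSize, R, T, R1, R2, P1, P2, Q);

Mat map1x, map1y, map2x, map2y;
initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_32FC1, map2x, map2y);

Mat leftRect, rightRect;
remap(leftImg,  leftRect,  map1x, map1y, INTER_LINEAR);
remap(rightImg, rightRect, map2x, map2y, INTER_LINEAR);
// getDisparity below should then be fed leftRect/rightRect; if the inputs
// are not rectified, SGBM output will typically look like noise.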
// this function computes the disparity
int getDisparity(Mat leftImg, Mat rightImg)
{
    // null check
    if (leftImg.empty() || rightImg.empty()) {
        return -1;
    }
    int SADWindowSize, numberOfDisparities;
    //bool no_display;
    //float scale;
    // default values
    numberOfDisparities = 16; // 192;
    SADWindowSize = 3;        // 64;
    Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, numberOfDisparities, SADWindowSize);
    Mat img1 = leftImg;
    Mat img2 = rightImg;
    // get view size
    Size img_size = img1.size();
    numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width / 8) + 15) & -16;
    // set sgbm parameters
    sgbm->setPreFilterCap(63);
    int sgbmWinSize = SADWindowSize > 0 ? SADWindowSize : 3;
    sgbm->setBlockSize(sgbmWinSize);
    int cn = img1.channels();
    sgbm->setP1(8 * cn * sgbmWinSize * sgbmWinSize);
    sgbm->setP2(32 * cn * sgbmWinSize * sgbmWinSize);
    sgbm->setMinDisparity(0);
    sgbm->setNumDisparities(numberOfDisparities);
    sgbm->setUniquenessRatio(10);
    sgbm->setSpeckleWindowSize(100);
    sgbm->setSpeckleRange(32);
    sgbm->setDisp12MaxDiff(1);
    sgbm->setMode(StereoSGBM::MODE_SGBM);
    Mat disp, disp8;
    // compute disparity (fixed-point, scaled by 16)
    sgbm->compute(img1, img2, disp);
    // convert to 8-bit for display
    disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities * 16.));
    // show the input images
    namedWindow("left image", 1);
    imshow("left image", img1);
    namedWindow("right image", 1);
    imshow("right image", img2);
    cv::cvtColor(disp8, disparityMap, cv::COLOR_GRAY2BGR);
    // show the disparity
    namedWindow("disparity", 0);
    imshow("disparity", disparityMap);
    waitKey(1);
    return 0;
}
(Screenshots: left image, right image, disparity result, and a second sample image)

Remove colour 'shades'/shadows

What OpenCV functions can be used to ignore/filter out colour 'variation'/shades of a colour (shadows, reflections, etc.)?
Shouldn't removing the Value/Intensity channel from an HSV image create colour blocks and reduce/eliminate the 'colour variance'/shades of white caused by light?
If you look at the following image, the walls* are painted one solid, consistent colour of cream/white, but the colour varies a lot because of shadows and light reflections. (*Referring to the white walls above the lockers.)
I thought that if I converted the image to HSV and then removed the Value/Intensity channel, I could filter out those wall reflections, shadows, and colour variation, i.e. the light. Then I would just colour-reduce the image and should get one large colour block for the wall (above the lockers), i.e. see the wall in its true form/colour as one solid colour block.
But as my image above shows, the wall is not one solid/consistent colour after removing the V channel and colour reducing.
void removeIntensity()
{
    Mat hsv, hs, reducedHs;
    Mat image = imread("../../Book_Tutorials/images/11.jpg");
    if (image.cols > 300) {
        float scale = 300.0 / (float)image.rows;
        resize(image, image, { int(scale * image.cols), 300 });
    }
    cvtColor(image, hsv, COLOR_BGR2HSV);
    std::vector<Mat> hsvChannels;
    split(hsv, hsvChannels);
    // set value/intensity to a constant value: can I remove those channels completely?
    hsvChannels[2] = 0;
    merge(hsvChannels, hs);
    reduce(hs, reducedHs, 6);
    imshow("image", image);
    imshow("hsv", hsv);
    imshow("hs", hs);
    imshow("reducedHs", reducedHs);
}

void reduce(const Mat& hsv, Mat& reduced, int nColours)
{
    int n = hsv.rows * hsv.cols;
    std::vector<int> labels;
    Mat centres;
    Mat collapsedImage = hsv.reshape(1, n); // one row per pixel, one column per channel
    collapsedImage.convertTo(collapsedImage, CV_32F);
    kmeans(collapsedImage, nColours, labels,
           TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
           3, KMEANS_PP_CENTERS, centres);
    // replace each pixel with its cluster centre
    for (int i = 0; i < n; i++) {
        collapsedImage.at<float>(i, 0) = centres.at<float>(labels[i], 0);
        collapsedImage.at<float>(i, 1) = centres.at<float>(labels[i], 1);
        collapsedImage.at<float>(i, 2) = centres.at<float>(labels[i], 2);
    }
    reduced = collapsedImage.reshape(3, hsv.rows);
    reduced.convertTo(reduced, CV_8U);
}
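On the comment in removeIntensity ("can I remove those channels completely?"): instead of zeroing V, the V channel can simply be left out of the feature set given to kmeans. Below is a minimal sketch of that idea, not from the original code; the function name reduceHs and the two-column feature matrix are illustrative:
#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: cluster on H and S only, ignoring V entirely.
// Assumes `hsv` is a CV_8UC3 HSV image.
void reduceHs(const cv::Mat& hsv, cv::Mat& reduced, int nColours)
{
    int n = hsv.rows * hsv.cols;
    cv::Mat features(n, 2, CV_32F); // one row per pixel, columns = H, S
    for (int i = 0; i < n; i++) {
        const cv::Vec3b& px = hsv.at<cv::Vec3b>(i / hsv.cols, i % hsv.cols);
        features.at<float>(i, 0) = px[0]; // H
        features.at<float>(i, 1) = px[1]; // S
    }
    std::vector<int> labels;
    cv::Mat centres;
    cv::kmeans(features, nColours, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
               3, cv::KMEANS_PP_CENTERS, centres);
    // rebuild an HSV image from the cluster centres, with a fixed V for display
    reduced.create(hsv.size(), CV_8UC3);
    for (int i = 0; i < n; i++) {
        reduced.at<cv::Vec3b>(i / hsv.cols, i % hsv.cols) =
            cv::Vec3b(static_cast<uchar>(centres.at<float>(labels[i], 0)),
                      static_cast<uchar>(centres.at<float>(labels[i], 1)),
                      255); // constant V so the result is visible
    }
}
One caveat: OpenCV's 8-bit hue channel wraps around at 180, so plain Euclidean k-means can split hues that straddle the red wraparound.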

Normalizing color channels of an image by intensity values, OpenCV

I have split an image into 3 separate color channels - one blue, one green, and one red. I would like to normalize each of these channels by the image's intensity, where intensity = (red + blue + green)/3. To be clear, I am trying to make an image that is composed of one of the three color channels, divided by the image's intensity, where the intensity is described by the equation above.
I am new to OpenCV (I have worked through the tutorials that come with the documentation, but that is it) and I do not think I am doing this correctly; when the images are displayed, all the pixels appear to be black. Any advice about how to go about this normalization would be extremely helpful.
Thanks!
Here is my attempt:
int main(int argc, char** argv){
    Mat sourceImage, I;
    const char* redWindow = "Red Color Channel";
    const char* greenWindow = "Green Color Channel";
    const char* blueWindow = "Blue Color Channel";
    if(argc != 2)
    {
        cout << "Incorrect number of arguments" << endl;
        return -1; // bail out instead of continuing with no image
    }
    /* Load the image */
    sourceImage = imread(argv[1], 1);
    if(!sourceImage.data)
    {
        cout << "Image failed to load" << endl;
        return -1; // bail out on a failed load
    }
    /* First, we have to allocate the new channels */
    Mat r(sourceImage.rows, sourceImage.cols, CV_8UC1);
    Mat b(sourceImage.rows, sourceImage.cols, CV_8UC1);
    Mat g(sourceImage.rows, sourceImage.cols, CV_8UC1);
    /* Now we put these into a matrix */
    Mat out[] = {b, g, r};
    /* Split the image into the three color channels */
    split(sourceImage, out);
    /* I = (r + b + g)/3 */
    add(b, g, I);
    add(I, r, I);
    I = I / 3;
    Mat red = r / I;
    Mat blue = b / I;
    Mat green = g / I;
    /* Create the windows */
    namedWindow(blueWindow, 0);
    namedWindow(greenWindow, 0);
    namedWindow(redWindow, 0);
    /* Show the images */
    imshow(blueWindow, blue);
    imshow(greenWindow, green);
    imshow(redWindow, red);
    waitKey(0);
    return 0;
}
Once you divide by the intensity, the pixel values will be in the range [0, 1]; and since they are integers, they will be either 0 or 1. In a display image white is 255 and black is 0, so this is why everything appears black to you.
You need to use floating point to get an accurate result, and you need to scale the result by 255 to see it.
Doing that produces the image below (which I am not sure is particularly useful):
(Image source: BSDS500)
And here is the code that generated it:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp> // needed for imread/imshow/waitKey
#include <vector>

int main(int argc, char** argv)
{
    // read the BGR color image
    cv::Mat bgr_image = cv::imread("208001.jpg"); // BSDS500 mushroom
    cv::imshow("original image", bgr_image);
    cv::Mat bgr_image_f;
    bgr_image.convertTo(bgr_image_f, CV_32FC3);

    // extract the color planes and calculate I = (r + g + b) / 3
    std::vector<cv::Mat> planes(3);
    cv::split(bgr_image_f, planes);
    cv::Mat intensity_f((planes[0] + planes[1] + planes[2]) / 3.0f);
    cv::Mat intensity;
    intensity_f.convertTo(intensity, CV_8UC1);
    cv::imshow("intensity", intensity);

    // void divide(InputArray src1, InputArray src2, OutputArray dst, double scale=1, int dtype=-1)
    cv::Mat b_normalized_f;
    cv::divide(planes[0], intensity_f, b_normalized_f);
    cv::Mat b_normalized;
    b_normalized_f.convertTo(b_normalized, CV_8UC1, 255.0);
    cv::imshow("b_normalized", b_normalized);

    cv::Mat g_normalized_f;
    cv::divide(planes[1], intensity_f, g_normalized_f);
    cv::Mat g_normalized;
    g_normalized_f.convertTo(g_normalized, CV_8UC1, 255.0);
    cv::imshow("g_normalized", g_normalized);

    cv::Mat r_normalized_f;
    cv::divide(planes[2], intensity_f, r_normalized_f);
    cv::Mat r_normalized;
    r_normalized_f.convertTo(r_normalized, CV_8UC1, 255.0);
    cv::imshow("r_normalized", r_normalized);
    cv::waitKey();
}