I created my mask by converting from BGR to HSV. Here is my image:
How can I change the white color in the mask? In other words, I want to replace the white parts with other colors.
Mat mask;
mask = imread("C:\\Users\\...\\Desktop\\...\\mask.png");
if (!mask.data)
{
    cout << "Could not find the image";
    return -1;
}
cvtColor(mask, mask, COLOR_BGR2HSV);
cvtColor(mask, mask, COLOR_HSV2BGR);
imshow("Ergebnis", mask);
waitKey(0);
Between the two cvtColor calls you need to split the image into its 3 channels with split. Looking at the conversion between RGB and HSV, set the S channel to 0 and choose an H value in the range [0, 180]. Then merge the channels back.
cv::Mat hsv = mask.clone(); // from your code
std::vector<cv::Mat> hsv_vec;
cv::split(hsv, hsv_vec);
cv::Mat &H = hsv_vec[0];
cv::Mat &S = hsv_vec[1];
cv::Mat &V = hsv_vec[2];
S = 0;                            // desaturate every pixel
mask = (V > 10);                  // non-zero (bright) pixels in the original image
H.setTo(your_H_value_here, mask); // H is in [0, 180] in OpenCV
cv::merge(hsv_vec, hsv);
mask = hsv; // according to your code
As a side note, I suggest using more descriptive variable names.
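Putting the answer together with the original code, the split/modify/merge step goes between the two cvtColor calls. A minimal sketch (the hue value 90 is only a placeholder; pick any value in OpenCV's [0, 180] range):

cvtColor(mask, mask, COLOR_BGR2HSV);             // BGR -> HSV, as in the original code

const int targetHue = 90;                        // placeholder hue
std::vector<cv::Mat> hsv_vec;
cv::split(mask, hsv_vec);                        // separate H, S and V
hsv_vec[1] = 0;                                  // S = 0, as described above
cv::Mat bright = (hsv_vec[2] > 10);              // the non-black pixels of the original image
hsv_vec[0].setTo(cv::Scalar(targetHue), bright); // give those pixels the chosen hue
cv::merge(hsv_vec, mask);                        // put the channels back together

cvtColor(mask, mask, COLOR_HSV2BGR);             // HSV -> BGR for display, as in the original code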
This is the result without the mask applied:
This is the result with the mask applied:
Even though it detects the object vaguely, I want to make the detection clearer.
void MainWindow::updatePicture(){
    Mat frame;
    Mat blurred;
    Mat grayBlurred;
    Mat hsvBlurred;
    Mat diff;
    Mat movingObjectMask;
    Mat colorMask;
    Mat result;

    this->cap.read(frame);
    blur(frame, blurred, Size(this->kernel, this->kernel)); // blur the frame
    cvtColor(blurred, grayBlurred, COLOR_BGR2GRAY); // convert to gray

    /* make a mask that finds a moving object */
    absdiff(this->previous, grayBlurred, diff); // compare it with previous frame which was blurred and converted to gray
    threshold(diff, movingObjectMask, this->thresholdVal, 255, THRESH_BINARY); // binarize it
    cvtColor(movingObjectMask, movingObjectMask, COLOR_GRAY2BGR);

    /* make a mask that finds a specific color */
    cvtColor(blurred, hsvBlurred, COLOR_BGR2HSV); // convert to HSV to track a color
    inRange(hsvBlurred, this->hsvLowerBound, this->hsvUpperBound, colorMask); // track the color
    cvtColor(colorMask, colorMask, COLOR_GRAY2BGR);

    /* apply the masks */
    bitwise_and(frame, movingObjectMask, result);
    bitwise_and(result, colorMask, result);
    cvtColor(result, result, COLOR_BGR2RGB);

    /* end */
    this->myLabel->setPixmap(mat2QPixmap(result, QImage::Format_RGB888));
    this->previous = grayBlurred;
}
As you can see in the code, I make two masks: one that detects a moving object and one that detects a specific color (technically, colors in a specific range).
The upper and lower HSV bounds were calculated as shown below.
void MainWindow::refreshRgb(){
    Scalar lowerBound = hsvMult(this->currentHsv, 1 - this->ratio);
    Scalar upperBound = hsvMult(this->currentHsv, 1 + this->ratio);
    this->hsvLowerBound = lowerBound;
    this->hsvUpperBound = upperBound;
}

Scalar hsvMult(const Scalar& scalar, double ratio){
    int s = static_cast<int>(scalar[1]*ratio);
    int v = static_cast<int>(scalar[2]*ratio);
    if(s > 255)
        s = 255;
    if(v > 255)
        v = 255;
    return Scalar(static_cast<int>(scalar[0]), s, v);
}
How can I make the detection clearer?
I'm trying to make an AR app using ArUco and OpenCV (I'm a newbie). It detects an ArUco marker and puts an image on it. I have tried to use the warpPerspective() function, but something is wrong: it returns the OpenCV error "Assertion failed (M0.type() == CV_32F || M0.type() == CV_64F)" in warpPerspective. Please give me a way to solve it.
int main() {
    cv::VideoCapture inputVideo;
    inputVideo.open("gal.mp4");
    cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Mat sq = imread("zhuz.jpg", CV_LOAD_IMAGE_UNCHANGED);

    while (inputVideo.grab()) {
        vector<Point2f> sqPoints;
        vector<Point2f> p;
        sqPoints.push_back(Point2f(0, 0));
        sqPoints.push_back(Point2f(sq.cols, 0));
        sqPoints.push_back(Point2f(sq.cols, sq.rows));
        sqPoints.push_back(Point2f(0, sq.rows));

        cv::Mat image, warp_matrix;
        inputVideo.retrieve(image);
        Mat cpy_img(image.rows, image.cols, image.type());
        Mat neg_img(image.rows, image.cols, image.type());
        Mat gray;
        Mat blank(sq.rows, sq.cols, sq.type());

        std::vector<int> ids;
        std::vector<std::vector<cv::Point2f>> corners;
        cv::aruco::detectMarkers(image, dictionary, corners, ids);

        if (ids.size() > 0) {
            p.push_back(corners[0][0]);
            p.push_back(corners[0][1]);
            p.push_back(corners[0][2]);
            p.push_back(corners[0][3]);

            Mat wrap_matrix = getPerspectiveTransform(sqPoints, p);

            blank = Scalar(0);
            neg_img = Scalar(0); // fill with black (all pixel values zero)
            cpy_img = Scalar(0); // fill with black (all pixel values zero)
            bitwise_not(blank, blank);

            warpPerspective(sq, neg_img, warp_matrix, Size(neg_img.cols, neg_img.rows)); // Transform overlay image to the position - [ITEM1]
            warpPerspective(blank, cpy_img, warp_matrix, Size(cpy_img.cols, neg_img.rows)); // Transform a blank overlay image to position
            bitwise_not(cpy_img, cpy_img); // Invert the blank overlay from white to black
            bitwise_and(cpy_img, image, cpy_img); // Create a "hole" in the image to create a "clipping" mask - [ITEM2]
            bitwise_or(cpy_img, neg_img, image); // Finally merge both items [ITEM1 & ITEM2]
        }
        cv::imshow("out", image);
        cv::waitKey(1); // let the window refresh
    }
}
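One thing stands out in the snippet itself: getPerspectiveTransform stores its result in a new local Mat called wrap_matrix, but warpPerspective is then called with warp_matrix, the Mat declared further up that is never assigned. An empty Mat fails warpPerspective's check that the transform is a 3x3 CV_32F or CV_64F matrix, which matches the assertion above. A minimal sketch of the fix is to use one name consistently:

// inside the if (ids.size() > 0) block, instead of declaring a second matrix:
warp_matrix = getPerspectiveTransform(sqPoints, p); // fills the 3x3 CV_64F transform declared earlier
warpPerspective(sq, neg_img, warp_matrix, Size(neg_img.cols, neg_img.rows));
warpPerspective(blank, cpy_img, warp_matrix, Size(cpy_img.cols, cpy_img.rows));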
The implementation of this functionality seems pretty straightforward in Python, as shown here: http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html
Yet, when I try to do exactly the same in C++, I get a bad-argument error (for the grabCut function). How do I put the mask image in the right format?
I am a newbie at this, so I'd be very thankful if someone could help me understand better. Thank you!
Here's what I have so far:
Mat image;
image= imread(file);
Mat mask;
mask.setTo( GC_BGD );
mask = imread("messi5.png");
Mat image2 = image.clone();
// define bounding rectangle
cv::Rect rectangle(startX, startY, width, height);
cv::Mat result; // segmentation result (4 possible values)
cv::Mat bgModel,fgModel; // the models (internally used)
//// GrabCut segmentation that works, but with a rectangle, not with the mask I need
//cv::grabCut(image, // input image
// result, // segmentation result
// rectangle,// rectangle containing foreground
// bgModel,fgModel, // models
// 1, // number of iterations
// cv::GC_INIT_WITH_RECT); // use rectangle
grabCut( image, mask, rectangle, bgModel, fgModel, 1, GC_INIT_WITH_MASK);
cv::compare(mask,cv::GC_PR_FGD,mask,cv::CMP_EQ);
cv::Mat foreground(image.size(),CV_8UC3,cv::Scalar(255,255,255));
image.copyTo(foreground,mask); // bg pixels not copied
namedWindow( "Display window", WINDOW_AUTOSIZE );
imshow( "Display window", foreground );
waitKey(0);
return 0;
}
It looks like you have misunderstood the guide. Here is the relevant part, repeated from the guide linked in the question:
# newmask is the mask image I manually labelled
newmask = cv2.imread('newmask.png',0)
# whereever it is marked white (sure foreground), change mask=1
# whereever it is marked black (sure background), change mask=0
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask[:,:,np.newaxis]
plt.imshow(img),plt.colorbar(),plt.show()
This is not what you have done, I'm afraid. For a start, you seem to have set the mask to the RGB image:
mask = imread("messi5.png");
whereas it should be set to the mask image:
mask = imread("newmask.png",CV_LOAD_IMAGE_GRAYSCALE);
EDIT from comments:
To get a mask from a pure red mask painted over the image (an actual mask image would be better):
cv::Mat maskTmp = imread("messi5.png");
std::vector<cv::Mat> channels(3);
cv::split(maskTmp, channels);
cv::Mat maskRed = channels[2]; // red is the last channel in BGR order
Now threshold the red channel to get your binary mask.
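For example (a sketch; the cut-off of 200 is an assumed value and depends on how strong the painted red is):

cv::Mat binaryMask;
cv::threshold(maskRed, binaryMask, 200, 255, cv::THRESH_BINARY); // pixels with a strong red component -> 255, everything else -> 0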
I'm trying to get at the values in a binary image in order to invert it, but I'm having trouble indexing the matrix. The first lines of my code are:
std::string path = "img/lena.jpg";

//Our color image
cv::Mat imageMat = cv::imread(path, CV_LOAD_IMAGE_COLOR);
if(imageMat.empty())
{
    std::cerr << "ERROR: Could not read image " << path << std::endl;
    return 1;
}

//Grayscale matrix
cv::Mat grayscaleMat (imageMat.size(), CV_8U);

//Convert BGR to Gray
cv::cvtColor( imageMat, grayscaleMat, CV_BGR2GRAY );

//Binary image
cv::Mat binaryMat(grayscaleMat.size(), grayscaleMat.type());

//Apply thresholding
cv::threshold(grayscaleMat, binaryMat, 100, 255, cv::THRESH_BINARY);
Now I need to work with the values in binaryMat, but I don't know how to access them...
1: With OpenCV's C++ API you don't need to pre-allocate output/result Mats; just leave them empty.
//Convert BGR to Gray
cv::Mat grayscaleMat;
cv::cvtColor( imageMat, grayscaleMat, CV_BGR2GRAY );
//Apply thresholding
cv::Mat binaryMat;
cv::threshold(grayscaleMat, binaryMat, 100, 255, cv::THRESH_BINARY);
2: Now access the pixels:
uchar p = binaryMat.at<uchar>(y, x); // note the (row, col) order!
binaryMat.at<uchar>(5, 5) = 17;
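Building on that access pattern, the inversion the question asks about could be sketched like this (for a binary 0/255 image, subtracting each pixel from 255 flips it):

for (int y = 0; y < binaryMat.rows; y++)
{
    for (int x = 0; x < binaryMat.cols; x++)
    {
        binaryMat.at<uchar>(y, x) = 255 - binaryMat.at<uchar>(y, x); // 0 -> 255, 255 -> 0
    }
}
// the same result in one call: cv::bitwise_not(binaryMat, binaryMat);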
I'm trying to split two images along a seam, and then blend them together. In this process, I need to cut out each image along the seam by applying a mask. How can I apply a mask? I tried bitwise_and and multiplying the mask and the image, but neither worked.
int pano_width = left_template_width + right_template_width - roi_width;
// add zeros to the right of the left template
Mat full_left = Mat::zeros(roi_height, pano_width, CV_32FC3);
Mat tmp_l = full_left(Rect(0,0, left_template_width, roi_height));
imshow("Scene mask", mask0f3);
imshow("Cropped scene", cropped_scene);
Mat left_masked;
//bitwise_and(cropped_scene, mask0f3, left_masked); // full_left looks all black
multiply(cropped_scene, mask0f3, left_masked); // full_left looks like the scene mask, but with an extra black rectangle on the right side
left_masked.copyTo(tmp_l);
imshow("Full left", full_left);
I resorted to a terribly inefficient, but working, hack:
void apply_mask(Mat& img, Mat mask) {
    CV_Assert(img.rows == mask.rows);
    CV_Assert(img.cols == mask.cols);
    print_mat_type(img);
    print_mat_type(mask);
    for (int r = 0; r < mask.rows; r++) {
        for (int c = 0; c < mask.cols; c++) {
            if (mask.at<uchar>(r, c) == 0) {
                img.at<Vec3f>(r, c) = Vec3f(0, 0, 0);
            }
        }
    }
}
Here is a snippet that works using bitwise_and (see the docs for how this method works):
Mat img = imread("lena.jpg");
Mat mask = Mat::zeros(img.rows, img.cols, CV_8UC1);
Mat halfMask = mask(cv::Rect(0, 0, img.cols/2, img.rows/2)); // Rect takes (x, y, width, height)
halfMask.setTo(cv::Scalar(255));
Mat left_masked;
bitwise_and(img, cv::Scalar(255,255,255), left_masked, mask);
So you can use something like:
bitwise_and(cropped_scene, cv::Scalar(255,255,255), left_masked, mask); // mask must be CV_8UC1!
But you have to change the type, or create a new mask that has type CV_8UC1.
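A sketch of that conversion, assuming mask0f3 from the question is a 3-channel float mask with values in [0, 1] (its exact type is not shown in the question, so adjust the scale factor if needed):

cv::Mat maskGray, mask8u;
cv::cvtColor(mask0f3, maskGray, cv::COLOR_BGR2GRAY); // collapse the three channels into one
maskGray.convertTo(mask8u, CV_8UC1, 255.0);          // scale 0..1 to 0..255 and convert to 8-bit
// mask8u can now be used as the mask argument of bitwise_and or copyTo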
EDIT: Your function apply_mask can look like:
void apply_mask(Mat& img, Mat& mask, Mat& result) {
    CV_Assert(img.rows == mask.rows);
    CV_Assert(img.cols == mask.cols);
    CV_Assert(img.type() == CV_32FC3);
    CV_Assert(mask.type() == CV_8UC1);
    result = Mat::zeros(img.size(), img.type()); // start from an all-black result
    img.copyTo(result, mask);                    // copy only the pixels where the mask is non-zero
    // (bitwise_and with a float Scalar would operate on the raw IEEE bit patterns and corrupt the values)
}
Unfortunately, if you pass the input image as the output image to bitwise_and, you get an all-black output, but passing a separate output argument works fine.
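A possible usage sketch, assuming cropped_scene is the CV_32FC3 image from the question and mask8u is an 8-bit single-channel mask like the one built above:

Mat left_masked;
apply_mask(cropped_scene, mask8u, left_masked); // left_masked keeps the scene only where the mask is non-zero
left_masked.copyTo(tmp_l);                      // and can be copied into the panorama as before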