I want to do an element-wise multiplication between an image and a mask. I want to multiply the V (value) channel in HSV by 0.3 inside a polygon. I think the problem is the mismatch between CV_32FC3 and CV_8UC3, but even after converting it still doesn't work correctly.
How can I do this? Here is my current code:
Mat mask = Mat(frame.size(), CV_32FC3, cv::Scalar(1, 1, 1));
cv::fillConvexPoly(mask, pts, 3, cv::Scalar(1,1,0.3));
cvtColor(frame, frame, CV_BGR2HSV);
frame.convertTo(frame, CV_32FC3);
cv::multiply(frame,mask,frame);
frame.convertTo(frame, CV_8UC3);
cvtColor(frame, frame, CV_HSV2BGR);
If I do only this, I can see that the mask is fine (the white and black regions change as expected):
Mat mask = Mat(frame.size(), CV_32FC3, cv::Scalar(1, 1, 1));
cv::fillConvexPoly(mask, pts, 3, cv::Scalar(0,0,0));
imshow("mask", mask);
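For reference, cv::multiply needs both inputs to have the same size and type, and only the V channel needs scaling, so one way is to multiply just that channel with a single-channel float mask. A minimal sketch, assuming frame is the CV_8UC3 BGR frame and pts is a std::vector<cv::Point> holding the three polygon corners (names taken from the question):
// Sketch: scale only the V channel inside the polygon.
cv::Mat hsv;
cv::cvtColor(frame, hsv, CV_BGR2HSV);
hsv.convertTo(hsv, CV_32FC3);
cv::Mat vScale(frame.size(), CV_32FC1, cv::Scalar(1.0f)); // 1.0 = keep V as is
cv::fillConvexPoly(vScale, pts, cv::Scalar(0.3f));        // 0.3 = darken V inside the polygon
std::vector<cv::Mat> ch;
cv::split(hsv, ch);
ch[2] = ch[2].mul(vScale);   // multiply the V channel only; sizes and types match
cv::merge(ch, hsv);
hsv.convertTo(hsv, CV_8UC3);
cv::cvtColor(hsv, frame, CV_HSV2BGR);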
What I'm basically trying to do is blur an image and combine it back with the original, so that only certain areas of the original image are blurred (the face should be blurred).
My general idea was to mask the parts of the original I want blurred, then blur a copy of the original and merge the two together again.
To a certain extent this also worked.
My images:
(1) Original
(2) Original with parts that should be blurred
(3) Blurred
My C++ code that creates these images:
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main(void) {
cv::Mat srcImage = cv::imread(path);
srcImage.convertTo(srcImage, CV_32FC3, 1.0/255.0);
Mat _mask;
Mat img_gray;
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
cv::cvtColor(srcImage, img_gray, cv::COLOR_BGR2GRAY);
img_gray.convertTo(_mask, CV_32FC1);
// face
cv::circle(_mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(_mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(_mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(_mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
cv::threshold(1.0-_mask, _mask, 0.9, 1.0, cv::THRESH_BINARY_INV);
cv::GaussianBlur(_mask,_mask,Size(21,21),11.0);
cv::Mat res;
cv::Mat bg = Mat(srcImage.size(), CV_32FC3);
bg = cv::Scalar(1.0, 1.0 ,1.0);
vector<Mat> ch_img(3);
vector<Mat> ch_bg(3);
cv::split(srcImage, ch_img);
cv::split(bg, ch_bg);
ch_img[0] = ch_img[0].mul(_mask) + ch_bg[0].mul(1.0 - _mask);
ch_img[1] = ch_img[1].mul(_mask) + ch_bg[1].mul(1.0 - _mask);
ch_img[2] = ch_img[2].mul(_mask) + ch_bg[2].mul(1.0 - _mask);
cv::merge(ch_img, res);
cv::merge(ch_bg, bg);
// original but with white mask
res.convertTo(res, CV_8UC3, 255.0);
imwrite("original_with_mask.jpg", res);
// blur original image
cv::Mat blurredImage;
bilateralFilter(srcImage, blurredImage, 10, 20, 5);
GaussianBlur(srcImage, blurredImage, Size(19, 19), 0, 0);
blurredImage.convertTo(blurredImage, CV_8UC3, 255.0);
imwrite("blurred.jpg", blurredImage);
cv::Mat maskedImage;
maskedImage = Mat(srcImage.size(), CV_32FC3);
// now combine blurred image and original using mask
// this fails
cv::bitwise_and(blurredImage, _mask, maskedImage);
cv::imwrite("masked.jpg", maskedImage);
}
My problem is that cv::bitwise_and(blurredImage, _mask, maskedImage); fails with
OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and type), nor 'array op scalar', nor 'scalar op array') in binary_op
Probably because _mask is a single channel image and blurredImage and maskedImage are 3-channel images.
How can I combine the images I got so that the currently white areas in image (2) are blurred using a transparent mask with "soft" edges?
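For reference, the channel-wise blend already used above against the white background can be reused with a blurred copy in place of bg to get the soft-edged combination. A minimal sketch, assuming srcImage and _mask are still the CV_32F Mats from the code above (the output file name is just illustrative):
// Sketch: blend the original and a float blurred copy using the soft mask.
cv::Mat blurredF;
cv::GaussianBlur(srcImage, blurredF, cv::Size(19, 19), 0, 0);   // stays CV_32FC3
std::vector<cv::Mat> ch_src(3), ch_blur(3);
cv::split(srcImage, ch_src);
cv::split(blurredF, ch_blur);
for (int c = 0; c < 3; ++c)
    ch_src[c] = ch_src[c].mul(_mask) + ch_blur[c].mul(1.0 - _mask); // _mask == 1 keeps the original
cv::Mat result;
cv::merge(ch_src, result);
result.convertTo(result, CV_8UC3, 255.0);
cv::imwrite("combined.jpg", result);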
Instead of the float conversion you can just use a linear combination of the byte channel values. See:
#include <opencv2/opencv.hpp>
int main(int argc, char* argv[])
{
cv::Mat srcImage = cv::imread("C:/StackOverflow/Input/transparentMaskInput.jpg");
// blur whole image
cv::Mat blurredImage;
//cv::bilateralFilter(srcImage, blurredImage, 10, 20, 5); // use EITHER bilateral OR Gaussian filter
cv::GaussianBlur(srcImage, blurredImage, cv::Size(19, 19), 0, 0);
// create mask
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
cv::Mat mask = cv::Mat::zeros(srcImage.size(), CV_8UC1);
// face
cv::circle(mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
cv::GaussianBlur(mask, mask, cv::Size(21, 21), 11.0);
// byte inversion:
cv::Mat invertedMask = 255 - mask; // instead of inversion you could just draw the "face" black on a white background!
cv::Mat outputImage = cv::Mat(srcImage.size(), srcImage.type());
// for each pixel, merge blurred and original image regarding the blur-mask
for (int y = 0; y < outputImage.rows; ++y)
for (int x = 0; x < outputImage.cols; ++x)
{
cv::Vec3b pixelOrig = srcImage.at<cv::Vec3b>(y, x);
cv::Vec3b pixelBlur = blurredImage.at<cv::Vec3b>(y, x);
float blurVal = invertedMask.at<unsigned char>(y, x)/255.0f; // value between 0 and 1: zero means 100% orig image, one means 100% blurry image
cv::Vec3b pixelOut = blurVal * pixelBlur + (1.0f - blurVal)* pixelOrig;
outputImage.at<cv::Vec3b>(y, x) = pixelOut;
}
cv::imshow("input", srcImage);
cv::imshow("blurred", blurredImage);
cv::imshow("mask", mask);
cv::imshow("inverted mask", invertedMask);
cv::imshow("output", outputImage);
cv::waitKey(0);
return 0;
}
using this input image:
computing this blurred image and mask:
resulting in this output, by computing (invertedMask/255) * blurred + (1 - invertedMask/255) * original (a linear combination):
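If your OpenCV build is 3.0 or newer, cv::blendLinear computes the same weighted combination in a single call (it divides by the per-pixel sum of the two weight maps, so weights that sum to 1 reproduce the formula above). A sketch, assuming srcImage, blurredImage and invertedMask are the Mats from the code above:
// Sketch: same linear combination via cv::blendLinear (OpenCV 3.0+).
// The weight maps must be CV_32FC1 and have the same size as the images.
cv::Mat wBlur, wOrig;
invertedMask.convertTo(wBlur, CV_32FC1, 1.0 / 255.0);   // weight of the blurred image
wOrig = 1.0 - wBlur;                                     // weight of the original image
cv::Mat outputImage2;
cv::blendLinear(blurredImage, srcImage, wBlur, wOrig, outputImage2);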
I define a function alphaBlend that blends two CV_8UC3 images with a CV_8UC1 mask in OpenCV:
//! 2018.01.16 13:54:39 CST
//! 2018.01.16 14:43:26 CST
void alphaBlend(Mat& img1, Mat& img2, Mat& mask, Mat& blended){
// Blend img1 and img2 (of CV_8UC3) with mask (CV_8UC1)
assert(img1.size() == img2.size() && img1.size() == mask.size());
blended = cv::Mat(img1.size(), img1.type());
for (int y = 0; y < blended.rows; ++y){
for (int x = 0; x < blended.cols; ++x){
float alpha = mask.at<unsigned char>(y, x)/255.0f;
blended.at<cv::Vec3b>(y,x) = alpha*img1.at<cv::Vec3b>(y,x) + (1-alpha)*img2.at<cv::Vec3b>(y,x);
}
}
}
Then it's easy to alpha blend the images: just call alphaBlend(...). Here is an example:
#include <opencv2/opencv.hpp>
using namespace cv;
//! 2018.01.16 13:54:39 CST
//! 2018.01.16 14:43:26 CST
void alphaBlend(Mat& img1, Mat& img2, Mat& mask, Mat& blended){
// Blend img1 and img2 (of CV_8UC3) with mask (CV_8UC1)
assert(img1.size() == img2.size() && img1.size() == mask.size());
blended = cv::Mat(img1.size(), img1.type());
for (int y = 0; y < blended.rows; ++y){
for (int x = 0; x < blended.cols; ++x){
float alpha = mask.at<unsigned char>(y, x)/255.0f;
blended.at<cv::Vec3b>(y,x) = alpha*img1.at<cv::Vec3b>(y,x) + (1-alpha)*img2.at<cv::Vec3b>(y,x);
}
}
}
Mat createMask(Size sz){
// create mask
cv::Mat mask = cv::Mat::zeros(sz, CV_8UC1);
// white and black
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
// face
cv::circle(mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
// Blur
cv::GaussianBlur(mask, mask, cv::Size(21, 21), 11.0);
return mask;
}
int main(){
cv::Mat img = cv::imread("img04.jpg");
// blur whole image
cv::Mat blured;
//cv::bilateralFilter(img, blured, 10, 20, 5); // use EITHER bilateral OR Gaussian filter
cv::GaussianBlur(img, blured, cv::Size(19, 19), 0, 0);
// Create the mask
Mat mask = createMask(img.size());
Mat mask_inv = 255 - mask;
// Alpha blend
Mat blended1, blended2;
alphaBlend(img, blured, mask, blended1);
alphaBlend(img, blured, mask_inv, blended2);
// Display
cv::imshow("source", img);
cv::imshow("blured", blured);
cv::imshow("mask", mask);
cv::imshow("mask_inv", mask_inv);
cv::imshow("blended1", blended1);
cv::imshow("blended2", blended2);
cv::waitKey();
return 0;
}
Source:
Blurred:
Mask 1:
AlphaBlend 1:
Mask 2:
AlphaBlend 2:
Some useful links:
Alpha Blending in OpenCV C++ : Combining 2 images with transparent mask in opencv
Alpha Blending in OpenCV Python:
Gradient mask blending in opencv python
Probably because _mask is a single channel image and blurredImage and maskedImage are 3-channel images.
Put this before calling the cv::bitwise_and:
cv::cvtColor(_mask, _mask, cv::COLOR_GRAY2BGR);
P.S. If you do not want to alter your mask because you want to use it somewhere else, just do it in a temporary variable:
cv::Mat _mask_temp;
cv::cvtColor(_mask,_mask_temp,cv::COLOR_GRAY2BGR);
cv::bitwise_and(blurredImage, _mask_temp, maskedImage);
_mask_temp.release(); // optional: release it if you no longer need it in memory
EDIT (another problem):
The mask is 32F while the image is 8U. So, you need this:
cv::cvtColor(_mask,_mask,cv::COLOR_GRAY2BGR);
_mask.convertTo(_mask, CV_8UC3, 255.0); // scale the 0..1 float values up to 0..255
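Pulling those pieces together, a hedged sketch of the full bitwise composite (variable names follow the question; srcImage8 is introduced here as the 8-bit copy of srcImage). Note that bitwise masking gives a hard cut-out, so for the soft edges the question asks about, the alpha-blend approaches above are still the better fit:
// Sketch: hard (binary) composite of original and blurred image via bitwise ops.
cv::Mat _mask8, _mask8_inv;
_mask.convertTo(_mask8, CV_8U, 255.0);                      // 0..1 float -> 0..255
cv::threshold(_mask8, _mask8, 127, 255, cv::THRESH_BINARY); // make it strictly binary
cv::cvtColor(_mask8, _mask8, cv::COLOR_GRAY2BGR);
cv::bitwise_not(_mask8, _mask8_inv);
cv::Mat srcImage8;
srcImage.convertTo(srcImage8, CV_8UC3, 255.0);              // back to 8-bit, like blurredImage
cv::Mat keepOriginal, keepBlurred, maskedImage;
cv::bitwise_and(srcImage8, _mask8, keepOriginal);           // original where the mask is white
cv::bitwise_and(blurredImage, _mask8_inv, keepBlurred);     // blurred where the mask is black
cv::add(keepOriginal, keepBlurred, maskedImage);
cv::imwrite("masked.jpg", maskedImage);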
When I convert a QImage to cv::Mat the Mat comes out blurred.
This is the code that converts it:
QPixmap pixmap(*ui->imgLabel->pixmap());
QImage image = pixmap.toImage();
image.convertToFormat(QImage::Format_RGB888);
Mat matImage = Mat(image.height(), image.width(), CV_8UC3, image.scanLine(0));
This is the original image zoomed:
This is the image created:
Grateful for any help.
I don't know Qt, but have you tried ready-made solutions from the web? For example, from this link I found another method of conversion:
Mat qimage2mat(const QImage& qimage) {
cv::Mat mat = cv::Mat(qimage.height(), qimage.width(), CV_8UC4, (uchar*)qimage.bits(), qimage.bytesPerLine());
cv::Mat mat2 = cv::Mat(mat.rows, mat.cols, CV_8UC3 );
int from_to[] = { 0,0, 1,1, 2,2 };
cv::mixChannels( &mat, 1, &mat2, 1, from_to, 3 );
return mat2;
}
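A small usage sketch with the question's widgets. Note that QImage::convertToFormat() returns a new image rather than converting in place, so the Format_RGB888 call in the question had no effect; qimage2mat above expects a 4-byte-per-pixel format such as Format_ARGB32, and the Mat it returns owns its own copy of the pixels:
// Sketch: convert the label's pixmap and hand it to OpenCV.
QPixmap pixmap(*ui->imgLabel->pixmap());
QImage image = pixmap.toImage().convertToFormat(QImage::Format_ARGB32);
cv::Mat matImage = qimage2mat(image);   // CV_8UC3, BGR order on little-endian machines
cv::imshow("converted", matImage);
cv::waitKey();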
I hope you can help me.
I'm using Qt and trying to do simple edge detection on an image, but my program crashes when I call
cv::GaussianBlur( src, src, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT );
or
cv::Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_DEFAULT );
Here is my code:
QImage *image1;
IplImage *cv_image1;
image1 = new QImage("./image.png"); // Format is ARGB32
cv_image1 = QImage2IplImage(image1);
cv::Mat src(cv_image1);
cv::imshow(window_name, src); // Work Well
cv::Mat src_gray;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
cv::GaussianBlur(src, src, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT); //Crash Here
cv::imshow( window_name, src);
I think it is a problem with the image format, but in another program with QImages in ARGB32 this code works well.
Thank you.
Try a proper QImage to cv::Mat conversion using these functions and you should be fine (I also included a conversion from cv::Mat to QImage):
cv::Mat cvmat_from_qimage(const QImage& qimage)
{
cv::Mat mat = cv::Mat(qimage.height(), qimage.width(), CV_8UC4, (uchar*)qimage.bits(), qimage.bytesPerLine());
cv::Mat mat2 = cv::Mat(mat.rows, mat.cols, CV_8UC3 );
int from_to[] = { 0,0, 1,1, 2,2 };
cv::mixChannels( &mat, 1, &mat2, 1, from_to, 3 );
return mat2;
}
QImage qimage_from_cvmat(const cv::Mat& mat)
{
cv::Mat rgb;
cvtColor(mat, rgb, CV_BGR2RGB);
return QImage((const unsigned char*)(rgb.data), rgb.cols, rgb.rows, QImage::Format_RGB888);
}
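With these helpers, a hedged sketch of how the edge-detection pipeline from the question could look without going through QImage2IplImage (file name, kernel size and Sobel parameters are taken from the question; convertScaleAbs is added only to make the CV_16S gradient displayable):
// Sketch: load with QImage, convert to cv::Mat, then blur and run Sobel.
QImage qimg = QImage("./image.png").convertToFormat(QImage::Format_ARGB32);
cv::Mat src = cvmat_from_qimage(qimg);   // CV_8UC3, BGR
cv::Mat src_gray, grad_x, abs_grad_x;
int scale = 1, delta = 0, ddepth = CV_16S;
cv::GaussianBlur(src, src, cv::Size(3, 3), 0, 0, cv::BORDER_DEFAULT);
cv::cvtColor(src, src_gray, CV_BGR2GRAY);
cv::Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_DEFAULT);
cv::convertScaleAbs(grad_x, abs_grad_x); // back to 8-bit for display
cv::imshow("gradient x", abs_grad_x);
cv::waitKey();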
I found a solution.
It's weird, but when I do:
cvtColor(src, src_gray, CV_RGB2GRAY );
cv::Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_CONSTANT);
without the cv::GaussianBlur, it works well. I just changed the last parameter to cv::BORDER_CONSTANT.