I am selecting the color from the first frame using a mouse handler.
I am trying to replace the selected color with the background frame.
This works fine for red, but it does not work for any other color such as green or blue. I am using the following chart for color selection:
Click here!
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// structure to be used in mouseHandler function
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 1) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv) {
// Take video frame from camera to select color of material
VideoCapture capt(0);
Mat frames;
capt >> frames;
Mat hsvimg;
// Converting image from BGR to HSV
cvtColor(frames, hsvimg, COLOR_BGR2HSV);
// Set data for mouse event
Mat im_temp = frames.clone();
userdata data;
data.im = im_temp;
cout << "Select the point on image for the color you want to create cloak and than press 'Enter'" << endl;
// Show image and wait for a click.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
//defining the HSV values of the point selected
Vec3b HSV_Color = hsvimg.at<Vec3b>(data.points[0]);
int hue = HSV_Color.val[0];
int saturation = HSV_Color.val[1];
int value = HSV_Color.val[2];
// Create a VideoCapture object and open the input file for demonstrating the cloak working
// If the input is the web camera, pass 0 instead of the video file name
// In first frame only background should be there, i.e., no person present
VideoCapture cap(0);
// Check if camera opened successfully
if (!cap.isOpened()) {
cout << "Error opening video stream or file" << endl;
return -1;
}
Mat background;
for (int i = 0; i < 30; i++)
{
cap >> background;
}
//Laterally invert the image / flip the image.
flip(background, background, 1);
while (1)
{
Mat frame;
// Capture frame-by-frame
cap >> frame;
// Laterally invert the image / flip the image
flip(frame, frame, 1);
//Converting image from BGR to HSV color space.
Mat hsv;
cvtColor(frame, hsv, COLOR_BGR2HSV);
Mat mask1, mask2;
// Creating masks to detect the upper and lower red color.
// Otherwise mask1 and mask2 are same for other colors
// Making different conditions according to hue values
// Take help from this: https://stackoverflow.com/questions/10948589/choosing-the-correct-upper-and-lower-hsv-boundaries-for-color-detection-withcv
if (saturation > 100) {
if (hue <= 10 || hue>165) {
inRange(hsv, Scalar(0, 120, 20), Scalar(10, 255, 255), mask1);
inRange(hsv, Scalar(170, 120, 20), Scalar(180, 255, 255), mask2);
}
else if (hue > 10 && hue <= 25) {
inRange(hsv, Scalar(10, 120, 20), Scalar(25, 255, 255), mask1);
inRange(hsv, Scalar(10, 120, 20), Scalar(25, 255, 255), mask2);
}
else if (hue > 25 && hue <= 38) {
inRange(hsv, Scalar(25, 120, 20), Scalar(35, 255, 255), mask1);
inRange(hsv, Scalar(25, 120, 20), Scalar(35, 255, 255), mask2);
}
else if (hue > 38 && hue <= 71) {
inRange(hsv, Scalar(38, 100, 20), Scalar(71, 255, 255), mask1);
inRange(hsv, Scalar(38, 100, 20), Scalar(71, 255, 255), mask2);
}
else if (hue > 71 && hue <= 100) {
inRange(hsv, Scalar(71, 120, 20), Scalar(95, 255, 255), mask1);
inRange(hsv, Scalar(71, 120, 20), Scalar(95, 255, 255), mask2);
}
else if (hue > 100 && hue <= 140) {
inRange(hsv, Scalar(100, 150, 20), Scalar(130, 255, 255), mask1);
inRange(hsv, Scalar(100, 150, 20), Scalar(130, 255, 255), mask2);
}
else if (hue > 140 && hue <= 165) {
inRange(hsv, Scalar(140, 120, 20), Scalar(170, 255, 255), mask1);
inRange(hsv, Scalar(140, 120, 20), Scalar(170, 255, 255), mask2);
}
}
else {
cout << "Use colored material." << endl;
break;
}
// Generating the final mask
mask1 = mask1 + mask2;
Mat kernel = Mat::ones(3, 3, CV_32F);
morphologyEx(mask1, mask1, cv::MORPH_OPEN, kernel);
morphologyEx(mask1, mask1, cv::MORPH_DILATE, kernel);
// creating an inverted mask to segment out the cloth from the frame
bitwise_not(mask1, mask2);
Mat res1, res2, final_output;
// Segmenting the cloth out of the frame using bitwise and with the inverted mask
bitwise_and(frame, frame, res1, mask2);
// creating image showing static background frame pixels only for the masked region
bitwise_and(background, background, res2, mask1);
// Generating the final augmented output.
addWeighted(res1, 1, res2, 1, 0, final_output);
imshow("magic", final_output);
waitKey(1);
// Press ESC on keyboard to exit
char c = (char)waitKey(25);
if (c == 27)
break;
// Also release all the Mat objects created in the loop to avoid memory leakage.
frame.release(), hsv.release(), mask1.release(), mask2.release(), res1.release(), res2.release(), final_output.release();
}
// When everything done, release the video capture object
cap.release();
// Closes all the frames
cv::destroyAllWindows();
return 0;
}
It should do the same for all major colors as it is doing for red.
Have you checked the output at these two points when you select another color?
Vec3b HSV_Color = hsvimg.at<Vec3b>(data.points[0]);
int hue = HSV_Color.val[0];
int saturation = HSV_Color.val[1];
int value = HSV_Color.val[2];
and this one
cvtColor(frame, hsv, COLOR_BGR2HSV);
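For example, a minimal debugging sketch (reusing the question's hsvimg, data and mask1 variables; not part of the original code) would print the sampled values right after the click and count how many pixels the mask actually selects:
// Print the HSV triple of the clicked pixel to verify it falls inside
// one of the inRange() intervals used later.
Vec3b picked = hsvimg.at<Vec3b>(data.points[0]);
cout << "Clicked HSV -> H: " << (int)picked[0]
     << "  S: " << (int)picked[1]
     << "  V: " << (int)picked[2] << endl;
// Inside the while loop, after the masks are built: an (almost) empty
// mask means the selected hue/saturation did not match any branch.
cout << "Non-zero mask pixels: " << countNonZero(mask1) << endl;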
I'm trying to write a Maya plugin that recreates a 2D drawing of bones in UV space in 3D space. I'm starting with a simple plane with this image:
What I need is to find the circles and create a hierarchy.
I tried Nuzhny's approach, but I'm getting horizontal lines like:
My code:
Mat image;
image = imread("c:/pjs/sk.jpg"); // Read the file
cv::Mat hsv_image;
cv::cvtColor(image, hsv_image, cv::COLOR_BGR2HSV);
cv::Mat lower_red_hue_range;
cv::Mat upper_red_hue_range;
cv::Mat white_hue_range;
//Separate the lines and circles
cv::inRange(hsv_image, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lower_red_hue_range);
cv::inRange(hsv_image, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upper_red_hue_range);
cv::inRange(hsv_image, cv::Scalar(0, 0, 20), cv::Scalar(0, 0, 255), white_hue_range);
cv::Mat red_hue_image;
cv::addWeighted(lower_red_hue_range, 1.0, upper_red_hue_range, 1.0, 0.0, red_hue_image);
cv::GaussianBlur(red_hue_image, red_hue_image, cv::Size(9, 9), 2, 2);
//Identify circles
std::vector<cv::Vec3f> circles;
cv::HoughCircles(red_hue_image, circles, HOUGH_GRADIENT, 1, red_hue_image.rows / 8, 100, 20, 0, 0);
if (circles.size() == 0) std::exit(-1);
for (size_t current_circle = 0; current_circle < circles.size(); ++current_circle) {
cv::Point center(std::round(circles[current_circle][0]), std::round(circles[current_circle][1]));
int radius = std::round(circles[current_circle][2]);
cv::circle(image, center, radius, cv::Scalar(0, 255, 0), 5);
}
//Get the contours
cv::threshold(white_hue_range, white_hue_range, 11, 255, cv::THRESH_BINARY);
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(20, 20));
cv::dilate(white_hue_range, white_hue_range, element);
cv::dilate(white_hue_range, white_hue_range, element);
cv::erode(white_hue_range, white_hue_range, element);
cv::erode(white_hue_range, white_hue_range, element);
element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(5, 5));
cv::dilate(white_hue_range, white_hue_range, element);
Mat gray;
gray = white_hue_range;
Canny(gray, gray, 40, 100, 7);
/// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
RNG rng(12345);
findContours(gray, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Draw contours
Mat drawing = Mat::zeros(gray.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
}
//Get the lines
vector<vector<Point2f> > lines;
vector<Point> approx;
for (unsigned int i = 0; i < contours.size(); i++)
{
if (contours[i].size() > 4) {
//cv::Rect box = cv::fitEllipse(contours[i]);
cv::RotatedRect box = cv::fitEllipseAMS(contours[i]);
cv::Point2f pts[4];
box.points(pts);
vector<cv::Point2f> line_pts;
line_pts.resize(2);
line_pts[0] = (pts[0] + pts[1]) / 2;
line_pts[1] = (pts[2] + pts[3]) / 2;
lines.push_back(line_pts);
}
}
for (int i = 0; i < lines.size(); i++)
{
line(image, lines[i].at(0), lines[i].at(1), 128, 4, LINE_8, 0);
}
imshow("Result window", image);
cvtColor to HSV.
inRange(redFrom, redTo) + findContours to find red circles.
inRange(whiteFrom, whiteTo) + findContours to find white lines.
Line contour to line:
cv::RotatedRect box = cv::fitEllipse(line_contours[i]);
cv::Point2f pts[4];
box.points(pts);
cv::Point2f line_pts[2];
line_pts[0] = (pts[0] + pts[3]) / 2;
line_pts[1] = (pts[1] + pts[2]) / 2;
Nested loops to find a nearest circle for each line point.
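A minimal sketch of that last step, assuming the circles vector from cv::HoughCircles and the lines vector of endpoint pairs built in the code above (the pairing logic itself is an assumption, not taken from the original answer; it additionally needs <limits>, <cmath> and <iostream>):
// For each line endpoint, find the index of the nearest detected circle;
// pairing the two endpoints of each line then gives the bone connections.
for (size_t i = 0; i < lines.size(); ++i) {
    for (int e = 0; e < 2; ++e) {
        int best = -1;
        float bestDist = std::numeric_limits<float>::max();
        for (size_t c = 0; c < circles.size(); ++c) {
            float dx = lines[i][e].x - circles[c][0];
            float dy = lines[i][e].y - circles[c][1];
            float d = std::sqrt(dx * dx + dy * dy);
            if (d < bestDist) { bestDist = d; best = (int)c; }
        }
        std::cout << "line " << i << " endpoint " << e
                  << " -> circle " << best << std::endl;
    }
}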
Sorry for my bad English. I have an image that shows three circles of different colors, one red, one green and one blue, and I can split this image into the three channels, but they appear white. In the code I display an image called "copy R", and I don't know how to turn this "copy R" into another image of any color I want, overlap it on the original image and change the red color. How can I do this?
This is my code. Sorry, this is the first time I ask a question and I don't know how to publish it properly.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#define w 400
using namespace cv;
/// Function headers
void MyFilledCircle(Mat img, Point center);
void MyFilledCircle1(Mat img, Point center);
void MyFilledCircle2(Mat img, Point center);
int main(void) {
//![create_images]
char window[] = "Original";
/// Create black empty images
Mat image = Mat::zeros(w, w, CV_8UC3);
/// 1.b. Creating circles
MyFilledCircle(image, Point(200, 200));
MyFilledCircle1(image, Point(150, 150));
MyFilledCircle2(image, Point(250, 250));
Mat channel[3];
split(image, channel);
//channel[0] = Mat::zeros(image.rows, image.cols, CV_8UC1);
merge(channel, 3, image);
Mat imageHSV;
Mat copy;
imshow(window, image);
//imshow("Color 1", imageHSV);
inRange(image, Scalar(0, 0, 255), Scalar(0, 0, 255), copy);
imshow("copy R", copy);
imshow("B", channel[0]);
imshow("G", channel[1]);
imshow("R", channel[2]);
//imshow("0", canal0);
//imwrite("dest.jpg", image);
waitKey(0);
return(0);
}
/// Function Declaration
//![myfilledcircle]
void MyFilledCircle1(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(0, 255, 0),
FILLED,
LINE_8);
}
void MyFilledCircle(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(0, 0, 255),
FILLED,
LINE_8);
}
void MyFilledCircle2(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(255, 0, 0),
FILLED,
LINE_8);
}
From the looks of it, copy is a binary mask, and you want to superimpose this mask on image so that only the non-zero pixels in the mask retain their original color.
If my assumption is correct, then using the subtract method, as shown below, should help you out:
Mat result;
cvtColor(copy,copy,CV_GRAY2BGR);//change copy to a 3 channel image
absdiff(image,image,result);//initialize mask as a black image of img.size()
subtract(copy,image,result);//result = copy - image: non-zero only where the mask is white
subtract(copy,result,result);//copy - result restores the original colors inside the mask
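As a side note (not part of the original answer), the same result can also be obtained with copyTo and the mask, which avoids the two subtract calls:
// Alternative: copy only the masked pixels of the original image onto a
// black canvas; here 'copy' is used as the mask produced by inRange,
// i.e. before the cvtColor(copy, copy, CV_GRAY2BGR) conversion above.
Mat result2 = Mat::zeros(image.size(), image.type());
image.copyTo(result2, copy);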
I'm trying to find the height and width values to recover the aspect ratio of the object, using the contour of an image with the code below, but without success, since the code is creating many rectangles all over the image, when my intention is to create a single rectangle around the object.
I'm trying to create this rectangle because I don't know if there is another way to get the height and width (or even the aspect ratio) other than this one.
RNG rng(12345); //Global variable used for drawing rectangles and circles for the contours of images.
/*Load the image*/
Mat img_bgr = imread("img.jpg", 1);
if (img_bgr.empty()){
cout << "No image..." << endl;
return -1;
}
/*Display the image*/
namedWindow("Original Image", WINDOW_NORMAL);
imshow("Original Image", img_bgr);
/*Conversion to HSV*/
Mat img_hsv;
cvtColor(img_bgr, img_hsv, CV_BGR2HSV);
/*Extracting colors - HSV*/
Mat green, yellow, brown;
//Yellow
inRange(img_hsv, Scalar(25, 0, 0), Scalar(36, 255, 255), yellow); //until 33 - consider "yellow" - from there up to 36 - consider for chlorosis
imwrite("c:\\test\\results\\yellow.jpg", yellow);
//Green
inRange(img_hsv, Scalar(37, 0, 0), Scalar(70, 255, 255), green); //Consider lower as 37
imwrite("c:\\test\\results\\green.jpg", green);
//Brown
inRange(img_hsv, Scalar(10, 0, 0), Scalar(20, 255, 255), brown);
imwrite("c:\\test\\results\\brown.jpg", brown);
namedWindow("Yellow", WINDOW_NORMAL);
imshow("Yellow", yellow);
namedWindow("Green", WINDOW_NORMAL);
imshow("Green", green);
namedWindow("Brown", WINDOW_NORMAL);
imshow("Brown", brown);
/*Finding Contours of the Thresholded images*/
vector<std::vector<Point>>green_cnt;
vector<std::vector<Point>>yellow_cnt;
vector<std::vector<Point>>brown_cnt;
//Green Contour
findContours(green, green_cnt, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
//Draw the Contours - Green
Mat green_cnt_draw(green.size(), CV_8UC3, Scalar(0, 0, 0));
Scalar green_cnt_colors[3];
green_cnt_colors[0] = Scalar(0, 255, 0);
green_cnt_colors[1] = Scalar(0, 255, 0);
green_cnt_colors[2] = Scalar(0, 255, 0);
for (size_t idx_green = 0; idx_green < green_cnt.size(); idx_green++){
drawContours(green_cnt_draw, green_cnt, idx_green, green_cnt_colors[idx_green % 3]);
}
namedWindow("Green - Contours", CV_WINDOW_NORMAL);
imshow("Green - Contours", green_cnt_draw);
//Yellow Contour
findContours(yellow, yellow_cnt, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
//Draw the Contours - Yellow
Mat yellow_cnt_draw(yellow.size(), CV_8UC3, Scalar(0, 0, 0));
Scalar yellow_cnt_colors[3];
yellow_cnt_colors[0] = Scalar(0, 255, 255);
yellow_cnt_colors[1] = Scalar(0, 255, 255);
yellow_cnt_colors[2] = Scalar(0, 255, 255);
for (size_t idx_yellow = 0; idx_yellow < yellow_cnt.size(); idx_yellow++){
drawContours(yellow_cnt_draw, yellow_cnt, idx_yellow, yellow_cnt_colors[idx_yellow % 3]);
}
namedWindow("Yellow - Contours", CV_WINDOW_NORMAL);
imshow("Yellow - Contours", yellow_cnt_draw);
//Brown Contour
findContours(brown, brown_cnt, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
//Draw the Contours - Brown
Mat brown_cnt_draw(brown.size(), CV_8UC3, Scalar(0, 0, 0));
Scalar brown_cnt_colors[3];
brown_cnt_colors[0] = Scalar(42, 42, 165);
brown_cnt_colors[1] = Scalar(42, 42, 165);
brown_cnt_colors[2] = Scalar(42, 42, 165);
for (size_t idx_brown = 0; idx_brown < brown_cnt.size(); idx_brown++){
drawContours(brown_cnt_draw, brown_cnt, idx_brown, brown_cnt_colors[idx_brown % 3]);
}
namedWindow("Brown - Contours", CV_WINDOW_NORMAL);
imshow("Brown - Contours", brown_cnt_draw);
/*Creating rectangles around the contours*/
//Green
vector<vector<Point>>green_contours_poly(green_cnt.size());
vector<Rect>green_boundRect(green_cnt.size());
vector<Point2f>green_center(green_cnt.size());
vector<float>green_radius(green_cnt.size());
for (int i = 0; i < green_cnt.size(); i++){
approxPolyDP(Mat(green_cnt[i]), green_contours_poly[i], 3, true);
green_boundRect[i] = boundingRect(Mat(green_cnt[i]));
minEnclosingCircle((Mat)green_contours_poly[i], green_center[i], green_radius[i]);
}
//Green - Draw polygonal contour AND bounding rects + circles
Mat green_drawRecAndCirc = Mat::zeros(green.size(), CV_8UC3);
for (int i = 0; i < green_cnt.size(); i++){
Scalar green_drawRecAndCircColor = Scalar(rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255));
rectangle(green_drawRecAndCirc, green_boundRect[i].tl(), green_boundRect[i].br(), green_drawRecAndCircColor, 2, 8, 0);
//circle(green_drawRecAndCirc, green_center[i], (int)green_radius[i], green_drawRecAndCircColor, 2, 8, 0);
}
imwrite("c:\\testeimagem\\theeye\\resultados\\green_rectangle_and_circle.jpg", green_drawRecAndCirc);
namedWindow("Green - Rectangle and Circle", CV_WINDOW_NORMAL);
imshow("Green - Rectangle and Circle", green_drawRecAndCirc);
/*Creating rectangles around the contours*/
//Yellow
vector<vector<Point>>yellow_contours_poly(yellow_cnt.size());
vector<Rect>yellow_boundRect(yellow_cnt.size());
vector<Point2f>yellow_center(yellow_cnt.size());
vector<float>yellow_radius(yellow_cnt.size());
for (int i = 0; i < yellow_cnt.size(); i++){
approxPolyDP(Mat(yellow_cnt[i]), yellow_contours_poly[i], 3, true);
yellow_boundRect[i] = boundingRect(Mat(yellow_cnt[i]));
minEnclosingCircle((Mat)yellow_contours_poly[i], yellow_center[i], yellow_radius[i]);
}
//Yellow - Draw polygonal contour AND bounding rects + circles
Mat yellow_drawRecAndCirc = Mat::zeros(yellow.size(), CV_8UC3);
for (int i = 0; i < yellow_cnt.size(); i++){
Scalar yellow_drawRecAndCircColor = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
rectangle(yellow_drawRecAndCirc, yellow_boundRect[i].tl(), yellow_boundRect[i].br(), yellow_drawRecAndCircColor, 2, 8, 0);
//circle(green_drawRecAndCirc, green_center[i], (int)green_radius[i], green_drawRecAndCircColor, 2, 8, 0);
}
waitKey(0);
destroyAllWindows();
return 0;
The original image is here:
And the example of the final result is here:
I tried the examples described in the following link (OpenCV Bounding Box) but I couldn't make it work either.
Edit 2:
Since I have to find some characteristics of the leaf that I cannot get from a rectangle (such as aspect ratio, mean diameter, radius ratio, roundness and mean Feret), I had to change the approach from fitting a rectangle to fitting an ellipse. The problem is that the ellipse is being drawn inside the leaf instead of around its contour.
Here is my code:
/*Load the image*/
Mat img_bgr = imread("image path", 1);
if (img_bgr.empty()){
cout << "No image found..." << endl;
return -1;
}
/*Conversion to HSV*/
Mat img_hsv;
cvtColor(img_bgr, img_hsv, CV_BGR2HSV);
/*Extracting colors - HSV*/
Mat yellow, green, brown;
//Yellow
inRange(img_hsv, Scalar(25, 80, 80), Scalar(36, 255, 255), yellow);
//Green
inRange(img_hsv, Scalar(37, 80, 80), Scalar(70, 255, 255), green);
//Brown
inRange(img_hsv, Scalar(10, 80, 80), Scalar(30, 200, 200), brown);
// logical OR mask
Mat1b mask = yellow | green | brown;
// Find non zero pixels
vector<Point> pts;
findNonZero(mask, pts);
// Compute ellipse
RotatedRect elipse = fitEllipse(pts);
//ELLIPSE - Heigth, Width and Center of Mass
cout << "ELLIPSE:" << endl;
cout << "\nHeight and Width: " << elipse.size; //Height and Width
cout << "\nCenter of Mass: " << elipse.center << endl; //Center of mass (probably given in X and Y coordinates)
// Show Ellipse
ellipse(img_bgr, elipse, Scalar(0, 0, 255), 3);
namedWindow("Ellipse", CV_WINDOW_NORMAL);
imshow("Ellipse", img_bgr);
waitKey(0);
destroyAllWindows();
return 0;
The result is shown below:
I can't understand what I'm doing wrong, since I only changed the code that the user Miki provided, which works perfectly.
Since your image is quite simple (you have a flat background), you can simplify the task of finding the leaf a lot. However, here I still use your approach based on thresholding the HSV values, which is likely to be more robust in general.
To find the width and height of the leaf, you basically need to find its bounding box. You don't need to find all contours of your color masks, nor to merge all bounding boxes. Instead, you can:
1) compute the masks for the yellow, green and brown colors (I slightly modified the ranges to more meaningful values)
Yellow:
Green:
Brown:
2) OR these masks together
3) find all non zero pixels
4) compute the bounding box
Code:
#include <opencv2/opencv.hpp>
#include <vector>
#include <string>
using namespace std;
using namespace cv;
int main()
{
// Load the image
Mat3b img_bgr = imread("path_to_image");
if (img_bgr.empty()){
cout << "No image..." << endl;
return -1;
}
// Convert to hsv
Mat3b img_hsv;
cvtColor(img_bgr, img_hsv, COLOR_BGR2HSV);
Mat1b yellow, green, brown;
//Yellow
inRange(img_hsv, Scalar(25, 80, 80), Scalar(36, 255, 255), yellow);
//Green
inRange(img_hsv, Scalar(37, 80, 80), Scalar(70, 255, 255), green);
//Brown
inRange(img_hsv, Scalar(10, 80, 80), Scalar(30, 200, 200), brown);
// logical OR mask
Mat1b mask = yellow | green | brown;
// Find non zero pixels
vector<Point> pts;
findNonZero(mask, pts);
// Compute bounding box
Rect box = boundingRect(pts);
cout << "Width: " << box.width;
cout << "Height: " << box.height << endl;
// Show box
rectangle(img_bgr, box, Scalar(0,0,255), 3);
imshow("Box", img_bgr);
return 0;
}
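If what you ultimately need is the aspect ratio, it is just the ratio of the two values printed above; a minimal addition (not in the original code), placed right after the two cout lines:
// Aspect ratio of the leaf's bounding box (width over height).
double aspect_ratio = static_cast<double>(box.width) / box.height;
cout << "Aspect ratio: " << aspect_ratio << endl;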
What I'm basically trying to do is blur an image and combine it back with the original, so that only certain areas in the original image are blurred (the face should be blurred).
My general idea was to mask the parts of the original I want to have blurred, then blur the original as a copy and "merge" the two together again.
To a certain extent this also worked.
My images:
(1) Original
(2) Original with parts that should be blurred
(3) Blurred
My C++ code that creates these images:
int main(void) {
cv::Mat srcImage = cv::imread(path);
srcImage.convertTo(srcImage, CV_32FC3, 1.0/255.0);
Mat _mask;
Mat img_gray;
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
cv::cvtColor(srcImage, img_gray, cv::COLOR_BGR2GRAY);
img_gray.convertTo(_mask, CV_32FC1);
// face
cv::circle(_mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(_mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(_mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(_mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
cv::threshold(1.0-_mask, _mask, 0.9, 1.0, cv::THRESH_BINARY_INV);
cv::GaussianBlur(_mask,_mask,Size(21,21),11.0);
cv::Mat res;
cv::Mat bg = Mat(srcImage.size(), CV_32FC3);
bg = cv::Scalar(1.0, 1.0 ,1.0);
vector<Mat> ch_img(3);
vector<Mat> ch_bg(3);
cv::split(srcImage, ch_img);
cv::split(bg, ch_bg);
ch_img[0] = ch_img[0].mul(_mask) + ch_bg[0].mul(1.0 - _mask);
ch_img[1] = ch_img[1].mul(_mask) + ch_bg[1].mul(1.0 - _mask);
ch_img[2] = ch_img[2].mul(_mask) + ch_bg[2].mul(1.0 - _mask);
cv::merge(ch_img, res);
cv::merge(ch_bg, bg);
// original but with white mask
res.convertTo(res, CV_8UC3, 255.0);
imwrite("original_with_mask.jpg", res);
// blur original image
cv::Mat blurredImage;
bilateralFilter(srcImage, blurredImage, 10, 20, 5);
GaussianBlur(srcImage, blurredImage, Size(19, 19), 0, 0);
blurredImage.convertTo(blurredImage, CV_8UC3, 255.0);
imwrite("blurred.jpg", blurredImage);
cv::Mat maskedImage;
maskedImage = Mat(srcImage.size(), CV_32FC3);
// now combine blurred image and original using mask
// this fails
cv::bitwise_and(blurredImage, _mask, maskedImage);
cv::imwrite("masked.jpg", maskedImage);
}
My problem is that cv::bitwise_and(blurredImage, _mask, maskedImage); fails with
OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and type), nor 'array op scalar', nor 'scalar op array') in binary_op
Probably because _mask is a single channel image and blurredImage and maskedImage are 3-channel images.
How can I combine the images I got so that the currently white areas in image (2) are blurred using a transparent mask with "soft" edges?
Instead of converting to float, you can just use a linear combination of the byte channel values. See:
int main(int argc, char* argv[])
{
cv::Mat srcImage = cv::imread("C:/StackOverflow/Input/transparentMaskInput.jpg");
// blur whole image
cv::Mat blurredImage;
//cv::bilateralFilter(srcImage, blurredImage, 10, 20, 5); // use EITHER bilateral OR Gaussian filter
cv::GaussianBlur(srcImage, blurredImage, cv::Size(19, 19), 0, 0);
// create mask
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
cv::Mat mask = cv::Mat::zeros(srcImage.size(), CV_8UC1);
// face
cv::circle(mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
cv::GaussianBlur(mask, mask, cv::Size(21, 21), 11.0);
// byte inversion:
cv::Mat invertedMask = 255 - mask; // instead of inversion you could just draw the "face" black on a white background!
cv::Mat outputImage = cv::Mat(srcImage.size(), srcImage.type());
// for each pixel, merge blurred and original image regarding the blur-mask
for (int y = 0; y < outputImage.rows; ++y)
for (int x = 0; x < outputImage.cols; ++x)
{
cv::Vec3b pixelOrig = srcImage.at<cv::Vec3b>(y, x);
cv::Vec3b pixelBlur = blurredImage.at<cv::Vec3b>(y, x);
float blurVal = invertedMask.at<unsigned char>(y, x)/255.0f; // value between 0 and 1: zero means 100% orig image, one means 100% blurry image
cv::Vec3b pixelOut = blurVal * pixelBlur + (1.0f - blurVal)* pixelOrig;
outputImage.at<cv::Vec3b>(y, x) = pixelOut;
}
cv::imshow("input", srcImage);
cv::imshow("blurred", blurredImage);
cv::imshow("mask", mask);
cv::imshow("inverted mask", invertedMask);
cv::imshow("output", outputImage);
return 0;
}
using this input image:
computing this blurred and mask:
resulting in this output, by computing (invertedMask/255) * blurred + (1 - invertedMask/255) * original (a linear combination):
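As a side note, the same linear combination can be computed without the explicit pixel loop. A minimal sketch, assuming the same srcImage, blurredImage and invertedMask as above and an OpenCV build that provides cv::blendLinear:
// Vectorized version of the per-pixel blend above:
// w1 weights the blurred image, w2 the original image.
cv::Mat w1, w2;
invertedMask.convertTo(w1, CV_32FC1, 1.0 / 255.0); // 1.0 = fully blurred
w2 = 1.0 - w1;                                      // 1.0 = fully original
cv::Mat blendedOutput;
cv::blendLinear(blurredImage, srcImage, w1, w2, blendedOutput);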
I define a function to do alphaBlend for two images of CV_8UC3 with a mask of CV_8UC1 in OpenCV:
//! 2018.01.16 13:54:39 CST
//! 2018.01.16 14:43:26 CST
void alphaBlend(Mat& img1, Mat&img2, Mat& mask, Mat& blended){
// Blend img1 and img2 (of CV_8UC3) with mask (CV_8UC1)
assert(img1.size() == img2.size() && img1.size() == mask.size());
blended = cv::Mat(img1.size(), img1.type());
for (int y = 0; y < blended.rows; ++y){
for (int x = 0; x < blended.cols; ++x){
float alpha = mask.at<unsigned char>(y, x)/255.0f;
blended.at<cv::Vec3b>(y,x) = alpha*img1.at<cv::Vec3b>(y,x) + (1-alpha)*img2.at<cv::Vec3b>(y,x);
}
}
}
Then it's easy to do alpha blending on the images: just call alphaBlend(...). Here is an example:
#include <opencv2/opencv.hpp>
using namespace cv;
//! 2018.01.16 13:54:39 CST
//! 2018.01.16 14:43:26 CST
void alphaBlend(Mat& img1, Mat&img2, Mat& mask, Mat& blended){
// Blend img1 and img2 (of CV_8UC3) with mask (CV_8UC1)
assert(img1.size() == img2.size() && img1.size() == mask.size());
blended = cv::Mat(img1.size(), img1.type());
for (int y = 0; y < blended.rows; ++y){
for (int x = 0; x < blended.cols; ++x){
float alpha = mask.at<unsigned char>(y, x)/255.0f;
blended.at<cv::Vec3b>(y,x) = alpha*img1.at<cv::Vec3b>(y,x) + (1-alpha)*img2.at<cv::Vec3b>(y,x);
}
}
}
Mat createMask(Size sz){
// create mask
cv::Mat mask = cv::Mat::zeros(sz, CV_8UC1);
// white and black
cv::Scalar white = cv::Scalar(255, 255, 255);
cv::Scalar black = cv::Scalar(0, 0, 0);
// face
cv::circle(mask, cv::Point(430, 350), 200, black, -1, 8, 0);
// eyes
cv::circle(mask, cv::Point(502, 260), 27, white, -1, 8, 0);
cv::circle(mask, cv::Point(390, 260), 27, white, -1, 8, 0);
// mouth
cv::ellipse(mask, cv::Point(440, 390), cv::Point(60, 25), 0, 0, 360, white, -1, 8, 0);
// Blur
cv::GaussianBlur(mask, mask, cv::Size(21, 21), 11.0);
return mask;
}
int main(){
cv::Mat img = cv::imread("img04.jpg");
// blur whole image
cv::Mat blured;
//cv::bilateralFilter(img, blured, 10, 20, 5); // use EITHER bilateral OR Gaussian filter
cv::GaussianBlur(img, blured, cv::Size(19, 19), 0, 0);
// Create the mask
Mat mask = createMask(img.size());
Mat mask_inv = 255 - mask;
// Alpha blend
Mat blended1, blended2;
alphaBlend(img, blured, mask, blended1);
alphaBlend(img, blured, mask_inv, blended2);
// Display
cv::imshow("source", img);
cv::imshow("blured", blured);
cv::imshow("mask", mask);
cv::imshow("mask_inv", mask_inv);
cv::imshow("blended1", blended1);
cv::imshow("blended2", blended2);
cv::waitKey();
return 0;
}
Source:
Blurred:
Mask1:
AlphaBlend 1:
Mask 2:
AlphaBlend 2:
Some useful links:
Alpha Blending in OpenCV C++ : Combining 2 images with transparent mask in opencv
Alpha Blending in OpenCV Python:
Gradient mask blending in opencv python
Probably because _mask is a single channel image and blurredImage and
maskedImage are 3-channel images.
Put this before calling the cv::bitwise_and:
cv::cvtColor(_mask, _mask, cv::COLOR_GRAY2BGR); // make the mask a 3-channel image
P.S. If you do not want to alter your mask because you want to use it in another place, just do it in a temporary variable:
cv::Mat _mask_temp;
cv::cvtColor(_mask,_mask_temp,cv::COLOR_GRAY2BGR);
cv::bitwise_and(blurredImage, _mask_temp, maskedImage);
_mask_temp.release(); // optional: release it if you no longer need it in memory
EDIT (another problem):
The mask is 32F while the image is 8U. So, you need this:
cv::cvtColor(_mask,_mask,cv::COLOR_GRAY2BGR);
_mask.convertTo(_mask, CV_8UC3, 255.0); // scale as well, since the question's mask is 32F in the [0, 1] range