I'm really new to C++, so my apologies for such a question. I've been trying these out, but they don't seem to work.
(I'm running the template matching function from OpenCV: https://docs.opencv.org/3.4/de/da9/tutorial_template_matching.html)
Edit: Here is the code for the image, template, and mask I used!
cv::Mat image = cv::Mat(height, width, CV_16UC1, image_data); // image_data: raw 16-bit (short) pixel buffer
cv::Mat temp;
image.convertTo(temp, CV_32F); // convert image to 32-bit float
cv::Mat image_template = cv::Mat(t_height, t_width, CV_32F, t_image); // template (cv::Mat takes rows, then cols)
cv::Mat mask_template = cv::Mat(t_height, t_width, CV_32F, m_image); // mask
cv::Mat img_display, result;
temp.copyTo(img_display); // image to display
int result_cols = temp.cols - image_template.cols + 1;
int result_rows = temp.rows - image_template.rows + 1;
result.create(result_rows, result_cols, CV_32FC1);
// all the other code
matchTemplate(temp, image_template, result, cv::TM_SQDIFF, mask_template); // method 0 == TM_SQDIFF
normalize( result, result, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());
// localize the minimum and maximum values in the result matrix
double minVal;
double maxVal;
cv::Point minLoc;
cv::Point maxLoc;
cv::Point matchLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
// for match_method TM_SQDIFF we take lowest values
matchLoc = minLoc;
// display source image and result matrix , draw rectangle around highest possible matching area
cv::rectangle( img_display, matchLoc, cv::Point( matchLoc.x + image_template.cols, matchLoc.y + image_template.rows), cv::Scalar::all(255), 2, 8, 0);
cv::rectangle( result, matchLoc, cv::Point(matchLoc.x + image_template.cols, matchLoc.y + image_template.rows), cv::Scalar::all(255), 2, 8, 0);
This is the code given in the tutorial:
cv::rectangle( img_display, matchLoc, cv::Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), cv::Scalar::all(0), 2, 8, 0 );
I tried to change it with the following snippets, but it doesn't seem to work.
cv::rectangle( img_display, matchLoc, cv::Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), cv::Scalar(0,255,0) , 2, 8, 0 );
This doesn't work either
rectangle(ref, maxloc, Point(maxloc.x + tpl.cols, maxloc.y + tpl.rows), CV_RGB(0,255,0), 2);
Do let me know where I am wrong!
First of all, you are trying to scale your pixels from 0 to 255, but you can't do that, because your image is a float-type image (32FC1); the pixel values of a float-type image can only be scaled from 0.0 to 1.0.
You need to convert your image to 8U to be able to colorize it easily. But this way also has several problems, as mentioned here. OpenCV's matchTemplate function always returns its result in 32FC1 format, so it is difficult to draw anything colorized on this type of image.
On your source image you can draw rectangles in your desired color, but not on a float-type image. You can also check this link.
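For example, a minimal sketch of that conversion (assuming result is the CV_32FC1 output of matchTemplate, and matchLoc / image_template are the variables from the question): normalize the float result to 0..255, convert it to 8U, make it 3-channel, and then a green rectangle actually shows up.
// Sketch: make the 32FC1 result drawable in color
cv::Mat result_8u, result_bgr;
cv::normalize(result, result, 0, 255, cv::NORM_MINMAX);   // scale floats to 0..255
result.convertTo(result_8u, CV_8U);                       // 32FC1 -> 8UC1
cv::cvtColor(result_8u, result_bgr, cv::COLOR_GRAY2BGR);  // 1 channel -> 3 channels
cv::rectangle(result_bgr, matchLoc,
              cv::Point(matchLoc.x + image_template.cols, matchLoc.y + image_template.rows),
              cv::Scalar(0, 255, 0), 2);                  // green, in BGR order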
OpenCV's imgproc headers already provide the CV_RGB(r, g, b) macro; it does not switch OpenCV to an RGB color space, it simply expands to a cv::Scalar with the channels reordered into OpenCV's default BGR layout, so you can specify colors in RGB order.
With that, draw your green rectangle this way:
rectangle(frame, Point(startX, startY), Point(endX, endY), CV_RGB(0, 255, 0), 2);
Related
I'm getting an image from classification, and its mask, using Mask R-CNN.
Now I'm getting the following image.
I would like to get the area around the contour as an image without the background (one possible approach is sketched after the code below).
//Display the label at the top of the bounding box
int baseLine;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
box.y = max(box.y, labelSize.height);
//rectangle(frame, Point(box.x, box.y - round(1.5 * labelSize.height)), Point(box.x + round(1.5 * labelSize.width), box.y + baseLine), Scalar(255, 255, 255), FILLED);
//putText(frame, label, Point(box.x, box.y), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);
Scalar color = colors[classId % colors.size()];
// Resize the mask, threshold, color and apply it on the image
resize(objectMask, objectMask, Size(box.width, box.height));
Mat mask = (objectMask > maskThreshold);
Mat coloredRoi = (0.3 * color + 0.7 * frame(box));
coloredRoi.convertTo(coloredRoi, CV_8UC3);
// Draw the contours on the image
vector<Mat> contours;
Mat hierarchy;
mask.convertTo(mask, CV_8U);
findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
coloredRoi.copyTo(frame(box), mask);
imshow("img", coloredRoi);
cv::waitKey(0);
This is the image showing just the mask:
Source image:
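One possible way to get just the masked area without the background, sketched under the assumption that frame, box, and the thresholded 8-bit mask from the snippet above are available: copy the ROI through the mask onto a black canvas.
// Sketch: keep only the masked pixels of the ROI; everything else stays black
cv::Mat roi = frame(box);                                   // region of the source image
cv::Mat cutout(roi.size(), roi.type(), cv::Scalar::all(0)); // black background
roi.copyTo(cutout, mask);                                   // copy only where mask != 0
cv::imshow("cutout", cutout);
cv::waitKey(0);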
Here are my horizontal gradient results. The left one is the OpenCV result and the other one is the Matlab result.
I am trying to compute horizontal and vertical gradients with H = [1, -1] and V = [1; -1].
Mat H_gradient,G_Filter1,kernel,V_gradient;
Mat kernelH(1, 2, CV_32F);
kernelH.at<float>(0,0) = 1.0f;
kernelH.at<float>(0,1) = -1.0f;
Mat kernelV(2, 1, CV_32F);
kernelV.at<float>(0,0) = 1.0f;
kernelV.at<float>(1,0) = -1.0f;
cvtColor( image, image, CV_RGB2GRAY );
filter2D( image, H_gradient, -1 ,kernelH , Point( -1, -1 ), 0, BORDER_DEFAULT );
filter2D( image, V_gradient, -1 ,kernelV , Point( -1, -1 ), 0, BORDER_DEFAULT );
But it still doesn't match my Matlab results, and I don't know why.
My Matlab code for the gradients:
image=double(image);
% horizontal and vertical gradient
H=[1 -1];
V=[1;-1];
H_Gradient=conv2(image,H,'same');
V_Gradient=conv2(image,V,'same');
Try
cvtColor( image, image, CV_BGR2GRAY );
instead of
cvtColor( image, image, CV_RGB2GRAY );
If you are using the default imread parameters, OpenCV uses the BGR color format, not RGB, by default!
Also do the same as you did in Matlab and first convert your image to double:
image.convertTo(image, CV_32F);
Now I get the same result in OpenCV and Matlab.
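Putting both suggestions together, a minimal sketch (assuming image was loaded with cv::imread, so it is in BGR order) could look like this. Note that filter2D computes correlation rather than convolution, so for a sign-exact match with conv2 the two-element kernels may additionally need to be flipped.
// Sketch: grayscale + float conversion before filtering, as suggested above
cv::Mat gray, gray32f, H_gradient, V_gradient;
cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);   // imread loads BGR, not RGB
gray.convertTo(gray32f, CV_32F);                 // mirrors Matlab's double(image)
cv::Mat kernelH = (cv::Mat_<float>(1, 2) << 1.0f, -1.0f);
cv::Mat kernelV = (cv::Mat_<float>(2, 1) << 1.0f, -1.0f);
cv::filter2D(gray32f, H_gradient, -1, kernelH, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
cv::filter2D(gray32f, V_gradient, -1, kernelV, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);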
I must find the orientation of the gradient in an image. I already obtain Gx, Gy, and the total gradient.
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
Sobel( img, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_x, abs_grad_x ); // Gradient in X
/// Gradient Y
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
Sobel( img, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_y, abs_grad_y ); // Gradient in Y
/// Total Gradient (approximate)
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad ); // Gradient magnitude
Now I must find the orientation of the gradient, but I can't find any code to get it. I know the theory, but I don't know how to put it into practice.
Does anyone know how I can get the orientation of the gradient?
Thanks for your time
EDIT: I tried to use this:
Mat modulo;
Mat orientacion;
cartToPolar(abs_grad_x,abs_grad_y,modulo,orientacion);
But it gives me an error:
OpenCV Error: Assertion failed (X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F)) in cartToPolar, file C:\OpenCV246PC\opencv\modules\core\src\mathfuncs.cpp, line 448
I tried to change the depth to CV_32F, but the gradient image is not correct.
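The assertion in that error message says cartToPolar only accepts CV_32F or CV_64F inputs, while convertScaleAbs produces CV_8U images. A minimal sketch (assuming img and the variables from the snippet above) that feeds the float Sobel outputs to cartToPolar directly instead of the absolute 8-bit images:
// Sketch: compute magnitude and orientation from float gradients
cv::Mat grad_x32, grad_y32, magnitude, orientation;
cv::Sobel(img, grad_x32, CV_32F, 1, 0, 3);
cv::Sobel(img, grad_y32, CV_32F, 0, 1, 3);
cv::cartToPolar(grad_x32, grad_y32, magnitude, orientation, true); // angles in degrees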
I am using OpenCV to write code that can find one image and replace it with another image.
Here is my 1st image:
Now I have a 2nd image like this:
I need to replace the second image with this:
and the final output should be like this:
So how do I start? I am not sure how I can find it. I tried using template matching, but the images need to be almost identical for template matching, so when my images are distorted or skewed in some manner it doesn't work.
How can I match the image, get its bounds using OpenCV, and replace it with another image?
Any help would be appreciated
Thank you
The SURF algorithm is what you want: OpenCV SURF Example.
You can use the SURF algorithm for matching the images, as shown here.
The code
line( img_scene, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
draws the outline offset by the object image's width, because the tutorial draws on the side-by-side matches image, where the scene starts at x = img_object.cols. When drawing directly on img_scene, drop that offset and use this:
line( img_scene, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4 );
line( img_scene, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 4 );
line( img_scene, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 4 );
line( img_scene, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 4 );
Now, to replace the image, use this:
Mat temp;
cv::resize(mReplacementImage, temp, img_object.size());        // scale the replacement to the object size
warpPerspective(temp, mReplacementImage, H, img_scene.size()); // warp it into the scene using the homography H
Mat mask = cv::Mat::ones(img_object.size(), CV_8U);            // mask covering the object area
Mat temp2;
warpPerspective(mask, temp2, H, img_scene.size());             // warp the mask the same way
mReplacementImage.copyTo(img_scene, temp2);                    // paste the warped replacement into the scene
cv::imwrite("output.bmp", img_scene);
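For completeness, the homography H used above comes from the SURF matching step; a minimal sketch of how it is typically obtained, assuming obj and scene hold the matched keypoint coordinates of the good matches from the tutorial:
// Sketch: estimate the homography and project the object corners into the scene
std::vector<cv::Point2f> obj, scene;                    // filled from the good SURF matches
cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC);
std::vector<cv::Point2f> obj_corners = {
    {0.0f, 0.0f},
    {(float)img_object.cols, 0.0f},
    {(float)img_object.cols, (float)img_object.rows},
    {0.0f, (float)img_object.rows}};
std::vector<cv::Point2f> scene_corners(4);
cv::perspectiveTransform(obj_corners, scene_corners, H); // scene_corners used by the line() calls above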
I am searching for a way to get the numerical gradient of a matrix. The same function is described in Matlab's documentation: http://www.mathworks.com/help/techdoc/ref/gradient.html, but I couldn't find an equivalent in OpenCV.
I want to port this to C++ using OpenCV.
Should I use Sobel for the horizontal and vertical gradients, or is there another function or way to do it?
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
/// Gradient X
Sobel( mat, grad_x, CV_32F, 1, 0, 3);
imshow("xx",grad_x);
convertScaleAbs( grad_x, abs_grad_x );
/// Gradient Y
Sobel( mat, grad_y, CV_32F, 0, 1, 3);
convertScaleAbs( grad_y, abs_grad_y );
/// Total Gradient (approximate)
Mat res;
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, res );
[EDIT]
Solution:
Mat grad_x, abs_grad_x, grad_y, abs_grad_y;
int type = CV_64F;
Mat Gradient = Mat::zeros(input.size(), type); // accumulator for the squared gradients
/// Gradient X
Sobel( input, grad_x, type, 1, 0, 3);
convertScaleAbs(grad_x, abs_grad_x);
cv::accumulateSquare(abs_grad_x, Gradient);
/// Gradient Y
Sobel(input, grad_y, type, 0, 1, 3);
convertScaleAbs(grad_y, abs_grad_y);
cv::accumulateSquare(abs_grad_y, Gradient);
imshow("gradient Mag", Gradient);
You can find the gradient calculation here. Just as you said, it computes Sobel gradients for the horizontal and vertical directions; the example does exactly that.
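Note that Matlab's gradient() is not a Sobel filter: it uses central differences for interior points and one-sided differences at the borders. If the goal is to reproduce gradient() rather than Sobel, a rough sketch with filter2D (assuming mat is the input matrix from the snippet above; border handling will still differ slightly from Matlab at the edges) could look like this:
// Sketch: central-difference gradients, roughly matching Matlab's gradient()
cv::Mat src32f, gx, gy;
mat.convertTo(src32f, CV_32F);
cv::Mat kx = (cv::Mat_<float>(1, 3) << -0.5f, 0.0f, 0.5f);  // d/dx: (f(x+1) - f(x-1)) / 2
cv::Mat ky = (cv::Mat_<float>(3, 1) << -0.5f, 0.0f, 0.5f);  // d/dy: (f(y+1) - f(y-1)) / 2
cv::filter2D(src32f, gx, CV_32F, kx, cv::Point(-1, -1), 0, cv::BORDER_REPLICATE);
cv::filter2D(src32f, gy, CV_32F, ky, cv::Point(-1, -1), 0, cv::BORDER_REPLICATE);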