I'm doing object reconstruction with structure from motion (SfM). The current situation is that I capture multiple views of a car and apply Mask R-CNN to each view to remove the background, because I only want to reconstruct that object and get a clean model.
My current issues are:
The object is not fully reconstructed.
The masks I get from Mask R-CNN do not always have a fixed size, which the SfM pipeline needs in order to work.
Background noise is still present in the reconstructed object.
The camera parameters get messed up when I use only the masks obtained from the different views. How can I fix that?
Here are some results:
Original image (there are of course multiple views of it)
Mask R-CNN result that I use for SfM
and here is the result from SfM
// For one detection: threshold the mask, cut the object out of the frame,
// and store the masked crop for SfM
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask, std::vector<Mat>& contours_images)
{
    // Get the label for the class name and its confidence
    string label = format("%.2f", conf);
    if (!classes.empty())
    {
        CV_Assert(classId < (int)classes.size());
        label = classes[classId] + ":" + label;
    }

    // Compute the label size so the box is not clipped at the top of the frame
    int baseLine;
    Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
    box.y = max(box.y, labelSize.height);

    // Resize the mask to the box size and threshold it
    resize(objectMask, objectMask, Size(box.width, box.height));
    Mat mask = (objectMask > maskThreshold);
    Mat coloredRoi(frame(box));

    // Find the contours of the binary mask
    vector<Mat> contours;
    Mat hierarchy;
    mask.convertTo(mask, CV_8U);
    findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_NONE);

    // Copy only the masked pixels, then normalize the crop to a fixed 400x400 size
    Mat outframe;
    coloredRoi.copyTo(outframe, mask);
    cv::resize(outframe, outframe, cv::Size(400, 400));
    contours_images.push_back(outframe);
}
// For each frame, extract the bounding box and mask for each detected object
void postprocess(Mat& frame, const vector<Mat>& outs, vector<Mat>& masks)
{
    Mat outDetections = outs[0];
    Mat outMasks = outs[1];

    // Output size of masks is NxCxHxW where
    // N - number of detected boxes
    // C - number of classes (excluding background)
    // HxW - segmentation shape
    const int numDetections = outDetections.size[2];
    const int numClasses = outMasks.size[1];

    outDetections = outDetections.reshape(1, outDetections.total() / 7);
    for (int i = 0; i < numDetections; ++i)
    {
        float score = outDetections.at<float>(i, 2);
        if (score > confThreshold)
        {
            // Extract the bounding box
            int classId = static_cast<int>(outDetections.at<float>(i, 1));
            int left = static_cast<int>(frame.cols * outDetections.at<float>(i, 3));
            int top = static_cast<int>(frame.rows * outDetections.at<float>(i, 4));
            int right = static_cast<int>(frame.cols * outDetections.at<float>(i, 5));
            int bottom = static_cast<int>(frame.rows * outDetections.at<float>(i, 6));

            left = max(0, min(left, frame.cols - 1));
            top = max(0, min(top, frame.rows - 1));
            right = max(0, min(right, frame.cols - 1));
            bottom = max(0, min(bottom, frame.rows - 1));
            Rect box = Rect(left, top, right - left + 1, bottom - top + 1);

            // Extract the mask for the object
            Mat objectMask(outMasks.size[2], outMasks.size[3], CV_32F, outMasks.ptr<float>(i, classId));

            // Extract the masked object and collect it for SfM
            drawBox(frame, classId, score, box, objectMask, masks);
        }
    }
}
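A likely cause of the messed-up camera parameters: drawBox crops every detection to its bounding box and resizes it to a fixed 400x400, so each view ends up with a different effective focal length and principal point, and SfM can no longer assume shared intrinsics. A safer approach is to keep every frame at its original resolution and just zero out the background with the mask. Here is a minimal sketch of that idea (the full-frame mask construction is my addition, not part of the original code; frame, box, and mask are as in drawBox above):
// Keep the frame at full resolution and black out the background,
// instead of cropping to the box and resizing to 400x400. All views
// then share the same image size, so SfM can keep one set of intrinsics.
Mat fullMask = Mat::zeros(frame.size(), CV_8U);
mask.copyTo(fullMask(box));         // place the box-sized binary mask into the full frame
Mat masked = Mat::zeros(frame.size(), frame.type());
frame.copyTo(masked, fullMask);     // keep the object pixels; background stays black
contours_images.push_back(masked);  // feed full-resolution masked views to SfM
Note that removing the background also removes background features that SfM may use to register the views; an alternative worth considering is to reconstruct from the full images and segment the car out of the resulting point cloud afterwards.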
Related
I'm getting an image and its mask from classification using Mask R-CNN.
Now I'm getting the following image.
I would like to get the area enclosed by the contour as an image, without the background.
// Display the label at the top of the bounding box
int baseLine;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
box.y = max(box.y, labelSize.height);

// Resize the mask to the box size, threshold it, and color the ROI
Scalar color = colors[classId % colors.size()];
resize(objectMask, objectMask, Size(box.width, box.height));
Mat mask = (objectMask > maskThreshold);
Mat coloredRoi = (0.3 * color + 0.7 * frame(box));
coloredRoi.convertTo(coloredRoi, CV_8UC3);

// Draw the contours on the image
vector<Mat> contours;
Mat hierarchy;
mask.convertTo(mask, CV_8U);
findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
coloredRoi.copyTo(frame(box), mask);

imshow("img", coloredRoi);
cv::waitKey(0);
this is the image showing just the mask:
source image:
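Since the binary mask is already available, one way to get the object without the background is to copy only the masked pixels onto a black (or transparent) canvas. A minimal sketch, reusing frame(box) and mask from the code above; the alpha-channel part is my addition for a transparent background:
// Keep only the masked pixels; everything else stays black
Mat cutout = Mat::zeros(box.height, box.width, frame.type());
frame(box).copyTo(cutout, mask);

// Optionally make the background transparent via an alpha channel
Mat bgra;
cvtColor(cutout, bgra, COLOR_BGR2BGRA);
vector<Mat> channels;
split(bgra, channels);
channels[3] = mask;           // alpha = mask: object opaque, background transparent
merge(channels, bgra);
imwrite("cutout.png", bgra);  // PNG preserves the alpha channel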
I have a binary image:
I want to remove the bottom two crescent shapes (their size and area may change between images) from the image, or at least differentiate them from the rest.
I tried the Hough circle transform to detect the curves, since they resemble portions of a circle, but the code was not working:
int main(int argc, char** argv)
{
    Mat src, gray;
    src = imread("446.bmp", 1);
    namedWindow("src", 1);
    imshow("src", src);
    waitKey(0);

    cvtColor(src, gray, COLOR_BGR2GRAY);
    // Reduce the noise so we avoid false circle detection
    GaussianBlur(gray, gray, Size(9, 9), 2, 2);

    // Apply the Hough transform to find the circles
    vector<Vec3f> circles;
    HoughCircles(gray, circles, HOUGH_GRADIENT, 1, 30, 100, 100, 0, 0);

    // Draw the circles detected
    for (size_t i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);      // circle center
        circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0);  // circle outline
        cout << "center : " << center << "\nradius : " << radius << endl;
    }

    // Show the results
    namedWindow("Hough Circle Transform Demo", WINDOW_AUTOSIZE);
    imshow("Hough Circle Transform Demo", src);
    waitKey(0);
    return 0;
}
But no circle is being drawn, and the crescent shapes are not being detected at all. Any idea where I went wrong?
EDIT 1 - I have added some other images too:
EDIT 2 - new image to try:
I made some modifications to the code posted for another question; you could try it:
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
//! Compute the distance between two points
/*! Compute the Euclidean distance between two points
*
* #param a Point a
* #param b Point b
*/
static double distanceBtwPoints(const cv::Point2f &a, const cv::Point2f &b)
{
double xDiff = a.x - b.x;
double yDiff = a.y - b.y;
return std::sqrt((xDiff * xDiff) + (yDiff * yDiff));
}
int main( int argc, char** argv )
{
Mat src,gray;
src = imread(argv[1]);
if(src.empty())
return -1;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray < 200;
vector<vector<Point> > contours;
findContours(gray.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
RotatedRect _minAreaRect;
for (size_t i = 0; i < contours.size(); ++i)
{
double contour_area = contourArea(contours[i]);
_minAreaRect = minAreaRect( Mat(contours[i]) );
Point2f pts[4];
_minAreaRect.points(pts);
double dist0 = distanceBtwPoints(pts[0], pts[1]);
double dist1 = distanceBtwPoints(pts[1], pts[2]);
double angle = 0;
//if(dist0 > dist1 *1.2)
angle =atan2(pts[0].y - pts[1].y,pts[0].x - pts[1].x) * 180.0 / CV_PI;
//if(dist1 > dist0 *1.2)
angle =atan2(pts[1].y - pts[2].y,pts[1].x - pts[2].x) * 180.0 / CV_PI;
if( fabs(angle) > 91 ) // you can try different values
{
if( contour_area < dist0 * dist1 /2 ) // you can try different values
{
//drawContours(src,contours,i,Scalar(0,0,0),-1); // try to uncomment this line
for( int j = 0; j < 4; j++ )
line(src, pts[j], pts[(j+1)%4], Scalar(0, 0, 255), 1, LINE_AA);
}
}
}
imshow("result", src);
waitKey(0);
return 0;
}
I don't think there is an easy solution here, unfortunately.
What you might want to try is to detect and label each connected component of the image. From there you need to decide which set of pixels looks like a crescent and which does not: since a crescent can be described by a polynomial equation, you only need to fit each component (i.e., a set of points) to a mathematical equation (using regression methods such as RANSAC) and check whether it could be a crescent equation.
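As a rough illustration of the labeling step, here is a minimal sketch; the solidity test is my own stand-in for a proper model fit, and the 0.6 threshold is a guess to tune on your images. Crescents enclose a large concave bite, so their area is much smaller than the area of their convex hull:
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    // Assumes a binary image with white shapes on a black background
    Mat bin = imread("binary.png", IMREAD_GRAYSCALE);
    if (bin.empty())
        return -1;

    vector<vector<Point> > contours;
    findContours(bin.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    for (size_t i = 0; i < contours.size(); ++i)
    {
        vector<Point> hull;
        convexHull(contours[i], hull);
        double solidity = contourArea(contours[i]) / max(contourArea(hull), 1.0);
        if (solidity < 0.6) // low solidity -> strongly non-convex, crescent-like
            cout << "component " << i << " looks like a crescent" << endl;
    }
    return 0;
}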
I'm trying to move a region of an image to the center. I succeeded in getting its contour, and I know how to place the contour in the center.
But what I want is to move the pixels that are inside the contour (yellow with black) to the center, and not just the contour (which is drawn in pink with CV_FILLED).
Image:
Code:
// Then segment the image; save in Mat crop
// ---- Center image -----
// pos: index of the contour of interest
RotatedRect rr = fitEllipse(contours[pos]);
vector<Point>& contour = contours[pos];

// http://stackoverflow.com/a/29467236/4595387
// difference between the centre of the image and the centre of the contour
Point center = Point(crop.cols / 2, crop.rows / 2);
int nX = center.x - rr.center.x;
int nY = center.y - rr.center.y;

for (size_t i = 0; i < contour.size(); i++)
{
    contour[i].x += nX;
    contour[i].y += nY;
}

cout << "x: " << rr.center.x << endl;
cout << "y: " << rr.center.y << endl;

// contour of the image moved to the center
cv::drawContours(crop, contours, pos, color, CV_FILLED);
imshow("In", imagen_src);
imshow("Out", crop);
You basically need to play around with copyTo with a mask. The steps are commented in the code. If you need a different background color, just change backgroundColor in the code below.
Code:
#include <opencv2\opencv.hpp>
using namespace cv;
int main()
{
// Read image
Mat3b img = imread("path_to_image");
// Convert to hsv
Mat3b hsv;
cvtColor(img, hsv, COLOR_BGR2HSV);
// Threshold on yellow color (in hsv space)
Mat1b maskOnYellow;
inRange(hsv, Scalar(20, 100, 100), Scalar(40, 255, 255), maskOnYellow);
// Find contours of yellow item
vector<vector<Point>> contours;
findContours(maskOnYellow.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
// Create a mask as a filled contour
Mat1b mask(img.rows, img.cols, uchar(0));
drawContours(mask, contours, 0, Scalar(255), CV_FILLED);
// Get the bounding box of the item
Rect box = boundingRect(contours[0]);
// Get the roi in the input image according to the mask
Mat3b item(img(box));
// Create a black image (same size as the yellow item and same background bolor as result image)
// to copy the result of the segmentation
Vec3b backgroundColor(0,0,0); // black
Mat3b segmentedItem(item.rows, item.cols, backgroundColor);
// Copy only the masked part
item.copyTo(segmentedItem, mask(box));
// Compute the center of the image
Point center(img.cols / 2, img.rows / 2);
// Create a result image
Mat3b res(img.rows, img.cols, backgroundColor);
// Compute the rectangle centered in the image, same size as box
Rect centerBox(center.x - box.width/2, center.y - box.height/2, box.width, box.height);
// Put the segmented item in the center of the result image
segmentedItem.copyTo(res(centerBox));
imshow("Result", res);
waitKey();
return 0;
}
Input:
Result:
I want to create Mat images in OpenCV and initialize them to zero (all pixels black). Thus I use, for initialization purposes:
Mat img = Mat::zeros(image.rows, image.cols, CV_8UC1);
After that I have some rectangles with locations inside that image, and I want to draw the corresponding rectangular regions in white. How can I draw a filled region in a Mat?
I have the following function to draw rects. However, I want to fill the whole rectangle, not just draw its boundary.
static Mat image_draw(Mat image, vector<Rect> rect, Scalar color, int thickness)
{
    for (size_t i = 0; i < rect.size(); i++)
    {
        Point pt1(rect[i].x + rect[i].width, rect[i].y + rect[i].height);
        Point pt2(rect[i].x, rect[i].y);
        rectangle(image, pt1, pt2, color, thickness, 8, 0);
    }
    return image;
}
The exact thing I want to do is to create a heat map from my rectangles, so that overlapping bounding boxes get higher values (close to 255) than single non-overlapping rectangles. I changed the thickness:
img = image_draw(img, rects, Scalar(255, 102, 255, 0), -1);
The variable rects contains from 0 to 10 rectangles. I want to somehow accumulate the rectangles when drawing, not just redraw them again.
If I want to turn it into a function, it is something like this. EDIT, final solution:
static Mat heatmap2(Mat image1, vector<Rect> faces, Scalar color, int thickness)
{
    cv::Mat heatmap(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
    for (size_t i = 0; i < faces.size(); i++)
    {
        cv::Mat temp(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
        Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
        Point pt2(faces[i].x, faces[i].y);
        rectangle(temp, pt1, pt2, color, thickness, 8, 0);
        heatmap += temp;
    }
    return heatmap;
}
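A short usage sketch for the function above; applyColorMap is optional and only renders the accumulated counts as colors (image1 and rects are assumed to exist already):
Mat heat = heatmap2(image1, rects, Scalar(50), -1); // filled rectangles, 50 grey levels each
Mat colored;
applyColorMap(heat, colored, COLORMAP_JET); // optional: render the counts as a color heat map
imshow("heatmap", colored);
waitKey(0);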
Try this:
cv::Mat heatmap(200, 300, CV_8U, cv::Scalar(0));
{
    cv::Mat temp(200, 300, CV_8U, cv::Scalar(0));
    cv::Rect r(10, 20, 30, 30);
    cv::rectangle(temp, r, cv::Scalar(100), -1);
    heatmap += temp;
}
{
    cv::Mat temp(200, 300, CV_8U, cv::Scalar(0));
    cv::Rect r(20, 25, 30, 30);
    cv::rectangle(temp, r, cv::Scalar(100), -1);
    heatmap += temp;
}
cv::imshow("Heatmap", heatmap);
cv::waitKey();
Result:
From the official OpenCV documentation (check here): "Thickness of lines that make up the rectangle. Negative values, like CV_FILLED, mean that the function has to draw a filled rectangle."
So give thickness a negative value, like:
rectangle(image, pt1, pt2, color, -1, 8, 0);
UPDATE
Use these lines in your code:
for (size_t i = 0; i < rect.size(); i++)
    for (int y = rect[i].y; y < rect[i].y + rect[i].height; y++)
        for (int x = rect[i].x; x < rect[i].x + rect[i].width; x++)
        {
            image.at<uchar>(y, x) = saturate_cast<uchar>(image.at<uchar>(y, x) + 50);
        }
Here each Rect will increase the intensity by 50, and when it reaches 255, it will stay at 255.
Input Image
Output Image
2 overlapping rects:
Just a slight modification to your code should work:
static void draw_rectangles(Mat image, vector<Rect> faces)
{
    cv::Mat heatmap(image.rows, image.cols, CV_8U, cv::Scalar(0));
    for (size_t i = 0; i < faces.size(); i++)
    {
        cv::Mat temp = heatmap(faces[i]); // submatrix of the heatmap at the rectangle's location
        temp += 10; // add 10 grey levels to the existing values; this also modifies heatmap as a side effect
    }
    imshow("heatmap", heatmap);
    waitKey(0);
}
I have stumbled upon an exception in my application that I am unable to get rid of...
I am trying to write a simple image face recognition program with all three face recognition algorithms (Eigen, Fisher and LBPH).
The unhandled exception is caused by the line:
Fisher_prediction = Fisher_model->predict(crop);
and the error message says: Unhandled exception at 0x000007FEFDB3A49D in FaceRecognition.exe: Microsoft C++ exception: cv::Exception at memory location 0x00000000002782B0.
and it is raised from: msvcr110d.dll!_CxxThrowException(void * pExceptionObject, const _s__ThrowInfo * pThrowInfo) Line 152 C++
Any suggestions as to where I am going wrong?
here is the rest of the code:
Mat frame = imread("1.jpg");

// Apply the classifier to the frame
if (!frame.empty()) {
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

    // Set region of interest
    cv::Rect roi_b;
    cv::Rect roi_c;
    size_t ic = 0; // ic is index of current element
    int ac = 0;    // ac is area of current element
    size_t ib = 0; // ib is index of biggest element
    int ab = 0;    // ab is area of biggest element

    // Iterate through all current elements (detected faces)
    for (ic = 0; ic < faces.size(); ic++) {
        roi_c.x = faces[ic].x;
        roi_c.y = faces[ic].y;
        roi_c.width = faces[ic].width;
        roi_c.height = faces[ic].height;
        ac = roi_c.width * roi_c.height; // area of the current element (detected face)

        roi_b.x = faces[ib].x;
        roi_b.y = faces[ib].y;
        roi_b.width = faces[ib].width;
        roi_b.height = faces[ib].height;
        ab = roi_b.width * roi_b.height; // area of the biggest element; at the beginning it is the same as the "current" element

        if (ac > ab) {
            ib = ic;
            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = faces[ib].width;
            roi_b.height = faces[ib].height;
        }

        crop = frame(roi_b);
        cv::resize(crop, res, Size(img_width, img_height), 0, 0, INTER_LINEAR); // this will be needed later while saving images
        cvtColor(crop, gray, COLOR_BGR2GRAY); // convert the cropped image to grayscale

        Point pt1(faces[ic].x, faces[ic].y); // display detected faces on the main window
        Point pt2((faces[ic].x + faces[ic].height), (faces[ic].y + faces[ic].width));

        /* Calculate the position for the annotated text */
        int pos_x = std::max(roi_b.tl().x - 10, 0);
        int pos_y = std::max(roi_b.tl().y - 10, 0);

        if (createdFisher) {
            Fisher_prediction = Fisher_model->predict(crop); // <-- the line that throws
            QString Fisher_qs = QString::number(Fisher_prediction);
            /* Create the text we will annotate the box with */
            string Fisher_text = format("Prediction Fisherfaces = %d", Fisher_prediction);
            putText(frame, Fisher_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2.0);
            /* Draw a green rectangle around the detected face */
            rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
            ui.txtConsole->appendPlainText(QString("Fisherfaces - " + Fisher_qs));
        }
        if (createdEigen) {
            Eigen_prediction = Eigen_model->predict(crop);
            QString Eigen_qs = QString::number(Eigen_prediction);
            /* Create the text we will annotate the box with */
            string Eigen_text = format("Prediction Eigenfaces = %d", Eigen_prediction);
            putText(frame, Eigen_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2.0);
            /* Draw a green rectangle around the detected face */
            rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
            ui.txtConsole->appendPlainText(QString("Eigenfaces - " + Eigen_qs));
        }
        if (createdLBPH) {
            LBPH_prediction = LBPH_model->predict(crop);
            QString LBPH_qs = QString::number(LBPH_prediction);
            /* Create the text we will annotate the box with */
            string LBPH_text = format("Prediction LBPH = %d", LBPH_prediction);
            putText(frame, LBPH_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2.0);
            /* Draw a green rectangle around the detected face */
            rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
            ui.txtConsole->appendPlainText(QString("Local Binary Pattern Histograms - " + LBPH_qs));
        }
    }

    putText(frame, text, Point(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, Scalar(0, 0, 255), 1, LINE_AA);
    imshow("original", frame);

    if (!crop.empty()) {
        imshow("detected", crop);
    }
    else
        destroyWindow("detected");
}
int c = waitKey(0);
What I was doing wrong is that I was passing a picture to the face recognizer that had not been resized (I am using 200x200 px pictures in my database), so the algorithm could not do the face recognition based on an image with a larger resolution than the ones in the database.
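For reference, a minimal sketch of the fix, assuming the models were trained on 200x200 grayscale images (img_width and img_height as in the code above):
// Resize and convert the detected face before prediction so it matches
// the 200x200 grayscale training images in the database
Mat face_gray;
cvtColor(crop, face_gray, COLOR_BGR2GRAY);
cv::resize(face_gray, face_gray, Size(img_width, img_height), 0, 0, INTER_LINEAR);
Fisher_prediction = Fisher_model->predict(face_gray);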