Resizing an image using OpenCV C++ while maintaining aspect ratio [duplicate]

Is there a way of resizing an image of any shape or size to, say, [500x500], while keeping the image's aspect ratio and filling the empty space with white/black filler?
So say the image is [2000x1000]; after being resized to [500x500], the actual picture would be [500x250], with 125 rows of white/black filler on either side.
Something like this:
[input image]
[output image]
EDIT
I don't wish to simply display the image in a square window; rather, I want the image changed to that state and then saved to file, creating a collection of same-size images with as little distortion as possible.
The only thing I came across asking a similar question was this post, but it's in PHP.

Not fully optimized, but you can try this:
EDIT: handles a target size other than 500x500 pixels, wrapped up as a function.
cv::Mat GetSquareImage( const cv::Mat& img, int target_width = 500 )
{
    int width = img.cols,
        height = img.rows;

    // Square canvas filled with zeros (black).
    cv::Mat square = cv::Mat::zeros( target_width, target_width, img.type() );

    int max_dim = ( width >= height ) ? width : height;
    float scale = ( ( float ) target_width ) / max_dim;
    cv::Rect roi;
    if ( width >= height )
    {
        roi.width = target_width;
        roi.x = 0;
        roi.height = height * scale;
        roi.y = ( target_width - roi.height ) / 2;
    }
    else
    {
        roi.y = 0;
        roi.height = target_width;
        roi.width = width * scale;
        roi.x = ( target_width - roi.width ) / 2;
    }

    // Resize directly into the centered ROI of the square canvas.
    cv::resize( img, square( roi ), roi.size() );

    return square;
}
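A minimal usage sketch for the function above (the file paths here are hypothetical):
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img = cv::imread("input.jpg"); // hypothetical input path
    if (img.empty())
        return 1;
    cv::Mat square = GetSquareImage(img);  // 500x500 canvas, black filler
    cv::imwrite("output.jpg", square);     // save the padded result
    return 0;
}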

A general approach:
cv::Mat utilites::resizeKeepAspectRatio(const cv::Mat &input, const cv::Size &dstSize, const cv::Scalar &bgcolor)
{
    cv::Mat output;

    // Scale to fit the destination width, then check whether the height still fits.
    double h1 = dstSize.width * (input.rows / (double)input.cols);
    double w2 = dstSize.height * (input.cols / (double)input.rows);
    if (h1 <= dstSize.height)
    {
        cv::resize(input, output, cv::Size(dstSize.width, (int)h1));
    }
    else
    {
        cv::resize(input, output, cv::Size((int)w2, dstSize.height));
    }

    // Pad the remaining space evenly with the background color.
    int top = (dstSize.height - output.rows) / 2;
    int down = (dstSize.height - output.rows + 1) / 2;
    int left = (dstSize.width - output.cols) / 2;
    int right = (dstSize.width - output.cols + 1) / 2;
    cv::copyMakeBorder(output, output, top, down, left, right, cv::BORDER_CONSTANT, bgcolor);

    return output;
}
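For example, a quick sketch of how it could be called (file names are illustrative):
cv::Mat input = cv::imread("photo.jpg");
cv::Mat padded = utilites::resizeKeepAspectRatio(input, cv::Size(500, 500), cv::Scalar(0, 0, 0)); // black filler
cv::imwrite("photo_500.jpg", padded);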

Alireza's answer is good; however, I modified the code slightly so that the vertical borders are not added when the image already fits vertically, and the horizontal borders are not added when it already fits horizontally (this is closer to the original request):
cv::Mat utilites::resizeKeepAspectRatio(const cv::Mat &input, const cv::Size &dstSize, const cv::Scalar &bgcolor)
{
    cv::Mat output;

    double h1 = dstSize.width * (input.rows / (double)input.cols);
    double w2 = dstSize.height * (input.cols / (double)input.rows);

    // initially no borders
    int top = 0;
    int down = 0;
    int left = 0;
    int right = 0;
    if (h1 <= dstSize.height)
    {
        // image fills the width; borders go on top and bottom only
        top = (dstSize.height - (int)h1) / 2;
        down = top;
        cv::resize(input, output, cv::Size(dstSize.width, (int)h1));
    }
    else
    {
        // image fills the height; borders go left and right only
        left = (dstSize.width - (int)w2) / 2;
        right = left;
        cv::resize(input, output, cv::Size((int)w2, dstSize.height));
    }

    cv::copyMakeBorder(output, output, top, down, left, right, cv::BORDER_CONSTANT, bgcolor);

    return output;
}
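With the numbers from the question, a 2000x1000 input resized into 500x500 comes out as a 500x250 picture with 125 rows of filler above and below (a sketch using a synthetic image):
cv::Mat wide(1000, 2000, CV_8UC3, cv::Scalar(255, 255, 255)); // synthetic 2000x1000 white image
cv::Mat out = utilites::resizeKeepAspectRatio(wide, cv::Size(500, 500), cv::Scalar(0, 0, 0));
// out is 500x500; the picture occupies rows 125..374, the rest is black filler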

You can create another image of the square size you wish, then put your image in the middle of the square image. Something like this:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"

int main(int argc, char *argv[])
{
    // read an image
    cv::Mat image1 = cv::imread("/home/hdang/Desktop/colorCode.png");

    // resize it to half its original size
    cv::Size newSize = cv::Size(image1.cols / 2, image1.rows / 2);
    cv::resize(image1, image1, newSize, 0, 0, cv::INTER_LINEAR);

    // create the square container
    int dstWidth = 500;
    int dstHeight = 500;
    cv::Mat dst = cv::Mat(dstHeight, dstWidth, CV_8UC3, cv::Scalar(0, 0, 0));

    // put the image into the container; roi is the new position
    cv::Rect roi(0, dst.rows * 0.25, image1.cols, image1.rows);
    cv::Mat targetROI = dst(roi);
    image1.copyTo(targetROI);

    // view the result
    cv::namedWindow("OpenCV Window");
    cv::imshow("OpenCV Window", dst);

    // wait key for 5000 ms
    cv::waitKey(5000);
    return 0;
}
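If you'd rather keep the pasted image centered for any input size instead of hardcoding the 0.25 offset, the ROI can be computed (a sketch, assuming image1 already fits inside dst):
// center image1 inside dst (assumes image1 is no larger than dst)
cv::Rect centered((dst.cols - image1.cols) / 2,
                  (dst.rows - image1.rows) / 2,
                  image1.cols, image1.rows);
image1.copyTo(dst(centered));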

I extended Alireza's answer to allow a zero-allocation version:
Allow the user to pass a preallocated cv::Mat, or an empty one, as the destination
cv::resize the input image immediately into the output mat
Color the top and bottom boxes with cv::rectangle
#include <opencv2/imgproc.hpp>
#include <cassert>

void resizeKeepAspectRatio(const cv::Mat& src, cv::Mat& dst, const cv::Size& dstSize, const cv::Scalar& backgroundColor = {})
{
    // Don't handle anything in this corner case
    if (dstSize.width <= 0 || dstSize.height <= 0)
        return;

    // No work is needed here, let's avoid any copy
    if (src.cols == dstSize.width && src.rows == dstSize.height)
    {
        dst = src;
        return;
    }

    // Try not to reallocate memory if possible
    cv::Mat output = [&]()
    {
        if (dst.data != src.data && dst.cols == dstSize.width && dst.rows == dstSize.height && dst.type() == src.type())
            return dst;
        return cv::Mat(dstSize.height, dstSize.width, src.type());
    }();

    // 'src' inside 'dst'
    const auto imageBox = [&]()
    {
        const auto h1 = int(dstSize.width * (src.rows / (double)src.cols));
        const auto w2 = int(dstSize.height * (src.cols / (double)src.rows));
        const bool horizontal = h1 <= dstSize.height;
        const auto width = horizontal ? dstSize.width : w2;
        const auto height = horizontal ? h1 : dstSize.height;
        const auto x = horizontal ? 0 : int(double(dstSize.width - width) / 2.);
        const auto y = horizontal ? int(double(dstSize.height - height) / 2.) : 0;
        return cv::Rect(x, y, width, height);
    }();

    cv::Rect firstBox;
    cv::Rect secondBox;
    if (imageBox.width > imageBox.height)
    {
        // ┌──────────────► x
        // │ ┌────────────┐
        // │ │┼┼┼┼┼┼┼┼┼┼┼┼│ firstBox
        // │ x────────────►
        // │ │            │
        // │ ▼────────────┤
        // │ │┼┼┼┼┼┼┼┼┼┼┼┼│ secondBox
        // │ └────────────┘
        // ▼
        // y
        firstBox.x = 0;
        firstBox.width = dstSize.width;
        firstBox.y = 0;
        firstBox.height = imageBox.y;
        secondBox.x = 0;
        secondBox.width = dstSize.width;
        secondBox.y = imageBox.y + imageBox.height;
        secondBox.height = dstSize.height - secondBox.y;
    }
    else
    {
        // ┌──────────────► x
        // │ ┌──x──────►──┐
        // │ │┼┼│      │┼┼│
        // │ │┼┼│      │┼┼│
        // │ │┼┼│      │┼┼│
        // │ └──▼──────┴──┘
        // ▼  firstBox  secondBox
        // y
        firstBox.y = 0;
        firstBox.height = dstSize.height;
        firstBox.x = 0;
        firstBox.width = imageBox.x;
        secondBox.y = 0;
        secondBox.height = dstSize.height;
        secondBox.x = imageBox.x + imageBox.width;
        secondBox.width = dstSize.width - secondBox.x;
    }

    // Resizing into the final image avoids a useless memory allocation
    cv::Mat outputImage = output(imageBox);
    assert(outputImage.cols == imageBox.width);
    assert(outputImage.rows == imageBox.height);
    const auto* dataBeforeResize = outputImage.data;
    cv::resize(src, outputImage, cv::Size(outputImage.cols, outputImage.rows));
    assert(dataBeforeResize == outputImage.data);

    const auto drawBox = [&](const cv::Rect& box)
    {
        if (box.width > 0 && box.height > 0)
        {
            cv::rectangle(output, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), backgroundColor, -1);
        }
    };
    drawBox(firstBox);
    drawBox(secondBox);

    // Finally copy output to dst, so the caller can pass the same cv::Mat as src and dst
    dst = output;
}
With this function, the dst mat can be reused without any reallocation:
cv::Mat src(200, 100, CV_8UC3, cv::Scalar(1, 100, 200));
cv::Size dstSize(300, 400);
cv::Mat dst;
resizeKeepAspectRatio(src, dst, dstSize); // dst gets allocated
resizeKeepAspectRatio(src, dst, dstSize); // dst gets reused

Related

My C++ code is not detecting objects correctly (YOLOv5)

I have a YOLOv5 ONNX file where I trained apples and bananas. I was using Python until today, but I decided to switch to C++ to gain some speed. I get correct results when I use YOLOv5's own ONNX files and image in the code I added below. But when I add my own ONNX file and my test image, it gives me the wrong result. You can also find the attached image. What is the problem here?
// Include Libraries.
#include <opencv2/opencv.hpp>
#include <fstream>

// Namespaces.
using namespace cv;
using namespace std;
using namespace cv::dnn;

// Constants.
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.3;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.65;

// Text parameters.
const float FONT_SCALE = 0.7;
const int FONT_FACE = FONT_HERSHEY_SIMPLEX;
const int THICKNESS = 1;

// Colors.
Scalar BLACK = Scalar(0, 0, 0);
Scalar BLUE = Scalar(255, 178, 50);
Scalar YELLOW = Scalar(0, 255, 255);
Scalar RED = Scalar(0, 0, 255);

// Draw the predicted bounding box.
void draw_label(Mat& input_image, string label, int left, int top)
{
    // Display the label at the top of the bounding box.
    int baseLine;
    Size label_size = getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS, &baseLine);
    top = max(top, label_size.height);
    // Top left corner.
    Point tlc = Point(left, top);
    // Bottom right corner.
    Point brc = Point(left + label_size.width, top + label_size.height + baseLine);
    // Draw black rectangle.
    rectangle(input_image, tlc, brc, BLACK, FILLED);
    // Put the label on the black rectangle.
    putText(input_image, label, Point(left, top + label_size.height), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS);
}

vector<Mat> pre_process(Mat &input_image, Net &net)
{
    // Convert to blob.
    Mat blob;
    blobFromImage(input_image, blob, 1./255., Size(INPUT_WIDTH, INPUT_HEIGHT), Scalar(), true, false);
    net.setInput(blob);
    // Forward propagate.
    vector<Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());
    return outputs;
}
Mat post_process(Mat &input_image, vector<Mat> &outputs, const vector<string> &class_name)
{
    // Initialize vectors to hold respective outputs while unwrapping detections.
    vector<int> class_ids;
    vector<float> confidences;
    vector<Rect> boxes;
    // Resizing factor.
    float x_factor = input_image.cols / INPUT_WIDTH;
    float y_factor = input_image.rows / INPUT_HEIGHT;
    float *data = (float *)outputs[0].data;
    const int dimensions = 85;
    const int rows = 25200;
    // Iterate through 25200 detections.
    for (int i = 0; i < rows; ++i)
    {
        float confidence = data[4];
        // Discard bad detections and continue.
        if (confidence >= CONFIDENCE_THRESHOLD)
        {
            float *classes_scores = data + 5;
            // Create a 1x85 Mat and store class scores of 80 classes.
            Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
            // Perform minMaxLoc and acquire index of best class score.
            Point class_id;
            double max_class_score;
            minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
            // Continue if the class score is above the threshold.
            if (max_class_score > SCORE_THRESHOLD)
            {
                // Store class ID and confidence in the pre-defined respective vectors.
                confidences.push_back(confidence);
                class_ids.push_back(class_id.x);
                // Center.
                float cx = data[0];
                float cy = data[1];
                // Box dimension.
                float w = data[2];
                float h = data[3];
                // Bounding box coordinates.
                int left = int((cx - 0.5 * w) * x_factor);
                int top = int((cy - 0.5 * h) * y_factor);
                int width = int(w * x_factor);
                int height = int(h * y_factor);
                // Store good detections in the boxes vector.
                boxes.push_back(Rect(left, top, width, height));
            }
        }
        // Jump to the next detection row.
        data += 85;
    }
    // Perform Non Maximum Suppression and draw predictions.
    vector<int> indices;
    NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
    for (int i = 0; i < indices.size(); i++)
    {
        int idx = indices[i];
        Rect box = boxes[idx];
        int left = box.x;
        int top = box.y;
        int width = box.width;
        int height = box.height;
        // Draw bounding box.
        rectangle(input_image, Point(left, top), Point(left + width, top + height), BLUE, 3 * THICKNESS);
        // Get the label for the class name and its confidence.
        string label = format("%.2f", confidences[idx]);
        label = class_name[class_ids[idx]] + ":" + label;
        // Draw class labels.
        draw_label(input_image, label, left, top);
        //cout << "The Value is " << label << endl;
    }
    return input_image;
}
int main()
{
    vector<string> class_list;
    ifstream ifs("/Users/admin/Documents/C++/First/obj.names");
    string line;
    while (getline(ifs, line))
    {
        class_list.push_back(line);
    }
    // Load image.
    Mat frame;
    frame = imread("/Users/admin/Documents/C++/First/test.jpg");
    // Load model.
    Net net;
    net = readNet("/Users/admin/Documents/C++/First/my.onnx");
    vector<Mat> detections;
    detections = pre_process(frame, net);
    Mat img = post_process(frame, detections, class_list);
    //Mat img = post_process(frame.clone(), detections, class_list);
    // Put efficiency information.
    // getPerfProfile returns the overall time for inference (t) and the timings for each layer (in layersTimes).
    vector<double> layersTimes;
    double freq = getTickFrequency() / 1000;
    double t = net.getPerfProfile(layersTimes) / freq;
    string label = format("Inference time : %.2f ms", t);
    putText(img, label, Point(20, 40), FONT_FACE, FONT_SCALE, RED);
    imshow("Output", img);
    waitKey(0);
    return 0;
}
The photos I use are 640x480. I played around with the size of the photo, thinking it might be related, but the same problem persisted.
The YOLOv5 output format is xyxy, as can be seen here:
https://github.com/ultralytics/yolov5/blob/bfa1f23045c7c4136a9b8ced9d6be8249ed72692/detect.py#L161
not xywh as you are assuming in your code.
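If your exported model does emit corner coordinates, the decoding step in post_process would change roughly as below (a sketch, not a drop-in fix; verify the actual layout of your own .onnx output first):
// Hypothetical decoding for a row laid out as [x1, y1, x2, y2, conf, class scores...]
float x1 = data[0] * x_factor;
float y1 = data[1] * y_factor;
float x2 = data[2] * x_factor;
float y2 = data[3] * y_factor;
boxes.push_back(Rect(Point(int(x1), int(y1)), Point(int(x2), int(y2))));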

OpenCV: zooming in on a Mat image without the window border changing (C++)

I have a window Mat gestures containing an image. I want to zoom in on every pixel in the window but keep the border the same size. I have tried resize(), but it resizes the border as well.
To explain better: I don't want the border (shown in the green box) to be resized along with the whole image, but I need the image inside the border to be resized. How can I achieve this?
Set an ROI on the image that excludes the border. If you already know the border thickness, simply create a new img from it. Then you can resize it and draw a new border with cv::rectangle using the thickness of the original image.
The following code snippet may not compile, since I don't have a reproducible example:
cv::Mat img = cv::imread(...);
const int thick = 3;
const cv::Rect roi(thick, thick, img.cols - 2 * thick, img.rows - 2 * thick);
cv::Mat img_roi = img(roi);
cv::resize(...);    // resize img_roi
cv::rectangle(...); // draw the new border on img_roi; pass a cv::Scalar taken from img.at(0, 0) for its color
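For reference, one compilable reading of the same idea (the 3-pixel border thickness and the 2x zoom factor are assumptions):
cv::Mat img = cv::imread("framed.png");               // hypothetical file with a drawn border
const int thick = 3;                                  // assumed border thickness
cv::Scalar borderColor = img.at<cv::Vec3b>(0, 0);     // take the border color from a corner pixel
cv::Rect roi(thick, thick, img.cols - 2 * thick, img.rows - 2 * thick);
cv::Mat inner;
cv::resize(img(roi), inner, cv::Size(), 2.0, 2.0);    // zoom the interior by 2x
cv::copyMakeBorder(inner, inner, thick, thick, thick, thick,
                   cv::BORDER_CONSTANT, borderColor); // re-draw a border of the original thickness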
However, I'm expecting a better idea from someone else.
The basic idea is to decide how the scale changes on each mouse-wheel event. Once you have the current scale (relative to the original image), you can compute the position and size of the visible rectangle on the scaled image.
In my GitHub, checking OnMouseWheel() and RefreshSrcView() in Fastest_Image_Pattern_Matching/ELCVMatchTool/ELCVMatchToolDlg.cpp may give what you want.
Besides, if you only want to use an OpenCV window without the MFC framework or other frameworks, check this (pure OpenCV version).
Effect:
Part of the code:
BOOL CELCVMatchToolDlg::OnMouseWheel(UINT nFlags, short zDelta, CPoint pt)
{
    POINT pointCursor;
    GetCursorPos(&pointCursor);
    ScreenToClient(&pointCursor);
    // TODO: add your message handler code here and/or call the default
    if (zDelta > 0)
    {
        if (m_iScaleTimes == MAX_SCALE_TIMES)
            return TRUE;
        else
            m_iScaleTimes++;
    }
    if (zDelta < 0)
    {
        if (m_iScaleTimes == MIN_SCALE_TIMES)
            return TRUE;
        else
            m_iScaleTimes--;
    }
    CRect rect;
    //GetWindowRect (rect);
    GetDlgItem(IDC_STATIC_SRC_VIEW)->GetWindowRect(rect); // important
    if (m_iScaleTimes == 0)
        g_dCompensationX = g_dCompensationY = 0;
    int iMouseOffsetX = pt.x - (rect.left + 1);
    int iMouseOffsetY = pt.y - (rect.top + 1);
    double dPixelX = (m_hScrollBar.GetScrollPos() + iMouseOffsetX + g_dCompensationX) / m_dNewScale;
    double dPixelY = (m_vScrollBar.GetScrollPos() + iMouseOffsetY + g_dCompensationY) / m_dNewScale;
    m_dNewScale = m_dSrcScale * pow(SCALE_RATIO, m_iScaleTimes);
    if (m_iScaleTimes != 0)
    {
        int iWidth = m_matSrc.cols;
        int iHeight = m_matSrc.rows;
        m_hScrollBar.SetScrollRange(0, int(m_dNewScale * iWidth - m_dSrcScale * iWidth) - 1 + BAR_SIZE);
        m_vScrollBar.SetScrollRange(0, int(m_dNewScale * iHeight - m_dSrcScale * iHeight) - 1 + BAR_SIZE);
        int iBarPosX = int(dPixelX * m_dNewScale - iMouseOffsetX + 0.5);
        m_hScrollBar.SetScrollPos(iBarPosX);
        m_hScrollBar.ShowWindow(SW_SHOW);
        g_dCompensationX = -iBarPosX + (dPixelX * m_dNewScale - iMouseOffsetX);
        int iBarPosY = int(dPixelY * m_dNewScale - iMouseOffsetY + 0.5);
        m_vScrollBar.SetScrollPos(iBarPosY);
        m_vScrollBar.ShowWindow(SW_SHOW);
        g_dCompensationY = -iBarPosY + (dPixelY * m_dNewScale - iMouseOffsetY);
        // scroll thumb size
        SCROLLINFO infoH;
        infoH.cbSize = sizeof(SCROLLINFO);
        infoH.fMask = SIF_PAGE;
        infoH.nPage = BAR_SIZE;
        m_hScrollBar.SetScrollInfo(&infoH);
        SCROLLINFO infoV;
        infoV.cbSize = sizeof(SCROLLINFO);
        infoV.fMask = SIF_PAGE;
        infoV.nPage = BAR_SIZE;
        m_vScrollBar.SetScrollInfo(&infoV);
        // scroll thumb size
    }
    else
    {
        m_hScrollBar.SetScrollPos(0);
        m_hScrollBar.ShowWindow(SW_HIDE);
        m_vScrollBar.SetScrollPos(0);
        m_vScrollBar.ShowWindow(SW_HIDE);
    }
    RefreshSrcView();
    return CDialogEx::OnMouseWheel(nFlags, zDelta, pt);
}
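Stripped of the MFC scaffolding, the core idea is: keep a scale factor, and on every wheel event re-crop the source around the cursor and resize the crop into a fixed-size view. A minimal OpenCV-only sketch (names are illustrative, and it assumes scale >= 1):
#include <opencv2/imgproc.hpp>
#include <algorithm>

// Render a fixed-size view of 'src', zoomed by 'scale' around 'anchor' (in source coordinates).
cv::Mat zoomView(const cv::Mat& src, cv::Point2d anchor, double scale, cv::Size viewSize)
{
    // Source region that maps onto the view at this scale.
    double w = viewSize.width / scale;
    double h = viewSize.height / scale;
    // Clamp so the region stays inside the source image.
    double x = std::min(std::max(anchor.x - w / 2, 0.0), src.cols - w);
    double y = std::min(std::max(anchor.y - h / 2, 0.0), src.rows - h);
    cv::Mat patch;
    cv::getRectSubPix(src, cv::Size(int(w), int(h)),
                      cv::Point2f(float(x + w / 2), float(y + h / 2)), patch);
    cv::Mat view;
    cv::resize(patch, view, viewSize, 0, 0, cv::INTER_LINEAR);
    return view;
}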

Problems with forloop and Points

So I'm trying to clean up my code, as I was using too many Points written out by hand. I came up with the idea of using a for loop instead, but unfortunately I can't get it to work.
I've changed my points into CvPoint arrays and made a for loop, but I can't seem to get it to work.
Anyone know how I can make this work? My error is: cannot convert CvPoint to int.
My functions:
bool FindWhiteLine(Vec3b white)
{
    bool color = false;
    uchar blue = white.val[0];
    uchar green = white.val[1];
    uchar red = white.val[2];
    if (blue == 255 && green == 255 && red == 255)
    {
        color = true;
    }
    return color;
}

// extends the line until a white line is found
CvPoint DrawingLines(Mat img, CvPoint point, bool right)
{
    int cols = img.cols;
    Vec3b drawingLine = img.at<Vec3b>(point); // the color at the current position
    while (point.x != cols)
    {
        if (right == true)
        {
            point.x = point.x + 1; // extends the line to the right
            drawingLine = img.at<cv::Vec3b>(point);
            if (FindWhiteLine(drawingLine)) // quits in case a white line is found
            {
                break;
            }
        }
        else if (right == false)
        {
            point.x = point.x - 1; // extends the line to the left
            drawingLine = img.at<cv::Vec3b>(point);
            if (FindWhiteLine(drawingLine)) // quits in case a white line is found
            {
                break;
            }
        }
    }
    return point;
}
My main:
void LaneDetector::processImage() {
    // http://docs.opencv.org/doc/user_guide/ug_mat.html Handling images
    Mat matImg(m_image);
    Mat gray; // for converting to gray
    cvtColor(matImg, gray, CV_BGR2GRAY); // let's make the image gray
    Mat canny; // Canny for detecting edges, http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.html
    Canny(gray, canny, 50, 170, 3); // inputting Canny limits
    cvtColor(canny, matImg, CV_GRAY2BGR); // converts back from gray
    // get matrix size http://docs.opencv.org/modules/core/doc/basic_structures.html
    int rows = matImg.rows;
    int cols = matImg.cols;
    // Points
    Point centerPoint; // old way
    Point centerPointEnd;
    CvPoint startPos[4], endXRight[4], endxLeft[4]; // new way I tried
    for (int i = 0; i < 4; i++) {
        startPos[i].x = cols / 2;
        endXRight[i].x = DrawingLines(matImg, endXRight[i], true); // error here
        endxLeft[i].x = DrawingLines(matImg, endxLeft[i], false);
    }
    if (m_debug) {
        line(matImg, centerPoint, centerPointEnd, cvScalar(0, 0, 255), 2, 8);
        for (i = 0; i < 4; i++) {
            line(matImg, startPos[i], endXRight[i], cvScalar(0, 0, 255), 2, 8);
            line(matImg, startPos[i], endXLeft[i], cvScalar(0, 0, 255), 2, 8);
        }
Error code:
/home/nicho/2015-mini-smart-vehicles/project-template/sources/OpenDaVINCI-msv/apps/lanedetector/src/LaneDetector.cpp:176:25: error: cannot convert ‘CvPoint’ to ‘int’ in assignment
endXRight[i].x = DrawingLines(matImg,endXRight[i],true);
/home/nicho/2015-mini-smart-vehicles/project-template/sources/OpenDaVINCI-msv/apps/lanedetector/src/LaneDetector.cpp:177:24: error: cannot convert ‘CvPoint’ to ‘int’ in assignment
endxLeft[i].x = DrawingLines(matImg,endxLeft[i],false);
The error couldn't be much clearer. The function returns a value of type CvPoint, and you try to assign it to a variable of type int. That can't be done because you can't convert CvPoint to int.
It looks like you want to assign to the point itself, not one of its co-ordinates:
endXRight[i] = DrawingLines(matImg,endXRight[i],true);
^ remove .x
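Note also that endXRight[i] and endxLeft[i] are passed to DrawingLines before they are ever initialized (only startPos[i].x is set). A corrected loop might look like this (a sketch; the scan rows chosen here are arbitrary):
for (int i = 0; i < 4; i++) {
    startPos[i] = cvPoint(cols / 2, rows / 2 + i * 10); // give each point a defined x AND y
    endXRight[i] = DrawingLines(matImg, startPos[i], true);
    endxLeft[i] = DrawingLines(matImg, startPos[i], false);
}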

Changing the perspective using OpenCV

I'm working on a project in which I use a chessboard. The problem I'm facing is that when I recognize the board, I want to crop the part of the frame that contains it and put it "straight". For that I'm using the cv::warpPerspective function. Below is my code and the result that I get:
int main()
{
    cv::Size board(6, 4);
    cv::Mat src, result, quad, transformationMatrix;
    std::vector<cv::Point2f> imageCorners;
    std::vector<cv::Point2f> top, bot;
    std::vector<cv::Point2f> not_a_rect_shape;
    cv::VideoCapture cap(0);
    char fileName[20] = "MYROI";
    int index = 0;
    int key = 0;
    cap >> src;
    while (key != 27)
    {
        cap >> src;
        if (cv::findChessboardCorners(src, board, imageCorners, CV_CALIB_CB_FILTER_QUADS))
        {
            // bounding box of all detected corners
            int xMin = imageCorners.at(0).x, xMax = imageCorners.at(0).x;
            int yMin = imageCorners.at(0).y, yMax = imageCorners.at(0).y;
            for (int i = (imageCorners.size() - 1); i > 0; i--)
            {
                if (xMin > imageCorners.at(i - 1).x)
                    xMin = imageCorners.at(i - 1).x;
                if (xMax < imageCorners.at(i - 1).x)
                    xMax = imageCorners.at(i - 1).x;
                if (yMin > imageCorners.at(i - 1).y)
                    yMin = imageCorners.at(i - 1).y;
                if (yMax < imageCorners.at(i - 1).y)
                    yMax = imageCorners.at(i - 1).y;
            }
            cv::Rect myroi(xMin - 5, yMin - 5, (xMax - xMin) + 5, (yMax - yMin) + 5);
            if (myroi.area() > 0)
            {
                cv::imshow("ROI", src(myroi));
                result = src(myroi);
                // the four outer chessboard corners
                not_a_rect_shape.clear();
                not_a_rect_shape.push_back(imageCorners[0]);
                not_a_rect_shape.push_back(imageCorners[board.height - 1]);
                not_a_rect_shape.push_back(imageCorners[board.area() - board.height - 1]);
                not_a_rect_shape.push_back(imageCorners[board.area() - 1]);
                std::vector<cv::Point2f> approx;
                cv::approxPolyDP(cv::Mat(not_a_rect_shape), approx, cv::arcLength(cv::Mat(not_a_rect_shape), true) * 0.02, true);
                if (approx.size() != 4)
                {
                    std::cout << " Not quadrilateral!" << std::endl;
                    not_a_rect_shape.clear();
                    approx.clear();
                    not_a_rect_shape.push_back(imageCorners[0]);
                    not_a_rect_shape.push_back(imageCorners[board.width - 1]);
                    not_a_rect_shape.push_back(imageCorners[board.area() - board.width - 1]);
                    not_a_rect_shape.push_back(imageCorners[board.area() - 1]);
                    cv::approxPolyDP(cv::Mat(not_a_rect_shape), approx, cv::arcLength(cv::Mat(not_a_rect_shape), true) * 0.02, true);
                }
                // center
                cv::Point2f center(0, 0);
                for (int i = 0; i < not_a_rect_shape.size(); i++)
                    center += not_a_rect_shape[i];
                center *= (1. / not_a_rect_shape.size()); // the center position
                top.clear();
                bot.clear();
                // ordering the 4 points
                for (int i = 0; i < not_a_rect_shape.size(); i++)
                {
                    if (not_a_rect_shape[i].y < center.y)
                        top.push_back(not_a_rect_shape[i]);
                    else
                        bot.push_back(not_a_rect_shape[i]);
                }
                std::cout << center << std::endl;
                if (top.size() == 2 && bot.size() == 2)
                {
                    cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
                    cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
                    cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
                    cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];
                    not_a_rect_shape.clear();
                    not_a_rect_shape.push_back(tl);
                    not_a_rect_shape.push_back(tr);
                    not_a_rect_shape.push_back(br);
                    not_a_rect_shape.push_back(bl);
                    // Define the destination image
                    quad = cv::Mat::zeros(300, 220, CV_8UC3);
                    //quad = cv::Mat::zeros(result.rows, result.cols, CV_8UC3);
                    // Corners of the destination image
                    std::vector<cv::Point2f> quad_pts;
                    quad_pts.push_back(cv::Point2f(0, 0));
                    quad_pts.push_back(cv::Point2f(quad.cols, 0));
                    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
                    quad_pts.push_back(cv::Point2f(0, quad.rows));
                    transformationMatrix = cv::getPerspectiveTransform(not_a_rect_shape, quad_pts);
                    cv::warpPerspective(src, quad, transformationMatrix, quad.size() /*perspectiveSize*/, 1);
                    cv::imshow("quadrilateral", quad);
                    cv::imwrite("result.jpg", result);
                    cv::imwrite("quadrilateral.jpg", quad);
                }
            }
        }
        cv::imshow("src", src);
        key = cv::waitKey(10);
    }
    return 0;
}
This is an example of a ROI that I get:
And this is how it looks after changing the perspective:
And let's say this is what I expect (the exact size doesn't matter):
Any idea how I can solve this?
I use the following code snippet for such problems:
...
// Create a column vector with the coordinates of each point (on the field plane)
cv::Mat xField;
xField.create(4, 1, CV_32FC2);
xField.at<Point2f>(0) = Pts[0];
xField.at<Point2f>(1) = Pts[1];
xField.at<Point2f>(2) = Pts[2];
xField.at<Point2f>(3) = Pts[3];

// Same thing for xImage, but with the pixel coordinates instead of the field coordinates, in the same order as in xField
cv::Mat xImage;
xImage.create(4, 1, CV_32FC2);
xImage.at<Point2f>(0) = cv::Point2f(0, 0);
xImage.at<Point2f>(1) = cv::Point2f(400, 0);
xImage.at<Point2f>(2) = cv::Point2f(400, 600);
xImage.at<Point2f>(3) = cv::Point2f(0, 600);

// Compute the homography matrix
cv::Mat H = cv::findHomography(xField, xImage);
xField.release();
xImage.release();

Mat warped;
warpPerspective(frame, warped, H, Size(400, 600));
H.release();
...
This code takes the image inside the polygon xField and maps it onto xImage (here a rectangle of 0,0,400,600).
Your mistake is here. Change this:
not_a_rect_shape.push_back(tl);
not_a_rect_shape.push_back(tr);
not_a_rect_shape.push_back(br);
not_a_rect_shape.push_back(bl);
to this
not_a_rect_shape.push_back(imageCorners[0]);
not_a_rect_shape.push_back(imageCorners[board.area()-board.width]);
not_a_rect_shape.push_back(imageCorners[board.area()-1]);
not_a_rect_shape.push_back(imageCorners[board.width-1]);

Finding HSV Thresholds Via Histograms with OpenCV

I'm trying to write a method that will find the proper threshold values in HSV space for an object placed at the center of the screen. These values are used for an object tracking algorithm. I've tested that piece of code with hand-coded threshold values and it works well. The idea behind the method is that it should calculate the histograms for each of the channels and then return the 5th and 95th percentiles for each, to be used as the threshold values (credit: How to find RGB/HSV color parameters for color tracking?). The image being passed is a picture of the object to be tracked (which is set by the user before the whole process begins). Here is the code:
std::vector<cv::Scalar> HSV_Threshold_Determiner::Get_Threshold_Values(const cv::Mat& image)
{
    cv::Mat inputImage;
    cv::cvtColor(image, inputImage, CV_BGR2HSV);
    std::vector<cv::Mat> bgrPlanes; // actually holds the H, S, V planes after the split
    cv::split(inputImage, bgrPlanes);
    cv::Mat hHist, sHist, vHist;
    int hMax = 180, svMax = 256;
    float hRanges[] = { 0, (float)hMax };
    const float* hRange = { hRanges };
    float svRanges[] = { 0, (float)svMax };
    const float* svRange = { svRanges };
    //float sRanges[] = { 0, 256 };
    cv::calcHist(&bgrPlanes[0], 1, 0, cv::Mat(), hHist, 1, &hMax, &hRange);
    cv::calcHist(&bgrPlanes[1], 1, 0, cv::Mat(), sHist, 1, &svMax, &svRange);
    cv::calcHist(&bgrPlanes[2], 1, 0, cv::Mat(), vHist, 1, &svMax, &svRange);
    int totalEntries = image.cols * image.rows;
    int fiveCutoff = (int)(totalEntries * .05);
    int ninetyFiveCutoff = (int)(totalEntries * .95);
    float hTotal = 0, sTotal = 0, vTotal = 0;
    bool hMinFound = false, hMaxFound = false, sMinFound = false, sMaxFound = false,
         vMinFound = false, vMaxFound = false;
    cv::Scalar hThresholds;
    cv::Scalar sThresholds;
    cv::Scalar vThresholds;
    for (int i = 0; i < vHist.rows; ++i)
    {
        if (i < hHist.rows)
        {
            hTotal += hHist.at<float>(i, 0);
            if (hTotal >= fiveCutoff && !hMinFound)
            {
                hThresholds.val[0] = i;
                hMinFound = true;
            }
            else if (hTotal >= ninetyFiveCutoff && !hMaxFound)
            {
                hThresholds.val[1] = i;
                hMaxFound = true;
            }
        }
        sTotal += sHist.at<float>(i, 0);
        vTotal += vHist.at<float>(i, 0);
        if (sTotal >= fiveCutoff && !sMinFound)
        {
            sThresholds.val[0] = i;
            sMinFound = true;
        }
        else if (sTotal >= ninetyFiveCutoff && !sMaxFound)
        {
            sThresholds.val[1] = i;
            sMaxFound = true;
        }
        if (vTotal >= fiveCutoff && !vMinFound)
        {
            vThresholds.val[0] = i;
            vMinFound = true;
        }
        else if (vTotal >= ninetyFiveCutoff && !vMaxFound)
        {
            vThresholds.val[1] = i;
            vMaxFound = true;
        }
        if (vMaxFound && sMaxFound && hMaxFound)
        {
            break;
        }
    }
    std::vector<cv::Scalar> returnVect;
    returnVect.push_back(hThresholds);
    returnVect.push_back(sThresholds);
    returnVect.push_back(vThresholds);
    return returnVect;
}
What I am trying to do is sum up the number of entries in each bucket until I get to a number that is greater than or equal to five percent and ninety-five percent of the total. Unfortunately the numbers I get are never close to the ones I get if I do the thresholding by hand.
Mat img = ... // from camera or some other source

// STEP 1: learning phase
Mat hsv, imgThreshed, processed, denoised;
cv::GaussianBlur(img, denoised, cv::Size(5, 5), 2, 2); // remove noise
cv::cvtColor(denoised, hsv, CV_BGR2HSV);

// let's say we manually picked a 100x100 px region with the color/object of interest using the mouse
// (note: cv::Mat ranges take rows first, i.e. y before x)
cv::Mat roi = hsv(cv::Range(mousey - 50, mousey + 50), cv::Range(mousex - 50, mousex + 50));

// must split all channels to get Hue only
std::vector<cv::Mat> hsvPlanes;
cv::split(roi, hsvPlanes);

// compute statistics for the Hue value
cv::Scalar mean, stddev;
cv::meanStdDev(hsvPlanes[0], mean, stddev);

// take mean +/- 3*sigma to cover nearly all valid Hue samples (~99.7% by the 3-sigma rule)
float minHue = mean[0] - stddev[0] * 3;
float maxHue = mean[0] + stddev[0] * 3;

// STEP 2: detection phase
// (for detection you would typically split the full-frame 'hsv', not just the learning ROI)
cv::inRange(hsvPlanes[0], cv::Scalar(minHue), cv::Scalar(maxHue), imgThreshed);
imshow("thresholded", imgThreshed);

// erode to minimize noise, then dilate to grow the remaining regions
// (the original cv_erode/cv_dilate were custom helpers; plain cv::erode/cv::dilate
// with an iteration count are used here instead)
cv::erode(imgThreshed, processed, cv::Mat(), cv::Point(-1, -1), 5);
cv::dilate(processed, processed, cv::Mat(), cv::Point(-1, -1), 20);
imshow("final", processed);

// STEP 3: do some blob/contour detection on the processed image & find the maximum blob/region, etc ...
A much simpler solution: just calculate the mean and standard deviation for a region of interest, using the Hue value.
Since Hue is the most stable component in the image, the other components, saturation and value, should be discarded as they vary too much. However, you can still compute the mean for them if needed.
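If you do want ranges for all three channels with this statistical approach, the same two calls extend naturally. A sketch (the 2-sigma width is an assumption, chosen because mean +/- 2*sigma roughly matches the 5th/95th percentiles of a normal distribution, which is what the original code was after):
cv::Scalar mean, stddev;
cv::meanStdDev(hsvRoi, mean, stddev); // hsvRoi: HSV patch of the object (assumed to exist)
cv::Scalar lo(mean[0] - 2 * stddev[0], mean[1] - 2 * stddev[1], mean[2] - 2 * stddev[2]);
cv::Scalar hi(mean[0] + 2 * stddev[0], mean[1] + 2 * stddev[1], mean[2] + 2 * stddev[2]);
cv::Mat mask;
cv::inRange(hsvImage, lo, hi, mask); // hsvImage: the full frame converted to HSV (assumed)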