I'm using a C++ OpenCV program for first-principles algorithm development for HDL (Verilog) image object detection. I've finally managed to get the HDL version up to the point of Canny detection. In order to validate the two, both need to have identical output. I have found there are subtle differences that I think are being contributed by the OpenCV imread colour-to-grayscale conversion biasing green. The smoothed image is overall brighter in the OpenCV C++ method. From looking at the rgb2gray method, it appears OpenCV uses weighted coefficients, i.e. R*X + G*Y + B*Z, while in HDL I have been using (R + G + B)/3, as I require it to complete the Gaussian, Sobel and Canny filters. Human visualisation is secondary, and multiplication by a non-integer is undesirable.
Is there a standard linear grayscale conversion, or a means to override the existing method?
...
int main()
{
int thold = 15;
clock_t start;
double duration;
const int sobelX[3][3] = { {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1} }; // Were originally floats in Python
const int sobelY[3][3] = { {-1, -2, -1}, {0, 0, 0}, {1, 2, 1} }; // Were originally floats in Python
const int kernel[5][5] = { {1,6,12,6,1},
{6,42,79,42,6},
{12,79,148,79,12},
{6,42,79,42,6},
{1,6,12,6,1} };// 1/732
// Above: normalised kernel for smoothing, see original Python script for method
start = std::clock();
int height, width, intPixel, tSx, tSy, tS, dirE, dirEE, maxDir, curPoint, contDirection, cannyImgPix, nd, tl, tm, tr, mr, br, bm, bl, ml = 0;
int contNum = 128;
int contPixCount = 0;
int curContNum = 0;
int contPlace = 0;
int oldContPlace = 0;
int g = 0;
bool maxPoint;
struct pixel {
int number;
int h;
int w;
};
std::vector<pixel> contourList;
//double floatPixel = 0.0;
int kernalCumulator = 0;
const int mp = 3;
// Scalar color(0, 0, 255);
// duration = ((clock()) - start) / (double)CLOCKS_PER_SEC;
// start = clock();
// cout << "Start image in" << duration << '\n';
// Mat dst;
Mat rawImg = imread("C:\\Users\\&&&\\Documents\\pycode\\paddedGS.png",0);
printf("%d",rawImg.type());
// Mat rawImg = imread("C:\\Users\\&&&\\Documents\\openCV_Master\\openCVexample\\openCVexample\\brace200.jpg ", 0);
height = rawImg.rows;
width = rawImg.cols;
cout << "Height of image " << height << '\n';
cout << "Width of image " << width << '\n';
Mat filteredImg = Mat::zeros(height, width, CV_8U);
printf("%d", filteredImg.type());
Mat sobelImg = Mat::zeros(height, width, CV_8U);
Mat directionImg = Mat::zeros(height, width, CV_8U);
Mat cannyImg = Mat::zeros(height, width, CV_8U);
Mat contourImg = Mat::zeros(height, width, CV_16U);
// rawImg.convertTo(rawImg, CV_8UC1);
duration = ((clock()) - start) / (double)CLOCKS_PER_SEC;
start = clock();
cout << "Start image in" << duration << '\n';
// Loop to threshold already grayscaled image
/*
for (int h = 0; h < (height); h++)
{
for (int w = 0; w < (width); w++)
{
g = (int)rawImg.at<uchar>(h, w,0);
cout << g << "g";
g+= (int)rawImg.at<uchar>(h, w, 1);
cout << g << "g";
g+= (int)rawImg.at<uchar>(h, w, 2);
cout << g << "g";
g = g/3;
rawGImg.at<uchar>(h,w) = g;
}
}
*/
// imshow("thresholded Image", rawImg);
// waitKey();
// Loop to smooth using Gaussian 5 x 5 kernel
// imshow("raw Image", rawImg);
for (int h = 3; h < (height - 3); h++)
{
for (int w = 3; w < (width - 3); w++)
{
if (rawImg.at<uchar>(h, w) >=6 )//Thresholding included
{
for (int xk = 0; xk < 5; xk++)
{
for (int yk = 0; yk < 5; yk++)
{
intPixel = rawImg.at<uchar>((h + (xk - mp)), (w + (yk - mp)));
kernalCumulator += intPixel*(kernel[xk][yk]);//Mutiplier required as rounding is making number go above 255, better solution?
}
}
}
else
kernalCumulator = 0;
kernalCumulator = kernalCumulator / 732;
if (kernalCumulator < 0 || kernalCumulator > 255)
{
// cout << "kernal Value: " << kernalCumulator;
// cout << " intPixel:" << intPixel << '\n';
}
filteredImg.at<uchar>(h, w) = (uchar)kernalCumulator;
kernalCumulator = 0;
}
}
Our vision does not perceive brightness linearly, so for typical applications it makes sense to use some sort of transformation that tries to mimic human perception.
For your application, you have 2 options: either use a similar transformation in HDL (which might not be easy or desired), or make a custom RGB-to-grayscale conversion for OpenCV which uses the same transformation you use.
A short snippet (more like pseudocode, you'll have to figure out the details) for this would be something like:
cv::Mat linearRgbToGray(const cv::Mat &color) {
cv::Mat gray(color.size(), CV_8UC1);
for (int i = 0; i < color.rows; i++)
for (int j = 0; j < color.cols; j++)
gray.at(i, j) = (color.at(i, j)[0] + color.at(i, j)[1] + color.at(i, j)[2]) / 3;
}
As per Paul92's advice above:
cv::Mat linearRgbToGray(const cv::Mat &color) {
cv::Mat gray(color.size(), CV_8UC1);
for (int i = 0; i < color.rows; i++)
for (int j = 0; j < color.cols; j++)
gray.at<uchar>(i, j) = ((color.at<cv::Vec3b>(i, j)[0] + color.at<cv::Vec3b>(i, j)[1] + color.at<cv::Vec3b>(i, j)[2]) / 3);
return gray;
}
The above code worked and overcame out of bounds errors I experienced earlier. Thank you, Rob.
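One usage note (a minimal sketch; "colourSource.png" below is just a placeholder for your original colour file): the custom conversion has to be fed an image loaded with IMREAD_COLOR, because imread(path, 0) already applies OpenCV's weighted colour-to-grayscale conversion before your code ever sees the pixels.
// Hedged usage sketch: read the colour source and do the unweighted average
// yourself instead of letting imread(path, 0) apply the weighted conversion.
cv::Mat color = cv::imread("colourSource.png", cv::IMREAD_COLOR); // placeholder path
cv::Mat rawImg = linearRgbToGray(color); // unweighted integer (B+G+R)/3, matching the HDL path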
I use a panorama stitching algorithm from OpenCV in order to stitch 2 or 3 images into one new result image.
I have the coordinates of points in each source image. I need to calculate their new coordinates in the result image.
I describe the algorithm below. My code is similar to the sample "stitching_detailed" from OpenCV (branch 3.4). A result_mask of type Mat is produced; maybe it is the solution? But I don't know how to use it. I found a related question here, but not on stitching.
Any idea?
Here is the algorithm (for detailed code: stitching_detailed.cpp):
Find features for each image:
Ptr<FeaturesFinder> finder = makePtr<SurfFeaturesFinder>()
vector<ImageFeatures> features(num_images);
for (int i = 0; i < num_images; ++i)
{
(*finder)(images[i], features[i]);
}
Make pairwise_matches:
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
Reorder the images:
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
// here some code to reorder 'images'
Estimate a homography in cameras:
vector<CameraParams> cameras;
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
(*estimator)(features, pairwise_matches, cameras);
Convert to CV_32F:
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
}
Execute a BundleAdjuster:
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
adjuster->setRefinementMask(refine_mask);
(*adjuster)(features, pairwise_matches, cameras);
Compute a value for warped_image_scale:
for (int i = 0; i < cameras.size(); ++i)
focals.push_back(cameras[i].focal);
float warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
Do wave correction:
vector<Mat> rmats;
for (size_t i = 0; i < cameras.size(); ++i)
rmats.push_back(cameras[i].R.clone());
waveCorrect(rmats, wave_correct);
for (size_t i = 0; i < cameras.size(); ++i)
cameras[i].R = rmats[i];
Create a warper:
Ptr<WarperCreator> warper_creator = makePtr<cv::SphericalWarper>();
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
Create a blender and feed it:
Ptr<Blender> blender;
for (size_t i = 0; i < cameras.size(); ++i)
{
full_img = input_imgs[img_idx];
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_scale = /* … */
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
// Warp the current image
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
// Compensate exposure
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
MultiBandBlender *mb = dynamic_cast<MultiBandBlender *>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
blender->prepare(corners, sizes);
}
// Blend the current image
blender->feed(img_warped_s, mask_warped, corners[i]);
}
Then, use the blender:
Mat result, result_mask;
blender->blend(result, result_mask);
// The result image is in 'result'
When I was a schoolboy, I found opencv/samples/cpp/stitching_detailed.cpp in the OpenCV samples folder. At that time, my programming skills were very poor; I couldn't understand it even though I racked my brains. This question attracted my attention and brought back that memory. After a whole night of hard work and debugging, I finally got it.
Basic steps:
Given the three images: blue.png, green.png, and red.png
We can get the stitching result (result.png) using stitching_detailed.cpp.
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
I chose the centers of the three images, calculated the corresponding (warped) coordinates on the stitched image, and drew them as solid circles, as follows:
Warping images (auxiliary)...
Compensating exposure...
Blending ...
Warp each center point, and draw solid circle.
[408, 204] => [532, 224]
[408, 204] => [359, 301]
[408, 204] => [727, 320]
Check `result.png`, `result_mask.png` and `result2.png`!
Done!
This is the function calcWarpedPoint I wrote to calculate the warped point on the stitching image:
cv::Point2f calcWarpedPoint(
const cv::Point2f& pt,
InputArray K, // Camera K parameter
InputArray R, // Camera R parameter
Ptr<RotationWarper> warper, // The Rotation Warper
const std::vector<cv::Point> &corners,
const std::vector<cv::Size> &sizes)
{
// Calculate the warped point using the camera parameters.
cv::Point2f dst = warper->warpPoint(pt, K, R);
// Calculate the stitching image roi using corners and sizes.
// the corners and sizes have already been calculated.
cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
// Finally adjust the warped point to the stitching image.
return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
This is an example code snippet:
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
img = cv::imread(img_names[idx]);
Mat K;
cameras[idx].K().convertTo(K, CV_32F);
Mat R = cameras[idx].R;
cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
The full code:
/*
* Author : Kinght-金(https://stackoverflow.com/users/3547485/)
* Created : 2019/03/01 23:00 (CST)
* Finished : 2019/03/01 07:50 (CST)
*
* Modified on opencv401/samples/cpp/stitching_detailed.cpp
* From https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
*
*
* Description: A simple opencv(4.0.1) image stitching code for Stack Overflow answers.
* For https://stackoverflow.com/questions/54904718/compute-coordinates-from-source-images-after-stitching/54953792#comment96681412_54953792
*
*/
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
using namespace std;
using namespace cv;
using namespace cv::detail;
//! img_names are the input image (full) paths
// You can download them using the links below.
//! Blue: https://i.stack.imgur.com/Yz3U1.png
//! Green: https://i.stack.imgur.com/AbUTH.png
//! Red: https://i.stack.imgur.com/9wcGc.png
vector<String> img_names = {"D:/stitching/blue.png", "D:/stitching/green.png", "D:/stitching/red.png"};
//! The function to calculate the warped point on the stitching image.
cv::Point2f calcWarpedPoint(
const cv::Point2f& pt,
InputArray K, // Camera K parameter
InputArray R, // Camera R parameter
Ptr<RotationWarper> warper, // The Rotation Warper
const std::vector<cv::Point> &corners,
const std::vector<cv::Size> &sizes)
{
// Calculate the warped point
cv::Point2f dst = warper->warpPoint(pt, K, R);
// Calculate the stitching image roi using corners and sizes,
// the corners and sizes have already been calculated.
cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
// Finally adjust the warped point
return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
int main(int argc, char* argv[])
{
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
float match_conf = 0.3f;
float blend_strength = 5;
// Check if have enough images
int num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
double work_scale = 1, seam_scale = 1, compose_scale = 1;
bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
//(1) Create the feature finder
Ptr<Feature2D> finder = ORB::create();
// (2) Read the images, scale them appropriately, and compute the image feature descriptors
Mat full_img, img;
vector<ImageFeatures> features(num_images);
vector<Mat> images(num_images);
vector<Size> full_img_sizes(num_images);
double seam_work_aspect = 1;
for (int i = 0; i < num_images; ++i)
{
full_img = imread(img_names[i]);
full_img_sizes[i] = full_img.size();
if (full_img.empty())
{
cout << "Can't open image " << img_names[i] << std::endl;
return -1;
}
if (!is_work_scale_set)
{
work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
is_work_scale_set = true;
}
resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
if (!is_seam_scale_set)
{
seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
seam_work_aspect = seam_scale / work_scale;
is_seam_scale_set = true;
}
computeImageFeatures(finder, img, features[i]);
features[i].img_idx = i;
std::cout << "Features in image #" << i + 1 << ": " << features[i].keypoints.size() << std::endl;
resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
images[i] = img.clone();
}
full_img.release();
img.release();
// (3) Create the image feature matcher and compute the match information
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
matcher->collectGarbage();
//! (4) Remove outliers and keep only the most confident large component
// Leave only images we are sure are from the same panorama
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
vector<Mat> img_subset;
vector<String> img_names_subset;
vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices.size(); ++i)
{
img_names_subset.push_back(img_names[indices[i]]);
img_subset.push_back(images[indices[i]]);
full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
}
images = img_subset;
img_names = img_names_subset;
full_img_sizes = full_img_sizes_subset;
// Check if we still have enough images
num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
//!(5) Estimate the homography
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
vector<CameraParams> cameras;
if (!(*estimator)(features, pairwise_matches, cameras))
{
cout << "Homography estimation failed.\n";
return -1;
}
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
std::cout << "\nInitial camera intrinsics #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R << std::endl;
}
//(6) Create the bundle adjuster
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
refine_mask(0, 0) = 1;
refine_mask(0, 1) = 1;
refine_mask(0, 2) = 1;
refine_mask(1, 1) = 1;
refine_mask(1, 2) = 1;
adjuster->setRefinementMask(refine_mask);
if (!(*adjuster)(features, pairwise_matches, cameras))
{
cout << "Camera parameters adjusting failed.\n";
return -1;
}
// Find median focal length
vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
{
focals.push_back(cameras[i].focal);
}
sort(focals.begin(), focals.end());
float warped_image_scale;
if (focals.size() % 2 == 1)
warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
else
warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
std::cout << "\nWarping images (auxiliary)... \n";
vector<Point> corners(num_images);
vector<UMat> masks_warped(num_images);
vector<UMat> images_warped(num_images);
vector<Size> sizes(num_images);
vector<UMat> masks(num_images);
// Prepare image masks
for (int i = 0; i < num_images; ++i)
{
masks[i].create(images[i].size(), CV_8U);
masks[i].setTo(Scalar::all(255));
}
// Warp images and their masks
Ptr<WarperCreator> warper_creator = makePtr<cv::CylindricalWarper>();
if (!warper_creator)
{
cout << "Can't create the warper \n";
return 1;
}
//! Create RotationWarper
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
//! Calculate warped corners/sizes/mask
for (int i = 0; i < num_images; ++i)
{
Mat_<float> K;
cameras[i].K().convertTo(K, CV_32F);
float swa = (float)seam_work_aspect;
K(0, 0) *= swa; K(0, 2) *= swa;
K(1, 1) *= swa; K(1, 2) *= swa;
corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
sizes[i] = images_warped[i].size();
warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
vector<UMat> images_warped_f(num_images);
for (int i = 0; i < num_images; ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);
std::cout << "Compensating exposure... \n";
//! Compute the exposure, compensate the image exposure, and reduce brightness differences
Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(ExposureCompensator::GAIN_BLOCKS);
if (dynamic_cast<BlocksCompensator*>(compensator.get()))
{
BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
bcompensator->setNrFeeds(1);
bcompensator->setNrGainsFilteringIterations(2);
bcompensator->setBlockSize(32, 32);
}
compensator->feed(corners, images_warped, masks_warped);
Ptr<SeamFinder> seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
seam_finder->find(images_warped_f, corners, masks_warped);
// Release unused memory
images.clear();
images_warped.clear();
images_warped_f.clear();
masks.clear();
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
Ptr<Blender> blender;
double compose_work_aspect = 1;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
// Read image and resize it if necessary
full_img = imread(img_names[img_idx]);
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_work_aspect = compose_scale / work_scale;
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
warper = warper_creator->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_images; ++i)
{
cameras[i].focal *= compose_work_aspect;
cameras[i].ppx *= compose_work_aspect;
cameras[i].ppy *= compose_work_aspect;
Size sz = full_img_sizes[i];
if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes[i].width * compose_scale);
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
Mat K;
cameras[i].K().convertTo(K, CV_32F);
Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
full_img.release();
Size img_size = img.size();
Mat K, R;
cameras[img_idx].K().convertTo(K, CV_32F);
R = cameras[img_idx].R;
// Warp the current image : img => img_warped
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
img_warped.convertTo(img_warped_s, CV_16S);
img_warped.release();
img.release();
mask.release();
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(Blender::MULTI_BAND, false);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
if (blend_width < 1.f){
blender = Blender::createDefault(Blender::NO, false);
}
else
{
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
}
blender->prepare(corners, sizes);
}
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
}
/* ===========================================================================*/
// Blend image
std::cout << "\nBlending ...\n";
Mat result, result_mask;
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
img = cv::imread(img_names[idx]);
Mat K;
cameras[idx].K().convertTo(K, CV_32F);
Mat R = cameras[idx].R;
cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
std::cout << "\nDone!\n";
/* ===========================================================================*/
return 0;
}
Some links that may be useful:
stitching_detailed.cpp : https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
warper->warp(), warpPoint(), warpRoi(): https://github.com/opencv/opencv/blob/master/modules/stitching/src/warpers.cpp#L153
resultRoi() https://github.com/opencv/opencv/blob/master/modules/stitching/src/util.cpp#L116
Other links that may be interesting:
Converting opencv remap code from c++ to python
Split text lines in scanned document
How do I use the relationships between Flann matches to determine a sensible homography?
I'm working on a detector for half bodies, in order to improve the performance of a normal people detector. I know there are more ways to deal with occlusion, but this is what I was asked to do in my end-of-degree project. My problem is that I'm not getting good performance; moreover, I'm getting a kind of pattern in which 4 rectangles that represent the detections show up in almost the same position, not even representing a half body.
I have a set of 414 images of top-half bodies cropped by myself, used as positive samples, and 8520 negative images, all of them sized 64x64. I extracted the HOG descriptors as follows:
int i;
string imgname, index;
HOGDescriptor hog (Size(64,64), Size(16,16), Size(8,8), Size(8,8), 9, 1, -1, HOGDescriptor::L2Hys, 0.2,false, HOGDescriptor::DEFAULT_NLEVELS, false);
vector<float> pos_rec_descript;
vector<Point> locations;
size_t SizeDesc;
SizeDesc = hog.getDescriptorSize();
FileStorage fpd ("Pos_Descriptors.yml", FileStorage::WRITE);
for (i = 1; i < 415; i++) { // 2416 images in ./pos_rec
stringstream a;
a << i;
imgname = "./pos_rec3/img" + a.str();
imgname += ".png";
Mat img = imread(imgname, CV_LOAD_IMAGE_COLOR);
hog.compute(img, pos_rec_descript, Size (16,16), Size (0,0),locations);
fpd << "Descriptores" + a.str() << pos_rec_descript;
}
fpd.release();
And I did the same with the negative samples.
Then, I trained an SVM as follows.
#define POS 414
#define NEG 8520
#define TOTAL 8934
#define DESCRIPT 1764
float trainingData[TOTAL][DESCRIPT];
int labels[TOTAL];
fstream doc;
void set_labels(){
int i;
for (i = 0; i < TOTAL; i++){
if (i < POS) {
labels[i] = 1;
}
else{
labels[i] = -1;
}
}
return;
}
int main(int, char**)
{
FileStorage fsv ("supvec.yml", FileStorage::WRITE);
FileStorage ftd ("TrainData.yml", FileStorage::WRITE);
//FileStorage flm ("Labels.yml", FileStorage::WRITE);
FileStorage fpd ("../HOG_descriptors/Pos_Descriptors.yml", FileStorage::READ);
FileStorage fnd ("../HOG_descriptors_neg/Neg_Descriptors.yml", FileStorage::READ);
set_labels();
// Set up training data
vector <float> pos_D, neg_D, train_D ;
int k = 0;
for (int i = 1; i < POS+1; i++) {
stringstream a;
a << i;
fpd["Descriptores" + a.str()] >> pos_D;
for (int j = 0; j < pos_D.size() ; j++){
train_D.push_back(pos_D[j]);
}
}
fpd.release();
for (int i = 1; i < NEG+1; i++) {
stringstream a;
a << i;
fnd["Descriptores" + a.str()] >> neg_D;
for (int j = 0; j < neg_D.size() ; j++){
train_D.push_back(neg_D[j]);
}
}
fnd.release();
for (int i = 0; i < TOTAL; i++){
for (int j = 0; j < DESCRIPT; j++){
trainingData[i][j] = train_D[k];
k++;
}
}
Mat trainingDataMat(TOTAL, DESCRIPT, CV_32FC1, trainingData);
//memcpy(trainingDataMat.data, train_D.data(), train_D.size()*sizeof(float));
Mat labelsMat(TOTAL, 1, CV_32SC1, labels);
//ftd << "trainingDataMat" << trainingDataMat;
//flm << "labelsMat" << labelsMat;
// Train the SVM
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
/*Ptr<TrainData> autoTrainData = TrainData::create(trainingDataMat, ROW_SAMPLE, labelsMat);
ParamGrid Cgrid = SVM::getDefaultGrid(SVM::C);
ParamGrid gammaGrid = SVM::getDefaultGrid(SVM::GAMMA);
ParamGrid pGrid = SVM::getDefaultGrid(SVM::P);
pGrid.logStep = 1;
ParamGrid nuGrid = SVM::getDefaultGrid(SVM::NU);
nuGrid.logStep = 1;
ParamGrid coeffGrid = SVM::getDefaultGrid(SVM::COEF);
coeffGrid.logStep = 1;
ParamGrid degreeGrid = SVM::getDefaultGrid(SVM::DEGREE);
degreeGrid.logStep = 1; */
cout << "Está entrenando..." << endl;
//svm->trainAuto(autoTrainData, 10, Cgrid, gammaGrid, pGrid, nuGrid, coeffGrid, degreeGrid, false);
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
svm->save("SVM3_WS16_P0_LINEAR.yml");
I've tried both LINEAR and RBF kernels (that's why you can see a commented-out autotrain part of the code that I used to swap between SVM types), but neither of them seems to work. Actually, they give nearly the same responses, which makes me think that maybe the training phase or the detection phase (code below) is ruining the whole project.
This is how I load the SVM for the HOG detector and try it on images:
using namespace cv;
using namespace std;
using namespace cv::ml;
// static void help()
// {
// printf(
// "\nDemonstrate the use of the HoG descriptor using\n"
// " HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n"
// "Usage:\n"
// "./peopledetect (<image_filename> | <image_list>.txt)\n\n");
// }
void get_svm_detector(const Ptr<SVM>& svm, vector< float > & hog_detector );
void get_svm_detector(const Ptr<SVM>& svm, vector< float > & hog_detector )
{
// get the support vectors
Mat sv = svm->getSupportVectors();
const int sv_total = sv.rows;
// get the decision function
Mat alpha, svidx;
double rho = svm->getDecisionFunction(0, alpha, svidx);
CV_Assert( alpha.total() == 1 && svidx.total() == 1 && sv_total == 1 );
CV_Assert( (alpha.type() == CV_64F && alpha.at<double>(0) == 1.) ||
(alpha.type() == CV_32F && alpha.at<float>(0) == 1.f) );
CV_Assert( sv.type() == CV_32F );
hog_detector.clear();
hog_detector.resize(sv.cols + 1);
memcpy(&hog_detector[0], sv.ptr(), sv.cols*sizeof(hog_detector[0]));
hog_detector[sv.cols] = (float)-rho;
}
int main(int argc, char** argv)
{
Mat img;
FILE* f = 0;
char _filename[1024];
if( argc == 1 )
{
printf("Usage: peopledetect (People_imgs | People_imgs.txt)\n");
return 0;
}
img = imread(argv[1]);
if( img.data )
{
strcpy(_filename, argv[1]);
}
else
{
f = fopen(argv[1], "rt");
if(!f)
{
fprintf( stderr, "ERROR: the specified file could not be loaded\n");
return -1;
}
}
// Load SVM
Ptr<SVM> svm = SVM::create();
svm = cv::Algorithm::load<ml::SVM>("../SVM_Train/SVM3_WS16_P0_LINEAR.yml");
HOGDescriptor hog (Size(64,64), Size(16,16), Size(8,8), Size(8,8), 9, 1, -1, HOGDescriptor::L2Hys, 0.2,false, HOGDescriptor::DEFAULT_NLEVELS, false);
vector <float> hog_detector;
get_svm_detector (svm, hog_detector);
hog.setSVMDetector(hog_detector);
namedWindow("people detector", 1);
for(;;)
{
char* filename = _filename;
if(f)
{
if(!fgets(filename, (int)sizeof(_filename)-2, f))
break;
//while(*filename && isspace(*filename))
// ++filename;
if(filename[0] == '#')
continue;
int l = (int)strlen(filename);
while(l > 0 && isspace(filename[l-1]))
--l;
filename[l] = '\0';
img = imread(filename);
}
printf("%s:\n", filename);
if(!img.data)
continue;
fflush(stdout);
vector<Rect> found, found_filtered, searchLocations;
vector<double> found_weights;
double t = (double)getTickCount();
// run the detector with default parameters. to get a higher hit-rate
// (and more false alarms, respectively), decrease the hitThreshold and
// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
hog.detectMultiScale(img, found, found_weights, 0, Size(16,16), Size(0,0), 1.01, 2);
//hog.detect(img, found, 0, Size(16,16), Size(0,0), searchLocations);
t = (double)getTickCount() - t;
printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
size_t i, j;
for( i = 0; i < found.size(); i++ )
{
Rect r = found[i];
for( j = 0; j < found.size(); j++ )
if( j != i && (r & found[j]) == r)
break;
if( j == found.size() )
found_filtered.push_back(r);
}
for( i = 0; i < found_filtered.size(); i++ )
{
Rect r = found_filtered[i];
// the HOG detector returns slightly larger rectangles than the real objects.
// so we slightly shrink the rectangles to get a nicer output.
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.7);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.7);
rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 2);
imshow("people detector", img);
waitKey(0);
}
//imshow("people detector", img);
//string imgname = "./Responses/Win_Stride16_4.png";
//imwrite(imgname, img);
int c = waitKey(0) & 255;
if( c == 'q' || c == 'Q' || !f)
break;
}
if(f)
fclose(f);
return 0;
}
I have checked all the dimensions of the descriptors, and every Mat seems to be OK. But when I use detectMultiScale, it shows things like this:
Image 1: It's strange because it is missing lots of detections
Image 2: Here I realized there was a kind of pattern with these 4 rects
My problem is that no matter what I change (descriptors, kernel, winStride and padding in detectMultiScale), there are always very similar responses, and nothing indicates that there is a correct detection there.
I'm not very sure about how I'm giving the support vectors to HOG, but it's the only way I found to do it (I found it in one of the posts on Stack Overflow).
If any of you has any idea of what is going on here, and why the responses are not changing from one configuration to another, I would be greatly thankful. This code has been giving me headaches for weeks now. I've been changing parameters in functions and in HOG, changing kernels, and trying different sets of images, but nothing seems to make a real difference in the final result.
I found the code on Stack Overflow and made some changes, but the result is strange: the mask is all white or all black. Now I want to know how to use GraphCut to find the seam line between two overlapping images. Thanks!
int main()
{
Mat image0=imread("F:\\1\\1.jpg");
Mat image1=imread("F:\\1\\2.jpg");
image0.convertTo(image0,CV_32F,1.0/255.0);
cv::imshow("image0",image0);
image1.convertTo(image1,CV_32F,1.0/255.0);
// our corners are just at (0,0)
cv::Point corner1;
corner1.x = 0;
corner1.y = 0;
cv::Point corner2;
corner2.x = 0;
corner2.y = 0;
std::vector<cv::Point> corners;
corners.push_back(corner1);
corners.push_back(corner2);
std::vector<cv::Mat> masks;
//Mat imageMask0;
//Mat imageMask1;
Mat imageMask0(image0.size(),CV_8U);
imageMask0(Rect(0,0,imageMask0.cols,imageMask0.rows)).setTo(255);
Mat imageMask1(image1.size(),CV_8U);
imageMask1(Rect(0,0,imageMask1.cols,imageMask1.rows)).setTo(255);
masks.push_back(imageMask0);
masks.push_back(imageMask1);
std::vector<cv::Mat> sources;
sources.push_back(image0);
sources.push_back(image1);
cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
seam_finder->find(sources, corners, masks);
cv::imshow("mask",masks[1]);
masks[0].convertTo(masks[0],CV_8UC3,255);
cv::imwrite("F:\\1\\998.jpg",masks[0]);
masks[1].convertTo(masks[1],CV_8UC3,255);
cv::imwrite("F:\\1\\999.jpg",masks[1]);
printf("%lu\n", masks.size());
//for(int i = 0; i < masks.size(); i++)
//{
// std::cout << "MASK = "<< std::endl << " " << masks.at(i) << std::endl << std::endl;
// }
cv::waitKey();
return 0;
}
So how should I fix the code to make it work?
After I do some image manipulation and apply a mask, I get what I want. I can clearly see in the imshow result of "crop" that there are gray pixels in the middle of the image.
I'm trying to get the location of the maximum pixel value. I've checked crop.channels(), which returns 1.
Mat mask = drawing2;
drawContours(mask, contours, -1, Scalar(255), CV_FILLED);
Mat dist;
distanceTransform( cannyInv, dist, CV_DIST_L2, 3 );
normalize(dist,dist,0.0,1.0,NORM_MINMAX);
Mat crop;
dist.copyTo(crop, mask);
cout << "max.. "<< *std::max_element(crop.begin<double>(),crop.end<double>()) <<endl;
which returns max.. 4.25593e-08
for(int y = 0; y < crop.rows; y++)
{
for(int x = 0; x < crop.cols; x++)
{
if (crop.at<unsigned char>(x,y) > 0){
cout << "X........"<<x<<" Y......"<<y<< " = "<<crop.at<unsigned char>(x,y) <<endl;
}
}
}
The output is:
X........604 Y......479 = ¿
X........607 Y......479 =
X........610 Y......479 = ¿
Help me please
PS: I know that there's a similar question, but this is a specific problem.
I'm not sure how I solved it; a lot of time has passed. But the code that I currently have, and which works, is this:
Mat dist=Mat::zeros(480,640, CV_8UC1);
distanceTransform( cannyInv, dist, CV_DIST_L2, 3 );
Mat distNorm;
dist.convertTo(distNorm, CV_8UC1,1,0);
Mat result= Mat::zeros(480,640, CV_8UC1);
distNorm.copyTo(result, mask);
Mat tmp=Mat::zeros(480,640, CV_8UC1);
Mat fik=Mat::zeros(480,640, CV_8UC3);
uchar maxVal = 0;        // running maximum value
int maxX = 0, maxY = 0;  // location of the running maximum
for(int i = 0; i < result.rows; i++)
{
for(int j = 0; j < result.cols; j++)
{
if ( result.at< uchar >( i,j ) > 0){
uchar val = result.at< uchar >( i,j );
if(val>maxVal){
if(val>0){
cv::circle(tmp,cvPoint(j,i),2,255,-1);
}
maxVal=val;
maxX = j;
maxY = i;
}
}
}
}
Are you sure that normalizing the Mat automatically converts it from uchar to double? normalize does not change the element type (distanceTransform gives you CV_32F data), so it's very likely you're reading the values with the wrong type and getting wrong numbers from it.
Try dist.convertTo(dist, CV_64F);
Print the numbers as doubles everywhere
OR work only with uchars.
Try this code:
cout << "X........"
<< x
<< " Y......"
<< y
<< " = "
<< (double) crop.at< unsigned char>(x,y) <<endl;
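Alternatively, a minimal sketch (assuming crop is the single-channel CV_32F distance image and mask is the CV_8U mask used with copyTo): cv::minMaxLoc returns both the maximum value and its location in one call, with no per-pixel loop:
// Hedged sketch: let OpenCV read the actual element type internally.
double minVal = 0.0, maxVal = 0.0;
cv::Point minLoc, maxLoc;
cv::minMaxLoc(crop, &minVal, &maxVal, &minLoc, &maxLoc, mask);
std::cout << "max " << maxVal << " at " << maxLoc << std::endl;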