NPPI resize function with 3 channels getting strange output - C++

I'm getting strange output when using the NPPI geometry transform functions from the NPP CUDA libraries. The code is here:
#include <nppi.h>
#include <nppi_geometry_transforms.h>
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
void write(const cv::Mat &mat1, const std::string &path) {
  auto mat2 = cv::Mat(mat1.rows, mat1.cols, CV_8UC4);
  for (int i = 0; i < mat1.rows; i++) {
    for (int j = 0; j < mat1.cols; j++) {
      auto &bgra = mat2.at<cv::Vec4b>(i, j);
      auto &rgb = mat1.at<cv::Vec3b>(i, j);
      bgra[0] = rgb[2];
      bgra[1] = rgb[1];
      bgra[2] = rgb[0];
      bgra[3] = UCHAR_MAX;
    }
  }
  std::vector<int> compression_params;
  compression_params.push_back(cv::IMWRITE_PNG_COMPRESSION);
  compression_params.push_back(9);
  cv::imwrite(path, mat2, compression_params);
}
int main() {
  std::cout << "Hello, World!" << std::endl;
  auto mat = cv::Mat(256, 256, CV_8UC3);
  for (int i = 0; i < mat.rows; i++) {
    for (int j = 0; j < mat.cols; j++) {
      auto &rgb = mat.at<cv::Vec3b>(i, j);
      rgb[0] = (uint8_t)j;
      rgb[1] = (uint8_t)i;
      rgb[2] = (uint8_t)(UCHAR_MAX - j);
    }
  }
  write(mat, "./test.png");
  uint8_t *gpuBuffer1;
  uint8_t *gpuBuffer2;
  cudaMalloc(&gpuBuffer1, mat.total());
  cudaMalloc(&gpuBuffer2, mat.total());
  cudaMemcpy(gpuBuffer1, mat.data, mat.total(), cudaMemcpyHostToDevice);
  auto status = nppiResize_8u_C3R(
      gpuBuffer1, mat.cols * 3, {.width = mat.cols, .height = mat.rows},
      {.x = 0, .y = 0, .width = mat.cols, .height = mat.rows}, gpuBuffer2,
      mat.cols * 3, {.width = mat.cols, .height = mat.rows},
      {.x = 0, .y = 0, .width = mat.cols, .height = mat.rows},
      NPPI_INTER_NN);
  if (status != NPP_SUCCESS) {
    std::cerr << "Error executing Resize -- code: " << status << std::endl;
  }
  auto mat2 = cv::Mat(mat.rows, mat.cols, CV_8UC3);
  cudaMemcpy(mat2.data, gpuBuffer2, mat.total(), cudaMemcpyDeviceToHost);
  write(mat2, "./test1.png");
}
Basically I generate a rainbow picture, write it to the GPU, resize it to the EXACT same size, copy it back to the host, and then display it again. What I'm getting is garbled data in about 2/3 of the returned picture.
The first picture is the input picture.
The second picture is the output picture.
I expect both pictures to be the same.
If I adjust the ROI with offsets and change the width and height for the destination buffer, the pixels in the top 1/3 of the resized picture actually move and resize correctly, but the rest of the picture is garbled. Not sure what's wrong. Does anyone with experience with the CUDA NPPI libraries, or image processing in general, have an idea what's going on?
The CMake file is included below for the convenience of anyone who wants to compile it. You have to have OpenCV and the CUDA Toolkit installed as C++ libraries:
cmake_minimum_required(VERSION 3.18)
project(test_nppi)
enable_language(CUDA)
set(CMAKE_CXX_STANDARD 17)
find_package(CUDAToolkit REQUIRED)
find_package(OpenCV)
message(STATUS ${CUDAToolkit_INCLUDE_DIRS})
add_executable(test_nppi main.cu)
target_link_libraries(test_nppi ${OpenCV_LIBS} CUDA::nppig)
target_include_directories(test_nppi PUBLIC ${OpenCV_INCLUDE_DIRS} ${CUDAToolkit_INCLUDE_DIRS})
set_target_properties(test_nppi PROPERTIES
CUDA_SEPARABLE_COMPILATION ON)
I've used the NPPI resize function for single-channel pictures before and I don't have this issue. The 3-channel NPPI resize function is producing weird output, and I'm thinking I'm not completely understanding the input parameters. The step is multiplied by 3 because of the 3 color channels, but all the other sizes measure dimensions in pixels; and the sizes of the source and destination are the same... not sure what I'm not understanding here.

The issue is that mat.total() equals the total number of pixels, and not the total number of bytes.
According to OpenCV documentation:
total () const
Returns the total number of array elements.
In your code sample, mat.total() equals 256*256, while the total number of bytes equals 256*256*3 (RGB uses 3 bytes per pixel).
(In OpenCV terminology, "array element" is equivalent to image pixel.)
cudaMemcpy(gpuBuffer1, mat.data, mat.total()... copies only 1/3 of the total image bytes, so only the upper 1/3 of the image data is valid.
According to this post, the correct way to compute the number of bytes is:
size_t mat_size_in_bytes = mat.step[0] * mat.rows;
In most cases for CV_8UC3, mat.step[0] equals mat.cols*3, but to cover all cases it is better to use mat.step[0].
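Equivalently, for a continuous Mat (which a freshly allocated cv::Mat is), the byte count can also be computed from the element size; a minimal sketch:
size_t bytes = mat.total() * mat.elemSize();  // pixels * bytes per pixel (3 for CV_8UC3)
// mat.isContinuous() tells you whether the rows are tightly packed;
// if they are not, fall back to mat.step[0] * mat.rows.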
Corrected code sample:
#include "nppi.h"
#include "nppi_geometry_transforms.h"
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>
void write(const cv::Mat& mat1, const std::string& path) {
  auto mat2 = cv::Mat(mat1.rows, mat1.cols, CV_8UC4);
  for (int i = 0; i < mat1.rows; i++) {
    for (int j = 0; j < mat1.cols; j++) {
      auto& bgra = mat2.at<cv::Vec4b>(i, j);
      auto& rgb = mat1.at<cv::Vec3b>(i, j);
      bgra[0] = rgb[2];
      bgra[1] = rgb[1];
      bgra[2] = rgb[0];
      bgra[3] = UCHAR_MAX;
    }
  }
  std::vector<int> compression_params;
  compression_params.push_back(cv::IMWRITE_PNG_COMPRESSION);
  compression_params.push_back(9);
  cv::imwrite(path, mat2, compression_params);
}
int main() {
  std::cout << "Hello, World!" << std::endl;
  auto mat = cv::Mat(256, 256, CV_8UC3);
  auto mat2 = cv::Mat(mat.rows, mat.cols, CV_8UC3);
  for (int i = 0; i < mat.rows; i++) {
    for (int j = 0; j < mat.cols; j++) {
      auto& rgb = mat.at<cv::Vec3b>(i, j);
      rgb[0] = (uint8_t)j;
      rgb[1] = (uint8_t)i;
      rgb[2] = (uint8_t)(UCHAR_MAX - j);
    }
  }
  write(mat, "./test.png");
  uint8_t* gpuBuffer1;
  uint8_t* gpuBuffer2;
  size_t mat_size_in_bytes = mat.step[0] * mat.rows; // https://stackoverflow.com/questions/26441072/finding-the-size-in-bytes-of-cvmat
  size_t mat2_size_in_bytes = mat2.step[0] * mat2.rows;
  cudaMalloc(&gpuBuffer1, mat_size_in_bytes);
  cudaMalloc(&gpuBuffer2, mat2_size_in_bytes);
  cudaMemcpy(gpuBuffer1, mat.data, mat_size_in_bytes, cudaMemcpyHostToDevice);
  NppiSize oSrcSize = { mat.cols, mat.rows };
  NppiRect oSrcRectROI = { 0, 0, mat.cols, mat.rows };
  NppiSize oDstSize = { mat2.cols, mat2.rows };
  NppiRect oDstRectROI = { 0, 0, mat2.cols, mat2.rows };
  auto status = nppiResize_8u_C3R(
      gpuBuffer1, mat.step[0], oSrcSize, oSrcRectROI,
      gpuBuffer2, mat2.step[0], oDstSize, oDstRectROI,
      NPPI_INTER_NN);
  if (status != NPP_SUCCESS) {
    std::cerr << "Error executing Resize -- code: " << status << std::endl;
  }
  cudaMemcpy(mat2.data, gpuBuffer2, mat2_size_in_bytes, cudaMemcpyDeviceToHost);
  write(mat2, "./test1.png");
}
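As a side note (not part of the original fix), it is also worth checking the return codes of the CUDA calls. A minimal sketch, where checkCuda is a helper name introduced here only for illustration:
// Hypothetical helper for checking CUDA return codes (needs <cstdlib> for std::exit).
inline void checkCuda(cudaError_t err, const char* what) {
    if (err != cudaSuccess) {
        std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
        std::exit(1);
    }
}
// Usage, for example:
// checkCuda(cudaMalloc(&gpuBuffer1, mat_size_in_bytes), "cudaMalloc");
// checkCuda(cudaMemcpy(gpuBuffer1, mat.data, mat_size_in_bytes, cudaMemcpyHostToDevice), "cudaMemcpy");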
Output:

Related

Compute coordinates from source images after stitching

I use a panorama stitching algorithm from OpenCV to stitch 2 or 3 images into one new result image.
I have the coordinates of points in each source image. I need to calculate the new coordinates of these points in the result image.
I describe the algorithm below. My code is similar to the "stitching_detailed" sample from OpenCV (branch 3.4). A result_mask of type Mat is produced; maybe it is the solution? But I don't know how to use it. I found a related question here, but not about stitching.
Any idea?
Here is the algorithm (for detailed code: stitching_detailed.cpp):
Find features for each image:
Ptr<FeaturesFinder> finder = makePtr<SurfFeaturesFinder>();
vector<ImageFeatures> features(num_images);
for (int i = 0; i < num_images; ++i)
{
(*finder)(images[i], features[i]);
}
Make pairwise_matches:
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
Reorder the images:
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
// here some code to reorder 'images'
Estimate an homography in cameras:
vector<CameraParams> cameras;
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
(*estimator)(features, pairwise_matches, cameras);
Convert to CV_32F:
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
}
Execute a BundleAdjuster:
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
adjuster->setRefinementMask(refine_mask);
(*adjuster)(features, pairwise_matches, cameras);
Compute a value for warped_image_scale:
for (int i = 0; i < cameras.size(); ++i)
focals.push_back(cameras[i].focal);
float warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
Do wave correction:
vector<Mat> rmats;
for (size_t i = 0; i < cameras.size(); ++i)
rmats.push_back(cameras[i].R.clone());
waveCorrect(rmats, wave_correct);
for (size_t i = 0; i < cameras.size(); ++i)
cameras[i].R = rmats[i];
Create a warper:
Ptr<WarperCreator> warper_creator = makePtr<cv::SphericalWarper>();
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
Create a blender and feed it:
Ptr<Blender> blender;
for (size_t i = 0; i < cameras.size(); ++i)
{
full_img = input_imgs[img_idx];
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_scale = /* … */
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
// Warp the current image
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
// Compensate exposure
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
MultiBandBlender *mb = dynamic_cast<MultiBandBlender *>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
blender->prepare(corners, sizes);
}
// Blend the current image
blender->feed(img_warped_s, mask_warped, corners[i]);
}
Then, use the blender:
Mat result, result_mask;
blender->blend(result, result_mask);
// The result image is in 'result'
When I was a schoolboy, I found opencv/samples/cpp/stitching_detailed.cpp in the OpenCV samples folder. At that time my programming skills were very poor, and I couldn't understand it even though I racked my brains. This question attracted my attention and aroused my memory. After a whole night of hard work and debugging, I finally got it.
Basic steps:
Given the three images: blue.png, green.png, and red.png
We can get the stitching result (result.png) using stitching_detailed.cpp.
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
I choose the center of each of the three images, calculate the corresponding (warped) coordinates on the stitched image, and draw them as solid circles, as follows:
Warping images (auxiliary)...
Compensating exposure...
Blending ...
Warp each center point, and draw solid circle.
[408, 204] => [532, 224]
[408, 204] => [359, 301]
[408, 204] => [727, 320]
Check `result.png`, `result_mask.png` and `result2.png`!
Done!
This is the function calcWarpedPoint I wrote to calculate the warped point on the stitched image:
cv::Point2f calcWarpedPoint(
    const cv::Point2f& pt,
    InputArray K,                       // Camera K parameter
    InputArray R,                       // Camera R parameter
    Ptr<RotationWarper> warper,         // The rotation warper
    const std::vector<cv::Point> &corners,
    const std::vector<cv::Size> &sizes)
{
    // Calculate the warped point using the camera parameters.
    cv::Point2f dst = warper->warpPoint(pt, K, R);
    // Calculate the stitched image ROI using corners and sizes
    // (the corners and sizes have already been calculated).
    cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
    // Finally, adjust the warped point to the stitched image.
    return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
This is an example code snippet:
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
    img = cv::imread(img_names[idx]);
    Mat K;
    cameras[idx].K().convertTo(K, CV_32F);
    Mat R = cameras[idx].R;
    cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
    cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
    cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
    std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
The full code:
/*
* Author : Kinght-金(https://stackoverflow.com/users/3547485/)
* Created : 2019/03/01 23:00 (CST)
* Finished : 2019/03/01 07:50 (CST)
*
* Modified on opencv401/samples/cpp/stitching_detailed.cpp
* From https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
*
*
* Description: A simple opencv(4.0.1) image stitching code for Stack Overflow answers.
* For https://stackoverflow.com/questions/54904718/compute-coordinates-from-source-images-after-stitching/54953792#comment96681412_54953792
*
*/
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
using namespace std;
using namespace cv;
using namespace cv::detail;
//! img_names are the input image (full) paths
// You can download them using the links from the answer.
//! Blue: https://i.stack.imgur.com/Yz3U1.png
//! Green: https://i.stack.imgur.com/AbUTH.png
//! Red: https://i.stack.imgur.com/9wcGc.png
vector<String> img_names = {"D:/stitching/blue.png", "D:/stitching/green.png", "D:/stitching/red.png"};
//! The function to calculate the warped point on the stitching image.
cv::Point2f calcWarpedPoint(
const cv::Point2f& pt,
InputArray K, // Camera K parameter
InputArray R, // Camera R parameter
Ptr<RotationWarper> warper, // The Rotation Warper
const std::vector<cv::Point> &corners,
const std::vector<cv::Size> &sizes)
{
// Calculate the warped point
cv::Point2f dst = warper->warpPoint(pt, K, R);
// Calculate the stitching image roi using corners and sizes,
// the corners and sizes have already been calculated.
cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
// Finally adjust the warped point
return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
int main(int argc, char* argv[])
{
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
float match_conf = 0.3f;
float blend_strength = 5;
// Check if we have enough images
int num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
double work_scale = 1, seam_scale = 1, compose_scale = 1;
bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
//(1) Create the feature finder
Ptr<Feature2D> finder = ORB::create();
// (2) Read the images, scale them appropriately, and compute their feature descriptors
Mat full_img, img;
vector<ImageFeatures> features(num_images);
vector<Mat> images(num_images);
vector<Size> full_img_sizes(num_images);
double seam_work_aspect = 1;
for (int i = 0; i < num_images; ++i)
{
full_img = imread(img_names[i]);
full_img_sizes[i] = full_img.size();
if (full_img.empty())
{
cout << "Can't open image " << img_names[i] << std::endl;
return -1;
}
if (!is_work_scale_set)
{
work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
is_work_scale_set = true;
}
resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
if (!is_seam_scale_set)
{
seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
seam_work_aspect = seam_scale / work_scale;
is_seam_scale_set = true;
}
computeImageFeatures(finder, img, features[i]);
features[i].img_idx = i;
std::cout << "Features in image #" << i + 1 << ": " << features[i].keypoints.size() << std::endl;
resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
images[i] = img.clone();
}
full_img.release();
img.release();
// (3) Create the image feature matcher and compute the match information
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
matcher->collectGarbage();
//! (4) Remove outliers, keeping only the largest confident component
// Leave only images we are sure are from the same panorama
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
vector<Mat> img_subset;
vector<String> img_names_subset;
vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices.size(); ++i)
{
img_names_subset.push_back(img_names[indices[i]]);
img_subset.push_back(images[indices[i]]);
full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
}
images = img_subset;
img_names = img_names_subset;
full_img_sizes = full_img_sizes_subset;
// Check if we still have enough images
num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
//!(5) Estimate the homography
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
vector<CameraParams> cameras;
if (!(*estimator)(features, pairwise_matches, cameras))
{
cout << "Homography estimation failed.\n";
return -1;
}
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
std::cout << "\nInitial camera intrinsics #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R << std::endl;
}
//(6) Create the bundle adjuster
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
refine_mask(0, 0) = 1;
refine_mask(0, 1) = 1;
refine_mask(0, 2) = 1;
refine_mask(1, 1) = 1;
refine_mask(1, 2) = 1;
adjuster->setRefinementMask(refine_mask);
if (!(*adjuster)(features, pairwise_matches, cameras))
{
cout << "Camera parameters adjusting failed.\n";
return -1;
}
// Find median focal length
vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
{
focals.push_back(cameras[i].focal);
}
sort(focals.begin(), focals.end());
float warped_image_scale;
if (focals.size() % 2 == 1)
warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
else
warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
std::cout << "\nWarping images (auxiliary)... \n";
vector<Point> corners(num_images);
vector<UMat> masks_warped(num_images);
vector<UMat> images_warped(num_images);
vector<Size> sizes(num_images);
vector<UMat> masks(num_images);
// Prepare image masks
for (int i = 0; i < num_images; ++i)
{
masks[i].create(images[i].size(), CV_8U);
masks[i].setTo(Scalar::all(255));
}
// Warp images and their masks
Ptr<WarperCreator> warper_creator = makePtr<cv::CylindricalWarper>();
if (!warper_creator)
{
cout << "Can't create the warper \n";
return 1;
}
//! Create RotationWarper
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
//! Calculate warped corners/sizes/mask
for (int i = 0; i < num_images; ++i)
{
Mat_<float> K;
cameras[i].K().convertTo(K, CV_32F);
float swa = (float)seam_work_aspect;
K(0, 0) *= swa; K(0, 2) *= swa;
K(1, 1) *= swa; K(1, 2) *= swa;
corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
sizes[i] = images_warped[i].size();
warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
vector<UMat> images_warped_f(num_images);
for (int i = 0; i < num_images; ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);
std::cout << "Compensating exposure... \n";
//! Compute exposure, adjust image exposure, and reduce brightness differences
Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(ExposureCompensator::GAIN_BLOCKS);
if (dynamic_cast<BlocksCompensator*>(compensator.get()))
{
BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
bcompensator->setNrFeeds(1);
bcompensator->setNrGainsFilteringIterations(2);
bcompensator->setBlockSize(32, 32);
}
compensator->feed(corners, images_warped, masks_warped);
Ptr<SeamFinder> seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
seam_finder->find(images_warped_f, corners, masks_warped);
// Release unused memory
images.clear();
images_warped.clear();
images_warped_f.clear();
masks.clear();
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
Ptr<Blender> blender;
double compose_work_aspect = 1;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
// Read image and resize it if necessary
full_img = imread(img_names[img_idx]);
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_work_aspect = compose_scale / work_scale;
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
warper = warper_creator->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_images; ++i)
{
cameras[i].focal *= compose_work_aspect;
cameras[i].ppx *= compose_work_aspect;
cameras[i].ppy *= compose_work_aspect;
Size sz = full_img_sizes[i];
if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes[i].width * compose_scale);
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
Mat K;
cameras[i].K().convertTo(K, CV_32F);
Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
full_img.release();
Size img_size = img.size();
Mat K, R;
cameras[img_idx].K().convertTo(K, CV_32F);
R = cameras[img_idx].R;
// Warp the current image : img => img_warped
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
img_warped.convertTo(img_warped_s, CV_16S);
img_warped.release();
img.release();
mask.release();
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(Blender::MULTI_BAND, false);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
if (blend_width < 1.f){
blender = Blender::createDefault(Blender::NO, false);
}
else
{
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
}
blender->prepare(corners, sizes);
}
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
}
/* ===========================================================================*/
// Blend image
std::cout << "\nBlending ...\n";
Mat result, result_mask;
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
img = cv::imread(img_names[idx]);
Mat K;
cameras[idx].K().convertTo(K, CV_32F);
Mat R = cameras[idx].R;
cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
std::cout << "\nDone!\n";
/* ===========================================================================*/
return 0;
}
Some links that may be useful:
stitching_detailed.cpp : https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
warper->warp(), warpPoint(), warpRoi() https://github.com/opencv/opencv/blob/master/modules/stitching/src/warpers.cpp#L153
resultRoi() https://github.com/opencv/opencv/blob/master/modules/stitching/src/util.cpp#L116
Other links that may be interesting:
Converting opencv remap code from c++ to python
Split text lines in scanned document
How do I use the relationships between Flann matches to determine a sensible homography?

C++/OpenCV Can't initialize 3D Mat

I have a problem with initializing a 3D Mat in OpenCV.
I would like to create a 3D matrix of size (rows x cols x 16), rows and cols being the dimensions of an image given earlier in the program. I tried more methods than I can count, and all of them return more or less the same thing: the dimensions of my matrices are 0 or -858993460.
My code lines :
Mat image_Conv;
int rows = imageBicubic.rows;
int cols = imageBicubic.cols;
image_Conv = Mat::zeros(rows, cols, CV_32FC(16));
Can you tell me why I have this problem? Of course I read all the posts that talk about it and the OpenCV documentation on the Mat class, but nothing works; I still have the same problem. Note that my data in the Mat will be float.
The code :
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <iostream>
using namespace std;
//#include <opencv.hpp>
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv/highgui.h>
using namespace cv;
////////////////////////////////////////
// main file
int main()
{
    string fileName = "myImage.jpg";
    Mat imageSrc = cv::imread(fileName, CV_LOAD_IMAGE_UNCHANGED); // Read the file
    if (!imageSrc.data) // Check for invalid input
    {
        cout << "Could not open or find the image\n";
        return 1;
    }
    cout << "Loaded " << fileName << " (" << imageSrc.channels() << " channels)\n";
    //int colorTransform = (imageSrc.channels() == 4) ? CV_BGRA2RGBA : (imageSrc.channels() == 3) ? CV_BGR2RGB : CV_GRAY2RGB;
    //cv::cvtColor(imageSrc, imageSrc, colorTransform);
    imageSrc.convertTo(imageSrc, CV_32F, 1 / 255.0, 0.0);
    int SliceSizeWidth = imageSrc.cols / 2;
    int sliceShiftWidth = imageSrc.cols / 4;
    int sliceWidthNumber = (imageSrc.cols / sliceShiftWidth) - 1;
    int SliceSizeHeight = imageSrc.rows / 2;
    int sliceShiftHeight = imageSrc.rows / 4;
    int sliceHeightNumber = (imageSrc.rows / sliceShiftHeight) - 1;
    for (int sliceIndexHeight = 0; sliceIndexHeight < sliceHeightNumber; sliceIndexHeight++)
    {
        for (int sliceIndexWidth = 0; sliceIndexWidth < sliceWidthNumber; sliceIndexWidth++)
        {
            Mat patchImage = imageSrc(Rect(sliceIndexWidth*sliceShiftWidth, sliceIndexHeight*sliceShiftHeight, SliceSizeWidth, SliceSizeHeight));
            Mat patchImageCopy;
            patchImage.copyTo(patchImageCopy); // Deep copy => data are contiguous in patchImageCopy
            Mat imageBicubic;
            resize(patchImageCopy, imageBicubic, Size(2 * patchImage.cols, 2 * patchImage.rows), INTER_CUBIC);
            Mat image_Padding;
            int padding = 1;
            copyMakeBorder(imageBicubic, image_Padding, padding, padding, padding, padding, BORDER_CONSTANT, Scalar(0));
            Mat image_Conv;
            int rows = imageBicubic.rows;
            int cols = imageBicubic.cols;
            image_Conv = Mat::zeros(rows, cols, CV_32FC(16));
            /* rest of the code I have to write */
            image_Conv.convertTo(image_Conv, CV_8U, 255.0, 0.0);
            string nameBase = fileName.substr(0, fileName.find('.'));
            string nameExt = fileName.substr(fileName.find('.'), fileName.length() - nameBase.length());
            string strH = to_string(sliceIndexHeight);
            string strW = to_string(sliceIndexWidth);
            string outFileName = nameBase + "_H" + strH + "W" + strW + nameExt;
            imwrite(outFileName, image_Conv);
        }
    }
    return 0;
}
PS: Most of the code is not mine; I have to use it for my internship and can only edit between the lines:
resize(patchImageCopy, imageBicubic, Size(2 * patchImage.cols, 2 * patchImage.rows), INTER_CUBIC);
and
image_Conv.convertTo(image_Conv, CV_8U, 255.0, 0.0);
Thank you for your help !
EDIT: My first problem is solved, but it seems that it didn't work after all. I suppose that Mat::zeros sets all the Mat elements to 0, right? But if I write
cout << image_Conv.at<float>(0,0,0) << endl;
I get the error: "Unhandled exception at 0x000007FEFD4FA06D in xxxxxx.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000000023E540.".
I don't know what the problem is with the memory and how to fix it.
My goal is to fill my matrix element by element with several for loops that perform several operations before each result is written into the corresponding element of my Mat. I did that with 3D and 4D arrays, and maybe the easiest solution is to do all the calculations with arrays, but I can't go from a 3D array to a 3D Mat or from a 3D Mat to a 3D array.
Just tested this on Visual Studio 2015 with OpenCV 3.4:
cv::Mat mat = cv::Mat::zeros(5, 5, CV_32FC(16));
this works fine.
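Regarding the exception in the EDIT: a 2D Mat with 16 channels is indexed by row and column and then by channel, not with a three-index at<float>(i,j,k) (that form is for genuinely 3-dimensional Mats). A minimal sketch of channel access, where Vec16f is just a local alias introduced for illustration:
typedef cv::Vec<float, 16> Vec16f;              // one 16-channel float pixel
cv::Mat m = cv::Mat::zeros(5, 5, CV_32FC(16));
m.at<Vec16f>(0, 0)[3] = 1.0f;                   // row 0, col 0, channel 3
float v = m.at<Vec16f>(0, 0)[3];                // read it back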
You should be able to create a multi-dimensional matrix filled with 0-values using:
int size[3] = { 5, 4, 3 };
cv::Mat M(3, size, CV_32F, cv::Scalar(0));
You can iterate over the matrix with M.at<float>(i,j,k) (only for a 3D matrix created as above):
for (int i = 0; i < size[0]; i++) {
    for (int j = 0; j < size[1]; j++) {
        for (int k = 0; k < size[2]; k++) {
            M.at<float>(i,j,k) = i*12+j*3+k;
        }
    }
}
for (int i = 0; i < size[0]; i++) {
    for (int j = 0; j < size[1]; j++) {
        for (int k = 0; k < size[2]; k++) {
            std::cout << "M(" << i << ", " << j << ", " << k << "): " << M.at<float>(i,j,k) << std::endl;
        }
    }
}
Alternatively, you should be able to create a 2D matrix with multiple channels with:
cv::Mat M(5, 4, CV_32FC(3), cv::Scalar(0));
To iterate over the 2D matrix and over the channels:
for (int i = 0; i < M.rows; i++) {
    for (int j = 0; j < M.cols; j++) {
        for (int k = 0; k < M.channels(); k++) {
            M.at<cv::Vec<float, 3> >(i,j)[k] = i*M.cols*M.channels()+j*M.channels()+k;
        }
    }
}

From float array to Mat, concatenate blocks of image

I have an image 800x800 which is broken down to 16 blocks of 200x200.
(you can see the previous post here)
These blocks are: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now I want to be able to get the same images/blocks back, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
    {
        Mat mf( i,j, CV_32F, pdata + 200 );
        imshow( "Display window", mf );
        waitKey(0);
    }
}
So, the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800,800,CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
    {
        Mat mf( i,j, CV_32F, pdata + 200 );
        Rect roi(j,i,200,200);
        mf.copyTo( big(roi) );
    }
}
imwrite( "testing" , big );
This gives me :
OpenCV Error: Assertion failed (!fixedSize()) in release
in mf.copyTo( big(roi) );.
First, you need to know where your subimages go in the big image. To do this, you can save the rect of each subimage in a vector<Rect> smallImageRois;
Then you can use pointers (keep in mind that subimages are not continuous), or simply use copyTo to the correct place:
Have a look:
#include <opencv2\opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
    Mat3b img = imread("path_to_image");
    resize(img, img, Size(800, 800));
    Mat grayImg;
    cvtColor(img, grayImg, COLOR_BGR2GRAY);
    grayImg.convertTo(grayImg, CV_32F);
    int N = 4;
    if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
    {
        // Error
        return -1;
    }
    Size graySize = grayImg.size();
    Size smallSize(grayImg.cols / N, grayImg.rows / N);
    vector<Mat> smallImages;
    vector<Rect> smallImageRois;
    for (int i = 0; i < graySize.height; i += smallSize.height)
    {
        for (int j = 0; j < graySize.width; j += smallSize.width)
        {
            Rect rect = Rect(j, i, smallSize.width, smallSize.height);
            smallImages.push_back(grayImg(rect));
            smallImageRois.push_back(rect);
        }
    }
    // Option 1. Using pointer to subimage data.
    Mat big1(800, 800, CV_32F);
    int big1step = big1.step1();
    float* pbig1 = big1.ptr<float>(0);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        float* pdata = (float*)smallImages[idx].data;
        int step = smallImages[idx].step1();
        Rect roi = smallImageRois[idx];
        for (int i = 0; i < smallSize.height; ++i)
        {
            for (int j = 0; j < smallSize.width; ++j)
            {
                pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
            }
        }
    }
    // Option 2. Using copyTo
    Mat big2(800, 800, CV_32F);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        smallImages[idx].copyTo(big2(smallImageRois[idx]));
    }
    return 0;
}
For concatenating the sub-images into a single squared image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
    assert(patches.size() > 0);
    // make it square
    const int patch_width = patches[0].cols;
    const int patch_height = patches[0].rows;
    const int patch_stride = ceil(sqrt(patches.size()));
    Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());
    for (size_t i = 0, iend = patches.size(); i < iend; i++) {
        Mat &patch = patches[i];
        const int offset_x = (i % patch_stride) * patch_width;
        const int offset_y = (i / patch_stride) * patch_height;
        // copy the patch to the output image
        patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
    }
    return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = {Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255)};
// fill vector with circles of different colours
for(int i = 0; i < 16; i++) {
    Mat patch = Mat::zeros(100,100, CV_32FC3);
    circle(patch, Point(50,50), 40, colours[i % 3], -1);
    patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce the following image:
Print the values of i and j before creating Mat mf, and I believe you will soon be able to find the error.
Hint 1: i and j will be 0 the first time
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi));
Edit:
Hint 3: Try not to do such pointer fiddling; you will get into trouble, especially if you're ignoring the step (as you seem to do).
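For completeness, a minimal sketch (with <opencv2/core.hpp> and <vector> included) of wrapping an existing float buffer in a Mat header with fixed dimensions; the buffer here is a stand-in introduced for illustration, not the asker's actual data:
// A contiguous 200x200 float block standing in for one sub-image's data.
std::vector<float> buffer(200 * 200, 0.5f);
// Wrap it without copying; rows and cols are the patch size, not loop indices.
cv::Mat mf(200, 200, CV_32F, buffer.data());
// If the source rows are padded, pass the step in bytes as a fifth argument:
// cv::Mat mf(200, 200, CV_32F, buffer.data(), stepInBytes);
Note that a Mat constructed this way does not own the data, so the buffer must outlive it.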

Create a single image from images array

Hi, I'm trying to create a single image from multiple images in OpenCV.
The images I use are the same size.
What I do is reshape them into a single line and then try to merge them together into my new image.
I create a new image with the size of 2 images and pass the array, but I receive the error EXC_BAD_ACCESS (code=1, address = ..).
Note: the sizes of the images are correct:
size of a single image: [170569 x 1]
size of new_image: [170569 x 2]
My code is below.
Thank you.
int main(){
    Mat image[2];
    image[0] = imread("image1.jpg",0);
    image[1] = imread("image2.jpg",0);
    image[0] = image[0].reshape(0, 1); //SINGLE LINE
    image[1] = image[1].reshape(0, 1); //SINGLE LINE
    int size = sizeof(image)/sizeof(Mat);
    Mat new_image(image[0].cols,size,CV_32FC1,image);
}
Mat new_image;
vconcat(image[0],image[1],new_image);
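For reference, vconcat requires both inputs to have the same number of columns and the same type (hconcat, analogously, requires the same number of rows); a minimal sketch, assuming image1.jpg and image2.jpg exist and have equal widths:
cv::Mat a = cv::imread("image1.jpg", cv::IMREAD_GRAYSCALE);
cv::Mat b = cv::imread("image2.jpg", cv::IMREAD_GRAYSCALE);
cv::Mat stacked;
cv::vconcat(a, b, stacked);   // stacks a on top of b
cv::imwrite("stacked.jpg", stacked);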
If I understand well, you need to concatenate 2 images of the same size into one Mat. I wrote this very quick code to perform the task.
You can change the function's arguments to pointers and add other handling to take care of images of different sizes.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
cv::Mat cvConcatenateMat(const cv::Mat &image1, const cv::Mat &image2, bool isCol CV_DEFAULT(true)){
    if (isCol) {
        cv::Mat mergeMat = cv::Mat(image1.rows, image1.cols + image2.cols, image1.type());
        for (int j = 0; j < image1.rows; j++) {
            for (int i = 0; i < image1.cols; i++) {
                mergeMat.at<cv::Vec3b>(j,i) = image1.at<cv::Vec3b>(j,i);
            }
            for (int i = image1.cols; i < mergeMat.cols; i++) {
                // shift the column index back into image2's coordinates
                mergeMat.at<cv::Vec3b>(j,i) = image2.at<cv::Vec3b>(j, i - image1.cols);
            }
        }
        return mergeMat;
    } else {
        cv::Mat mergeMat = cv::Mat(image1.rows + image2.rows, image1.cols, image1.type());
        for (int j = 0; j < image1.cols; j++) {
            for (int i = 0; i < image1.rows; i++) {
                mergeMat.at<cv::Vec3b>(i,j) = image1.at<cv::Vec3b>(i,j);
            }
            for (int i = image1.rows; i < mergeMat.rows; i++) {
                mergeMat.at<cv::Vec3b>(i,j) = image2.at<cv::Vec3b>(i-image1.rows,j);
            }
        }
        return mergeMat;
    }
}
int main(int argc, const char * argv[]) {
    cv::Mat image1 = cv::imread("img1.jpg");
    cv::Mat image2 = cv::imread("img2.jpg");
    cv::resize(image2, image2, image1.size());
    cv::Mat outImage = cvConcatenateMat(image1, image2, false);
    cv::imshow("out image", outImage);
    cv::waitKey(0);
    return 0;
}

Different results with cvDFT and DFT in OpenCV 2.4.8

I'm having problems with the DFT function in OpenCV 2.4.8 for C++.
I used an image of a 10-phase sine curve to compare the old cvDFT() with the newer C++ function dft() (one-dimensional DFT, row-wise).
The old version gives me logical results: very high peaks at pixels 0 and 10, the rest being almost 0.
The new version gives me strange results with peaks all over the spectrum.
Here is my code:
#include "stdafx.h"
#include <opencv2\core\core_c.h>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc_c.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui_c.h>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\legacy\compat.hpp>
using namespace cv;
void OldMakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
IplImage* fftBlock = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
IplImage* imgReal = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgImag = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgDFT = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 2);
Rect roi(0, 0, width, 1);
Mat image_roi = original(roi);
fftBlock->imageData = (char*)image_roi.data;
//cvSaveImage("C:/fftBlock1.png", fftBlock);
cvConvert(fftBlock, imgReal);
cvMerge(imgReal, imgImag, NULL, NULL, imgDFT);
cvDFT(imgDFT, imgDFT, (CV_DXT_FORWARD | CV_DXT_ROWS));
cvSplit(imgDFT, imgReal, imgImag, NULL, NULL);
double re,imag;
for (int i = 0; i < width; i++)
{
re = ((float*)imgReal->imageData)[i];
imag = ((float*)imgImag->imageData)[i];
result[i] = re * re + imag * imag;
}
cvReleaseImage(&imgReal);
cvReleaseImage(&imgImag);
cvReleaseImage(&imgDFT);
cvReleaseImage(&fftBlock);
}
void MakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
Mat fftBlock(1,width, CV_8UC1);
Rect roi(0, 0, width, height);
Mat image_roi = original(roi);
image_roi.copyTo(fftBlock);
//imwrite("C:/fftBlock2.png", fftBlock);
Mat planes[] = {Mat_<float>(fftBlock), Mat::zeros(fftBlock.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI, DFT_ROWS); //also tried with DFT_COMPLEX_OUTPUT | DFT_ROWS
split(complexI, planes);
double re, imag;
for (int i = 0; i < width; i++)
{
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
result[i] = re * re + imag * imag;
}
}
bool SinusFFTTest()
{
const int size = 1024;
Mat sinTest(size,size,CV_8UC1, Scalar(0));
const int n_sin_curves = 10;
double deg_step = (double)n_sin_curves*360/size;
for (int j = 0; j < size; j++)
{
for (int i = 0; i <size; i++)
{
sinTest.data[j*size+i] = 127.5 * sin(i*deg_step*CV_PI/180) + 127.5;
}
}
double* result1 = new double[size];
double* result2 = new double[size];
OldMakeDFT(sinTest,result1);
MakeDFT(sinTest,result2);
bool identical = true;
for (int i = 0; i < size; i++)
{
if (abs(result1[i] - result2[i]) > 1000)
{
identical = false;
break;
}
}
delete[] result1;
delete[] result2;
return identical;
}
int _tmain(int argc, _TCHAR* argv[])
{
if (SinusFFTTest())
{
printf("identical");
}
else
{
printf("different");
}
getchar();
return 0;
}
Could someone explain the difference?
imgImag is not filled with zeroes by default.
The bug is in the MakeDFT() function:
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
data[i] has type uchar, so this cast to float does not read the underlying float values correctly.
The fix:
re = planes[0].at<float>(0,i);
imag = planes[1].at<float>(0,i);
After this change, the old and the new DFT versions give the same results. Or, you can use cv::magnitude() instead of calculating the sum of squares of re and imag:
Mat magn;
magnitude(planes[0], planes[1], magn);
for (int i = 0; i < width; i++)
result[i] = pow(magn.at<float>(0,i),2);
This also gives the same result as the old cvDFT.