OpenCV C++ code to Obj-C++

Someone suggested an algorithm to prepare an image for OCR, and the code given to me is great! However, it seems it is not compatible with the iOS build of OpenCV; there are a few different naming conventions, and I am having a hard time converting the code to Obj-C++.
Could someone rewrite it for Obj-C++?
Here is the original code:
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdarg.h>
#include "opencv2/opencv.hpp"
#include "fstream"
#include "iostream"
using namespace std;
using namespace cv;
//-----------------------------------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------------------------------
void CalcBlockMeanVariance(Mat& Img, Mat& Res, float blockSide = 21) // blockSide - the parameter (set greater for larger font on image)
{
    Mat I;
    Img.convertTo(I, CV_32FC1);
    Res = Mat::zeros(Img.rows / blockSide, Img.cols / blockSide, CV_32FC1);
    Mat inpaintmask;
    Mat patch;
    Mat smallImg;
    Scalar m, s;
    for (int i = 0; i < Img.rows - blockSide; i += blockSide)
    {
        for (int j = 0; j < Img.cols - blockSide; j += blockSide)
        {
            patch = I(Range(i, i + blockSide + 1), Range(j, j + blockSide + 1));
            cv::meanStdDev(patch, m, s);
            if (s[0] > 0.01) // Thresholding parameter (set smaller for lower contrast image)
            {
                Res.at<float>(i / blockSide, j / blockSide) = m[0];
            }
            else
            {
                Res.at<float>(i / blockSide, j / blockSide) = 0;
            }
        }
    }
    cv::resize(I, smallImg, Res.size());
    cv::threshold(Res, inpaintmask, 0.02, 1.0, cv::THRESH_BINARY);
    Mat inpainted;
    smallImg.convertTo(smallImg, CV_8UC1, 255);
    inpaintmask.convertTo(inpaintmask, CV_8UC1);
    inpaint(smallImg, inpaintmask, inpainted, 5, INPAINT_TELEA);
    cv::resize(inpainted, Res, Img.size());
    Res.convertTo(Res, CV_32FC1, 1.0 / 255.0);
}
//-----------------------------------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------------------------------
int main(int argc, char** argv)
{
    namedWindow("Img");
    namedWindow("Edges");
    //Mat Img=imread("D:\\ImagesForTest\\BookPage.JPG",0);
    Mat Img = imread("Test2.JPG", 0);
    Mat res;
    Img.convertTo(Img, CV_32FC1, 1.0 / 255.0);
    CalcBlockMeanVariance(Img, res);
    res = 1.0 - res;
    res = Img + res;
    imshow("Img", Img);
    cv::threshold(res, res, 0.85, 1, cv::THRESH_BINARY);
    cv::resize(res, res, cv::Size(res.cols / 2, res.rows / 2));
    imwrite("result.jpg", res * 255);
    imshow("Edges", res);
    waitKey(0);
    return 0;
}
My attempt (gives back a blue image with black spots):
void CalcBlockMeanVariance(cv::Mat& Img, cv::Mat& Res, float blockSide = 21) // blockSide - the parameter (set greater for larger font on image)
{
    cv::Mat I;
    Img.convertTo(I, CV_32FC1);
    Res = cv::Mat::zeros(Img.rows / blockSide, Img.cols / blockSide, CV_32FC1);
    cv::Mat inpaintmask;
    cv::Mat patch;
    cv::Mat smallImg;
    cv::Scalar m, s;
    for (int i = 0; i < Img.rows - blockSide; i += blockSide)
    {
        for (int j = 0; j < Img.cols - blockSide; j += blockSide)
        {
            patch = I(cv::Rect(j, i, blockSide, blockSide));
            cv::meanStdDev(patch, m, s);
            if (s[0] > 0.01) // Thresholding parameter (set smaller for lower contrast image)
            {
                Res.at<float>(i / blockSide, j / blockSide) = m[0];
            }
            else
            {
                Res.at<float>(i / blockSide, j / blockSide) = 0;
            }
        }
    }
    cv::resize(I, smallImg, Res.size());
    cv::threshold(Res, inpaintmask, 0.02, 1.0, cv::THRESH_BINARY);
    cv::Mat inpainted;
    smallImg.convertTo(smallImg, CV_8UC1, 255);
    inpaintmask.convertTo(inpaintmask, CV_8UC1);
    cv::inpaint(smallImg, inpaintmask, inpainted, 5, cv::INPAINT_TELEA);
    cv::resize(inpainted, Res, Img.size());
    Res.convertTo(Res, CV_32FC1, 1.0 / 255.0);
}
Calling the method:
_img = [self cvMatFromUIImage:_endImage];
cv::cvtColor(_img, _img, CV_RGB2GRAY);
_img.convertTo(_img, CV_32FC1, 1.0/255.0);
CalcBlockMeanVariance(_img, _res);
_res = 1.0 - _res;
_res = _img + _res;
cv::threshold(_res,_res,0.85,1,cv::THRESH_BINARY);
cv::resize(_res,_res,cv::Size(_res.cols/2,_res.rows/2));
_endImage = [self UIImageFromMat:_res];

You can simply separate your C++ code into another NSObject class and rename the file from OpenCVUtilities.m to OpenCVUtilities.mm. The .mm extension tells Xcode to compile the class as Objective-C++, so you don't need to convert the code to Objective-C; it will work as it is.
After doing this you will need to change a few settings in your Project -> Build Settings, as shown in the image below.
For more assistance you can download the project from here.
Cheers.
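As a rough illustration of that layout, a minimal sketch might look like the following (the OpenCVUtilities class name and the openCVVersion method are hypothetical, not from the question; the point is only that a .mm file can mix Objective-C and C++ freely):
// OpenCVUtilities.h -- keep the header free of C++ so plain Obj-C callers can import it
#import <Foundation/Foundation.h>

@interface OpenCVUtilities : NSObject
+ (NSString *)openCVVersion;
@end

// OpenCVUtilities.mm -- the .mm extension makes Xcode compile this file as Objective-C++
#import <opencv2/opencv.hpp> // import OpenCV before Apple headers to avoid macro clashes
#import "OpenCVUtilities.h"

// Plain C++ can live here unchanged; e.g. the CalcBlockMeanVariance
// function from the question could be pasted into this file as-is.

@implementation OpenCVUtilities
+ (NSString *)openCVVersion
{
    // CV_VERSION is a macro from the C++ headers; using it shows the C++ compiler is active.
    return [NSString stringWithFormat:@"OpenCV %s", CV_VERSION];
}
@end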

Related

OpenCV C++: Image Not Warping Correctly

I am learning how to stitch two images together using the link below, but whatever I do to calculate the homography and warpPerspective, the two images won't stitch together.
https://learnopencv.com/feature-based-image-alignment-using-opencv-c-python/
Below is the source code for image stitching
Include Section
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
#include <iostream>
Global Variables
using namespace std;
using namespace cv;
const float inlier_threshold = 2.5f; // Distance threshold to identify inliers
const float nn_match_ratio = 0.8f; // Nearest neighbor matching ratio
Main Function
int main(void)
{
    puts("opening");
    Mat img1 = imread("uttower_right.jpg", IMREAD_GRAYSCALE); // To be aligned
    Mat img2 = imread("large2_uttower_left.jpg", IMREAD_GRAYSCALE); // Reference
    Mat img3 = Mat(img2.rows, img2.cols, CV_8UC1);
    //img2.copyTo(img3);
    Mat homography;
    vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;
    puts("Have opened");
    Ptr<AKAZE> akaze = AKAZE::create();
    akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
    akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
    puts("Have computed AKAZE");
    BFMatcher matcher(NORM_HAMMING);
    vector< vector<DMatch> > nn_matches;
    matcher.knnMatch(desc1, desc2, nn_matches, 2);
    puts("Have done match");
    vector<KeyPoint> matched1, matched2;
    vector<Point2f> inliers1, inliers2;
    for (size_t i = 0; i < nn_matches.size(); i++) {
        DMatch first = nn_matches[i][0];
        float dist1 = nn_matches[i][0].distance;
        float dist2 = nn_matches[i][1].distance;
        if (dist1 < nn_match_ratio * dist2) {
            matched1.push_back(kpts1[first.queryIdx]);
            matched2.push_back(kpts2[first.trainIdx]);
            inliers1.push_back(kpts1[first.queryIdx].pt);
            inliers2.push_back(kpts2[first.trainIdx].pt); // trainIdx indexes kpts2, the reference image
        }
    }
    printf("Matches %d %d\n", (int)matched1.size(), (int)matched2.size());
    homography = findHomography(inliers1, inliers2, RANSAC);
    warpPerspective(img1, img3, homography, img2.size());
    // Display input and output
    imshow("Input1", img1);
    imshow("Input2", img2);
    imshow("Input3", img3);
    waitKey(0);
    return 0;
}
Images used

OpenCV SIFT key points extraction issue

I tried to extract SIFT key points. It works fine for a sample image I downloaded (height 400px, width 247px, horizontal and vertical resolution 300dpi). The image below shows the extracted points.
Then I tried to apply the same code to an image that was taken and edited by me (height 443px, width 541px, horizontal and vertical resolution 72dpi).
To create the above image I rotated the original image, then removed its background and resized it using Photoshop, but for that image my code doesn't extract features like in the first image.
See the result:
It extracts only very few points. I expect a result as in the first case.
For the second case, when I use the original image without any edits, the program gives points as in the first case.
Here is the simple code I have used:
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv2\nonfree\nonfree.hpp>
using namespace cv;
int main(){
Mat src, descriptors,dest;
vector<KeyPoint> keypoints;
src = imread(". . .");
cvtColor(src, src, CV_BGR2GRAY);
SIFT sift;
sift(src, src, keypoints, descriptors, false);
drawKeypoints(src, keypoints, dest);
imshow("Sift", dest);
cvWaitKey(0);
return 0;
}
What am I doing wrong here? What do I need to do to get a result like the first case for my own image after resizing?
Thank you!
Try setting the nfeatures parameter (other parameters may also need adjustment) in the SIFT constructor.
Here is the constructor definition from the reference:
SIFT::SIFT(int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
Your code will be:
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv2\nonfree\nonfree.hpp>
using namespace cv;
using namespace std;
int main(){
Mat src, descriptors,dest;
vector<KeyPoint> keypoints;
src = imread("D:\\ImagesForTest\\leaf.jpg");
cvtColor(src, src, CV_BGR2GRAY);
SIFT sift(2000,3,0.004);
sift(src, src, keypoints, descriptors, false);
drawKeypoints(src, keypoints, dest);
imshow("Sift", dest);
cvWaitKey(0);
return 0;
}
The result:
Dense sampling example:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"
int main(int argc, char* argv[])
{
    cv::initModule_nonfree();
    cv::namedWindow("result");
    cv::Mat bgr_img = cv::imread("D:\\ImagesForTest\\lena.jpg");
    if (bgr_img.empty())
    {
        exit(EXIT_FAILURE);
    }
    cv::Mat gray_img;
    cv::cvtColor(bgr_img, gray_img, cv::COLOR_BGR2GRAY);
    cv::normalize(gray_img, gray_img, 0, 255, cv::NORM_MINMAX);
    cv::DenseFeatureDetector detector(12.0f, 1, 0.1f, 10);
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(gray_img, keypoints);
    std::vector<cv::KeyPoint>::iterator itk;
    for (itk = keypoints.begin(); itk != keypoints.end(); ++itk)
    {
        std::cout << itk->pt << std::endl;
        cv::circle(bgr_img, itk->pt, itk->size, cv::Scalar(0,255,255), 1, CV_AA);
        cv::circle(bgr_img, itk->pt, 1, cv::Scalar(0,255,0), -1);
    }
    cv::Ptr<cv::DescriptorExtractor> descriptorExtractor = cv::DescriptorExtractor::create("SURF");
    cv::Mat descriptors;
    descriptorExtractor->compute(gray_img, keypoints, descriptors);
    // SIFT returns large negative values when it goes off the edge of the image.
    descriptors.setTo(0, descriptors < 0);
    imshow("result", bgr_img);
    cv::waitKey();
    return 0;
}
The result:

Output producing 4 images side by side for a single image provided in gradient calculation

The following code is used to calculate the normalized gradient at all the pixels of an image. But when using imshow on the calculated gradient, instead of showing the gradient for the provided image, it shows the gradient of the provided image 4 times (side by side).
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/core/core.hpp>
#include<iostream>
#include<math.h>
using namespace cv;
using namespace std;
Mat mat2gray(const Mat& src)
{
    Mat dst;
    normalize(src, dst, 0.0, 1.0, NORM_MINMAX);
    return dst;
}
Mat setImage(Mat srcImage){
    //GaussianBlur(srcImage,srcImage,Size(3,3),0.5,0.5);
    Mat avgImage = Mat::zeros(srcImage.rows, srcImage.cols, CV_32F);
    Mat gradient = Mat::zeros(srcImage.rows, srcImage.cols, CV_32F);
    Mat norMagnitude = Mat::zeros(srcImage.rows, srcImage.cols, CV_32F);
    Mat orientation = Mat::zeros(srcImage.rows, srcImage.cols, CV_32F);
    //Mat_<uchar> srcImagetemp = srcImage;
    float dx, dy;
    for(int i = 0; i < srcImage.rows - 1; i++){
        for(int j = 0; j < srcImage.cols - 1; j++){
            dx = srcImage.at<float>(i, j + 1) - srcImage.at<float>(i, j);
            dy = srcImage.at<float>(i + 1, j) - srcImage.at<float>(i, j);
            gradient.at<float>(i, j) = sqrt(dx * dx + dy * dy);
            orientation.at<float>(i, j) = atan2(dy, dx);
            //cout<<gradient.at<float>(i,j)<<endl;
        }
    }
    GaussianBlur(gradient, avgImage, Size(7,7), 3, 3);
    for(int i = 0; i < srcImage.rows; i++){
        for(int j = 0; j < srcImage.cols; j++){
            norMagnitude.at<float>(i, j) = gradient.at<float>(i, j) / max(avgImage.at<float>(i, j), float(4));
            //cout<<norMagnitude.at<float>(i,j)<<endl;
        }
    }
    imshow("b", gradient);
    waitKey();
    return norMagnitude;
}
int main(int argc, char **argv){
    Mat image = imread(argv[1]);
    cvtColor(image, image, CV_BGR2GRAY);
    Mat newImage = setImage(image);
    imshow("a", newImage);
    waitKey();
}
Your incoming source image is of type CV_8UC1, and yet you read it as floats:
dx=srcImage.at<float>(i,j+1)-srcImage.at<float>(i,j);
dy=srcImage.at<float>(i+1,j)-srcImage.at<float>(i,j);
If running under the debugger, this should have thrown an assertion, which would have highlighted the problem.
Try changing those lines to use unsigned char as follows:
dx=(float)(srcImage.at<unsigned char>(i,j+1)-srcImage.at<unsigned char>(i,j));
dy=(float)(srcImage.at<unsigned char>(i+1,j)-srcImage.at<unsigned char>(i,j));

Use OpenCV Threshold with Kinect Image

I'm trying to use the OpenCV threshold with the depth image retrieved by the OpenCV VideoCapture module, but I get the following error:
OpenCV Error: Bad argument in unknown function,
file PATHTOOPENCV\opencv\modules\core\src\matrix.cpp line 646
My code is as follows:
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
cv::VideoCapture kinect;
cv::Mat rgbMap;
cv::Mat dispMap;
bool newFrame;
void setup()
{
    kinect.open(CV_CAP_OPENNI);
    newFrame = false;
}
void update()
{
    if(kinect.grab())
    {
        kinect.retrieve(rgbMap, CV_CAP_OPENNI_BGR_IMAGE);
        kinect.retrieve(dispMap, CV_CAP_OPENNI_DISPARITY_MAP);
        newFrame = true;
    }
}
void draw()
{
    if(newFrame)
    {
        cv::Mat * _thresSrc = new cv::Mat(dispMap);
        cv::Mat * _thresDst = new cv::Mat(dispMap);
        cvThreshold(_thresSrc, _thresDst, 24, 255, CV_THRESH_BINARY);
        // Draw _thresDst;
        delete _thresSrc;
        delete _thresDst;
        newFrame = false;
    }
}
Thank you very much for your help
To start things off, you are mixing the C interface with the C++ interface, and they shouldn't be mixed together!
cv::Mat belongs to the C++ interface, and cvThreshold() belongs to the C interface. You should use cv::threshold() instead:
double cv::threshold(const Mat& src, Mat& dst, double thresh, double maxVal, int thresholdType)
Parameters:
src – Source array (single-channel, 8-bit or 32-bit floating point)
dst – Destination array; will have the same size and the same type as src
thresh – Threshold value
maxVal – Maximum value to use with THRESH_BINARY and THRESH_BINARY_INV thresholding types
thresholdType – Thresholding type (see the discussion)
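Applied to the question's draw() function, a minimal sketch (keeping the question's variable names, and with no need for new/delete) would be:
void draw()
{
    if (newFrame)
    {
        cv::Mat thresholded;
        // Same parameters as the original cvThreshold() call, via the C++ interface.
        cv::threshold(dispMap, thresholded, 24, 255, cv::THRESH_BINARY);
        // Draw thresholded;
        newFrame = false;
    }
}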

Camera calibration with opencv (Assertion failed fault)

I am trying to get camera calibration parameters by using the OpenCV camera calibration functions. I have a video that includes a checkerboard in different positions, and I am trying to find the calibration parameters and the points in the video. But I couldn't get past the calibration phase yet. I can find the corners of the checkerboard and show them in an OpenCV window, but when it comes to the line:
calibrateCamera(objectPoints,imagePoints..............)
it throws exception and stops.
I get the following error: OpenCV Error: Assertion failed (nimages > 0 && nimages == (int)imagePoints1.total() && ...)
Here is my code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "C:/opencv/include/opencv/cv.h"
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
std::vector<cv::Point2f> imageCorners;
std::vector<cv::Point3f> objectCorners;
std::vector<std::vector<cv::Point3f>> objectPoints;
std::vector<std::vector<cv::Point2f>> imagePoints;
void addPoints(const std::vector<cv::Point2f>& imageCorners, const std::vector<cv::Point3f>& objectCorners)
{
    // 2D image points from one view
    imagePoints.push_back(imageCorners);
    // corresponding 3D scene points
    objectPoints.push_back(objectCorners);
}
int main()
{
    int key;
    cv::Mat image;
    cv::Mat gray_image;
    VideoCapture cap("here goes path of the file");
    if (!cap.isOpened()) // check if we succeeded
        cout << "failed";
    else
        cout << "success";
    cvNamedWindow("video", 0);
    cv::Size boardSize(8,6);
    // output matrices
    cv::Mat cameraMatrix;
    std::vector<cv::Mat> rvecs, tvecs;
    cv::Mat distCoeffs;
    for (int i=0; i<boardSize.height; i++)
    {
        for (int j=0; j<boardSize.width; j++)
        {
            objectCorners.push_back(cv::Point3f(i, j, 0.0f));
        }
    }
    int frame=1;
    int corner_count=0;
    while(1)
    {
        if(cap.read(image))
        {
            frame++;
            if(frame%20==0)
            {
                if(waitKey(30) >= 0) break;
                bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
                cvtColor(image, gray_image, CV_RGB2GRAY);
                addPoints(imageCorners, objectCorners);
                //bool found = cv::findChessboardCorners(image,boardSize, imageCorners);
                cv::drawChessboardCorners(gray_image, boardSize, imageCorners, found);
                imshow("video", gray_image);
            }
        }
        else
            break;
    }
    int flag=0;
    std::string text="";
    for (int i=1; i<imagePoints.size(); i++)
    {
        std::stringstream out;
        out << imagePoints[i];
        text=out.str();
        cout << text << endl;
    }
    calibrateCamera(objectPoints, imagePoints, gray_image.size(), cameraMatrix, distCoeffs, rvecs, tvecs, flag);
    return 0;
}
Print the sizes of all your std::vectors; I suspect you are passing an empty vector to that function.
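For example, a minimal sanity check (a sketch, not from the original post) just before the call:
std::cout << "objectPoints: " << objectPoints.size()
          << " imagePoints: " << imagePoints.size() << std::endl;
if (objectPoints.empty() || objectPoints.size() != imagePoints.size())
{
    // calibrateCamera asserts that both vectors hold the same, non-zero number of views
    std::cerr << "No (or mismatched) calibration views collected" << std::endl;
    return -1;
}
calibrateCamera(objectPoints, imagePoints, gray_image.size(), cameraMatrix, distCoeffs, rvecs, tvecs, flag);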
EDIT:
I've shared some instructions in this answer on how to do camera calibration. Those references include working source code. You'll probably have to make a small adaptation so those programs work with video instead.
You should look at this:
http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#bool findChessboardCorners(InputArray image, Size patternSize, OutputArray corners, int flags)
It says the source chessboard view must be an 8-bit grayscale or color image, so you must use this:
bool found = cv::findChessboardCorners(gray_image, boardSize, imageCorners);
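Note that in the question's loop gray_image is only filled in after findChessboardCorners runs, so the cvtColor call has to move up; a minimal reordering sketch (with an added found-check, which is also advisable so that views without a detected board are not passed on to calibrateCamera):
cvtColor(image, gray_image, CV_RGB2GRAY); // convert before corner detection
bool found = cv::findChessboardCorners(gray_image, boardSize, imageCorners);
if (found) // only keep views where the whole board was detected
    addPoints(imageCorners, objectCorners);
cv::drawChessboardCorners(gray_image, boardSize, imageCorners, found);
imshow("video", gray_image);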