OpenCV SIFT key points extraction issue - C++

I tried to extract SIFT key points. It works fine for a sample image I downloaded (height 400px, width 247px, horizontal and vertical resolution 300dpi). The image below shows the extracted points.
Then I tried to apply the same code to an image that was taken and edited by me (height 443px, width 541px, horizontal and vertical resolution 72dpi).
To create the above image I rotated the original image, removed its background, and resized it using Photoshop, but for that image my code doesn't extract features like it does for the first image.
See the result:
It extracts only very few points; I expect a result as in the first case.
For the second image, when I use the original without any edits, the program gives points as in the first case.
Here is the simple code I have used:
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/nonfree/nonfree.hpp>

using namespace cv;
using namespace std; // needed for vector

int main()
{
    Mat src, descriptors, dest;
    vector<KeyPoint> keypoints;

    src = imread(". . .");
    cvtColor(src, src, CV_BGR2GRAY);

    SIFT sift;
    sift(src, src, keypoints, descriptors, false);

    drawKeypoints(src, keypoints, dest);
    imshow("Sift", dest);
    cvWaitKey(0);
    return 0;
}
What am I doing wrong here? What do I need to do to get a result like the first case for my own image after resizing?
Thank you!

Try setting the nfeatures parameter (other parameters may also need adjustment) in the SIFT constructor.
Here is the constructor definition from the reference:
SIFT::SIFT(int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
Your code will be:
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/nonfree/nonfree.hpp>

using namespace cv;
using namespace std;

int main()
{
    Mat src, descriptors, dest;
    vector<KeyPoint> keypoints;

    src = imread("D:\\ImagesForTest\\leaf.jpg");
    cvtColor(src, src, CV_BGR2GRAY);

    // Lowering contrastThreshold from the default 0.04 to 0.004
    // keeps weaker keypoints that would otherwise be rejected.
    SIFT sift(2000, 3, 0.004);
    sift(src, src, keypoints, descriptors, false);

    drawKeypoints(src, keypoints, dest);
    imshow("Sift", dest);
    cvWaitKey(0);
    return 0;
}
The result:
Dense sampling example:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"

int main(int argc, char* argv[])
{
    cv::initModule_nonfree();
    cv::namedWindow("result");

    cv::Mat bgr_img = cv::imread("D:\\ImagesForTest\\lena.jpg");
    if (bgr_img.empty())
    {
        exit(EXIT_FAILURE);
    }

    cv::Mat gray_img;
    cv::cvtColor(bgr_img, gray_img, cv::COLOR_BGR2GRAY);
    cv::normalize(gray_img, gray_img, 0, 255, cv::NORM_MINMAX);

    // Sample keypoints on a regular grid instead of detecting them.
    cv::DenseFeatureDetector detector(12.0f, 1, 0.1f, 10);
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(gray_img, keypoints);

    std::vector<cv::KeyPoint>::iterator itk;
    for (itk = keypoints.begin(); itk != keypoints.end(); ++itk)
    {
        std::cout << itk->pt << std::endl;
        cv::circle(bgr_img, itk->pt, itk->size, cv::Scalar(0, 255, 255), 1, CV_AA);
        cv::circle(bgr_img, itk->pt, 1, cv::Scalar(0, 255, 0), -1);
    }

    cv::Ptr<cv::DescriptorExtractor> descriptorExtractor = cv::DescriptorExtractor::create("SURF");
    cv::Mat descriptors;
    descriptorExtractor->compute(gray_img, keypoints, descriptors);
    // The extractor can return large negative values when it samples
    // off the edge of the image, so clamp them to zero.
    descriptors.setTo(0, descriptors < 0);

    imshow("result", bgr_img);
    cv::waitKey();
    return 0;
}
The result:

Related

Obstacle detection for ground robot using aerial image

I want to perform obstacle detection for a ground robot using a picture, taken by a drone, of the area the ground robot will cover. Since I have a limited background in image processing, I am not sure how to carry this out. I tried the following method, but the result is not very accurate: it also detects very small edges, and it does not work well with aerial images.
#pragma once
#include <string>
#include <iostream>
#include <vector>
#include "opencv2/opencv.hpp"

using namespace std;
using namespace cv;

//----------------------------------------------------------
// MAIN
//----------------------------------------------------------
int main(int argc, char* argv[])
{
    // src image
    Mat src;
    // grayscale image
    Mat gray;
    // edges image
    Mat edges;
    // dst image
    Mat dst;
    // eroded image
    Mat erosion;
    // smoothed result
    Mat result;

    //----------------------------------------------------------
    // Image loading
    //----------------------------------------------------------
    namedWindow("result");
    namedWindow("src");
    namedWindow("edges");
    src = imread("C:/Users/HP/Desktop/SDP/obstacle detection/obstacle detection/obstacle detection/shapes.jpg");

    //----------------------------------------------------------
    // Specifying size and type of image
    //----------------------------------------------------------
    edges = Mat::zeros(src.size(), CV_8UC1);
    dst = Mat::zeros(src.size(), CV_8UC1);
    gray = Mat::zeros(src.size(), CV_8UC1);
    erosion = Mat::zeros(src.size(), CV_8UC1);
    result = Mat::zeros(src.size(), CV_8UC1);

    //----------------------------------------------------------
    // Converting from RGB to grayscale
    //----------------------------------------------------------
    cvtColor(src, gray, COLOR_BGR2GRAY);

    //----------------------------------------------------------
    // Edge detection using the OpenCV Canny edge detector
    //----------------------------------------------------------
    Canny(gray, edges, 80, 255);

    //----------------------------------------------------------
    // Filling in the non-obstacle areas with white
    //----------------------------------------------------------
    for (int i = 0; i < edges.cols; ++i)
    {
        int j = edges.rows - 1;
        for (j = edges.rows - 1; j > 0; --j)
        {
            if (edges.at<uchar>(j, i) > 0)
            {
                break;
            }
        }
        dst(Range(j, dst.rows - 1), Range(i, i + 1)) = 255;
    }

    //----------------------------------------------------------
    // Applying the erosion function to remove noise
    //----------------------------------------------------------
    Mat element = getStructuringElement(MORPH_RECT, Size(10, 10));
    erode(dst, erosion, element);

    //----------------------------------------------------------
    // Smoothing the edges to get the result
    //----------------------------------------------------------
    GaussianBlur(erosion, result, Size(5, 5), 4);

    //----------------------------------------------------------
    // Displaying the intermediate and final resulting images
    //----------------------------------------------------------
    namedWindow("src", WINDOW_NORMAL);
    imshow("src", src);
    namedWindow("edges", WINDOW_NORMAL);
    imshow("edges", edges);
    namedWindow("dst", WINDOW_NORMAL);
    imshow("dst", dst);
    namedWindow("erosion", WINDOW_NORMAL);
    imshow("erosion", erosion);
    namedWindow("result", WINDOW_NORMAL);
    imshow("result", result);

    //----------------------------------------------------------
    // Wait for key press
    //----------------------------------------------------------
    waitKey(0);
    destroyAllWindows();
    return 0;
}
The code takes in an image and converts it to grayscale. Then Canny edge detection is used to detect the edges of all the objects in the image. The edge-detected image is filled with white color starting from the bottom of each column until an edge is detected; the process continues until the whole image is covered. The result is a binary image with white for areas without obstacles and black for obstacles. The OpenCV function erode is then used to remove remaining noise.
I would really appreciate suggestions on how to improve this or on any other technique to use.
I suggest thresholding the image for a color range matching the ground. This approach works well if the color of your ground does not change too much (which is the case in your src image). You might want to check out this OpenCV example (Python).
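For illustration, a minimal C++ sketch of that idea (the HSV bounds and the file name are assumptions that would need tuning for the actual terrain):
#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
    // Hypothetical input path; replace with the aerial image.
    Mat src = imread("aerial.jpg");
    if (src.empty()) return -1;

    // HSV separates color (hue/saturation) from brightness,
    // which makes a ground color range easier to specify.
    Mat hsv;
    cvtColor(src, hsv, COLOR_BGR2HSV);

    // Example range for a brownish/sandy ground; these bounds
    // are assumptions and must be tuned for the real images.
    Mat groundMask;
    inRange(hsv, Scalar(10, 50, 50), Scalar(30, 255, 255), groundMask);

    // Remove small speckles, as the original pipeline does with erode.
    Mat element = getStructuringElement(MORPH_RECT, Size(10, 10));
    morphologyEx(groundMask, groundMask, MORPH_OPEN, element);

    // White = ground (free space), black = potential obstacle.
    imshow("ground mask", groundMask);
    waitKey(0);
    return 0;
}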

How to draw detected object with SIFT features on OpenCV 3.1?

Using this code to find matches between images:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main(int argc, char *argv[])
{
    //cv::initModule_nonfree();
    //initModule_features2d();
    Mat img_1 = imread("C:/Users/Dan/Desktop/0.jpg", 1);
    Mat img_2 = imread("C:/Users/Dan/Desktop/0.jpg", 1);

    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();

    //-- Step 1: Detect the keypoints
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    f2d->detect(img_1, keypoints_1);
    f2d->detect(img_2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors_1, descriptors_2;
    f2d->compute(img_1, keypoints_1, descriptors_1);
    f2d->compute(img_2, keypoints_2, descriptors_2);

    Mat out0;
    drawKeypoints(img_1, keypoints_1, out0);
    imshow("KeyPoint0.jpg", out0);

    //-- Step 3: Match descriptor vectors using BFMatcher
    BFMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    Mat img_matches = Mat::zeros(img_1.size(), CV_8UC3);
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);
    imshow("matches", img_matches);

    waitKey(0); // Keep the windows open until a key is pressed.
    return 0;
}
Since functions were changed in OpenCV 3.1, I looked for example code using SURF or SIFT, but could not find any.
How can I modify this code so it will draw a contour around the detected object, as in examples for earlier OpenCV versions?
You will need to use findHomography to get the transformation that relates your training image (img_1) to the image to be detected (img_2).
Then you can simply do a perspectiveTransform on a bounding box of your training image (at the origin), using the homography obtained, to place the correct bounding box on the detected image. A self-contained sketch follows the excerpt below.
Related code, taken from the ORB detection example:
// Excerpt from the planar-tracking sample: matched1/matched2, Points(),
// ransac_thresh, stats, object_bb, bb_min_inliers, and drawBoundingBox
// are defined in that sample.
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;

if (matched1.size() >= 4) {
    homography = findHomography(Points(matched1), Points(matched2),
                                RANSAC, ransac_thresh, inlier_mask);
}

for (unsigned i = 0; i < matched1.size(); i++) {
    if (inlier_mask.at<uchar>(i)) {
        int new_i = static_cast<int>(inliers1.size());
        inliers1.push_back(matched1[i]);
        inliers2.push_back(matched2[i]);
        inlier_matches.push_back(DMatch(new_i, new_i, 0));
    }
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;

vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
if (stats.inliers >= bb_min_inliers) {
    drawBoundingBox(frame_with_bb, new_bb);
}
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
            inlier_matches, res,
            Scalar(255, 0, 0), Scalar(255, 0, 0));
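To tie this back to the question's code, here is a minimal sketch that uses the keypoints, descriptors, and matches computed above (the 4-match minimum and the 3.0 px RANSAC threshold are assumptions to tune):
// Collect matched point coordinates from the question's variables.
std::vector<Point2f> obj_pts, scene_pts;
for (size_t i = 0; i < matches.size(); i++) {
    obj_pts.push_back(keypoints_1[matches[i].queryIdx].pt);
    scene_pts.push_back(keypoints_2[matches[i].trainIdx].pt);
}

if (obj_pts.size() >= 4) {
    // Robustly estimate the homography (3.0 px reprojection threshold).
    Mat H = findHomography(obj_pts, scene_pts, RANSAC, 3.0);
    if (!H.empty()) {
        // Bounding box of the training image, at the origin.
        std::vector<Point2f> obj_bb(4), scene_bb;
        obj_bb[0] = Point2f(0, 0);
        obj_bb[1] = Point2f((float)img_1.cols, 0);
        obj_bb[2] = Point2f((float)img_1.cols, (float)img_1.rows);
        obj_bb[3] = Point2f(0, (float)img_1.rows);
        perspectiveTransform(obj_bb, scene_bb, H);
        // Draw the projected box on the scene image.
        for (int i = 0; i < 4; i++)
            line(img_2, scene_bb[i], scene_bb[(i + 1) % 4], Scalar(0, 255, 0), 2);
    }
}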

Output producing 4 images side by side for single image provided in gradient calculation

The following code is used to calculate the normalized gradient at every pixel of an image. But when using imshow on the calculated gradient, instead of showing the gradient for the provided image it shows the gradient of the provided image 4 times (side by side).
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/core/core.hpp>
#include<iostream>
#include<math.h>
using namespace cv;
using namespace std;
Mat mat2gray(const Mat& src)
{
Mat dst;
normalize(src, dst, 0.0, 1.0, NORM_MINMAX);
return dst;
}
Mat setImage(Mat srcImage){
//GaussianBlur(srcImage,srcImage,Size(3,3),0.5,0.5);
Mat avgImage = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat gradient = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat norMagnitude = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat orientation = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
//Mat_<uchar> srcImagetemp = srcImage;
float dx,dy;
for(int i=0;i<srcImage.rows-1;i++){
for(int j=0;j<srcImage.cols-1;j++){
dx=srcImage.at<float>(i,j+1)-srcImage.at<float>(i,j);
dy=srcImage.at<float>(i+1,j)-srcImage.at<float>(i,j);
gradient.at<float>(i,j)=sqrt(dx*dx+dy*dy);
orientation.at<float>(i,j)=atan2(dy,dx);
//cout<<gradient.at<float>(i,j)<<endl;
}
}
GaussianBlur(gradient,avgImage,Size(7,7),3,3);
for(int i=0;i<srcImage.rows;i++){
for(int j=0;j<srcImage.cols;j++){
norMagnitude.at<float>(i,j)=gradient.at<float>(i,j)/max(avgImage.at<float>(i,j),float(4));
//cout<<norMagnitude.at<float>(i,j)<<endl;
}
}
imshow("b",(gradient));
waitKey();
return norMagnitude;
}
int main(int argc,char **argv){
Mat image=imread(argv[1]);
cvtColor( image,image, CV_BGR2GRAY );
Mat newImage=setImage(image);
imshow("a",(newImage));
waitKey();
}
Your incoming source image is of type CV_8UC1, and yet you read it as floats:
dx=srcImage.at<float>(i,j+1)-srcImage.at<float>(i,j);
dy=srcImage.at<float>(i+1,j)-srcImage.at<float>(i,j);
If running under the debugger, this should have thrown an assertion, which would have highlighted the problem.
Try changing those lines to use unsigned char as follows:
dx=(float)(srcImage.at<unsigned char>(i,j+1)-srcImage.at<unsigned char>(i,j));
dy=(float)(srcImage.at<unsigned char>(i+1,j)-srcImage.at<unsigned char>(i,j));
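Alternatively (a sketch, not part of the original answer), you could convert the image to float once before the loops and keep the existing at<float> accesses:
// Convert the 8-bit grayscale input to CV_32F once, so that
// srcImage.at<float>(...) reads the element type actually stored.
Mat srcFloat;
srcImage.convertTo(srcFloat, CV_32F);
// ...then use srcFloat.at<float>(i, j) inside the gradient loops.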

Image edge smoothing with opencv

I am trying to smooth the edges of an output image using the OpenCV framework, with the following steps, taken from https://stackoverflow.com/a/17175381/790842:
int lowThreshold = 10;
int ratio = 3;
int kernel_size = 3;
Mat src_gray, detected_edges, dst, blurred;

/// Convert the image to grayscale
cvtColor(result, src_gray, CV_BGR2GRAY);

/// Reduce noise with a 3x3 kernel
cv::blur(src_gray, detected_edges, cv::Size(5, 5));

/// Canny detector
cv::Canny(detected_edges, detected_edges, lowThreshold, lowThreshold * ratio, kernel_size);
// Works fine up to here; I am getting a perfect edge mask.

cv::dilate(detected_edges, blurred, result);
// I get "Assertion failed (src.channels() == 1 && func != 0) in countNonZero" while doing dilate.

result.copyTo(blurred, blurred);
cv::blur(blurred, blurred, cv::Size(3, 3));
blurred.copyTo(result, detected_edges);

UIImage *image = [UIImageCVMatConverter UIImageFromCVMat:result];
I would like to know whether I am going about this the right way, or what I am missing.
Thanks for any suggestions and help.
Update:
I have an image like the one below, obtained from the GrabCut algorithm; now I want to apply edge smoothing to it. As you can see, the image is not smooth.
Do you want to get something like this?
If yes, then here is the code:
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main(int argc, char **argv)
{
    cv::namedWindow("result");
    Mat img = imread("TestImg.png");
    Mat whole_image = imread("D:\\ImagesForTest\\lena.jpg");
    whole_image.convertTo(whole_image, CV_32FC3, 1.0 / 255.0);
    cv::resize(whole_image, whole_image, img.size());
    img.convertTo(img, CV_32FC3, 1.0 / 255.0);

    Mat bg = Mat(img.size(), CV_32FC3);
    bg = Scalar(1.0, 1.0, 1.0);

    // Prepare mask
    Mat mask;
    Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
    img_gray.convertTo(mask, CV_32FC1);
    threshold(1.0 - mask, mask, 0.9, 1.0, cv::THRESH_BINARY_INV);
    cv::GaussianBlur(mask, mask, Size(21, 21), 11.0);
    imshow("result", mask);
    cv::waitKey(0);

    // Re-get the image fragment with the smoothed mask
    Mat res;
    vector<Mat> ch_img(3);
    vector<Mat> ch_bg(3);
    cv::split(whole_image, ch_img);
    cv::split(bg, ch_bg);
    ch_img[0] = ch_img[0].mul(mask) + ch_bg[0].mul(1.0 - mask);
    ch_img[1] = ch_img[1].mul(mask) + ch_bg[1].mul(1.0 - mask);
    ch_img[2] = ch_img[2].mul(mask) + ch_bg[2].mul(1.0 - mask);
    cv::merge(ch_img, res);
    cv::merge(ch_bg, bg);

    imshow("result", res);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
And I think this link will be interesting for you too: Poisson Blending
I have followed these steps to smooth the edges of the foreground I got from GrabCut (a rough sketch of steps 1-3 follows below):
1. Create a binary image from the mask I got from GrabCut.
2. Find the contours of the binary image.
3. Create an edge mask by drawing the contour points. It gives the boundary edges of the foreground image I got from GrabCut.
4. Then follow the steps defined in https://stackoverflow.com/a/17175381/790842
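A rough sketch of steps 1-3, assuming mask is the CV_8UC1 mask returned by grabCut (where GC_FGD = 1 and GC_PR_FGD = 3 mark the foreground):
// 1. Binary image from the GrabCut mask: GC_FGD and GC_PR_FGD both
//    have bit 0 set, so masking with 1 selects the foreground.
Mat binMask = (mask & 1);
binMask *= 255; // scale to a 0/255 binary image

// 2. Find the contours of the binary foreground
//    (findContours modifies its input, so pass a clone).
vector<vector<Point>> contours;
findContours(binMask.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);

// 3. Draw the contour points to get an edge mask of the
//    foreground boundary.
Mat edgeMask = Mat::zeros(binMask.size(), CV_8UC1);
drawContours(edgeMask, contours, -1, Scalar(255), 1);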

Image Contour Detection Error: OpenCV, C++

I am trying to write a program to detect contours within an image using OpenCV in the C++ environment.
The problem is that I don't get a compile error but a runtime error instead. I have no idea why; I followed the book and the OpenCV documentation to build the code below and it should work fine, but it doesn't... any ideas on what might be wrong?
#include "iostream"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
//Load Image
image =imread("C:/Users/Tomazi/Pictures/Opencv/ayo.bmp");
//Convert Image to gray & blur it
cvtColor( image,
image_gray,
CV_BGR2GRAY );
blur( image_gray,
image_gray2,
Size(3,3) );
//Threshold Gray&Blur Image
threshold(image_gray2,
threshold_output,
thresh,
max_thresh,
THRESH_BINARY);
//2D Container
vector<vector<Point>> contours;
//Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
findContours(threshold_output,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE,
Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(),CV_8U,Scalar(255));
drawContours(result,contours,
-1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
//Create Window
char* DisplayWindow = "Source";
namedWindow(DisplayWindow, CV_WINDOW_AUTOSIZE);
imshow(DisplayWindow, contours);
waitKey(0);
return 1;
}
I bet that you are using the MSVC IDE. Anyway, your code has a lot of problems, and I've covered most of them on Stack Overflow. Here they are:
Escape the slashes
Code safely and check the return of the calls
How Visual Studio loads files at runtime
I suspect that your problem is that imread() is failing because it didn't find the file. The links above will help you fix that.
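As a concrete illustration of the advice to check return values, a minimal guard right after imread (a sketch; the path is the one from the question):
image = imread("C:/Users/Tomazi/Pictures/Opencv/ayo.bmp");
if (image.empty()) {
    cout << "Could not load the image; check the path." << endl;
    return -1;
}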