Image Manipulation - Outline - C++

Given below is the code that I am using to find the difference between 2 images.
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include<iostream>
int main()
{
char a,b;
cv::Mat frame;
cv::Mat frame2;
VideoCapture cap(0);
if(!cap.isOpened())
{
cout<<"Camera is not connected"<<endl;
getchar();
exit(0);
}
Mat edges;
namedWindow("Camera Feed",1);
cout<<"Ready for background?(y/Y)"<<endl;
cin>>a;
if(a=='y'||a=='Y')
{
cap>>frame;
cv::cvtColor(frame,frame,CV_RGB2GRAY);
cv::GaussianBlur(frame,frame,cv::Size(51,51),2.00,0,BORDER_DEFAULT);
}
cv::waitKey(5000);
cout<<"Ready for foreground?(y/Y)"<<endl;
cin>>b;
if(b=='y'||b=='Y')
{
cap>>frame2;
cv::cvtColor(frame2,frame2,CV_RGB2GRAY);
cv::GaussianBlur(frame2,frame2,cv::Size(51,51),2.00,0,BORDER_DEFAULT);
}
cv::Mat frame3;
cv::absdiff(frame,frame2,frame3);
imwrite("img_bw.jpg",frame3);
return 0;
}
The output is something like this.
I wanted to know if there is any way I can draw something like an outline around the body. Thanks.

I just tried the following method.
First I dilated the grayscale image, then applied adaptive thresholding to it.
Then I found contours in the image and, on the assumption that your body will be the biggest blob in the image, drew an outline around the biggest blob.
import cv2
import numpy as np

img = cv2.imread('sofqn.jpg')
gray = cv2.imread('sofqn.jpg', 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
gray = cv2.dilate(gray, kernel)
# 0 = ADAPTIVE_THRESH_MEAN_C, 1 = THRESH_BINARY_INV
thresh = cv2.adaptiveThreshold(gray, 255, 0, 1, 11, 2)
cont, hier = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_area = -1
best_cnt = None
for cnt in cont:
    area = cv2.contourArea(cnt)
    if area > max_area:
        max_area = area
        best_cnt = cnt
cv2.drawContours(img, [best_cnt], 0, [0, 255, 0], 2)
Below is the result:
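Since the question is in C++, here is a rough C++ translation of the same pipeline (a sketch only, untested; it assumes the difference image was saved as img_bw.jpg by the code in the question):
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat img  = cv::imread("img_bw.jpg");     // color copy for drawing
    cv::Mat gray = cv::imread("img_bw.jpg", 0);  // grayscale copy to process

    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(10, 10));
    cv::dilate(gray, gray, kernel);

    cv::Mat thresh;
    cv::adaptiveThreshold(gray, thresh, 255, cv::ADAPTIVE_THRESH_MEAN_C,
                          cv::THRESH_BINARY_INV, 11, 2);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(thresh, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // Assume the body is the biggest blob and outline it.
    double maxArea = -1;
    int best = -1;
    for (size_t i = 0; i < contours.size(); i++) {
        double area = cv::contourArea(contours[i]);
        if (area > maxArea) { maxArea = area; best = (int)i; }
    }
    if (best >= 0)
        cv::drawContours(img, contours, best, cv::Scalar(0, 255, 0), 2);

    cv::imwrite("outline.jpg", img);
    return 0;
}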

Related

How can I copy images using a corresponding mask in OpenCV C++?

Code:
#include <iostream>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace std;
using namespace cv;

int main()
{
    // Mat faceImage = imread("/Users/softech66/Downloads/images 2/unvese_space.png");
    // Mat glassBGR = imread("/Users/softech66/Downloads/images 2/sun.png");
    // /Users/softech66/Downloads/images 2/sun_mask.bmp
    // resize(glassBGR, glassBGR, Size(300,100));
    // check each pixel of glass and if it is white (255,255,255) then replace it with the face image pixel
    Mat img2 = imread("/Users/softech66/Downloads/images 2/sun.png", 0);
    Mat img4 = imread("/Users/softech66/Downloads/images 2/univese_space.png", 0);
    Mat img3 = imread("/Users/softech66/Downloads/images 2/sun_mask.bmp", 0);
    Mat img;
    cvtColor(imread("/Users/softech66/Downloads/images 2/sun.png", IMREAD_COLOR), img, cv::COLOR_RGB2RGBA);
    Mat mask = imread("/Users/softech66/Downloads/images 2/sun_mask.bmp", IMREAD_GRAYSCALE);
    // write the mask into the alpha channel
    for (int r = 0; r < img.rows; r++) {
        for (int c = 0; c < img.cols; c++) {
            uchar alpha = 0;
            if (r < mask.rows && c < mask.cols)
                alpha = mask.at<uchar>(r, c);
            img.at<Vec4b>(r, c)[3] = alpha;
        }
    }
    imwrite("result.png", img);
    Mat roi = img4(Rect(0, 0, img.cols, img2.rows));
    Mat mask1(roi.rows, roi.cols, roi.depth(), Scalar(1));
    img.copyTo(roi, mask1);
    // img4.copyTo(img);
    imshow("img3", img4);
    // imshow("img33", img4);
    waitKey(0);
}
The images above come in pairs, as you need to use the corresponding “mask” image to let OpenCV copy only the circle region into the universe-space image. Note that the mask images use the “.bmp” extension; this is because the BMP format provides higher precision for the bit storage. But it makes no difference to OpenCV, which loads them with “imread(..., 0)” in the same manner, the second parameter “0” indicating that a single-channel (grayscale) image should be loaded (see the sketch after the image list below).
[Images: Moon Mask, Earth Mask, Sun Mask, Universe Space, Moon image, Earth image, Sun image, Result image]
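For the actual mask-based copy, the core can be a single copyTo call with the real mask instead of the all-ones mask1 above. A minimal sketch using the question's own file paths (placing the sun at the top-left corner is an assumption):
// Sketch: copy only the pixels where the mask is non-zero.
cv::Mat sun      = cv::imread("/Users/softech66/Downloads/images 2/sun.png", cv::IMREAD_COLOR);
cv::Mat universe = cv::imread("/Users/softech66/Downloads/images 2/univese_space.png", cv::IMREAD_COLOR);
cv::Mat sunMask  = cv::imread("/Users/softech66/Downloads/images 2/sun_mask.bmp", cv::IMREAD_GRAYSCALE);

// Place the sun at the top-left corner (position is an assumption).
cv::Mat roi = universe(cv::Rect(0, 0, sun.cols, sun.rows));
sun.copyTo(roi, sunMask);   // the mask selects the circle region only
cv::imwrite("result.png", universe);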

OpenCV ArUco marker is not detected when I pass the Mat directly

Hope you can help me out. I am trying to use ArUco markers to find the corners of a window, so I added a marker to each corner. But when I try to detect the markers, they are not found. If I take a photo with a smartphone and then try to detect the markers in the photo, it works. Here is a minimal working code example:
#include <opencv2/opencv.hpp>
#include <stdlib.h>
#include <string>
#include <opencv2/aruco.hpp>

using namespace cv;
using namespace std;
using namespace aruco;

Dictionary dictionary;
int markerSize = 200;
vector<int> ids;
vector< vector<Point2f> > corners;

Mat addMarkertoImage(const Mat image)
{
    Mat retImage, marker0;
    image.copyTo(retImage);
    drawMarker(dictionary, 0, markerSize, marker0, 1);
    unsigned char value;
    // Add marker to top left corner
    for (int i = 0; i < marker0.rows; i++) {
        for (int j = 0; j < marker0.cols; j++) {
            value = marker0.at<uchar>(i, j);
            Vec3b colorValue = Vec3b(value, value, value);
            retImage.at<Vec3b>(i, j) = colorValue;
        }
    }
    return retImage;
}

int main()
{
    Mat image, image_marker;
    dictionary = getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    image = imread("bild.jpg", 1);
    image_marker = addMarkertoImage(image);
    detectMarkers(image_marker, dictionary, corners, ids);
    drawDetectedMarkers(image_marker, corners, ids);
    imshow("Display Image", image_marker);
    waitKey(0);
    return 0;
}
Any ideas why this does not work, while a photo taken with my iPhone camera does?
Meanwhile I solved the problem on my own: the markers were too big to be detected with the default configuration. I only needed to update the detector parameters, and the example in the question worked.
Edit:
Since I was asked about the changed parameters, I have added this code snippet with the changes:
ImageFinder::ImageFinder() {
    // initialize dictionary with markers
    dictionary = getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    dp = new DetectorParameters();
    dp->minDistanceToBorder = 0;
    dp->adaptiveThreshWinSizeMax = 400;
}
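For reference, here is a minimal sketch of how those parameters get passed to detectMarkers with the stock opencv_contrib ArUco API (the Ptr-based factory calls are an assumption about the OpenCV version in use; the answer's ImageFinder class wraps this differently):
// Sketch only: tune the detector and hand the parameters to detectMarkers.
cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::Ptr<cv::aruco::DetectorParameters> dp = cv::aruco::DetectorParameters::create();
dp->minDistanceToBorder = 0;          // markers may sit right at the image border
dp->adaptiveThreshWinSizeMax = 400;   // allow larger adaptive-threshold windows for big markers
cv::aruco::detectMarkers(image_marker, dict, corners, ids, dp);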

Output produces 4 images side by side for a single input image in gradient calculation

The following code calculates the normalized gradient at every pixel of an image. But when imshow is used on the calculated gradient, instead of showing the gradient for the provided image, it shows the gradient of the provided image 4 times (side by side).
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/core/core.hpp>
#include<iostream>
#include<math.h>
using namespace cv;
using namespace std;
Mat mat2gray(const Mat& src)
{
Mat dst;
normalize(src, dst, 0.0, 1.0, NORM_MINMAX);
return dst;
}
Mat setImage(Mat srcImage){
//GaussianBlur(srcImage,srcImage,Size(3,3),0.5,0.5);
Mat avgImage = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat gradient = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat norMagnitude = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
Mat orientation = Mat::zeros(srcImage.rows,srcImage.cols,CV_32F);
//Mat_<uchar> srcImagetemp = srcImage;
float dx,dy;
for(int i=0;i<srcImage.rows-1;i++){
for(int j=0;j<srcImage.cols-1;j++){
dx=srcImage.at<float>(i,j+1)-srcImage.at<float>(i,j);
dy=srcImage.at<float>(i+1,j)-srcImage.at<float>(i,j);
gradient.at<float>(i,j)=sqrt(dx*dx+dy*dy);
orientation.at<float>(i,j)=atan2(dy,dx);
//cout<<gradient.at<float>(i,j)<<endl;
}
}
GaussianBlur(gradient,avgImage,Size(7,7),3,3);
for(int i=0;i<srcImage.rows;i++){
for(int j=0;j<srcImage.cols;j++){
norMagnitude.at<float>(i,j)=gradient.at<float>(i,j)/max(avgImage.at<float>(i,j),float(4));
//cout<<norMagnitude.at<float>(i,j)<<endl;
}
}
imshow("b",(gradient));
waitKey();
return norMagnitude;
}
int main(int argc,char **argv){
Mat image=imread(argv[1]);
cvtColor( image,image, CV_BGR2GRAY );
Mat newImage=setImage(image);
imshow("a",(newImage));
waitKey();
}
Your incoming source image is of type CV_8UC1, and yet you read it as floats:
dx=srcImage.at<float>(i,j+1)-srcImage.at<float>(i,j);
dy=srcImage.at<float>(i+1,j)-srcImage.at<float>(i,j);
If running under the debugger, this should have thrown an assertion, which would have highlighted the problem.
Try changing those lines to use unsigned char as follows:
dx=(float)(srcImage.at<unsigned char>(i,j+1)-srcImage.at<unsigned char>(i,j));
dy=(float)(srcImage.at<unsigned char>(i+1,j)-srcImage.at<unsigned char>(i,j));
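Alternatively (my suggestion, not part of the original answer): convert the whole image to CV_32F once at the top of setImage, after which the existing at<float>() accesses are valid:
// Sketch: one-time conversion; the 1/255 scaling to [0,1] is optional.
Mat floatImage;
srcImage.convertTo(floatImage, CV_32F, 1.0 / 255.0);
// ...then read floatImage.at<float>(i, j) in the gradient loops.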

OpenCV: Having issues with cv::FAST

I'm trying to use the OpenCV FAST algorithm to detect corners in a video feed. The method call and set-up seem pretty straightforward, yet I'm running into a few problems. When I try to use this code:
while (run)
{
    clock_t begin, end;
    img = cvQueryFrame(capture);
    key = cvWaitKey(10);
    cvShowImage("stream", img);
    // cv::FAST variables
    int threshold = 9;
    vector<KeyPoint> keypoints;
    if (key == 'a') {
        //begin = clock();
        Mat mat(tempImg);
        FAST(mat, keypoints, threshold, true);
        //end = clock();
        //cout << "\n TIME FOR CALCULATION: " << double(diffClock(begin, end)) << "\n";
    }
I get this error:
OpenCV Error: Assertion failed (image.data && image.type() == CV_8U) in unknown
function, file ........\ocv\opencv\src\cvaux\cvfast.cpp, line 6039
So I figured it was a problem with the depth of the image, so I added this:
IplImage* tempImg = cvCreateImage(Size(img->width,img->height),8,1);
cvCvtColor(img,tempImg,CV_8U);
I get:
OpenCV Error: Bad number of channels (Incorrect number of channels for this conv
ersion code) in unknown function, file ........\ocv\opencv\src\cv\cvcolor.cpp
, line 2238
I've tried using a Mat instead of an IplImage to capture, but I keep getting the same kinds of errors.
Any suggestions or help?
Thanks in advance.
Here is the entire file, to make it easier for anyone:
#include "cv.h"
#include "cvaux.hpp"
#include "highgui.h"
#include <time.h>
#include <iostream>
double diffClock(clock_t begin, clock_t end);
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
//Create Mat img for camera capture
IplImage* img;
bool run = true;
CvCapture* capture= 0;
capture = cvCaptureFromCAM(-1);
int key =0;
cvNamedWindow("stream", 1);
while(run)
{
clock_t begin,end;
img = cvQueryFrame(capture);
key = cvWaitKey(10);
cvShowImage("stream",img);
//Cv::FAST variables
int threshold=9;
vector<KeyPoint> keypoints;
if(key=='a'){
//begin = clock();
IplImage* tempImg = cvCreateImage(Size(img->width,img->height),8,1);
cvCvtColor(img,tempImg,CV_8U);
Mat mat(img);
FAST(mat,keypoints,threshold,true);
//end = clock();
//cout << "\n TIME FOR CALCULATION: " << double(diffClock(begin,end)) << "\n" ;
}
else if(key=='x'){
run= false;
}
}
cvDestroyWindow( "stream" );
return 0;
}
Whenever you have a problem using the OpenCV API, go check the tests/examples available in the source code: fast.cpp
This practice is extremely useful and educational. Now, if you take a look at that code, you will notice that the image gets converted to grayscale before cv::FAST() is called on it:
Mat mat(tempImg);
Mat gray;
cvtColor(mat, gray, CV_BGR2GRAY);
FAST(gray,keypoints,threshold,true);
Seems pretty straightforward, indeed.
You need to change this:
cvCvtColor(img,tempImg,CV_8U);
to:
cvCvtColor(img,tempImg,CV_BGR2GRAY);
See the cvCvtColor documentation for the list of valid conversion codes.
Good luck.
I started getting the same message with code that had worked previously, and I was certain my Mat was 8-bit grayscale. It turned out that one of the images I was trying to process was no longer there. So in my case it was a misleading error message.
Take a look at this sample code. The code you are using targets a quite outdated version of OpenCV; in this sample you will find how feature detectors should be used now.
The sample is generic over several feature detectors (including FAST), which is why it looks a bit more complicated.
http://code.opencv.org/projects/opencv/repository/entry/branches/2.4/opencv/samples/cpp/matching_to_many_images.cpp
You will also find more samples in the parent directory.
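For reference, the core of that sample reduces to the generic 2.4-era detector interface; a minimal sketch (assuming the OpenCV 2.4 features2d API and a single-channel CV_8U Mat named gray):
// Sketch: create a FAST detector through the generic factory and run it.
cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("FAST");
std::vector<cv::KeyPoint> keypoints;
detector->detect(gray, keypoints);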
The following code should give the desired result. For the sake of example I am using a single image, but you can apply the same idea to video frames:
Mat img = imread("IMG.jpg", IMREAD_UNCHANGED);
if (img.empty())
{
    cout << "File not available for reading" << endl;
    return -1;
}
Mat grayImage;
if (img.channels() > 2) {
    cvtColor(img, grayImage, CV_BGR2GRAY);  // converting color to gray image
}
else {
    grayImage = img;
}
double sigma = 1;
GaussianBlur(grayImage, grayImage, Size(), sigma, sigma); // applying gaussian blur to remove some noise, if present
int thresholdCorner = 40;
vector<KeyPoint> keypointsCorners;
FAST(grayImage, keypointsCorners, thresholdCorner, true); // applying the FAST keypoint detector
if (keypointsCorners.size() > 0) {
    cout << keypointsCorners.size() << endl;
}
// Drawing a circle around each corner
for (size_t i = 0; i < keypointsCorners.size(); i++)
{
    circle(grayImage, keypointsCorners.at(i).pt, 5, Scalar(0), 2, 8, 0);
}
cv::namedWindow("Display Image");
cv::imshow("Display Image", grayImage);
cv::waitKey(0);
cv::destroyWindow("Display Image");

Camera calibration with OpenCV (assertion failed)

I am trying to get the camera calibration parameters using the OpenCV camera calibration functions. I have a video that shows a checkerboard in different positions, and I am trying to find the calibration parameters and the points in the video. But I haven't gotten past the calibration phase yet. I can find the corners of the checkerboard and show them in an OpenCV window, but when it comes to the line:
calibrateCamera(objectPoints, imagePoints, ..............)
it throws an exception and stops.
I get the following error: OpenCV Error: Assertion failed (nimages > 0 && nimages == (int)imagePoints1.total() && ...)
Here is my code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "C:/opencv/include/opencv/cv.h"
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
std::vector<cv::Point2f> imageCorners;
std::vector<cv::Point3f> objectCorners;
std::vector<std::vector<cv::Point3f>> objectPoints;
std::vector<std::vector<cv::Point2f>> imagePoints;
void addPoints(const std::vector<cv::Point2f>&imageCorners, const std::vector<cv::Point3f>& objectCorners)
{
// 2D image points from one view
imagePoints.push_back(imageCorners);
// corresponding 3D scene points
objectPoints.push_back(objectCorners);
}
int main()
{
int key;
cv::Mat image;
cv::Mat gray_image;
VideoCapture cap("here goes path of the file");
if (!cap.isOpened()) // check if we succeeded
cout<<"failed";
else
cout<<"success";
cvNamedWindow( "video",0);
cv::Size boardSize(8,6);
// output Matrices
cv::Mat cameraMatrix;
std::vector<cv::Mat> rvecs, tvecs;
cv::Mat distCoeffs;
for (int i=0; i<boardSize.height; i++)
{
for (int j=0; j<boardSize.width; j++)
{
objectCorners.push_back(cv::Point3f(i, j, 0.0f));
}
}
int frame=1;
int corner_count=0;
while(1)
{
if(cap.read(image))
{
frame++;
if(frame%20==0)
{
if(waitKey(30) >= 0) break;
bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
cvtColor( image, gray_image, CV_RGB2GRAY );
addPoints(imageCorners, objectCorners);
//bool found = cv::findChessboardCorners(image,boardSize, imageCorners);
cv::drawChessboardCorners(gray_image,boardSize, imageCorners,found);
imshow( "video", gray_image );
}
}
else
break;
}
int flag=0;
std::string text="";
for (int i=1; i<imagePoints.size();i++)
{
std::stringstream out;
out << imagePoints[i];
text=out.str();
cout<<text<<endl;
}
calibrateCamera(objectPoints,imagePoints,gray_image.size(), cameraMatrix, distCoeffs, rvecs, tvecs, flag);
return 0;
}
Print the sizes of all your std::vectors; I suspect you are passing an empty vector to that function.
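For example, a quick sanity check (a sketch; variable names taken from the question) placed just before the calibrateCamera() call:
// Hypothetical sanity check before calibrateCamera().
cout << "imagePoints views: " << imagePoints.size()
     << ", objectPoints sets: " << objectPoints.size() << endl;
CV_Assert(!imagePoints.empty() && imagePoints.size() == objectPoints.size());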
EDIT:
I've shared some instructions in this answer on how to do camera calibration. Those references include working source code. You'll probably have to make a small adaptation to those programs so they work with video instead.
You should look at this:
http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#bool findChessboardCorners(InputArray image, Size patternSize, OutputArray corners, int flags)
It says your source chessboard view must be an 8-bit grayscale or color image, so you must use this:
bool found = cv::findChessboardCorners(gray_image, boardSize, imageCorners);
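Note that in the question's loop, gray_image is only filled in after findChessboardCorners has already run, so the conversion has to move up; a sketch of the reordered loop body (also storing points only when the board was actually found, which ties in with the empty-vector suspicion above):
// Sketch: convert first, detect on the gray image, store points only on success.
cvtColor(image, gray_image, CV_RGB2GRAY);
bool found = cv::findChessboardCorners(gray_image, boardSize, imageCorners);
if (found)
    addPoints(imageCorners, objectCorners);
cv::drawChessboardCorners(gray_image, boardSize, imageCorners, found);
imshow("video", gray_image);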