Adding images using opencv - c++

I'm trying to add several images using OpenCV. I think my source code should be correct, and it compiles without any problems, but when I run the program an error occurs in the for loop, and I don't understand why this is happening.
#include <iostream>
#include <sys/types.h>
#include <dirent.h>
#include <errno.h>
#include <vector>
#include <string>
#include <fstream>
#include <cv.h>
#include <highgui.h>
using namespace std;
int get_files(string dir,
vector<string> &files);
int main( int argc, char** argv ){
//---------- Get the names of all *.png files in the directory
string directory = string(".");
vector<string> files = vector<string>();
get_files(directory,files);
//---------- Erase all filenames that aren't *.png files
vector<string> files_format = vector<string>();
for (unsigned int ii = 0; ii < files.size(); ii++) {
files_format.push_back(files[ii]);
string::iterator it;
string format;
files_format[ii].erase(0,files_format[ii].length()-3);
if (files_format[ii] != "png") files.erase(files.begin() + ii);
}
files.erase(files.begin()); // in order to remove the ".." in the beginning
int number_of_files = files.size();
//---------- Create the necessary images
if (files.size() == 0)
return -1;
IplImage* img_firstimage = cvLoadImage(files[0].c_str());
IplImage* img_totalsum = cvCreateImage(cvGetSize(img_firstimage), 8, 1 );
cvCvtColor(img_firstimage, img_totalsum, CV_BGR2GRAY );
//---------- Apply threshold
cvThreshold(img_totalsum, img_totalsum, 150, 1, 1);
//---------- Add all the images
for (unsigned int ii=1; ii < files.size(); ii++){
IplImage* img_load = cvLoadImage(files[ii].c_str());
IplImage* img_add = cvCreateImage(cvGetSize(img_load), 8, 1 );
cvCvtColor(img_load, img_add, CV_BGR2GRAY );
cvThreshold(img_add, img_add, 150, 1, 1);
//----- add the image to the total sum -----
cvAdd(img_totalsum, img_add, img_totalsum);
// ----- release images -----
cvReleaseImage(&img_load);
cvReleaseImage(&img_add);
}
//---------- Invert the total sum image
// -> dense regions are plotted in black
//cvNot(img_totalsum, img_totalsum);
cvNot(img_firstimage, img_firstimage);
//---------- Show the images
cvShowImage("Density Distribution", img_totalsum);
cvShowImage("Negative", img_firstimage);
cvWaitKey(0);
// ----- release images -----
cvReleaseImage(&img_firstimage);
cvReleaseImage(&img_totalsum);
return 0;
}
int get_files(string dir,
vector<string> &files){
DIR *dp;
struct dirent *dirp;
if((dp = opendir(dir.c_str())) == NULL) {
cout << "Error(" << errno << ") opening " << dir << endl;
return errno;
}
while ((dirp = readdir(dp)) != NULL) {
files.push_back(string(dirp->d_name));
}
closedir(dp);
return 0;
}

It seems you release your img_add in every loop iteration, but it is created only once. Move the cvReleaseImage(&img_add); instruction outside (directly below) your for loop. That should fix it.
EDIT:
Okay, it seems you fixed that already. Does it work now?
Btw, creating and releasing img_add inside your for loop for every newly loaded image is not necessary and possibly slower, because of the repeated memory allocation and deallocation. It would be better to allocate it before entering the loop and release it after the loop.
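For illustration, a minimal sketch of that change, assuming all the *.png files have the same dimensions as the first image (the original code sizes img_add per loaded image, so this only holds under that assumption):
IplImage* img_add = cvCreateImage(cvGetSize(img_firstimage), 8, 1); // allocated once
for (unsigned int ii = 1; ii < files.size(); ii++) {
    IplImage* img_load = cvLoadImage(files[ii].c_str());
    cvCvtColor(img_load, img_add, CV_BGR2GRAY);   // reuse the same grayscale buffer
    cvThreshold(img_add, img_add, 150, 1, 1);
    cvAdd(img_totalsum, img_add, img_totalsum);   // accumulate into the running sum
    cvReleaseImage(&img_load);                    // the loaded image still changes every iteration
}
cvReleaseImage(&img_add);                         // released once, after the loop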

I solved the problem. I had some other files in the working directory that weren't *.png files, and then the loop didn't work. It's absolutely clear that the program couldn't load those other files and work with them... I just don't understand why the part of the program that should take care of this problem isn't working... Somehow the if (files_format[ii] != "png") files.erase(files.begin() + ii); didn't work properly.
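For what it's worth, a likely reason the filter misbehaves is that files.erase(files.begin() + ii) shifts the following element into slot ii, and the ii++ in the loop header then skips it, so two or more non-png files in a row slip through. A minimal sketch of a filter that avoids this (no C++11 needed, reusing the names from the question):
// Keep only names ending in ".png"; erase() returns an iterator to the next element,
// so the loop only advances when nothing was removed.
for (vector<string>::iterator it = files.begin(); it != files.end(); ) {
    const string& name = *it;
    bool is_png = name.size() >= 4 &&
                  name.compare(name.size() - 4, 4, ".png") == 0;
    if (is_png)
        ++it;
    else
        it = files.erase(it);
}
This also drops the "." and ".." entries, so the extra files.erase(files.begin()) afterwards would no longer be needed.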


Converting webcam program to process one image

I am currently trying to modify a program that takes in a webcam stream as input. The problem is that when I try to alter the program to use a single image, it doesn't display the output that I am expecting, e.g. what I get with the video stream (code below):
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
VideoCapture videoCapture(0);
videoCapture.set(CV_CAP_PROP_SETTINGS, 1);
if (!videoCapture.isOpened()) {
cout << "Can't find camera!" << endl;
return -1;
}
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
for (int i = 0; i < 2; i++)
{
videoCapture >> frame;
frameOut = frame.clone();
skinDetector.drawSkinColorSampler(frameOut);
foreground = backgroundRemover.getForeground(frame);
faceDetector.removeFaces(frame, foreground);
handMask = skinDetector.getSkinMask(foreground);
fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
imshow("output", frameOut);
imshow("foreground", foreground);
imshow("handMask", handMask);
imshow("handDetection", fingerCountDebug);
if (i == 0)
{
backgroundRemover.calibrate(frame);
skinDetector.calibrate(frame);
}
}
waitKey(0);
}
The output shows a detection, whereas if I modify the code so that frame doesn't read from the video stream, the output shows nothing at all. Can anybody help fix this? EDIT: Due to confusion from some members of the community, the modified code that reads in a single image is below:
#include "opencv2/opencv.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
using namespace cv;
using namespace std;
int main(int, char**) {
string imageName("C:/Users/whoever/Desktop/hand_test.jpg"); // by default
Mat image;
image = imread(imageName.c_str(), IMREAD_COLOR); // Read the file
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
for (int i = 0; i < 2; i++)
{
frame = image;
frameOut = frame.clone();
skinDetector.drawSkinColorSampler(frameOut);
foreground = backgroundRemover.getForeground(frame);
faceDetector.removeFaces(frame, foreground);
handMask = skinDetector.getSkinMask(foreground);
fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
imshow("output", frameOut);
imshow("foreground", foreground);
imshow("handMask", handMask);
imshow("handDetection", fingerCountDebug);
if (i == 0)
{
cout << "Calibrating...";
backgroundRemover.calibrate(frame);
skinDetector.calibrate(frame);
}
}
waitKey(0);
}
The original code processes a different image captured from the camera each time it goes round the loop and outputs the differences. Since you are now using the same image every time round, there are never any differences, hence the output is completely blank. (Note that it will still be playing the output as a video, just a constantly blank one.)
The first line in the for loop is where it grabs a new image from the camera:
videoCapture >> frame;
As you can see in your updated code you are removing this and just using the same image again:
frame = image;
Try saving 2 different images instead and have the program load in a different one each time round the loop.
Here is a fairly brute force way to do it which you could improve to load a different file each time it loops, use arrays and so on:
string imageName1("C:/Users/whoever/Desktop/hand_test_1.jpg"); // by default
string imageName2("C:/Users/whoever/Desktop/hand_test_2.jpg"); // by default
Mat image1;
Mat image2;
image1 = imread(imageName1.c_str(), IMREAD_COLOR); // Read the file
image2 = imread(imageName2.c_str(), IMREAD_COLOR); // Read the file
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
for (int i = 0; i < 2; i++)
{
if (i == 0) { frame = image1; } else { frame = image2; }
...
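A slightly tidier variant of the same idea, with the two paths in an array so the loop index picks the image (the file names are placeholders, as above):
string imageNames[2] = {
    "C:/Users/whoever/Desktop/hand_test_1.jpg",
    "C:/Users/whoever/Desktop/hand_test_2.jpg"
};
Mat images[2];
for (int i = 0; i < 2; i++) {
    images[i] = imread(imageNames[i], IMREAD_COLOR);
    if (images[i].empty()) {           // bail out early if a file is missing
        cout << "Could not read " << imageNames[i] << endl;
        return -1;
    }
}
for (int i = 0; i < 2; i++) {
    frame = images[i];                 // a different image each time round the loop
    // ... rest of the processing loop unchanged ...
}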

Either segmentation fault 11 or (-215) N >= K

I am trying to create a Bag of Visual Words program, but I am running into an issue. Every time I run the program, I either get a segmentation fault: 11 error, or, if I change the dictSize variable, I get an error: (-215) N >= K in function kmeans. I have tried resizing the images and using different ones, but nothing seems to help. Here is what I have up to now:
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <stdio.h>
#include <dirent.h>
#include <string.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv) {
//=================================== LEARN ===================================
struct dirent *de = NULL;
DIR *d = NULL;
d = opendir(argv[1]);
if(d == NULL)
{
perror("Couldn't open directory");
return(2);
}
Mat input;
vector<KeyPoint> keypoints;
Mat descriptor;
Mat featuresUnclustered;
Ptr<DescriptorExtractor> detector = xfeatures2d::SIFT::create();
while((de = readdir(d))){
if ((strcmp(de->d_name,".") != 0) && (strcmp(de->d_name,"..") != 0) && (strcmp(de->d_name,".DS_Store") != 0)) {
char fullPath[] = "./";
strcat(fullPath, argv[1]);
strcat(fullPath, de->d_name);
printf("Current File: %s\n",fullPath);
input = imread(fullPath,CV_LOAD_IMAGE_GRAYSCALE);
cout << "Img size => x: " << input.size().width << ", y: " << input.size().height << endl;
// If the incoming frame is too big, resize it
if (input.size().width > 3000) {
double ratio = (3000.0)/(double)input.size().width;
resize(input, input, cvSize(0, 0), ratio, ratio);
cout << "New size => x: " << input.size().width << ", y: " << input.size().height << endl;
}
detector->detect(input, keypoints);
detector->compute(input, keypoints, descriptor);
featuresUnclustered.push_back(descriptor);
}
}
closedir(d);
int dictSize = 200;
TermCriteria tc(CV_TERMCRIT_ITER,100,0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictSize,tc,retries,flags);
Mat dictionary = bowTrainer.cluster(featuresUnclustered);
FileStorage fs("dict.yml",FileStorage::WRITE);
fs << "vocabulary" << dictionary;
fs.release();
return 0;
}
char fullPath[] = "./";
strcat(fullPath, argv[1]);
strcat(fullPath, de->d_name);
That part of your code is a serious bug (undefined behavior, but seg fault most likely).
strcat does not allocate any extra space for the concatenation. It only overwrites whatever follows the terminating null in the first string.
Your fullPath is allocated with just enough space for the initial 2 characters plus terminating null. Whatever follows that terminating null may be memory belonging to some other part of your program.
If you know the maximum file path length for your OS, you can use the crude correction of putting that max plus 2 as a number (or named constant) between the [] in the declaration of fullPath.
The less crude correction is to compute the required length of the string you want to build, malloc that much space (be sure to count the terminating null), and combine the three strings there.
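Since the rest of the program is C++ anyway, here is a minimal sketch of a fix that sidesteps the manual length calculation altogether (add #include <string> at the top; the "./" prefix is kept as in the original):
// std::string grows as needed, so there is no fixed-size buffer to overflow.
string fullPath = string("./") + argv[1] + de->d_name;
printf("Current File: %s\n", fullPath.c_str());
input = imread(fullPath, CV_LOAD_IMAGE_GRAYSCALE);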

debug assertion failed vc\include\vector vector iterator + offset out of range in chamferMatching Opencv

I am stuck with a problem while implementing a chamfer matching program in OpenCV:
https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/chamfer.cpp?rev=4194
Following is the code. It reads a template image and a test image. I am using VS 2008 and OpenCV 2.4.6.
#include "stdafx.h"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
IplImage *src;
src = cvLoadImage("C:\\Users\\JOSHI\\Desktop\\Images\\logo_in_clutter.png",1);
Mat img=cvarrToMat(src);
imshow("Mat",img);
src = cvLoadImage("C:\\Users\\JOSHI\\Desktop\\Images\\logo.png",1);
Mat tpl=cvarrToMat(src);
imshow("Mat",tpl);
Mat cimg;
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
Canny(img, img, 5, 50, 3);
Canny(tpl, tpl, 5, 50, 3);
vector<vector<Point> > results;
vector<float> costs;
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
cout << "not found;\n";
return 0;
}
size_t i, n = results[best].size();
for( i = 0; i < n; i++ )
{
Point pt = results[best][i];
if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
}
imshow("result", cimg);
waitKey();
return 0;
}
This is the error image.
Can you suggest why I am getting this error? I am new to OpenCV and image processing.
I had the same problem. Solution: http://code.opencv.org/issues/3603
You need to download the OpenCV source, open chamfermatching.cpp and comment out this line:
~Matching()
{
for (size_t i = 0; i<templates.size(); i++) {
//delete templates[i];
}
}
Then you need to rebuild OpenCV. After this it should work.

dispose is not a member of GDI+ Image class

So I have a program that pulls random images from a folder, creates a collage out of them, and sets it as the Windows wallpaper, which seems to work fine. So I thought I would put in a sleep timer and let it update itself automatically without me having to run it every half hour or whatever. I did that and it works great, but I ran into a memory leak that wasn't noticed before I started looping it. I am attempting to dispose of the GDI+ objects, but I keep getting the error that dispose is not a member of Gdiplus::Image.
I am loading a picture into an Image object, then resizing it and putting it into an array of Images; then I would like to dispose of the first Image. I would also like to dispose of the array after I finish working with the images in it.
This is being done with an old copy of VS2005.
#include <windows.h>
#include <objidl.h>
#include <gdiplus.h>
#include <string>
#include <iostream>
#include <vector>
#include <dirent.h>
#include <time.h>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <conio.h>
#include "cwp05rnd.h"
using namespace Gdiplus;
using namespace std;
#pragma comment (lib,"Gdiplus.lib")
#pragma comment (lib, "user32.lib")
int main()
{
GdiplusStartupInput gdiplusStartupInput;
ULONG_PTR gdiplusToken;
HDC hdc;
Graphics graphics(hdc);
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
CLSID jpegClsid;
GetEncoderClsid(L"image/jpeg", &jpegClsid);
SetCurrentDirectoryA("E:\\Photos");
ofstream outfile;
outfile.open ("outimgs.txt");
ofstream outfile2;
outfile2.open("imgpos.txt");
srand(time(NULL));
init_genrand(time(NULL));
vector<string> dirlist;
DIR *d;
struct dirent *dir;
int i=0;
d=opendir(".");
if (d)
{
while ((dir=readdir(d)) != NULL)
{
i++;
dirlist.push_back(dir->d_name);
}
closedir(d);
}
Image wp(L"E:\\Dropbox\\Photos\\wallpaper.jpg");
Graphics* wpimage = Graphics::FromImage(&wp);
int r;
int rvsize=100;
int rv[100]={0};
string img;
std::wstring wimg;
const wchar_t* rimg;
double cwidth;
double cheight;
double ratio;
int nheight;
int counter=0;
int full = 0;
int tries = 0;
int hfull = 0;
int imgnum =0;
int last=0;
Image* newpic[10];
while ( tries <10)
{
redo:
tries++;
int newrv=0;
while (newrv ==0)
{
r=genrand_int32()%i;
for (int k=0; k < rvsize; k++)
{
if (rv[k] > 0 && rv[k]==r )
{
break;
}
if (rv[k]==0 && r < i)
{
newrv =1;
rv[k]=r;
last=k;
break;
}
if (rv[k] ==0)
{
break;
}
}
}
img = dirlist[r];
if (img[0]=='.')
{
newrv=0;
goto redo;
}
wimg = std::wstring(img.begin(),img.end());
rimg = wimg.c_str();
Image pic(rimg);
cwidth = pic.GetWidth();
cheight = pic.GetHeight();
if (cheight ==0)
{
outfile2 << "error" << img << endl;
rv[last]=0;
system("pause");
goto redo;
}
ratio = cwidth/cheight;
nheight = nwidth/ratio;
pic.RotateFlip(Rotate180FlipNone);
pic.RotateFlip(Rotate180FlipNone);
newpic[imgnum] = pic.GetThumbnailImage(nwidth,nheight,NULL,NULL);
delete pic[0];
imgnum = imgnum + 1;
}
Then there is a long section of flips and rotates on the images in newpic, according to various random values.
wpimage->DrawImage(newpic[k],(j*nwidth),(((k+1)*whitespace)+htot),nwidth,nh[k]);
wp.Save(L"C:\\Temp\\wallpaper\\nwallpaper.jpg", &jpegClsid, NULL);
delete newpic;
setWall();
delete wpimage;
delete wp;
return 0;
}
When I attempt to delete the Image objects, I get an error that says either it cannot delete objects that are not pointers, or it cannot convert from Gdiplus::Image to void*.
Any advice would be appreciated.
I noticed you have Image pic(rimg);
but you are doing delete pic[0];
pic is not a pointer: it is not dynamically allocated, nor is it an array.
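For illustration, a rough sketch of how the ownership falls out here (assuming the rest of the loop stays as in the question): the stack-allocated pic needs no delete at all, while the pointers returned by GetThumbnailImage and Graphics::FromImage are what should be deleted explicitly:
Image pic(rimg);                          // stack object: its destructor runs when it goes out of scope
newpic[imgnum] = pic.GetThumbnailImage(nwidth, nheight, NULL, NULL); // heap-allocated by GDI+
imgnum = imgnum + 1;
// no "delete pic" here

// ... later, after the collage has been drawn and saved ...
for (int k = 0; k < imgnum; k++)
    delete newpic[k];                     // delete each thumbnail, not the array name itself
delete wpimage;                           // Graphics* returned by Graphics::FromImage
// wp is also a stack object, so it needs no delete either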
EDIT:
Oh yeah, if you already solved this, I suggest you close the question or at least mention it...

Blank frames when explicitly copying pixel values in OpenCV

I have been porting some video processing code to C++ using OpenCV 2.4.3. The following test program closely mimics how my code will read each frame from a video, operate on its contents, and then write new frames to a new video file.
Strangely, the output frames are entirely black when the pixels are set individually, but are written correctly when the entire frame is cloned.
In practice, I'd use the two macros to access and assign desired values, but the sequential scan used in the example shows the idea more clearly.
Does anyone know where I'm going wrong?
test.cpp:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
#define RGB_REF(PR,NC,R,C,CH) (*((PR) + ((3*(NC)*(R)+(C))+(CH))))
#define GRAY_REF(PR,NC,R,C) (*((PR) + (NC)*(R)+(C)))
int main(int argc, char* argv[])
{
string video_path(argv[1]);
cerr << "Video path is " + video_path + "\n";
VideoCapture capture(video_path);
if ( !capture.isOpened() )
{
cerr << "Input file could not be opened\n";
return 1;
} else
{
string output_path(argv[2]);
VideoWriter output;
int ex = (int)capture.get(CV_CAP_PROP_FOURCC);
Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH),
(int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));
output.open(output_path,ex,capture.get(CV_CAP_PROP_FPS),S,true);
if ( !output.isOpened() )
{
cerr << "Output file could not be opened\n";
return 1;
}
unsigned int numFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
unsigned int m = (unsigned int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
unsigned int n = (unsigned int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
unsigned char* im = (unsigned char*) malloc(m*n*3*sizeof(unsigned char));
unsigned char* bw = (unsigned char*) malloc(m*n*3*sizeof(unsigned char));
Mat frame(m,n,CV_8UC3,im);
Mat outputFrame(m,n,CV_8UC3,bw);
for (size_t i=0; i<numFrames; i++)
{
capture >> frame;
for (size_t x=0;x<(3*m*n);x++)
{
bw[x] = im[x];
}
output << outputFrame; // blank frames
// output << frame; // works
// output << (outputFrame = frame); // works
}
}
}
When you query a frame from VideoCapture with capture >> frame;, frame is modified; it may get a new data buffer, so im no longer points to the buffer of frame.
Try
bw[x] = frame.ptr()[x];
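A minimal sketch of the copy loop with that change, assuming the decoded frames are stored contiguously (typical for VideoCapture output):
for (size_t i = 0; i < numFrames; i++)
{
    capture >> frame;                          // frame may now own a different buffer than im
    const unsigned char* src = frame.ptr();    // always read from frame's current buffer
    for (size_t x = 0; x < (3*m*n); x++)
    {
        bw[x] = src[x];
    }
    output << outputFrame;                     // outputFrame still wraps bw, so it sees the copied pixels
}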