Unspecified error (The function is not implemented.) - c++

I would like some advice on how to solve this error.
I'm trying a sample program to check the opencv_contrib extra modules in a build I configured with CMake.
The error message is the one in the title: "Unspecified error (The function is not implemented.)".
And this is the sample code that I used:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void*);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("Baraya.jpg", 1);
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
waitKey(0);
return(0);
}
/** #function thresh_callback */
void thresh_callback(int, void*)
{
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
/// Find contours
findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Point2f>center(contours.size());
vector<float>radius(contours.size());
for (int i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
boundRect[i] = boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);
}
/// Draw polygonal contour + bonding rects + circles
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point());
rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0);
circle(drawing, center[i], (int)radius[i], color, 2, 8, 0);
}
/// Show in a window
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawing);
}

There is no error in the code you posted; it works fine if OpenCV is installed properly. This error occurs because OpenCV was built without any windowing system support.
The error comes from this part of the OpenCV highgui source:
#if defined (HAVE_WIN32UI)   // see window_w32.cpp
#elif defined (HAVE_GTK)     // see window_gtk.cpp
#elif defined (HAVE_COCOA)   // see window_cocoa.mm
#elif defined (HAVE_CARBON)  // see window_carbon.cpp
#elif defined (HAVE_QT)      // see window_QT.cpp
#elif defined (WINRT) && !defined (WINRT_8_0) // see window_winrt.cpp
#else

// No windowing system present at compile time ;-(
//
// We will build place holders that don't break the API but give an error
// at runtime. This way people can choose to replace an installed HighGUI
// version with a more capable one without a need to recompile dependent
// applications or libraries.

void cv::setWindowTitle(const String&, const String&)
{
    CV_Error(Error::StsNotImplemented, "The function is not implemented. "
        "Rebuild the library with Windows, GTK+ 2.x or Carbon support. "
        "If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script");
}
So re-run CMake with WITH_WIN32UI enabled, since you are on Windows, and optionally also with WITH_QT.
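For example, from the build directory where you originally configured OpenCV (the path below is just a placeholder for your setup), the reconfiguration and rebuild could look roughly like this:

    cd path\to\opencv\build
    cmake -D WITH_WIN32UI=ON -D WITH_QT=ON ..
    cmake --build . --config Release

After rebuilding, make sure your application links against the rebuilt highgui library, otherwise it will keep hitting the stub shown above.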

Related

Crop and show image that I get from Haar Cascade

I have trained a Haar cascade and now I need to work with the detected objects. How can I crop them from the original image and show them in a new window (or in multiple windows if I find 2 objects in the image)? Here is my code (OpenCV version 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the face feature data" << endl;
        return -1;
    }

    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");
    CvRect AssignRect = Rect(0, 0, src.cols, src.rows / 2);
    Mat srcImage = src(AssignRect);

    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);

    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3, 3));

    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }

    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of each detected object. You just need each rectangle's left/top coordinates, width, and height, and you already have them. All you need to do is crop each rectangle by creating a Mat from it and show it in a separate window.
You can check here to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    Rect crop_found(trafficLights[i].x, trafficLights[i].y, trafficLights[i].width, trafficLights[i].height);
    Mat found(src, crop_found);
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}

OpenCV last convexity defect not right

I am trying to write code to track hands. I am using the convexity defects function to find fingers, but for some reason there always seems to be a problem with the last defect.
Here is a picture of what I'm talking about (sorry, I'm new to the forum, so I cannot post images directly).
The cyan line is the contour, the yellow line is the hull points, and the red lines are the defect points. As you can see, the last defect point detects the defect from the wrong side of the contour.
Here is my code:
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
VideoCapture cap(0);
Mat src, gray, background, binary, diff;
cap >> background;
cvtColor(background, background, CV_BGR2GRAY);
vector<vector<Point>> contours;
vector < vector<int>> hullI = vector<vector<int>>(1);
vector < vector<Point>> hullP = vector<vector<Point>>(1);
vector<Vec4i> defects;
while (waitKey(30)!='q') {
cap >> src;
cvtColor(src, gray, CV_BGR2GRAY);
blur(gray, gray, Size(3, 3));
absdiff(gray, background, diff);
threshold(diff, binary, 15, 255, THRESH_BINARY);
erode(binary, binary, Mat(Size(5, 5), CV_8U));
imshow("binary", binary);
findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
if (!contours.empty()) {
sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
drawContours(src, contours, 0, Scalar(255, 255, 0));
convexHull(contours[0], hullI[0]);
convexHull(contours[0], hullP[0]);
drawContours(src, hullP, 0, Scalar(0, 255, 255));
if (hullI[0].size() > 2) {
convexityDefects(contours[0], hullI[0], defects);
for (Vec4i defect : defects) {
line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
}
}
}
imshow("src", src);
char key = waitKey(30);
if (key == 'q')break;
else if (key == 'p') waitKey();
else if (key == 'b') {
cap >> background;
cvtColor(background, background, CV_BGR2GRAY);
}
}
}
I have confirmed through experiments that it is always the last defect in the defect vector that this happens to. Is this a bug in OpenCV, or am I doing something wrong?
I tested your code (with a small modification) with the image below (OpenCV version 3.2).
As you can see in the result image, it works as expected. You are probably using an old version of OpenCV and getting a buggy result (I think this was a bug that was fixed recently).
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
//VideoCapture cap(0);
Mat src, gray, background, binary, diff;
//cap >> background;
//cvtColor(background, background, CV_BGR2GRAY);
vector<vector<Point> > contours;
vector < vector<int> > hullI = vector<vector<int> >(1);
vector < vector<Point> > hullP = vector<vector<Point> >(1);
vector<Vec4i> defects;
src = imread("hand.png");
cvtColor(src, gray, CV_BGR2GRAY);
blur(gray, gray, Size(3, 3));
threshold(gray, binary, 150, 255, THRESH_BINARY_INV);
//erode(binary, binary, Mat(Size(5, 5), CV_8U));
imshow("binary", binary);
findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
if (!contours.empty()) {
sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
drawContours(src, contours, 0, Scalar(255, 255, 0));
convexHull(contours[0], hullI[0]);
convexHull(contours[0], hullP[0]);
drawContours(src, hullP, 0, Scalar(0, 255, 255));
if (hullI[0].size() > 2) {
convexityDefects(contours[0], hullI[0], defects);
for (Vec4i defect : defects) {
line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
}
}
}
imshow("result", src);
char key = waitKey(0);
return 0;
}
I have a solution that involves detecting the skin using OpenCV. I implemented it in Python, which you can easily convert to C++.
I obtained the HSV values of the image you uploaded using:
hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
This is the range of HSV values for human skin:
l = np.array([0, 48, 80], dtype = "uint8")
u = np.array([20, 255, 255], dtype = "uint8")
skin_img = cv2.inRange(hsv_img, l, u)
cv2.imshow("Hand", skin_img)
I then performed morphological dilation and obtained the following:
You can now compute the convex hull of the contour and find the convexity defects.
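If you want to keep everything in C++ like the rest of your code, here is a minimal sketch of the same skin-segmentation idea up to the dilation step; the file name and the exact HSV bounds are simply the assumptions carried over from the Python snippets above, so tune them for your own images:

#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
    // Placeholder input image; use your own hand image here
    Mat image = imread("hand.png");

    // Convert to HSV and keep only the pixels inside a rough skin-color range
    Mat hsv_img, skin_img;
    cvtColor(image, hsv_img, COLOR_BGR2HSV);
    inRange(hsv_img, Scalar(0, 48, 80), Scalar(20, 255, 255), skin_img);

    // Morphological dilation to fill small holes in the skin mask
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
    dilate(skin_img, skin_img, kernel, Point(-1, -1), 2);

    imshow("Hand", skin_img);
    waitKey(0);
    return 0;
}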

opencv getPerspectiveTransform not working

So I am working on an assignment in which I have to classify road signs based on input images. Naturally, I used the Canny function and findContours, followed by approxPolyDP, in order to get the corners of the region that I will be transforming.
However, for some reason I keep getting an error when I attempt to use getPerspectiveTransform for the next step. Please help.
Error:
OpenCV Error: Assertion failed (0 <= i && i < (int)vv.size()) in getMat_, file /home/path_to_opencv/opencv/modules/core/src/matrix.cpp, line 1192
terminate called after throwing an instance of 'cv::Exception'
what(): /home/path_to_opencv/opencv/modules/core/src/matrix.cpp:1192: error: (-215) 0 <= i && i < (int)vv.size() in function getMat_
Aborted (core dumped)
Code used:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#define WARPED_XSIZE 200
#define WARPED_YSIZE 300
using namespace cv;
using namespace std;
Mat src; Mat src_gray, warped_result; Mat dst;
Mat speed_80, speed_40;
int canny_thresh = 154;
#define VERY_LARGE_VALUE 100000
#define NO_MATCH 0
#define STOP_SIGN 1
#define SPEED_LIMIT_40_SIGN 2
#define SPEED_LIMIT_80_SIGN 3
RNG rng(12345);
/** #function main */
int main(int argc, char** argv)
{
int sign_recog_result = NO_MATCH;
speed_40 = imread("speed_40.bmp", 0);
speed_80 = imread("speed_80.bmp", 0);
// you run your program on these three examples (uncomment the two lines below)
//string sign_name = "stop4";
string sign_name = "speedsign12";
//string sign_name = "speedsign3";
//string sign_name = "speedsign4";
string final_sign_input_name = sign_name + ".jpg";
string final_sign_output_name = sign_name + "_result" + ".jpg";
/// Load source image and convert it to gray
src = imread(final_sign_input_name, 1);
/// Convert image to gray and blur it
cvtColor(src, src_gray, COLOR_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
warped_result = Mat(Size(WARPED_XSIZE, WARPED_YSIZE), src_gray.type());
// here you add the code to do the recognition, and set the variable
// sign_recog_result to one of STOP_SIGN, SPEED_LIMIT_40_SIGN, SPEED_LIMIT_80_SIGN, or NO_MATCH
// PART 1 of Assignment 2
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Canny(src_gray, canny_output, canny_thresh, canny_thresh*2, 3);
findContours(canny_output, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point(0, 0));
vector<vector<Point> > contours_poly(contours.size());
for (unsigned int i = 0; i < contours.size(); ++i) {
approxPolyDP(Mat(contours[i]), contours_poly[i], contours_poly[i].size()*.02, true);
}
// Part 2 of Assignment 2
vector<vector<Point> > transform_result(contours_poly.size());
warped_result = getPerspectiveTransform(contours_poly, transform_result);
warpPerspective(src, dst, warped_result, dst.size());
//imshow("input", src);
//imshow("output", dst);
/*
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for(unsigned int i = 0; i< contours_poly.size(); i++ ) {
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, i, color, 2, 8, hierarchy, 0, Point() );
}
// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
//*/
// Returning to the predetermined code.
string text;
if (sign_recog_result == SPEED_LIMIT_40_SIGN) text = "Speed 40";
else if (sign_recog_result == SPEED_LIMIT_80_SIGN) text = "Speed 80";
else if (sign_recog_result == STOP_SIGN) text = "Stop";
else if (sign_recog_result == NO_MATCH) text = "Fail";
int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
double fontScale = 2;
int thickness = 3;
cv::Point textOrg(10, 130);
cv::putText(src, text, textOrg, fontFace, fontScale, Scalar::all(255), thickness, 8);
/// Create Window
char* source_window = "Result";
namedWindow(source_window, WINDOW_AUTOSIZE);
imshow(source_window, src);
imwrite(final_sign_output_name, src);
waitKey(0);
return(0);
}
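For reference, getPerspectiveTransform maps one quadrilateral onto another, so it expects exactly four source points and four destination points of type Point2f rather than whole vectors of contours. A minimal sketch of the expected call, using placeholder corner coordinates, would look like this:

// Placeholder corners; in practice they would come from a single four-point
// polygon returned by approxPolyDP, ordered consistently (e.g. clockwise from top-left).
Point2f src_corners[4] = { Point2f(50, 40), Point2f(250, 60),
                           Point2f(240, 320), Point2f(60, 300) };
Point2f dst_corners[4] = { Point2f(0, 0), Point2f(WARPED_XSIZE, 0),
                           Point2f(WARPED_XSIZE, WARPED_YSIZE), Point2f(0, WARPED_YSIZE) };

Mat perspective_matrix = getPerspectiveTransform(src_corners, dst_corners);
warpPerspective(src, warped_result, perspective_matrix, Size(WARPED_XSIZE, WARPED_YSIZE));

In other words, a single four-corner polygon has to be selected from contours_poly before the transform can be computed.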

how to track foreground in video and draw rectangle on it

I'm working on a motion detection script that tracks people.
I used the MOG2 foreground-segmentation function and it does what I want. As the next step, I want to draw a rectangle around moving people, but I get an error when I run it.
Any ideas on how to fix it?
The error:
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type) in cvGetMat, file /home/shar/opencv/modules/core/src/array.cpp, line 2494
terminate called after throwing an instance of 'cv::Exception'
what(): /home/shar/opencv/modules/core/src/array.cpp:2494: error: (-206) Unrecognized or unsupported array type in function cvGetMat
Aborted
This is my code:
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <vector>
#include <iostream>
#include <sstream>

using namespace std;
using namespace cv;

int main()
{
    // Open the video file
    cv::VideoCapture capture(0);
    cv::Mat frame;
    Mat colored;

    // foreground binary image
    cv::Mat foreground;

    int largest_area = 0;
    int largest_contour_index = 0;
    Rect bounding_rect;

    cv::namedWindow("ExtractedForeground");

    vector<vector<Point> > contours; // Vector for storing contours
    vector<Vec4i> hierarchy;
    findContours(frame, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    // check if video successfully opened
    if (!capture.isOpened())
        return 0;

    // current video frame
    // The Mixture of Gaussians object, used with all default parameters
    cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG2 = cv::createBackgroundSubtractorMOG2();

    bool stop(false);

    // testing the bounding box
    // for all frames in video
    while (!stop) {
        // read next frame if any
        if (!capture.read(frame))
            break;

        // update the background and return the foreground
        float learningRate = 0.01; // or whatever
        cv::Mat foreground;
        pMOG2->apply(frame, foreground, learningRate);

        // Complement the image
        cv::threshold(foreground, foreground, 128, 255, cv::THRESH_BINARY_INV);

        // show foreground
        for (int i = 0; i < contours.size(); i++)
        {
            // Find the area of contour
            double a = contourArea(contours[i], false);
            if (a > largest_area) {
                largest_area = a; cout << i << " area " << a << endl;
                // Store the index of largest contour
                largest_contour_index = i;
                // Find the bounding rectangle for biggest contour
                bounding_rect = boundingRect(contours[i]);
            }
        }

        Scalar color(255, 255, 255); // color of the contour

        // Draw the contour and rectangle
        drawContours(frame, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy);
        rectangle(frame, bounding_rect, Scalar(0, 255, 0), 2, 8, 0);

        cv::imshow("ExtractedForeground", foreground);
        cv::imshow("colord", colored);

        // introduce a delay or press a key to stop
        if (cv::waitKey(10) >= 0)
            stop = true;
    }
}
Your code fails because you are calling findContours on frame, which is not initialized until inside the while loop.
You also have issues finding the largest contour, since you don't reset largest_area and largest_contour_index at each iteration, so it will fail whenever you don't find a contour in the current frame.
This code should be what you meant to do.
You can find a related answer here.
The code below is a port to OpenCV 3.0.0, plus noise removal using a morphological open.
#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    Ptr<BackgroundSubtractorMOG2> bg = createBackgroundSubtractorMOG2(500, 16, false);
    VideoCapture cap(0);

    Mat3b frame;
    Mat1b fmask;
    Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));

    for (;;)
    {
        // Capture frame
        cap >> frame;

        // Background subtraction
        bg->apply(frame, fmask, -1);

        // Clean foreground from noise
        morphologyEx(fmask, fmask, MORPH_OPEN, kernel);

        // Find contours
        vector<vector<Point>> contours;
        findContours(fmask.clone(), contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

        if (!contours.empty())
        {
            // Get largest contour
            int idx_largest_contour = -1;
            double area_largest_contour = 0.0;

            for (int i = 0; i < contours.size(); ++i)
            {
                double area = contourArea(contours[i]);
                if (area_largest_contour < area)
                {
                    area_largest_contour = area;
                    idx_largest_contour = i;
                }
            }

            if (area_largest_contour > 200)
            {
                // Draw
                Rect roi = boundingRect(contours[idx_largest_contour]);
                drawContours(frame, contours, idx_largest_contour, Scalar(0, 0, 255));
                rectangle(frame, roi, Scalar(0, 255, 0));
            }
        }

        imshow("frame", frame);
        imshow("mask", fmask);

        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}

debug assertion error when using findcontour function of opencv with kinect 2

I want to use Kinect SDK 2.0 with OpenCV for shape detection. For that, I want to get the contours of the image frame, but I am getting an assertion error when I use OpenCV's findContours function.
Here is the code I used:
void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{
    // Make sure we've received valid data
    if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
    {
        // Draw the data with OpenCV
        Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer);
        //Mat ColorImage;
        //ColorImage = imread("C:/Users/IWP/Desktop/Untitled.png", CV_LOAD_IMAGE_COLOR);
        //imshow("ColorImage", ColorImage);

        // Convert it to gray
        Mat src_gray;
        cvtColor(ColorImage, src_gray, CV_BGR2GRAY);

        // Canny
        Mat src_canny;
        Canny(src_gray, src_canny, 75, 125, 3);
        //threshold(src_gray, src_canny, 128, 255, CV_THRESH_BINARY);

        // find the contours
        vector< vector<Point> > contours;
        findContours(src_canny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

        Mat showImage;
        resize(src_canny, showImage, Size(nWidth / 2, nHeight / 2));
        imshow("ColorImage", showImage); //imshow("ColorImage", ColorImage);
    }
}