OpenCV: kmeans segmentation C++

I'm writing a program to recognize the numbers in an image.
I don't see an error in the code, but I get a runtime exception that points into cv::Mat.
I use Visual Studio 2015 Community and OpenCV 3.1.
The k-means segmentation code was found on Stack Overflow.
Exception:
test_opencv_nuget.exe!cv::Mat::at >(int i0, int i1) Line 918 C++
in mat.inl.hpp, at the line:
CV_DbgAssert((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()));
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture cap;
if (!cap.open(0))
return 0;
Mat camera_image;
Mat gray_scale_image;
Mat binary_image;
Mat otsu_binary_image;
Mat laplas_circuit_image;
for (;;)
{
cap >> camera_image;
cvtColor(camera_image, gray_scale_image, cv::COLOR_RGB2GRAY);
threshold(gray_scale_image, otsu_binary_image, 0, 255, CV_THRESH_OTSU);
Mat laplas_kernel = (Mat_<float>(3, 3) << 0, 1, 0,
1, -4, 1,
0, 1, 0);
filter2D(otsu_binary_image, laplas_circuit_image, -1, laplas_kernel);
Mat samples(otsu_binary_image.rows * otsu_binary_image.cols, 3, CV_32F);
for (int y = 0; y < otsu_binary_image.rows; y++)
for (int x = 0; x < otsu_binary_image.cols; x++)
for (int z = 0; z < 3; z++)
samples.at<float>(y + x*otsu_binary_image.rows, z) = otsu_binary_image.at<Vec3b>(y, x)[z];
int clusterCount = 5;
Mat labels;
int attempts = 5;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers);
Mat kmeans_image(otsu_binary_image.size(), otsu_binary_image.type());
for (int y = 0; y < otsu_binary_image.rows; y++)
for (int x = 0; x < otsu_binary_image.cols; x++)
{
int cluster_idx = labels.at<int>(y + x*otsu_binary_image.rows, 0);
kmeans_image.at<Vec3b>(y, x)[0] = centers.at<float>(cluster_idx, 0);
kmeans_image.at<Vec3b>(y, x)[1] = centers.at<float>(cluster_idx, 1);
kmeans_image.at<Vec3b>(y, x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow("original", camera_image);
imshow("gray_scale", gray_scale_image);
imshow("otsu", otsu_binary_image);
imshow("laplas_circuit", laplas_circuit_image);
imshow("clusters", kmeans_image);
char c = cvWaitKey(33);
if (c == 27) break; // press ESC
}
return 0;
}
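A likely cause, from reading the posted code rather than anything confirmed in the question: after cvtColor and threshold, otsu_binary_image is a single-channel CV_8UC1 Mat, so reading it with at<Vec3b>(y, x) over-runs each row and trips exactly the CV_DbgAssert quoted above. A minimal sketch of how the sampling and output loops could look for single-channel data (variable names follow the question's code; this is an assumption about the intended fix, not the author's solution):

// Build the k-means sample matrix from a single-channel image:
// one float feature per pixel instead of three Vec3b channels.
Mat samples(otsu_binary_image.rows * otsu_binary_image.cols, 1, CV_32F);
for (int y = 0; y < otsu_binary_image.rows; y++)
    for (int x = 0; x < otsu_binary_image.cols; x++)
        samples.at<float>(y + x * otsu_binary_image.rows, 0) =
            otsu_binary_image.at<uchar>(y, x);   // uchar access, not Vec3b

// ... kmeans(samples, clusterCount, labels, ..., attempts, KMEANS_PP_CENTERS, centers); ...

// The output image reuses otsu_binary_image.type() (CV_8UC1), so it must
// also be written through a single-channel accessor.
Mat kmeans_image(otsu_binary_image.size(), CV_8UC1);
for (int y = 0; y < otsu_binary_image.rows; y++)
    for (int x = 0; x < otsu_binary_image.cols; x++)
    {
        int cluster_idx = labels.at<int>(y + x * otsu_binary_image.rows, 0);
        kmeans_image.at<uchar>(y, x) =
            saturate_cast<uchar>(centers.at<float>(cluster_idx, 0));
    }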

Related

Why the video is performing very slow in deep learning opencv

This is hand keypoint detection using a Caffe deep learning model with OpenCV. The problem is that when I run the code the video performs very slowly, to the point that it freezes for a while, but when I try it with a static image it performs normally. I have declared the Net object outside of the while loop too, but the video still does not run smoothly. How can I make the video run smoothly?
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
const int POSE_PAIRS[20][2] =
{
{0,1}, {1,2}, {2,3}, {3,4}, // thumb
{0,5}, {5,6}, {6,7}, {7,8}, // index
{0,9}, {9,10}, {10,11}, {11,12}, // middle
{0,13}, {13,14}, {14,15}, {15,16}, // ring
{0,17}, {17,18}, {18,19}, {19,20} // small
};
string protoFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_deploy.prototxt";
string weightsFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_iter_102000.caffemodel";
int nPoints = 22;
int main(int argc, char **argv)
{
float thresh = 0.01;
VideoCapture cap(0);
Mat frame, frameCopy;
int frameWidth = cap.get(CAP_PROP_FRAME_WIDTH);
int frameHeight = cap.get(CAP_PROP_FRAME_HEIGHT);
float aspect_ratio = frameWidth/(float)frameHeight;
int inHeight = 368;
int inWidth = (int(aspect_ratio*inHeight) * 8) / 8;
cout << "inWidth = " << inWidth << " ; inHeight = " << inHeight << endl;
Net net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_TARGET_CPU);
while(1)
{
cap >> frame;
frameCopy = frame.clone();
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
net.setInput(inpBlob);
Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
// find the position of the body parts
vector<Point> points(nPoints);
for (int n=0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0,n));
resize(probMap, probMap, Size(frameWidth, frameHeight));
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
circle(frameCopy, cv::Point((int)maxLoc.x, (int)maxLoc.y), 8, Scalar(0,255,255), -1);
putText(frameCopy, cv::format("%d", n), cv::Point((int)maxLoc.x, (int)maxLoc.y), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
points[n] = maxLoc;
}
int nPairs = sizeof(POSE_PAIRS)/sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x<=0 || partA.y<=0 || partB.x<=0 || partB.y<=0)
continue;
line(frame, partA, partB, Scalar(0,255,255), 8);
circle(frame, partA, 8, Scalar(0,0,255), -1);
circle(frame, partB, 8, Scalar(0,0,255), -1);
}
// imshow("Output-Keypoints", frameCopy);
imshow("Output-Skeleton", frame);
waitKey(1);
}
return 0;
}
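A couple of generic speed-ups worth trying (my own sketch, assuming OpenCV 3.3+; none of this comes from the original post): shrink the input blob, run the network only on every Nth frame, and set both the preferable backend and the preferable target. Note that DNN_TARGET_CPU is a target constant, so it normally goes to setPreferableTarget rather than setPreferableBackend. A drop-in replacement for the capture loop could look like this:

// Hypothetical speed-up sketch for the loop above.
net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);  // assumption: OpenCV 3.3+
net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

const int inferEvery = 3;   // assumed value: run the net only on every 3rd frame
int frameIdx = 0;
Mat output;                 // last network output, reused between inference runs
while (true)
{
    cap >> frame;
    if (frame.empty()) break;
    if (frameIdx++ % inferEvery == 0)
    {
        // A smaller input size (e.g. 256 instead of 368) trades accuracy for speed.
        Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(256, 256),
                                    Scalar(0, 0, 0), false, false);
        net.setInput(inpBlob);
        output = net.forward();
    }
    // ... extract and draw the keypoints from `output` exactly as in the loop above ...
    imshow("Output-Skeleton", frame);
    if (waitKey(1) == 27) break;
}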

C++ OpenCV: How can I get the 0 and 255 values of a binary image histogram?

My code shows me values that are not accurate, and I am not sure what else to try. My goal is to get the values row by row so that I can read the image and put it into an array. I've looked at examples, and Stack Overflow is literally my last option.
#include<iostream>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
Mat Rgb;
Mat Grey;
Mat image;
//Mat Histogram;
Rgb = imread("license.jpg", WINDOW_AUTOSIZE);
cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
threshold(Grey, image, 150, 250, THRESH_BINARY);
int histogram[255];
for (int i = 0; i < 255; i++)
{
histogram[i] = 0;
}
for (int y = 0; y < image.rows; y++)
//for (int x = 0; x < image.cols; x++)
histogram[(int)image.at<uchar>(y)]++;
//histogram[(int)image.at<uchar>(y, x)]++;
for (int i = 0; i < 255; i++)
cout << histogram[i] << " ";
// draw the histograms
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound((double)hist_w / 255);
Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
int max = histogram[0];
for (int i = 1; i < 256; i++) {
if (max < histogram[i]) {
max = histogram[i];
}
}
for (int i = 0; i < 255; i++) {
histogram[i] = ((double)histogram[i] / max)*histImage.rows;
}
for (int i = 0; i < 255; i++)
{
line(histImage, Point(bin_w*(i), hist_h),
Point(bin_w*(i), hist_h - histogram[i]),
Scalar(0, 0, 0), 1, 8, 0);
}
imshow("Image", image);
waitKey(0);
cv::destroyAllWindows();
return 0;
}
The results show numbers like 319 and other values, but I expect the pixel values to be only 0 or 255.
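For comparison, a minimal sketch of a per-pixel count over the whole binary image (my own version, assuming the thresholded image is CV_8UC1). Re-enabling the inner column loop and using 256 bins is what makes only the black and white bins fill up; the single-index at<uchar>(y) in the posted code only walks the first image.rows pixels of the flattened data, not the whole image:

// Count pixel values over the whole single-channel binary image.
// With THRESH_BINARY only bins 0 and maxval should be populated
// (the posted threshold call uses maxval = 250, not 255).
int histogram[256] = { 0 };               // 256 bins so index 255 is valid
for (int y = 0; y < image.rows; y++)
    for (int x = 0; x < image.cols; x++)
        histogram[image.at<uchar>(y, x)]++;

cout << "black pixels: " << histogram[0]
     << ", white pixels: " << histogram[250] << endl;  // 250 because of the maxval above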

How to improve otsu threshold output

I am using Otsu thresholding on an image.
Here is the input image:
Here is the output:
Here is the code I am using:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
title("Text Extractor");
string win_name = "textextractor";
Mat img_a;
img_a = imread("../input/test_c.jpg");
Mat img_a_gray;
cvtColor(img_a, img_a_gray, CV_BGR2GRAY);
Mat img_a_blur;
GaussianBlur(img_a_gray, img_a_blur, Size(3, 3), 0, 0);
Mat img_a_thres;
// adaptiveThreshold(img_a_blur, img_a_thres, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 5, 4);
threshold(img_a_blur, img_a_thres, 0, 255, THRESH_OTSU);
namedWindow(win_name + "_a", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_a", img_a_thres);
imwrite("../output/output_a.jpg", img_a_thres);
waitKey(0);
return 0;
}
The problem is that the output has a black region at the bottom and on the left. What can I do to minimize or remove it?
Edit:
I tried equalizeHist() and I am getting this:
I will try breaking the image into pieces and working on them separately.
Sorry, my bad. The previous one was using adaptive filtering. Using Otsu I get this:
There is no change in Otsu's output :/
Edit 2: Implemented the Feng Tan algorithm; it gives better results, but the text loses clarity.
Code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/photo/photo.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
string win_name = "textextractor";
Mat img_c;
img_c = imread("../input/sample.jpg");
Mat img_c_gray;
cvtColor(img_c, img_c_gray, CV_BGR2GRAY);
Mat img_c_bin = Mat::zeros(img_c_gray.rows, img_c_gray.cols, CV_8UC1);
int s_win = 17;
int l_win = 35;
double min_tau = 10;
Rect roi_s = Rect(-s_win/2, -s_win/2, s_win, s_win);
Rect roi_l = Rect(-l_win/2, -l_win/2, l_win, l_win);
Rect img_c_roi = Rect(0, 0, img_c_gray.cols, img_c_gray.rows);
for (size_t r = 0; r < img_c_gray.rows; r++) {
for (size_t c = 0; c < img_c_gray.cols; c++) {
double pthres = 255;
Rect sROI = roi_s + Point(c, r);
sROI = sROI & img_c_roi;
if(sROI.width == 0 || sROI.height == 0) {
continue;
}
Rect lROI = roi_l + Point(c, r);
lROI = lROI & img_c_roi;
if(lROI.width == 0 || lROI.height == 0) {
continue;
}
Mat sROI_gray = img_c_gray(sROI);
Mat lROI_gray = img_c_gray(lROI);
double s_stdDev = 0;
double l_stdDev = 0;
double s_mean = 0;
double l_mean = 0;
double l_min = DBL_MAX;
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
s_mean += sROI_gray.at<unsigned char>(r, c);
}
}
s_mean = s_mean / static_cast<double> (sROI_gray.cols * sROI_gray.rows);
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
double diff = sROI_gray.at<unsigned char> (r, c) - s_mean;
s_stdDev += diff * diff;
}
}
s_stdDev = sqrt(s_stdDev / static_cast<int> (sROI_gray.cols * sROI_gray.rows));
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
l_mean += lROI_gray.at<unsigned char> (c, r);
if(lROI_gray.at<unsigned char> (r, c) < l_min) {
l_min = lROI_gray.at<unsigned char> (r, c);
}
}
}
l_mean = l_mean / static_cast<double> (lROI_gray.cols * lROI_gray.rows);
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
double diff = lROI_gray.at<unsigned char> (r, c) - l_mean;
l_stdDev += diff * diff;
}
}
l_stdDev = sqrt(l_stdDev / static_cast<double> (lROI_gray.cols * lROI_gray.rows));
double tau = ((s_mean - l_min) * (1 - s_stdDev / l_stdDev)) / 2.0;
if(tau < min_tau) {
tau = min_tau;
}
double threshold = s_mean - tau;
unsigned char pixel_val = img_c_gray.at<unsigned char>(r, c);
if(pixel_val >= threshold) {
img_c_bin.at<unsigned char> (r, c) = 255;
} else {
img_c_bin.at<unsigned char> (r, c) = 0;
}
}
}
namedWindow(win_name + "_c", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_c", img_c_bin);
imwrite("../output/output_c.jpg", img_c_bin);
waitKey(0);
return 0;
}
Output:
This is what I was able to obtain after some trial and error. Initially I median-blurred the original image, then applied an adaptive threshold to the blurred image.
This is what I got:
1. Adaptive Threshold using Gaussian filter:
2. Adaptive Threshold using Mean filter:
From here on you can carry out a series of morphological operations that best suit your final image. :)
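As a sketch of what that could look like in code (my own illustration of the steps described above; the blur aperture, block size, and kernel size are assumptions to be tuned per image):

// Median blur, adaptive threshold, then a morphological opening to drop speckles.
Mat gray = imread("../input/test_c.jpg", IMREAD_GRAYSCALE);
Mat blurred, thresh, cleaned;
medianBlur(gray, blurred, 5);
adaptiveThreshold(blurred, thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C,
                  THRESH_BINARY, 11, 2);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
morphologyEx(thresh, cleaned, MORPH_OPEN, kernel);
imshow("cleaned", cleaned);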
You should try using CLAHE.
I tried it on MATLAB using:
Ia = imread('FHXTJ.jpg');
I = rgb2gray(Ia);
A = adapthisteq(I, 'clipLimit', 0.02, 'Distribution', 'rayleigh');
Result:
Note: You can apply thresholding on this image. Otsu should work fine now.
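If you want to stay in OpenCV/C++ rather than MATLAB, the equivalent CLAHE pass looks roughly like this (a sketch; the clip limit and tile grid size are assumptions, only loosely matching the adapthisteq parameters above):

// CLAHE followed by Otsu thresholding.
Mat gray = imread("FHXTJ.jpg", IMREAD_GRAYSCALE);
Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8));
Mat equalized;
clahe->apply(gray, equalized);
Mat binarized;
threshold(equalized, binarized, 0, 255, THRESH_BINARY | THRESH_OTSU);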

Training svm for images in opencv

I have already referred to the following links:
Link 1 and Link 2
From the above I have managed to write the following:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
int main(){
int num_files = 2;
int width = 128, height = 128;
Mat image[2];
image[0] = imread("Tomato.jpg");
image[1] = imread("Melon.jpg");
Mat new_image(2,height*width,CV_32FC1); //Training sample from input images
int ii = 0;
for (int i = 0; i < num_files; i++){
Mat temp = image[i];
ii = 0;
for (int j = 0; j < temp.rows; j++){
for (int k = 0; k < temp.cols; k++){
new_image.at<float>(i, ii++) = temp.at<uchar>(j, k);
}
}
}
//new_image.push_back(image[0].reshape(0, 1));
//new_image.push_back(image[1].reshape(0, 1));
Mat labels(num_files, 1, CV_32FC1);
labels.at<float>(0, 0) = 1.0;//tomato
labels.at<float>(1, 0) = -1.0;//melon
imshow("New image", new_image);
printf("%f %f", labels.at<float>(0, 0), labels.at<float>(1, 0));
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
params.gamma = 3;
params.degree = 3;
params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
CvSVM svm;
svm.train(new_image, labels, Mat(), Mat(), params);
svm.save("svm.xml"); // saving
svm.load("svm.xml"); // loading
Mat test_img = imread("Tomato.jpg");
test_img=test_img.reshape(0, 1);
imshow("shit_image", test_img);
test_img.convertTo(test_img, CV_32FC1);
svm.predict(test_img);
waitKey(0);
}
I get the following error:
unsupported format or combination of formats, input sample must have 32FC1 type in cvPreparePredictData ...
I followed all the steps in the second link, and all matrices are of type 32FC1.
What am I missing?
Is there something wrong with the SVM parameters?
The error occurs when I try to predict a result.
Check: 1) are Tomato.jpg and Melon.jpg 128*128 in size?
2) Are both images grayscale?
If not, try this code; I just added resize(), cvtColor(), and printing of the result:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2\imgproc\imgproc.hpp>
using namespace cv;
using namespace std;
int main(){
int num_files = 2;
int width = 128, height = 128;
Mat image[2];
image[0] = imread("Tomato.jpg", 0);
image[1] = imread("Melon.jpg", 0);
resize(image[0], image[0], Size(width, height));
resize(image[1], image[1], Size(width, height));
Mat new_image(2, height*width, CV_32FC1); //Training sample from input images
int ii = 0;
for (int i = 0; i < num_files; i++){
Mat temp = image[i];
ii = 0;
for (int j = 0; j < temp.rows; j++){
for (int k = 0; k < temp.cols; k++){
new_image.at<float>(i, ii++) = temp.at<uchar>(j, k);
}
}
}
//new_image.push_back(image[0].reshape(0, 1));
//new_image.push_back(image[1].reshape(0, 1));
Mat labels(num_files, 1, CV_32FC1);
labels.at<float>(0, 0) = 1.0;//tomato
labels.at<float>(1, 0) = -1.0;//melon
imshow("New image", new_image);
printf("%f %f", labels.at<float>(0, 0), labels.at<float>(1, 0));
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
params.gamma = 3;
params.degree = 3;
params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
CvSVM svm;
svm.train(new_image, labels, Mat(), Mat(), params);
svm.save("svm.xml"); // saving
svm.load("svm.xml"); // loading
Mat test_img = imread("Tomato.jpg", 0);
resize(test_img, test_img, Size(width, height));
test_img = test_img.reshape(0, 1);
imshow("shit_image", test_img);
test_img.convertTo(test_img, CV_32FC1);
float res = svm.predict(test_img);
if (res > 0)
cout << endl << "Tomato";
else
cout << endl << "Melon";
waitKey(0);
}
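Both listings above use the old CvSVM interface from OpenCV 2.4.x. For anyone on OpenCV 3.x, here is a rough sketch of the same two-image training flow with cv::ml::SVM (my own port, not part of the original answer; the file names and the 128x128 size are taken from the question, and ml::SVM expects integer class labels rather than float):

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ml.hpp>
#include <iostream>

int main()
{
    const int width = 128, height = 128;
    cv::Mat imgs[2] = { cv::imread("Tomato.jpg", cv::IMREAD_GRAYSCALE),
                        cv::imread("Melon.jpg", cv::IMREAD_GRAYSCALE) };

    // One flattened row of float features per training image.
    cv::Mat samples(2, width * height, CV_32FC1);
    for (int i = 0; i < 2; i++)
    {
        cv::resize(imgs[i], imgs[i], cv::Size(width, height));
        cv::Mat rowFloat;
        imgs[i].reshape(1, 1).convertTo(rowFloat, CV_32FC1);
        rowFloat.copyTo(samples.row(i));
    }

    // Integer class labels: 1 = tomato, -1 = melon.
    cv::Mat labels = (cv::Mat_<int>(2, 1) << 1, -1);

    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::LINEAR);
    svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 100, 1e-6));
    svm->train(samples, cv::ml::ROW_SAMPLE, labels);

    // Predict on one of the training images, prepared exactly the same way.
    cv::Mat test = cv::imread("Tomato.jpg", cv::IMREAD_GRAYSCALE);
    cv::resize(test, test, cv::Size(width, height));
    cv::Mat testRow;
    test.reshape(1, 1).convertTo(testRow, CV_32FC1);
    float res = svm->predict(testRow);
    std::cout << (res > 0 ? "Tomato" : "Melon") << std::endl;
    return 0;
}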

Finding only big blobs on image

Hello, I'm trying to find characters in this image.
After some preprocessing I obtained this image.
Now I'm trying to do connected-component labeling to find the blobs; however, I get a lot of small blobs too.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);
int main(int argc, char **argv)
{
Mat img = imread("adaptive.png", 0);
if(!img.data) {
cout << "File not found" << endl;
return -1;
}
namedWindow("binary");
namedWindow("labelled");
Mat output = Mat::zeros(img.size(), CV_8UC3);
Mat binary;
vector < vector<Point2i > > blobs;
threshold(img, binary, 0, 1, THRESH_BINARY_INV);
FindBlobs(binary, blobs);
// Randomy color the blobs
for(size_t i=0; i < blobs.size(); i++) {
unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
for(size_t j=0; j < blobs[i].size(); j++) {
int x = blobs[i][j].x;
int y = blobs[i][j].y;
output.at<Vec3b>(y,x)[0] = b;//Vec3b RGB color order
output.at<Vec3b>(y,x)[1] = g;
output.at<Vec3b>(y,x)[2] = r;
}
}
imshow("binary", img);
imshow("labelled", output);
waitKey(0);
return 0;
}
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
blobs.clear();
Mat label_image;
binary.convertTo(label_image, CV_32SC1);
int label_count = 2; // starts at 2 because 0,1 are used already
for(int y=0; y < label_image.rows; y++) {
int *row = (int*)label_image.ptr(y);
for(int x=0; x < label_image.cols; x++) {
if(row[x] != 1) {
continue;
}
Rect rect;
floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);
vector <Point2i> blob;
for(int i=rect.y; i < (rect.y+rect.height); i++) {
int *row2 = (int*)label_image.ptr(i);
for(int j=rect.x; j < (rect.x+rect.width); j++) {
if(row2[j] != label_count) {
continue;
}
blob.push_back(Point2i(j,i));
}
}
blobs.push_back(blob);
label_count++;
}
}
}
So with this algorithm I get the blobs, but when I do
if (blobs.size() > 50) {
    blob.push_back(Point2i(j, i));
}
I get a black screen. However, when I try
if (blob.size() < 50) {
    blob.push_back(Point2i(j, i));
}
I get only small blobs. What could be the actual problem here?
I guess you want to keep only those "big" blobs?
If so, change the following code
blobs.push_back(blob);
label_count++;
to this:
if (blob.size() > 50) {
    blobs.push_back(blob);
}
label_count++;
And you will get a picture like this:
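An alternative worth mentioning (my own suggestion, assuming OpenCV 3.x is available): cv::connectedComponentsWithStats returns the area of every blob directly, so the size filter becomes a simple lookup instead of collecting points by hand. The threshold of 50 below just mirrors the value used in the question:

// Keep only components whose area exceeds the (assumed) threshold of 50 pixels.
cv::Mat labelImg, stats, centroids;
int nLabels = cv::connectedComponentsWithStats(binary, labelImg, stats,
                                               centroids, 4, CV_32S);
cv::Mat bigBlobs = cv::Mat::zeros(binary.size(), CV_8UC1);
for (int lbl = 1; lbl < nLabels; lbl++)       // label 0 is the background
{
    if (stats.at<int>(lbl, cv::CC_STAT_AREA) > 50)
        bigBlobs.setTo(255, labelImg == lbl); // paint only the large components
}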