I am trying to apply adaptive thresholding to an image of an A4 paper as shown below:
I use the code below to apply the image manipulation:
+ (UIImage *)processImageWithOpenCV:(UIImage*)inputImage {
    cv::Mat cvImage = [inputImage CVMat];
    cv::Mat res;
    cv::cvtColor(cvImage, cvImage, CV_RGB2GRAY);
    cvImage.convertTo(cvImage, CV_32FC1, 1.0/255.0);
    CalcBlockMeanVariance(cvImage, res);
    res = 1.0 - res;
    res = cvImage + res;
    cv::threshold(res, res, 0.85, 1, cv::THRESH_BINARY);
    cv::resize(res, res, cv::Size(res.cols/2, res.rows/2));
    return [UIImage imageWithCVMat:cvImage];
}
void CalcBlockMeanVariance(cv::Mat Img, cv::Mat& Res, float blockSide = 13) // blockSide - the parameter (set greater for larger font on image)
{
    cv::Mat I;
    Img.convertTo(I, CV_32FC1);
    Res = cv::Mat::zeros(Img.rows / blockSide, Img.cols / blockSide, CV_32FC1);
    cv::Mat inpaintmask;
    cv::Mat patch;
    cv::Mat smallImg;
    cv::Scalar m, s;
    for (int i = 0; i < Img.rows - blockSide; i += blockSide)
    {
        for (int j = 0; j < Img.cols - blockSide; j += blockSide)
        {
            patch = I(cv::Rect(j, i, blockSide, blockSide));
            cv::meanStdDev(patch, m, s);
            if (s[0] > 0.01) // Thresholding parameter (set smaller for lower contrast image)
            {
                Res.at<float>(i / blockSide, j / blockSide) = m[0];
            }
            else
            {
                Res.at<float>(i / blockSide, j / blockSide) = 0;
            }
        }
    }
    cv::resize(I, smallImg, Res.size());
    cv::threshold(Res, inpaintmask, 0.02, 1.0, cv::THRESH_BINARY);
    cv::Mat inpainted;
    smallImg.convertTo(smallImg, CV_8UC1, 255);
    inpaintmask.convertTo(inpaintmask, CV_8UC1);
    cv::inpaint(smallImg, inpaintmask, inpainted, 5, cv::INPAINT_TELEA);
    cv::resize(inpainted, Res, Img.size());
    Res.convertTo(Res, CV_8UC3);
}
Although the input image is grayscale, it outputs a yellowish image, as shown below:
My hypothesis is that something goes wrong during the conversion between cv::Mat and UIImage, leading to the color image, but I cannot figure out how to fix this issue.
**Please ignore the status bar, as these images are screenshots of the iOS app.
Update:
I have tried using CV_8UC1 instead of CV_8UC3 for Res.convertTo() and added cvtColor(Res, Res, CV_GRAY2BGR);, but I am still getting very similar results.
Could it be the conversion between cv::Mat and UIImage that is causing this problem?
I want my image to look like the one shown below.
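For what it's worth, two details in the code above stand out: processImageWithOpenCV: computes res but builds the returned UIImage from cvImage, and res is still a 32-bit float image in [0, 1] when it leaves the function. A minimal sketch of the tail of that method with both points addressed (assuming the same CVMat/imageWithCVMat category methods):

    cv::threshold(res, res, 0.85, 1, cv::THRESH_BINARY);
    cv::resize(res, res, cv::Size(res.cols/2, res.rows/2));
    res.convertTo(res, CV_8UC1, 255);    // scale the [0, 1] floats back to 8-bit gray
    return [UIImage imageWithCVMat:res]; // return the processed mat, not the input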
You can use the OpenCV framework and implement the code below.
+ (UIImage *)blackandWhite:(UIImage *)processedImage
{
    cv::Mat original = [MMOpenCVHelper cvMatGrayFromAdjustedUIImage:processedImage];
    cv::Mat new_image = cv::Mat::zeros(original.size(), original.type());
    original.convertTo(new_image, -1, 1.4, -50); // output = 1.4 * input - 50 (contrast, brightness)
    original.release();
    UIImage *blackWhiteImage = [MMOpenCVHelper UIImageFromCVMat:new_image];
    new_image.release();
    return blackWhiteImage;
}
+ (cv::Mat)cvMatGrayFromAdjustedUIImage:(UIImage *)image
{
    cv::Mat cvMat = [self cvMatFromAdjustedUIImage:image];
    cv::Mat grayMat;
    if (cvMat.channels() == 1) {
        grayMat = cvMat;
    }
    else {
        grayMat = cv::Mat(cvMat.rows, cvMat.cols, CV_8UC1);
        cv::cvtColor(cvMat, grayMat, cv::COLOR_BGR2GRAY);
    }
    return grayMat;
}
+ (cv::Mat)cvMatFromAdjustedUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    return cvMat;
}
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                 // Width
                                        cvMat.rows,                 // Height
                                        8,                          // Bits per component
                                        8 * cvMat.elemSize(),       // Bits per pixel
                                        cvMat.step[0],              // Bytes per row
                                        colorSpace,                 // Colorspace
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault, // Bitmap info flags
                                        provider,                   // CGDataProviderRef
                                        NULL,                       // Decode
                                        false,                      // Should interpolate
                                        kCGRenderingIntentDefault); // Intent
    UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return image;
}
It's working for me; check the output for your document.
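For a hypothetical call site (assuming blackandWhite: lives on the same MMOpenCVHelper class; the property names here are placeholders), usage would look like:

    UIImage *cleaned = [MMOpenCVHelper blackandWhite:self.capturedImage];
    self.imageView.image = cleaned;

The 1.4 and -50 arguments to convertTo are the contrast and brightness knobs (output = 1.4 * input - 50), so those are the values to tune for a given document.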
Try this:
+ (UIImage *)processImageWithOpenCV:(UIImage*)inputImage {
    cv::Mat cvImage = [inputImage CVMat];
    threshold(cvImage, cvImage, 128, 255, cv::THRESH_BINARY);
    return [UIImage imageWithCVMat:cvImage];
}
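If the page is unevenly lit, a fixed global threshold like this can lose text in the darker regions. One alternative worth trying is OpenCV's built-in cv::adaptiveThreshold, which thresholds each pixel against its local neighborhood; a sketch, where the window size and offset are guesses to tune:

    cv::Mat cvImage = [inputImage CVMat];
    cv::Mat gray, binary;
    cv::cvtColor(cvImage, gray, CV_RGB2GRAY);             // adaptiveThreshold wants 8-bit, single channel
    cv::adaptiveThreshold(gray, binary, 255,
                          cv::ADAPTIVE_THRESH_GAUSSIAN_C, // local Gaussian-weighted mean minus C
                          cv::THRESH_BINARY, 31, 15);     // 31x31 window, offset C = 15
    return [UIImage imageWithCVMat:binary];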
Result image:
Related
I have some simple code that performs Canny edge detection and overlays the edges on the original image.
The code works, but I'd like the edges to be drawn in black; currently they're drawn in white.
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
    Mat srcGray;
    Mat edgesBgr;
    cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
    cvtColor(srcColor, srcColor, CV_BGRA2BGR);
    GaussianBlur(srcGray, srcGray, cvSize(5, 5), 1.2, 1.2);
    GaussianBlur(srcColor, srcColor, cvSize(5, 5), 1.4, 1.4);
    CvSize size = srcColor.size();
    Mat edges = Mat(size, CV_8U);
    Canny(srcGray, edges, 150, 150);
    cvtColor(edges, edgesBgr, cv::COLOR_GRAY2BGR);
    dst = srcColor + edgesBgr;
}
I'm sure this is pretty simple, but I'm fairly new to OpenCV and I'd appreciate any help.
Full code as requested:
#import "ViewController.h"
#import "opencv2/highgui.hpp"
#import "opencv2/core.hpp"
#import "opencv2/opencv.hpp"
#import "opencv2/imgproc.hpp"
#interface ViewController ()
#property (weak, nonatomic) IBOutlet UIImageView *display;
#property (strong, nonatomic) UIImage* image;
#property (strong, nonatomic) UIImage* backup;
#property NSInteger clickflag;
#end
#implementation ViewController
using namespace cv;
- (IBAction)convert_click:(id)sender {
    NSLog(@"Clicked");
    if (_clickflag == 0)
    {
        cv::Mat cvImage, cvBWImage;
        UIImageToMat(_image, cvImage);
        //cv::cvtColor(cvImage, cvBWImage, CV_BGR2GRAY);
        //cvBWImage = cvImage;
        sketchImage(cvImage, cvBWImage, false, 0); // the sketch routine defined below
        _image = MatToUIImage(cvBWImage);
        [_display setImage:_image];
        _clickflag = 1;
    }
    else if (_clickflag == 1)
    {
        _image = _backup;
        [_display setImage:_image];
        _clickflag = 0;
    }
}
static UIImage* MatToUIImage(const cv::Mat& m)
{
    //CV_Assert(m.depth() == CV_8U);
    NSData *data = [NSData dataWithBytes:m.data length:m.step * m.rows];
    CGColorSpaceRef colorSpace = m.channels() == 1 ?
        CGColorSpaceCreateDeviceGray() : CGColorSpaceCreateDeviceRGB();
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    // Creating CGImage from cv::Mat
    CGImageRef imageRef = CGImageCreate(m.cols, m.rows, m.elemSize1() * 8, m.elemSize() * 8,
                                        m.step[0], colorSpace,
                                        kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return finalImage;
}
static void UIImageToMat(const UIImage* image, cv::Mat& m)
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
                                                    m.step[0], colorSpace,
                                                    kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
}
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
    Mat srcGray;
    Mat edgesBgr;
    cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
    cvtColor(srcColor, srcColor, CV_BGRA2BGR);
    GaussianBlur(srcGray, srcGray, cvSize(5, 5), 1.2, 1.2);
    GaussianBlur(srcColor, srcColor, cvSize(5, 5), 1.4, 1.4);
    CvSize size = srcColor.size();
    Mat edges = Mat(size, CV_8U);
    Canny(srcGray, edges, 150, 150);
    cvtColor(edges, edgesBgr, cv::COLOR_GRAY2BGR);
    //edgesBgr = edgesBgr.inv();
    NSLog(@"%d, %d\n", srcColor.size().height, srcColor.size().width);
    NSLog(@"%d, %d\n", edgesBgr.size().height, edgesBgr.size().width);
    dst = edgesBgr + srcColor;
}
- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    _image = [UIImage imageNamed:@"Robben.jpg"];
    _backup = [UIImage imageNamed:@"Robben.jpg"];
    _clickflag = 0;
    [_display setImage:_image];
}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

@end
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
    Mat srcGray;
    cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
    cvtColor(srcColor, srcColor, CV_BGRA2BGR);
    GaussianBlur(srcGray, srcGray, cvSize(5, 5), 1.2, 1.2);
    GaussianBlur(srcColor, srcColor, cvSize(5, 5), 1.4, 1.4);
    CvSize size = srcColor.size();
    Mat edges = Mat(size, CV_8U);
    Canny(srcGray, edges, 150, 150);
    dst = srcColor.clone();
    dst.setTo(0, edges); // paint the edge pixels black instead of adding them as white
}
You could apply bitwise_not(dst, dst) so that white becomes black and black becomes white!

    void bitwise_not(InputArray src, OutputArray dst, InputArray mask = noArray())
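For instance, a sketch of using it on the edge mask rather than on the finished image, so that only the edge pixels end up black (edges here is the single-channel Canny output from the code above):

    cv::Mat edgesInv;
    cv::bitwise_not(edges, edgesInv);    // edge pixels -> 0, everything else -> 255
    dst = cv::Mat::zeros(srcColor.size(), srcColor.type());
    srcColor.copyTo(dst, edgesInv);      // copy all non-edge pixels; the edges stay black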
I'm using OpenCV to test image operations, but using the following method results in an error which I can't explain.
- (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNone |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    return cvMat;
}
When I run this method, I get the following output in the Xcode console:
Aug 17 11:14:24 OpenCVDemo[1250] <Error>: CGBitmapContextCreate: unsupported parameter combination: set CGBITMAP_CONTEXT_LOG_ERRORS environmental variable to see the details
Aug 17 11:14:24 OpenCVDemo[1250] <Error>: CGContextDrawImage: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
For comparison, here is a method which works fine.
- (cv::Mat)cvMat3ChannelFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat rgba(rows, cols, CV_8UC4, cvScalar(1,2,3,4)); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(rgba.data,      // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    rgba.step[0],   // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    cv::Mat bgr(rgba.rows, rgba.cols, CV_8UC3);
    cv::Mat alpha(rgba.rows, rgba.cols, CV_8UC1);
    cv::Mat out[] = { bgr, alpha };
    // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
    // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
    int from_to[] = { 0,2, 1,1, 2,0, 3,3 };
    mixChannels(&rgba, 1, out, 2, from_to, 4);
    return bgr;
}
I hope somebody here can explain why the gray method is not working.
- (cv::Mat)cvMat3ChannelFromUIImage:(UIImage *)image
{
    // Re-encoding through PNG first normalizes the source image's bitmap format.
    UIImage *theImage = [UIImage imageWithData:UIImagePNGRepresentation(image)];
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(theImage.CGImage);
    CGFloat cols = theImage.size.width;
    CGFloat rows = theImage.size.height;
    cv::Mat rgba(rows, cols, CV_8UC4, cvScalar(1,2,3,4)); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(rgba.data,      // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    rgba.step[0],   // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), theImage.CGImage);
    CGContextRelease(contextRef);
    cv::Mat bgr(rgba.rows, rgba.cols, CV_8UC3);
    cv::Mat alpha(rgba.rows, rgba.cols, CV_8UC1);
    cv::Mat out[] = { bgr, alpha };
    // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
    // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
    int from_to[] = { 0,2, 1,1, 2,0, 3,3 };
    mixChannels(&rgba, 1, out, 2, from_to, 4);
    return bgr;
}
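A likely explanation for the original error: CGImageGetColorSpace(image.CGImage) hands back the source image's RGB color space, and an RGB color space combined with an 8-bit, one-component, alpha-free context is not a combination CGBitmapContextCreate supports. A sketch of the gray method using a device-gray color space instead (a guess, but this is the standard pattern for single-channel contexts):

    - (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
    {
        CGFloat cols = image.size.width;
        CGFloat rows = image.size.height;
        cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1);
        // A one-channel bitmap context needs a gray color space, not the image's own RGB one.
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
        CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, cols, rows, 8,
                                                        cvMat.step[0], colorSpace,
                                                        kCGImageAlphaNone |
                                                        kCGBitmapByteOrderDefault);
        CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);
        return cvMat;
    }

Core Graphics then draws the RGB source into the gray context and performs the color conversion itself.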
//Mat Conversion
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols, rows;
    if (image.imageOrientation == UIImageOrientationLeft
        || image.imageOrientation == UIImageOrientationRight) {
        cols = image.size.height;
        rows = image.size.width;
    }
    else {
        cols = image.size.width;
        rows = image.size.height;
    }
    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    // Left/right-oriented images arrive rotated; transpose and flip them upright.
    if (image.imageOrientation != UIImageOrientationLeft
        && image.imageOrientation != UIImageOrientationRight) {
        return cvMat;
    }
    cv::Mat cvMatTest;
    cv::transpose(cvMat, cvMatTest);
    cvMat.release();
    cv::flip(cvMatTest, cvMatTest, 1);
    return cvMatTest;
}
//Gray Conversion
+ (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
{
    cv::Mat cvMat = [self cvMatFromUIImage:image];
    cv::Mat grayMat;
    if (cvMat.channels() == 1) {
        grayMat = cvMat;
    }
    else {
        grayMat = cv::Mat(cvMat.rows, cvMat.cols, CV_8UC1);
        cv::cvtColor(cvMat, grayMat, CV_BGR2GRAY);
    }
    return grayMat;
}
//Call the above method with your UIImage instance
cv::Mat grayImage = [self cvMatGrayFromUIImage:yourImage];
I am storing frames in real time from a CvVideoCamera using the - (void)processImage:(Mat&)image delegate method.
After storing the images I average them all into one image to replicate a long exposure shot using this code:
Mat merge(Vector<Mat> frames, double alpha)
{
Mat firstFrame = frames.front();
Mat exposed = firstFrame * alpha;
for (int i = 1; i < frames.size(); i++) {
Mat frame = frames[i];
exposed += frame * alpha;
}
return exposed;
}
After getting the averaged image back, I convert it back to a UIImage, but the image I get back is in a strange color space. Does anyone know how I can fix this?
Conversion code:
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    }
    else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(),
                                        cvMat.step[0], colorSpace,
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return finalImage;
}
Here is an example (note: the plane in the middle is there because I am recording off a computer monitor).
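One likely culprit, offered as a guess: CvVideoCamera hands processImage: its frames in BGRA channel order, while UIImageFromCVMat builds the CGImage assuming RGB order, so red and blue end up swapped. A sketch of reordering the averaged mat before converting it (assuming exposed is still 4-channel BGRA):

    cv::Mat rgb;
    cv::cvtColor(exposed, rgb, cv::COLOR_BGRA2RGB); // reorder channels to what CGImageCreate expects
    UIImage *result = [[self class] UIImageFromCVMat:rgb];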
I'm trying to convert a CGImageRef to an OpenCV cv::Mat.
Everything works fine with 4-channel images, but for grayscale images it crashes, because CGImageGetBytesPerRow returns a value larger than mat.step[0] (which equals the width of the image).
For example, for a 500-pixel-wide grayscale CGImageRef, CGImageGetBytesPerRow returns 512.
Why is it returning this value, and how can I create my cv::Mat correctly?
- (cv::Mat)CVGrayscaleMatWithCGImage:(CGImageRef)image
{
    // NSLog(@"%zu", CGImageGetBytesPerRow(image)); -> returns 512
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGFloat cols = CGImageGetWidth(image);
    CGFloat rows = CGImageGetHeight(image);
    cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,
                                                    cols,
                                                    rows,
                                                    8,
                                                    cvMat.step[0], // Bytes per row -> returns 500
                                                    colorSpace,
                                                    kCGImageAlphaNone |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    return cvMat;
}
I finally got it to work: bytes per row must be a multiple of 16.
Here is the code:
- (cv::Mat)CVGrayscaleMatWithCGImage:(CGImageRef)image
{
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    size_t cols = CGImageGetWidth(image);
    size_t rows = CGImageGetHeight(image);
    // Round the row stride up to a multiple of 16 and allocate the buffer with
    // that stride, so Core Graphics never writes past the end of a row.
    size_t rowBytes = cols;
    if (rowBytes % 16) {
        rowBytes = ((rowBytes / 16) + 1) * 16;
    }
    cv::Mat padded((int)rows, (int)rowBytes, CV_8UC1); // 8 bits per component, 1 channel
    CGContextRef contextRef = CGBitmapContextCreate(padded.data,
                                                    cols,
                                                    rows,
                                                    8,
                                                    rowBytes,
                                                    colorSpace,
                                                    kCGImageAlphaNone |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    // Crop away the padding columns so the caller sees the true image width.
    return padded(cv::Rect(0, 0, (int)cols, (int)rows)).clone();
}
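An alternative sketch that avoids hard-coding the 16-byte rule: pass NULL as the buffer so Core Graphics allocates (and aligns) its own backing store, then wrap that buffer with whatever stride the context reports and take a deep copy:

    - (cv::Mat)CVGrayscaleMatWithCGImage:(CGImageRef)image
    {
        size_t cols = CGImageGetWidth(image);
        size_t rows = CGImageGetHeight(image);
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
        // With a NULL buffer and 0 bytesPerRow, CGBitmapContextCreate picks both itself.
        CGContextRef ctx = CGBitmapContextCreate(NULL, cols, rows, 8, 0, colorSpace,
                                                 kCGImageAlphaNone | kCGBitmapByteOrderDefault);
        CGContextDrawImage(ctx, CGRectMake(0, 0, cols, rows), image);
        // Wrap the context's buffer with the stride it actually used, then clone
        // so the pixel data outlives the context.
        cv::Mat wrapped((int)rows, (int)cols, CV_8UC1,
                        CGBitmapContextGetData(ctx), CGBitmapContextGetBytesPerRow(ctx));
        cv::Mat result = wrapped.clone();
        CGContextRelease(ctx);
        CGColorSpaceRelease(colorSpace);
        return result;
    }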
Now I am trying to do k-means clustering in iOS. To do k-means, I converted from UIImage to cv::Mat and wrote a function to cluster the cv::Mat, but the function does not work well.
The result looks almost right, but the right-hand columns come out black. I have read the OpenCV reference and have no idea what's wrong.
The code is below. If someone can help me, it would be really appreciated.
Excuse my poor English...
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    // Creating CGImage from cv::Mat
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                 // width
                                        cvMat.rows,                 // height
                                        8,                          // bits per component
                                        8 * cvMat.elemSize(),       // bits per pixel
                                        cvMat.step[0],              // bytesPerRow
                                        colorSpace,                 // colorspace
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault, // bitmap info
                                        provider,                   // CGDataProviderRef
                                        NULL,                       // decode
                                        false,                      // should interpolate
                                        kCGRenderingIntentDefault); // intent
    // Getting UIImage from CGImage
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return finalImage;
}
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    return cvMat;
}
- (cv::Mat)kMeansClustering:(cv::Mat)input
{
    // Flatten the image into an N x 3 matrix of float samples, one row per pixel.
    cv::Mat samples(input.rows * input.cols, 3, CV_32F);
    for (int y = 0; y < input.rows; y++) {
        for (int x = 0; x < input.cols; x++) {
            for (int z = 0; z < 3; z++) {
                samples.at<float>(y + x * input.rows, z) = input.at<cv::Vec3b>(y, x)[z];
            }
        }
    }
    int clusterCount = 20;
    cv::Mat labels;
    int attempts = 5;
    cv::Mat centers;
    kmeans(samples, clusterCount, labels,
           cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 0.01),
           attempts, cv::KMEANS_PP_CENTERS, centers);
    // Paint every pixel with the center of the cluster it was assigned to.
    cv::Mat new_image(input.rows, input.cols, input.type());
    for (int y = 0; y < input.rows; y++) {
        for (int x = 0; x < input.cols; x++) {
            int cluster_idx = labels.at<int>(y + x * input.rows, 0);
            new_image.at<cv::Vec3b>(y, x)[0] = centers.at<float>(cluster_idx, 0);
            new_image.at<cv::Vec3b>(y, x)[1] = centers.at<float>(cluster_idx, 1);
            new_image.at<cv::Vec3b>(y, x)[2] = centers.at<float>(cluster_idx, 2);
        }
    }
    return new_image;
}
You are supplying kMeansClustering with a four-channel image, but it wants three channels. Try losing the alpha channel.
Add this at the top of the function:
cv::cvtColor(input, input, CV_RGBA2RGB);
so it looks like this:
- (cv::Mat)kMeansClustering:(cv::Mat)input
{
    cv::cvtColor(input, input, CV_RGBA2RGB);
    cv::Mat samples(input.rows * input.cols, 3, CV_32F);