Conversion of Mat to UIImage from CvVideoCamera - c++

I am storing frames in real time from a CvVideoCamera using the - (void)processImage:(Mat&)image delegate method.
After storing the images I average them all into one image to replicate a long exposure shot using this code:
Mat merge(Vector<Mat> frames, double alpha)
{
Mat firstFrame = frames.front();
Mat exposed = firstFrame * alpha;
for (int i = 1; i < frames.size(); i++) {
Mat frame = frames[i];
exposed += frame * alpha;
}
return exposed;
}
After getting the averaged image back I convert it back to a UIImage, but the image I get back is in a strange color space, does anyone know how I can fix this?
Conversion code:
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if ( cvMat.elemSize() == 1 ) {
colorSpace = CGColorSpaceCreateDeviceGray();
}
else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease( imageRef );
CGDataProviderRelease( provider );
CGColorSpaceRelease( colorSpace );
return finalImage;
}
Here is an example(note: the plane in the middle is because I am recording off a computer monitor)

Related

OpenCV - How to apply adaptive threshold to an image on iOS

I am trying to apply adaptive thresholding to an image of an A4 paper as shown below:
I use the code below to apply the image manipulation:
+ (UIImage *)processImageWithOpenCV:(UIImage*)inputImage {
cv::Mat cvImage = [inputImage CVMat];
cv::Mat res;
cv::cvtColor(cvImage, cvImage, CV_RGB2GRAY);
cvImage.convertTo(cvImage,CV_32FC1,1.0/255.0);
CalcBlockMeanVariance(cvImage,res);
res=1.0-res;
res=cvImage+res;
cv::threshold(res,res, 0.85, 1, cv::THRESH_BINARY);
cv::resize(res, res, cv::Size(res.cols/2,res.rows/2));
return [UIImage imageWithCVMat:cvImage];
}
void CalcBlockMeanVariance(cv::Mat Img,cv::Mat Res,float blockSide=13) // blockSide - the parameter (set greater for larger font on image)
{
cv::Mat I;
Img.convertTo(I,CV_32FC1);
Res=cv::Mat::zeros(Img.rows/blockSide,Img.cols/blockSide,CV_32FC1);
cv::Mat inpaintmask;
cv::Mat patch;
cv::Mat smallImg;
cv::Scalar m,s;
for(int i=0;i<Img.rows-blockSide;i+=blockSide)
{
for (int j=0;j<Img.cols-blockSide;j+=blockSide)
{
patch=I(cv::Rect(j,i,blockSide,blockSide));
cv::meanStdDev(patch,m,s);
if(s[0]>0.01) // Thresholding parameter (set smaller for lower contrast image)
{
Res.at<float>(i/blockSide,j/blockSide)=m[0];
}else
{
Res.at<float>(i/blockSide,j/blockSide)=0;
}
}
}
cv::resize(I,smallImg,Res.size());
cv::threshold(Res,inpaintmask,0.02,1.0,cv::THRESH_BINARY);
cv::Mat inpainted;
smallImg.convertTo(smallImg,CV_8UC1,255);
inpaintmask.convertTo(inpaintmask,CV_8UC1);
inpaint(smallImg, inpaintmask, inpainted, 5, cv::INPAINT_TELEA);
cv::resize(inpainted,Res,Img.size());
Res.convertTo(Res,CV_8UC3);
}
Although the inputted image is greyscaled, it outputs an yellowish image as shown below:
My hypothesis is that whilst conversion between the cv::Mat and UIImage, something happened leading to the color image, however I can not figure out how to fix this issue.
**please ignore the status bar as these images are screenshots of the iOS app.
Update:
I have tried using CV_8UC1 instead of CV_8UC3 for Res.convertTo() and added cvtColor(Res, Res, CV_GRAY2BGR); but am still getting very similar results.
Could it be the conversion between cv::mat and UIImage which is causing this problem??
I want my image to be like this shown below.
You can use OpenCV framework and implement below code
+(UIImage *)blackandWhite:(UIImage *)processedImage
{
cv::Mat original = [MMOpenCVHelper cvMatGrayFromAdjustedUIImage:processedImage];
cv::Mat new_image = cv::Mat::zeros( original.size(), original.type() );
original.convertTo(new_image, -1, 1.4, -50);
original.release();
UIImage *blackWhiteImage=[MMOpenCVHelper UIImageFromCVMat:new_image];
new_image.release();
return blackWhiteImage;
}
+ (cv::Mat)cvMatGrayFromAdjustedUIImage:(UIImage *)image
{
cv::Mat cvMat = [self cvMatFromAdjustedUIImage:image];
cv::Mat grayMat;
if ( cvMat.channels() == 1 ) {
grayMat = cvMat;
}
else {
grayMat = cv :: Mat( cvMat.rows,cvMat.cols, CV_8UC1 );
cv::cvtColor( cvMat, grayMat, cv::COLOR_BGR2GRAY );
}
return grayMat;
}
+ (cv::Mat)cvMatFromAdjustedUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGImageRef imageRef = CGImageCreate(cvMat.cols, // Width
cvMat.rows, // Height
8, // Bits per component
8 * cvMat.elemSize(), // Bits per pixel
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone | kCGBitmapByteOrderDefault, // Bitmap info flags
provider, // CGDataProviderRef
NULL, // Decode
false, // Should interpolate
kCGRenderingIntentDefault); // Intent
UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return image;
}
its working for me check the output for your document
Try this:
+ (UIImage *)processImageWithOpenCV:(UIImage*)inputImage {
cv::Mat cvImage = [inputImage CVMat];
threshold(cvImage, cvImage, 128, 255, cv::THRESH_BINARY);
return [UIImage imageWithCVMat:cvImage];
}
Result image:

Overlay canny edges on image in black

I have some simple code that performs canny edge detection and overlays the edges on the original image.
The code works, but I'd like the edges to be drawn in black, currently they're drawn in white.
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
Mat srcGray;
Mat edgesBgr;
cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
cvtColor(srcColor, srcColor, CV_BGRA2BGR);
GaussianBlur(srcGray, srcGray, cvSize(5, 5),1.2,1.2);
GaussianBlur(srcColor, srcColor, cvSize(5,5), 1.4, 1.4);
CvSize size = srcColor.size();
Mat edges = Mat(size, CV_8U);
Canny(srcGray, edges, 150, 150);
cvtColor(edges, edgesBgr, cv::COLOR_GRAY2BGR);
dst = srcColor + edgesBgr;
}
I'm sure this is pretty simple but I'm fairly new to openCV and I'd appreciate any help.
Full code as requested:
#import "ViewController.h"
#import "opencv2/highgui.hpp"
#import "opencv2/core.hpp"
#import "opencv2/opencv.hpp"
#import "opencv2/imgproc.hpp"
#interface ViewController ()
#property (weak, nonatomic) IBOutlet UIImageView *display;
#property (strong, nonatomic) UIImage* image;
#property (strong, nonatomic) UIImage* backup;
#property NSInteger clickflag;
#end
#implementation ViewController
using namespace cv;
- (IBAction)convert_click:(id)sender {
NSLog(#"Clicked");
if (_clickflag == 0)
{
cv::Mat cvImage, cvBWImage;
UIImageToMat(_image, cvImage);
//cv::cvtColor(cvImage, cvBWImage, CV_BGR2GRAY);
//cvBWImage = cvImage;
cartoonifyImage(cvImage, cvBWImage, false, 0);
_image = MatToUIImage(cvBWImage);
[_display setImage:_image];
_clickflag = 1;
}
else if(_clickflag == 1)
{
_image = _backup;
[_display setImage:_image];
_clickflag = 0;
}
}
static UIImage* MatToUIImage(const cv::Mat& m)
{
//CV_Assert(m.depth() == CV_8U);
NSData *data = [NSData dataWithBytes:m.data length:m.step*m.rows];
CGColorSpaceRef colorSpace = m.channels() == 1 ?
CGColorSpaceCreateDeviceGray() : CGColorSpaceCreateDeviceRGB();
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(m.cols, m.rows, m.elemSize1()*8, m.elemSize()*8,
m.step[0], colorSpace, kCGImageAlphaNoneSkipLast|kCGBitmapByteOrderDefault,
provider, NULL, false, kCGRenderingIntentDefault);
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
static void UIImageToMat(const UIImage* image, cv::Mat& m)
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
m.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
}
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
Mat srcGray;
Mat edgesBgr;
cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
cvtColor(srcColor, srcColor, CV_BGRA2BGR);
GaussianBlur(srcGray, srcGray, cvSize(5, 5),1.2,1.2);
GaussianBlur(srcColor, srcColor, cvSize(5,5), 1.4, 1.4);
CvSize size = srcColor.size();
Mat edges = Mat(size, CV_8U);
Canny(srcGray, edges, 150, 150);
cvtColor(edges, edgesBgr, cv::COLOR_GRAY2BGR);
//edgesBgr = edgesBgr.inv();
NSLog(#"%d, %d\n", srcColor.size().height, srcColor.size().width);
NSLog(#"%d, %d\n", edgesBgr.size().height, edgesBgr.size().width);
dst = edgesBgr + srcColor;
}
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
_image = [UIImage imageNamed:#"Robben.jpg"];
_backup = [UIImage imageNamed:#"Robben.jpg"];
_clickflag = 0;
[_display setImage:_image];
}
- (void)didReceiveMemoryWarning {
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
#end
static void sketchImage(Mat srcColor, Mat& dst, bool sketchMode, int debugType)
{
Mat srcGray;
cvtColor(srcColor, srcGray, CV_BGRA2GRAY);
cvtColor(srcColor, srcColor, CV_BGRA2BGR);
GaussianBlur(srcGray, srcGray, cvSize(5, 5),1.2,1.2);
GaussianBlur(srcColor, srcColor, cvSize(5,5), 1.4, 1.4);
CvSize size = srcColor.size();
Mat edges = Mat(size, CV_8U);
Canny(srcGray, edges, 150, 150);
dst = srcColor.clone();
dst.setTo(0,edges);
}
You could apply bitwise_not(dst,dst) so that white becomes black and black becomes white !
void bitwise_not(InputArray src, OutputArray dst, InputArray
mask=noArray())

Converting to UIImage from cvMat alters image

Whenever I try to convert my binary image back to a UIImage it changes the areas that should be white to a dark blue. I have seen that other people have been having similar issues, but have found no solution.
Here is the code I am using to convert to a UIImage
- (UIImage *)UIImageFromMat:(cv::Mat)image
{
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGColorSpaceRef colorSpace;
if (image.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);//CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(image.cols, //width
image.rows, //height
8, //bits per component
8 * image.elemSize(), //bits per pixel
image.step.p[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
//[self.imgView setImage:finalImage];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
Can anyone provide some insight as to why my code for converting to a UIImage changes the image? And how to fix it?
Found that I need to apply this code before converting to a UIImage
res.convertTo(res, CV_8UC3, 255.0);
I do not know what is the reason but you can fix an image using
CGImageRef CGImageCreateWithMaskingColors (
CGImageRef image,
const CGFloat components[]
);
Description Creates a bitmap image by masking an existing bitmap
image with the provided color values. Any image sample with color
value {c[1], ... c[N]} where min[i] <= c[i] <= max[i] for 1 <= i <= N
is masked out (that is, not painted). This means that anything
underneath the unpainted samples, such as the current fill color,
shows through.
So you will need something like
UIImage *your_image; // do not forget to initialize it
const CGFloat blueMasking[6] = {0.0, 0.05, 0.0, 0.05, 0.95, 1.0};
CGImageRef image = CGImageCreateWithMaskingColors(your_image.CGImage, blueMasking);
UIImage *new_image = [UIImage imageWithCGImage:image];
https://stackoverflow.com/questions/22429538/turn-all-pixels-of-chosen-color-to-white-uiimage/22429897#22429897

How to use CGContextDrawImage?

I need some help using the CGContextDrawImage. I have the following code which will create a Bitmap context and convert the pixel data to CGImageRef. Now I need to display that image using CGContextDrawImage. I'm not very clear on how I'm supposed to use that. The following is my code:
- (void)drawBufferWidth:(int)width height:(int)height pixels:(unsigned char*)pixels
{
const int area = width *height;
const int componentsPerPixel = 4;
unsigned char pixelData[area * componentsPerPixel];
for(int i = 0; i<area; i++)
{
const int offset = i * componentsPerPixel;
pixelData[offset] = pixels[0];
pixelData[offset+1] = pixels[1];
pixelData[offset+2] = pixels[2];
pixelData[offset+3] = pixels[3];
}
const size_t BitsPerComponent = 8;
const size_t BytesPerRow=((BitsPerComponent * width) / 8) * componentsPerPixel;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef gtx = CGBitmapContextCreate(pixelData, width, height, BitsPerComponent, BytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast);
CGImageRef myimage = CGBitmapContextCreateImage(gtx);
//What code should go here to display the image?
CGContextRelease(gtx);
CGImageRelease(myimage);
}
Any help or a sample piece of code would be great. Thanks in advance!
Create a file named : MyDrawingView.h
#import <UIKit/UIKit.h>
#import <QuartzCore/QuartzCore.h>
#interface MyDrawingView : UIView
{
}
#end
now create a file named : MyDrawingView.m
#import "MyChartView.h"
#implementation MyDrawingView
- (id)initWithFrame:(CGRect)frame
{
self = [super initWithFrame:frame];
if (self) {
// Initialization code
// Write your initialization code if any.
}
return self;
}
// Only override drawRect: if you perform custom drawing.
- (void)drawRect:(CGRect)rect
{
// Drawing code
// Create Current Context To Draw
CGContextRef context = UIGraphicsGetCurrentContext();
UIImage *image = [UIImage imageNamed:#"image.jpg"];
// Draws your image at given poing
[image drawAtPoint:CGPointMake(10, 10)];
}
// now to use it in your view
#import "MyChartView.h"
MyDrawingView *drawView = [[MyDrawingView alloc] initWithFrame:CGRectMake(0, 0, 200, 200)];
[self.view addSubview:drawView];
// whenever you want to update that view call
[drawView setNeedsDisplay];
// its your method to process(draw) image
- (void)drawBufferWidth:(int)width height:(int)height pixels:(unsigned char*)pixels
{
const int area = width *height;
const int componentsPerPixel = 4;
unsigned char pixelData[area * componentsPerPixel];
for(int i = 0; i<area; i++)
{
const int offset = i * componentsPerPixel;
pixelData[offset] = pixels[0];
pixelData[offset+1] = pixels[1];
pixelData[offset+2] = pixels[2];
pixelData[offset+3] = pixels[3];
}
const size_t BitsPerComponent = 8;
const size_t BytesPerRow=((BitsPerComponent * width) / 8) * componentsPerPixel;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef gtx = CGBitmapContextCreate(pixelData, width, height, BitsPerComponent, BytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast);
CGImageRef myimage = CGBitmapContextCreateImage(gtx);
// Convert to UIImage
UIImage *image = [UIImage imageWithCGImage:myimage];
// Create a rect to display
CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);
// Here is the Two Snnipets to draw image
CGContextRef context = UIGraphicsGetCurrentContext();
// Transform image
CGContextTranslateCTM(context, 0, image.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
// Finaly Draw your image
CGContextDrawImage(context, imageRect, image.CGImage);
// You can also use following to draw your image in 'drawRect' method
// [[UIImage imageWithCGImage:myimage] drawInRect:CGRectMake(0, 0, 145, 15)];
CGContextRelease(gtx);
CGImageRelease(myimage);
}
If there is anyone else dealing with this problem, please try inserting the following code into Ereka's code. Dipen's solution seems a little too much. Right after the comment "//What code should go here to display the image," put the following code:
CGRect myContextRect = CGRectMake (0, 0, width, height);
CGContextDrawImage (gtx, myContextRect, myimage);
CGImageRef imageRef = CGBitmapContextCreateImage(gtx);
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
UIImageView *imgView = [[UIImageView alloc] initWithImage:finalImage];
[self.view addSubview:imgView];

OpenCV : k-means clustering in iOS

Now I am trying to do k-means clustering in iOS. To do k-means, I converted from UIImage to cv::Mat and made function to cluster cv::Mat. The function does not work well.
The result looks like almost good, but right side cols get black. I read openCV reference and I have no idea what's wrong.
The code is below. If someone help me, it is going to be really appriciated.
Excuse for my poor English...
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(
cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(
cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
return cvMat;
}
- (cv::Mat)kMeansClustering:(cv::Mat)input
{
cv::Mat samples(input.rows * input.cols, 3, CV_32F);
for( int y = 0; y < input.rows; y++ ){
for( int x = 0; x < input.cols; x++ ){
for( int z = 0; z < 3; z++){
samples.at<float>(y + x*input.rows, z) = input.at<cv::Vec3b>(y,x)[z];
}
}
}
int clusterCount = 20;
cv::Mat labels;
int attempts = 5;
cv::Mat centers;
kmeans(samples, clusterCount, labels, cv::TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 100, 0.01), attempts, cv::KMEANS_PP_CENTERS, centers );
cv::Mat new_image( input.rows, input.cols, input.type());
for( int y = 0; y < input.rows; y++ ){
for( int x = 0; x < input.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*input.rows,0);
new_image.at<cv::Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<cv::Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<cv::Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
}
return new_image;
}
You are supplying kMeansClustering with a four-channel image, it wants 3 channels. Try losing the alpha channel.
Add this at the top of the function:
cv::cvtColor(input , input , CV_RGBA2RGB);
so it looks like this:
- (cv::Mat)kMeansClustering:(cv::Mat)input
{
cv::cvtColor(input , input , CV_RGBA2RGB);
cv::Mat samples(input.rows * input.cols, 3, CV_32F);