What is the difference between the kmeans and cvKMeans2 algorithms in OpenCV?

I want to find the dominant N colors in a picture. For this purpose I decided to use the KMeans algorithm. My project is written in C, which is why I used the cvKMeans2 function. But it gives me very strange results. Then I tried the kmeans algorithm from the OpenCV C++ API, and it gives me much more accurate results. So, where is my mistake? Could someone explain it to me?
1. I used this image for the test.
2. Implementation in C.
#include <cv.h>
#include <highgui.h>

#define CLUSTERS 3

int main(int argc, char **argv) {
    const char *filename = "test_12.jpg";
    IplImage *tmp = cvLoadImage(filename);
    if (!tmp) {
        return -1;
    }
    IplImage *src = cvCloneImage(tmp);
    cvCvtColor(tmp, src, CV_BGR2RGB);
    CvMat *samples = cvCreateMat(src->height * src->width, 3, CV_32F);
    // NOTE: this loop indexes imageData as if rows were tightly packed,
    // ignoring IplImage's widthStep padding -- see the fix below
    for (int i = 0; i < samples->height; i++) {
        samples->data.fl[i * 3 + 0] = (uchar) src->imageData[i * 3 + 0];
        samples->data.fl[i * 3 + 1] = (uchar) src->imageData[i * 3 + 1];
        samples->data.fl[i * 3 + 2] = (uchar) src->imageData[i * 3 + 2];
    }
    CvMat *labels = cvCreateMat(samples->height, 1, CV_32SC1);
    CvMat *centers = cvCreateMat(CLUSTERS, 3, CV_32FC1);
    int flags = 0;
    int attempts = 5;
    cvKMeans2(samples, CLUSTERS, labels,
              cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.005),
              attempts, 0, flags, centers);
    // Draw the cluster centers as color swatches in a 300x40 strip
    int rows = 40;
    int cols = 300;
    IplImage *des = cvCreateImage(cvSize(cols, rows), 8, 3);
    int part = 4000;
    int r = 0;
    int u = 0;
    for (int y = 0; y < 300; ++y) {
        for (int x = 0; x < 40; ++x) {
            if (u >= part) {
                r++;
                part = (r + 1) * part;
            }
            des->imageData[(300 * x + y) * 3 + 0] = static_cast<char>(centers->data.fl[r * 3 + 0]);
            des->imageData[(300 * x + y) * 3 + 1] = static_cast<char>(centers->data.fl[r * 3 + 1]);
            des->imageData[(300 * x + y) * 3 + 2] = static_cast<char>(centers->data.fl[r * 3 + 2]);
            u++;
        }
    }
    IplImage *dominant_colors = cvCloneImage(des);
    cvCvtColor(des, dominant_colors, CV_BGR2RGB);
    cvNamedWindow("dominant_colors", CV_WINDOW_AUTOSIZE);
    cvShowImage("dominant_colors", dominant_colors);
    cvWaitKey(0);
    cvDestroyWindow("dominant_colors");
    cvReleaseImage(&src);
    cvReleaseImage(&des);
    cvReleaseMat(&labels);
    cvReleaseMat(&samples);
    return 0;
}
3. Implementation in C++.
#include <cv.h>
#include <opencv/cv.hpp>

#define CLUSTERS 3

int main(int argc, char **argv) {
    const cv::Mat &tmp = cv::imread("test_12.jpg");
    cv::Mat src;
    cv::cvtColor(tmp, src, CV_BGR2RGB);
    // Flatten the image into an N x 3 float matrix of RGB samples
    cv::Mat samples(src.rows * src.cols, 3, CV_32F);
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
            for (int z = 0; z < 3; z++)
                samples.at<float>(y + x * src.rows, z) = src.at<cv::Vec3b>(y, x)[z];
    int attempts = 5;
    cv::Mat labels;
    cv::Mat centers;
    cv::kmeans(samples, CLUSTERS, labels,
               cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 1000, 0.005),
               attempts, cv::KMEANS_PP_CENTERS, centers);
    // Draw each cluster center as a 100-pixel-wide color swatch
    cv::Mat colors(cv::Size(CLUSTERS * 100, 30), tmp.type());
    int p = 100;
    int cluster_id = 0;
    for (int x = 0; x < CLUSTERS * 100; x++) {
        for (int y = 0; y < 30; y++) {
            if (x >= p) {
                cluster_id++;
                p = (cluster_id + 1) * 100;
            }
            colors.at<cv::Vec3b>(y, x)[0] = static_cast<uchar>(centers.at<float>(cluster_id, 0));
            colors.at<cv::Vec3b>(y, x)[1] = static_cast<uchar>(centers.at<float>(cluster_id, 1));
            colors.at<cv::Vec3b>(y, x)[2] = static_cast<uchar>(centers.at<float>(cluster_id, 2));
        }
    }
    cv::Mat dominant_colors;
    cv::cvtColor(colors, dominant_colors, CV_RGB2BGR);
    cv::imshow("dominant_colors", dominant_colors);
    cv::waitKey(0);
    return 0;
}
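As a side note, the per-pixel copy into samples can usually be replaced with reshape and convertTo. A minimal sketch, assuming src is continuous (which is true for a freshly loaded image):
    // reshape(1, ...) reinterprets the 3-channel image as a single-channel
    // (rows*cols) x 3 matrix; convertTo casts it to CV_32F for kmeans.
    cv::Mat samples;
    src.reshape(1, src.rows * src.cols).convertTo(samples, CV_32F);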
4. Result of the C code.
5. Result of the C++ code.

I found my mistake. It is related to IplImage's widthStep field. As I read here, widthStep gets padded up to a multiple of 4 for performance reasons: if widthStep is equal to 30, it will be padded up to 32. Here is a corrected sampling loop that accounts for that padding:
int h = src->height;
int w = src->width;
int c = 3;
int delta = 0;
for (int i = 0, y = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
        for (int k = 0; k < c; ++k, y++) {
            samples->data.fl[i * w * c + c * j + k] =
                    (uchar) src->imageData[delta + i * w * c + c * j + k];
        }
    }
    // Skip the padding bytes at the end of each row
    delta += src->widthStep - src->width * src->nChannels;
}
The same fix, written with row pointers:
for (int x = 0, i = 0; x < src->height; ++x) {
    // widthStep-aware pointer to the start of row x
    auto *ptr = (uchar *) (src->imageData + x * src->widthStep);
    for (int y = 0; y < src->width; ++y, i++) {
        for (int j = 0; j < 3; ++j) {
            samples->data.fl[i * 3 + j] = ptr[3 * y + j];
        }
    }
}
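The pointer version is the idiomatic way to walk an IplImage: recomputing each row's base address from widthStep makes the padding explicit, so the inner loop never has to carry a running delta.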

Related

How to set OpenCV Mat as Tensorflow Lite input and output?

I'm trying to use the GPU Delegate in TensorFlow Lite on iOS. My model takes an OpenCV BGR image ([258, 540, 3]) as input and produces one as output. How can I set the inputs and outputs of the C++ TensorFlow Lite interpreter? I tried this code:
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
NSData* slicedData = [self inputDataFromCvMat:slicedImage];
uint8_t* in = (uint8_t*) slicedData.bytes;
ProcessInputWithFloatModel(in, out, WIDTH, HEIGHT, CHANNELS);

void ProcessInputWithFloatModel(uint8_t* input, float* buffer, int image_width, int image_height, int image_channels) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            const int in_x = (y * image_width) / wanted_input_width;
            const int in_y = (x * image_height) / wanted_input_height;
            uint8_t* input_pixel =
                input + (in_y * image_width * image_channels) + (in_x * image_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                out_pixel[c] = (input_pixel[c] - input_mean) / input_std;
            }
        }
    }
}
- (NSData *)inputDataFromCvMat:(Mat)image {
    NSMutableData *inputData = [[NSMutableData alloc] initWithCapacity:0];
    for (int row = 0; row < HEIGHT + 10; row++) {
        for (int col = 0; col < WIDTH + 10; col++) {
            Vec3b intensity = image.at<Vec3b>(row, col);
            int blue = intensity.val[0];
            int green = intensity.val[1];
            int red = intensity.val[2];
            // we need to put pixel values in BGR (model was trained with opencv)
            [inputData appendBytes:&blue length:sizeof(blue)];
            [inputData appendBytes:&green length:sizeof(green)];
            [inputData appendBytes:&red length:sizeof(red)];
        }
    }
    return inputData;
}
but I don't know what is wrong.
After some research, I managed to get it working:
const int wanted_input_width = 258;
const int wanted_input_height = 540;
const int wanted_input_channels = 3;

Mat image = ...

// write to input
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
uint8_t* in = image.ptr<uint8_t>(0);
ProcessInputWithFloatModel(in, out);

// run interpreter
if (interpreter->Invoke() != kTfLiteOk) {
    LOG(FATAL) << "Failed to invoke!";
}

// get output
int output_idx = interpreter->outputs()[0];
float* output = interpreter->typed_output_tensor<float>(output_idx);
Mat outputMat = ProcessOutputWithFloatModel(output);

/// Preprocess the input image and feed the TFLite interpreter buffer for a float model.
void ProcessInputWithFloatModel(uint8_t* input, float* buffer) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            uint8_t* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                // Scale 8-bit pixel values to [0, 1]
                out_pixel[c] = input_pixel[c] / 255.0f;
            }
        }
    }
}

Mat ProcessOutputWithFloatModel(float* input) {
    cv::Mat image = cv::Mat::zeros(wanted_input_height, wanted_input_width, CV_8UC3);
    for (int y = 0; y < wanted_input_height; ++y) {
        for (int x = 0; x < wanted_input_width; ++x) {
            float* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            cv::Vec3b& color = image.at<cv::Vec3b>(cv::Point(x, y));
            // Scale float output in [0, 1] back to 8-bit
            color[0] = (uchar) floor(input_pixel[0] * 255.0f);
            color[1] = (uchar) floor(input_pixel[1] * 255.0f);
            color[2] = (uchar) floor(input_pixel[2] * 255.0f);
        }
    }
    return image;
}
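One caveat worth noting (an assumption on my part, not something the answer above covers): image.ptr<uint8_t>(0) walks the whole image in one pass only when the Mat's data is continuous, so a guard like this may be needed for ROI or padded Mats:
    // Ensure the Mat occupies one contiguous buffer before taking a raw pointer.
    if (!image.isContinuous()) {
        image = image.clone();  // clone() always produces a continuous Mat
    }
    uint8_t* in = image.ptr<uint8_t>(0);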

Implementing Sobel Operator

I am trying to implement a Sobel operator in both the horizontal and vertical direction, but somehow I am getting the reverse output. The code is attached below. For the horizontal mask:
char mask[3][3] = { {-1, -2, -1}, {0, 0, 0}, {1, 2, 1} };

void masking(Mat image) {
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows - 1; i++)
    {
        for (int j = 1; j < image.cols - 1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i - 1, j - 1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i, j - 1)[k] * -2;
                int pixel3 = image.at<Vec3b>(i + 1, j - 1)[k] * -1;
                int pixel4 = image.at<Vec3b>(i - 1, j)[k] * 0;
                int pixel5 = image.at<Vec3b>(i, j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i + 1, j)[k] * 0;
                int pixel7 = image.at<Vec3b>(i - 1, j + 1)[k] * 1;
                int pixel8 = image.at<Vec3b>(i, j + 1)[k] * 2;
                int pixel9 = image.at<Vec3b>(i + 1, j + 1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5 + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                    sum = 0;
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i, j)[k] = sum;
            }
        }
    }
    //printf("conter = %d",counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
whereas for the vertical mask:
char mask[3][3] = { {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1} };

void masking(Mat image) {
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows - 1; i++)
    {
        for (int j = 1; j < image.cols - 1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i - 1, j - 1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i, j - 1)[k] * 0;
                int pixel3 = image.at<Vec3b>(i + 1, j - 1)[k] * 1;
                int pixel4 = image.at<Vec3b>(i - 1, j)[k] * -2;
                int pixel5 = image.at<Vec3b>(i, j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i + 1, j)[k] * 2;
                int pixel7 = image.at<Vec3b>(i - 1, j + 1)[k] * -1;
                int pixel8 = image.at<Vec3b>(i, j + 1)[k] * 0;
                int pixel9 = image.at<Vec3b>(i + 1, j + 1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5 + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                    sum = 0;
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i, j)[k] = sum;
            }
        }
    }
    //printf("conter = %d",counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
The main function is attached below:
int main(int argc, char** argv) {
    Mat input_image = imread("sobel1.jpg", 1);
    masking(input_image);
    waitKey(0);
    return 0;
}
According to the guide https://www.tutorialspoint.com/dip/sobel_operator.htm I should get the reverse output. Can anyone help me with this?
The original image is
No, the tutorial is not wrong; it talks about masks, not gradients. The weak point of that tutorial is that it doesn't mention that we are calculating horizontal gradients using what they call the vertical mask.
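To make the naming concrete, here is a minimal cross-check against OpenCV's built-in Sobel (a sketch; the file name is taken from the question). dx = 1 applies what the tutorial calls the vertical mask: it computes the horizontal gradient and so highlights vertical edges; dy = 1 is the reverse.
#include <opencv2/opencv.hpp>

int main() {
    cv::Mat src = cv::imread("sobel1.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat gx, gy, gx8, gy8;
    cv::Sobel(src, gx, CV_16S, 1, 0);  // horizontal gradient ("vertical mask"): vertical edges
    cv::Sobel(src, gy, CV_16S, 0, 1);  // vertical gradient ("horizontal mask"): horizontal edges
    cv::convertScaleAbs(gx, gx8);      // |gradient|, saturated to 8 bits
    cv::convertScaleAbs(gy, gy8);
    cv::imwrite("gx.png", gx8);
    cv::imwrite("gy.png", gy8);
    return 0;
}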

Edge detection for color images (Canny algorithm)

This is how I managed to use a Sobel kernel on a GRAYSCALE image. However, I don't actually get how to modify it for a color image.
void Soble()
{
    Mat img;
    int w = 3;
    int k = w / 2;
    char fname[MAX_PATH];
    openFileDlg(fname);
    img = imread(fname, CV_LOAD_IMAGE_GRAYSCALE);
    gaussianFiltering(img);
    Mat destinationImg = img.clone();
    float sobelY[3][3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
    float sobelX[3][3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
    for (int i = k; i < img.rows - k; i++)
    {
        for (int j = k; j < img.cols - k; j++)
        {
            float Gx = 0, Gy = 0;
            for (int l = 0; l < w; l++)
            {
                for (int p = 0; p < w; p++)
                {
                    Gx += img.at<uchar>(i + l - k, j + p - k) * sobelX[l][p];
                    Gy += img.at<uchar>(i + l - k, j + p - k) * sobelY[l][p];
                }
            }
            destinationImg.at<uchar>(i, j) = sqrt(Gx*Gx + Gy * Gy) / (4 * sqrt(2));
        }
    }
    imshow("Intermediar", destinationImg);
    imshow("Initial", img);
    waitKey(0);
}
I thought of processing each RGB channel separately, but it does not work and even gives some errors.
float GxR = 0, GyR = 0;
float GxG = 0, GyG = 0;
float GxB = 0, GyB = 0;
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        GxR += img.at<Vec3b>[0](i + l - k, j + p - k) * sobelX[l][p];
        GxG += img.at<Vec3b>[1](i + l - k, j + p - k) * sobelX[l][p];
        GxB += img.at<Vec3b>[2](i + l - k, j + p - k) * sobelX[l][p];
        GyR += img.at<Vec3b>[0](i + l - k, j + p - k) * sobelY[l][p];
        GyG += img.at<Vec3b>[1](i + l - k, j + p - k) * sobelY[l][p];
        GyB += img.at<Vec3b>[2](i + l - k, j + p - k) * sobelY[l][p];
    }
}
destinationImg.at<Vec3b>[0](i, j) = sqrt(GxR*GxR + GyR * GyR) / (4 * sqrt(2));
destinationImg.at<Vec3b>[1](i, j) = sqrt(GxG*GxG + GyB * GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>[2](i, j) = sqrt(GxG*GxG + GyG * GyG) / (4 * sqrt(2));
Can you please explain how this code must be rewritten?
You are accessing the image data the wrong way.
destinationImg.at<Vec3b>[0](i, j)
destinationImg is a Mat of type Vec3b. That means it's a 2D array of three-dimensional vectors.
Your [] operator is in the wrong place. The subscript error message tells you that you're using that operator on something that is neither a pointer nor an array, which is not possible. You get the other error message because you have that operator where the (i, j) is expected.
First you have to get one of these vectors, then you can access its elements.
destinationImg.at<Vec3b>(i, j) will give you the vector at (i, j).
destinationImg.at<Vec3b>(i, j)[0] will give you the first element of that vector.
Example from the OpenCV documentation:
Vec3b intensity = img.at<Vec3b>(y, x);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
http://docs.opencv.org/2.4.13.2/doc/user_guide/ug_mat.html
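Putting the answer into practice, the question's loop could be rewritten like the sketch below. Note it also fixes two apparent copy-paste slips in the original (GyB feeding the [1] result and GxG/GyG feeding the [2] result) and aligns the channel names with OpenCV's BGR order:
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        Vec3b pix = img.at<Vec3b>(i + l - k, j + p - k);
        GxB += pix[0] * sobelX[l][p];  // blue  is channel 0 in BGR
        GxG += pix[1] * sobelX[l][p];  // green is channel 1
        GxR += pix[2] * sobelX[l][p];  // red   is channel 2
        GyB += pix[0] * sobelY[l][p];
        GyG += pix[1] * sobelY[l][p];
        GyR += pix[2] * sobelY[l][p];
    }
}
destinationImg.at<Vec3b>(i, j)[0] = sqrt(GxB*GxB + GyB*GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>(i, j)[1] = sqrt(GxG*GxG + GyG*GyG) / (4 * sqrt(2));
destinationImg.at<Vec3b>(i, j)[2] = sqrt(GxR*GxR + GyR*GyR) / (4 * sqrt(2));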

Implementation of Harris Corner Detector using Sobel and Gaussian Blur in C++

I want to implement the Harris corner detector. I found this page very helpful, since it shows how the detector is implemented using basic OpenCV functions (like GaussianBlur and Sobel):
https://compvisionlab.wordpress.com/2013/03/02/harris-interest-point-detection-implementation-opencv/
Now I want to implement the Gaussian blur and the Sobel operator myself as well. Run on their own over some images, my Gaussian and Sobel work, but in combination with my corner detector they do not. Can anybody please help me? The full code is below, thanks.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray, dst;
int thresh = 200;
int max_thresh = 255;
char* source_window = "Source Image";
char* corners_window = "Corner Image";
/// Function header
void cornerHarris_demo(int, void*);
void cornerHarrisMe(int, int, double);
int xGradient(Mat, int, int);
int yGradient(Mat, int, int);
void SobelMe(Mat&,Mat&,int,int);
int borderCheck(int M, int x);
void SepGaussian(Mat&, Mat&, int, int);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("data/a-real-big-church.jpg", 1);
//Mat src_gray(src.size(), CV_8UC1);
cvtColor(src, src_gray, CV_BGR2GRAY);
/// Create a window and a trackbar
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
createTrackbar("Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo);
imshow(source_window, src);
cornerHarris_demo(0, 0);
waitKey(0);
return(0);
}
/** #function cornerHarris_demo */
void cornerHarris_demo(int, void*)
{
Mat dst_norm, dst_norm_scaled;
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarrisMe(blockSize, apertureSize, k);
/// Normalizing
normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
convertScaleAbs(dst_norm, dst_norm_scaled);
/// Drawing a circle around corners
for (int j = 0; j < dst_norm.rows; j++)
{
for (int i = 0; i < dst_norm.cols; i++)
{
if ((int)dst_norm.at<float>(j, i) > thresh)
{
circle(dst_norm_scaled, Point(i, j), 5, Scalar(255), 2, 8, 0);
}
}
}
/// Showing the result
namedWindow(corners_window, CV_WINDOW_AUTOSIZE);
imshow(corners_window, dst_norm_scaled);
}
void cornerHarrisMe(int blockSize, int apertureSize, double k)
{
Mat x2y2, xy, mtrace, x_der, y_der, x2_der, y2_der, xy_der, x2g_der, y2g_der, xyg_der;
//1: calculate x and y derivative of image via Sobel
SobelMe(src_gray, x_der, 1, 0);
SobelMe(src_gray, y_der, 0, 1);
//2: calculate other three images in M
pow(x_der, blockSize, x2_der);
pow(y_der, blockSize, y2_der);
multiply(x_der, y_der, xy_der);
//3: gaussain
SepGaussian(x2_der, x2g_der, 1, 0);
SepGaussian(y2_der, y2g_der, 0, 1);
SepGaussian(xy_der, xyg_der, 1, 1);
//4. calculating R with k
multiply(x2g_der, y2g_der, x2y2);
multiply(xyg_der, xyg_der, xy);
pow((x2g_der + y2g_der), blockSize, mtrace);
dst = (x2y2 - xy) - k * mtrace;
}
// gradient in the x direction
int xGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y, x - 1) +
image.at<uchar>(y + 1, x - 1) -
image.at<uchar>(y - 1, x + 1) -
2 * image.at<uchar>(y, x + 1) -
image.at<uchar>(y + 1, x + 1);
}
// gradient in the y direction
int yGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y - 1, x) +
image.at<uchar>(y - 1, x + 1) -
image.at<uchar>(y + 1, x - 1) -
2 * image.at<uchar>(y + 1, x) -
image.at<uchar>(y + 1, x + 1);
}
void SobelMe(Mat& source, Mat& destination, int xOrder, int yOrder){
int gradX, gradY, sum;
destination = source.clone();
if (xOrder == 1 && yOrder == 0){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
sum = abs(gradX);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 0 && yOrder == 1){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradY = yGradient(source, x, y);
sum = abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 1 && yOrder == 1)
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
gradY = yGradient(source, x, y);
sum = abs(gradX) + abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
int borderCheck(int M, int x){
if (x < 0)
return -x - 1;
if (x >= M)
return 2 * M - x - 1;
return x;
}
void SepGaussian(Mat& source, Mat& desination, int sigmaX, int sigmaY){
// coefficients of 1D gaussian kernel with sigma = 1
double coeffs[] = { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 };
Mat tempX, tempY;
float sum, x1, y1;
desination = source.clone();
tempY = source.clone();
tempX = source.clone();
// along y - direction
if (sigmaX == 0 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along x - direction
else if (sigmaX == 1 && sigmaY == 0){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along xy - direction
else if (sigmaX == 1 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
tempY.at<uchar>(y, x) = sum;
}
}
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * tempY.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
}
The Result:
Here is a picture of the result.
The result is now the other way around: it detects areas where there are no corners.
In case there are some questions, feel free to ask me.
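For comparison, here is a minimal sketch of the same four steps from cornerHarrisMe built on OpenCV's own Sobel and GaussianBlur, keeping every intermediate in CV_32F. This reflects an assumption about a likely culprit rather than a confirmed diagnosis: the custom SobelMe and SepGaussian above store their results in uchar Mats, which clips negative derivatives and overflows the squared terms.
// Sketch: Harris response with float intermediates (assumes src_gray is CV_8UC1).
Mat gx, gy;
Sobel(src_gray, gx, CV_32F, 1, 0, 3);       // x derivative, kept signed in float
Sobel(src_gray, gy, CV_32F, 0, 1, 3);       // y derivative
Mat gx2 = gx.mul(gx), gy2 = gy.mul(gy), gxy = gx.mul(gy);
GaussianBlur(gx2, gx2, Size(5, 5), 1.0);    // window the structure tensor terms
GaussianBlur(gy2, gy2, Size(5, 5), 1.0);
GaussianBlur(gxy, gxy, Size(5, 5), 1.0);
Mat det = gx2.mul(gy2) - gxy.mul(gxy);      // det(M)
Mat trace = gx2 + gy2;                      // trace(M)
dst = det - 0.04 * trace.mul(trace);        // R = det(M) - k * trace(M)^2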

OpenCV filter on a multi-dimensional Mat

I want to port the following MATLAB code to C++:
gaussFilter = fspecial('gaussian', 2*neighSize+1, 0.5*neighSize);
pointFeature = imfilter(pointFeature, gaussFilter, 'symmetric');
where pointFeature is a [height, width, 24] array.
I tried to use filter2D, but it only supports 2D arrays, so I want to know whether there are functions in OpenCV that can filter a multi-dimensional array.
You can use separable kernel filters to build a filter of any dimensionality.
If you are using OpenCV, you could try this for a 3-dimensional MatND:
void Smooth3DHist(cv::MatND &hist, const int& kernDimension)
{
    assert(hist.dims == 3);
    int x_size = hist.size[0];
    int y_size = hist.size[1];
    int z_size = hist.size[2];
    int xy_size = x_size * y_size;
    cv::Mat kernel = cv::getGaussianKernel(kernDimension, -1, CV_32F);
    // Filter XY dimensions for every Z
    for (int z = 0; z < z_size; z++)
    {
        float *ind = (float*)hist.data + z * xy_size; // sub-matrix pointer
        cv::Mat subMatrix(2, hist.size, CV_32F, ind);
        cv::sepFilter2D(subMatrix, subMatrix, CV_32F, kernel.t(), kernel, cv::Point(-1, -1), 0.0, cv::BORDER_REPLICATE);
    }
    // Filter Z dimension
    float* kernGauss = (float *)kernel.data;
    unsigned kernSize = kernel.total();
    int kernMargin = (kernSize - 1) / 2;
    float* lineBuffer = new float[z_size + 2 * kernMargin];
    for (int y = 0; y < y_size; y++)
    {
        for (int x = 0; x < x_size; x++)
        {
            // Copy along Z dimension into a line buffer
            float* z_ptr = (float*)hist.data + y * x_size + x; // same as hist.ptr<float>(0, y, x)
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                lineBuffer[z + kernMargin] = *z_ptr;
            }
            // Replicate borders
            for (int m = 0; m < kernMargin; m++)
            {
                lineBuffer[m] = lineBuffer[kernMargin]; // replicate left side
                lineBuffer[z_size + 2 * kernMargin - 1 - m] = lineBuffer[kernMargin + z_size - 1]; // replicate right side
            }
            // Filter the line buffer (1D convolution)
            z_ptr = (float*)hist.data + y * x_size + x;
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                *z_ptr = 0.0f;
                for (unsigned k = 0; k < kernSize; k++)
                {
                    *z_ptr += lineBuffer[z + k] * kernGauss[k];
                }
            }
        }
    }
    delete[] lineBuffer;
}
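A hypothetical usage example, assuming a 32x32x32 float histogram and a 5-tap kernel:
int sizes[3] = { 32, 32, 32 };
cv::MatND hist(3, sizes, CV_32F, cv::Scalar(0));
// ... fill the histogram ...
Smooth3DHist(hist, 5);  // smooths all three dimensions in place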