I'm computing a homography for different shapes, i.e. QUAD, HEXAGON, and TRIANGLE. The logic works fine for the quad but not for the hexagon or triangle. It looks like there is a problem with my matrix calculation logic. Can you please review?
Here is my code:
#import <Foundation/Foundation.h>
#import "EigenWrapper.h"
#import <Eigen/Dense>
@implementation EigenWrapper
constexpr double W = 800;
constexpr double H = 800;
constexpr size_t NQUAD = 4;
constexpr size_t NTRI = 3;
constexpr size_t NHEX = 6;
Eigen::Matrix3d Homography;
//QUAD
Eigen::Matrix<double,2,NQUAD> DSTQUAD;
Eigen::Matrix<double,2*NQUAD,9> AQUAD;
Eigen::Matrix<double,2,NQUAD> SRCQUAD;
//Triangle
Eigen::Matrix<double,2,NTRI> DSTTRI;
Eigen::Matrix<double,2*NTRI,9> ATRI;
Eigen::Matrix<double,2,NTRI> SRCTRI;
//HEXA
Eigen::Matrix<double,2,NHEX> DSTHEX;
Eigen::Matrix<double,2*NHEX,9> AHEX;
Eigen::Matrix<double,2,NHEX> SRCHEX;
- (instancetype)initEigenWrapper:(size_t)sides {
    self = [super init];
    return self;
}
+ (double *)eigenGetHomogprahyMatrix:(double)topLeftX :(double)topLeftY :(double)topRightX :(double)topRightY :(double)bottomLeftX :(double)bottomLeftY :(double)bottomRightX :(double)bottomRightY {
    SRCQUAD <<
        0, W, 0, W,
        0, 0, H, H;
    DSTQUAD <<
        topLeftX, topRightX, bottomLeftX, bottomRightX,
        topLeftY, topRightY, bottomLeftY, bottomRightY;
    AQUAD.setZero();
    // Build the standard DLT system: two rows per point correspondence.
    for (size_t i = 0; i < NQUAD; i++) {
        const double x_ = DSTQUAD(0,i), y_ = DSTQUAD(1,i);
        const double x = SRCQUAD(0,i), y = SRCQUAD(1,i);
        AQUAD(2*i,0) = AQUAD(2*i+1,3) = x_;
        AQUAD(2*i,1) = AQUAD(2*i+1,4) = y_;
        AQUAD(2*i,2) = AQUAD(2*i+1,5) = 1;
        AQUAD(2*i,6) = -x*x_;
        AQUAD(2*i,7) = -x*y_;
        AQUAD(2*i,8) = -x;
        AQUAD(2*i+1,6) = -y*x_;
        AQUAD(2*i+1,7) = -y*y_;
        AQUAD(2*i+1,8) = -y;
    }
    // h is the right singular vector for the smallest singular value.
    Eigen::JacobiSVD<Eigen::Matrix<double,2*NQUAD,9>> svd(AQUAD, Eigen::ComputeFullV);
    Eigen::Matrix<double,9,1> h = svd.matrixV().col(8);
    Homography <<
        h(0), h(1), h(2),
        h(3), h(4), h(5),
        h(6), h(7), h(8);
    for (int i = 0; i < 9; i++) { // 9 entries, not 10
        printf("\n: %f", Homography.data()[i]);
    }
    // Eigen matrices are column-major by default, so this pointer is column-major data.
    return Homography.data();
}
+ (double *)eigenGetHomogprahyTriangle:(double)middleX :(double)middleY :(double)bottomLeftX :(double)bottomLeftY :(double)bottomRightX :(double)bottomRightY {
    SRCTRI <<
        0, W, W/2,
        H, H, 0;
    DSTTRI <<
        bottomLeftX, bottomRightX, middleX,
        bottomLeftY, bottomRightY, middleY;
    ATRI.setZero();
    for (size_t i = 0; i < NTRI; i++) {
        const double x_ = DSTTRI(0,i), y_ = DSTTRI(1,i);
        const double x = SRCTRI(0,i), y = SRCTRI(1,i);
        ATRI(2*i,0) = ATRI(2*i+1,3) = x_;
        ATRI(2*i,1) = ATRI(2*i+1,4) = y_;
        ATRI(2*i,2) = ATRI(2*i+1,5) = 1;
        ATRI(2*i,6) = -x*x_;
        ATRI(2*i,7) = -x*y_;
        ATRI(2*i,8) = -x;
        ATRI(2*i+1,6) = -y*x_;
        ATRI(2*i+1,7) = -y*y_;
        ATRI(2*i+1,8) = -y;
    }
    Eigen::JacobiSVD<Eigen::Matrix<double,2*NTRI,9>> svd(ATRI, Eigen::ComputeFullV);
    Eigen::Matrix<double,9,1> h = svd.matrixV().col(8);
    Homography <<
        h(0), h(1), h(2),
        h(3), h(4), h(5),
        h(6), h(7), h(8);
    for (int i = 0; i < 9; i++) { // 9 entries, not 10
        printf("\n: %f", Homography.data()[i]);
    }
    return Homography.data();
}
+ (double *)eigenGetHomogprahyMatrixHexa:(double)topLeftX :(double)topLeftY :(double)topRightX :(double)topRightY :(double)middleLeftX :(double)middleLeftY :(double)middleRightX :(double)middleRightY :(double)bottomLeftX :(double)bottomLeftY :(double)bottomRightX :(double)bottomRightY {
    SRCHEX <<
        0, W, W/2, 0, W, W,
        0, 0, H/2, H, H, H/2;
    DSTHEX <<
        topLeftX, topRightX, middleLeftX, bottomLeftX, bottomRightX, middleRightX,
        topLeftY, topRightY, middleLeftY, bottomLeftY, bottomRightY, middleRightY;
    AHEX.setZero();
    for (size_t i = 0; i < NHEX; i++) {
        const double x_ = DSTHEX(0,i), y_ = DSTHEX(1,i);
        const double x = SRCHEX(0,i), y = SRCHEX(1,i);
        AHEX(2*i,0) = AHEX(2*i+1,3) = x_;
        AHEX(2*i,1) = AHEX(2*i+1,4) = y_;
        AHEX(2*i,2) = AHEX(2*i+1,5) = 1;
        AHEX(2*i,6) = -x*x_;
        AHEX(2*i,7) = -x*y_;
        AHEX(2*i,8) = -x;
        AHEX(2*i+1,6) = -y*x_;
        AHEX(2*i+1,7) = -y*y_;
        AHEX(2*i+1,8) = -y;
    }
    Eigen::JacobiSVD<Eigen::Matrix<double,2*NHEX,9>> svd(AHEX, Eigen::ComputeFullV);
    Eigen::Matrix<double,9,1> h = svd.matrixV().col(8);
    Homography <<
        h(0), h(1), h(2),
        h(3), h(4), h(5),
        h(6), h(7), h(8);
    for (int i = 0; i < 9; i++) { // 9 entries, not 10
        printf("\n: %f", Homography.data()[i]);
    }
    return Homography.data();
}
@end
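For debugging, a quick sanity check is to push each destination corner through the computed H and confirm it lands on the matching source corner after the perspective divide: with the rows built as above, the solved H maps destination points to source points (which is why WARP_INVERSE_MAP is used later). This is only a minimal sketch to drop temporarily at the end of the quad method; applyH is a hypothetical helper, not part of the wrapper above.
// Hypothetical helper: apply a 3x3 homography to a 2D point, including the perspective divide.
static Eigen::Vector2d applyH(const Eigen::Matrix3d &Hm, double x, double y) {
    Eigen::Vector3d p = Hm * Eigen::Vector3d(x, y, 1.0);
    return Eigen::Vector2d(p(0) / p(2), p(1) / p(2));
}

// Example check for the quad case; the residuals are worth inspecting for the other
// shapes too, because 3 correspondences leave the 8-DOF DLT system underdetermined,
// and 6 correspondences generally overdetermine it (a single homography cannot hit
// 6 arbitrary points exactly).
for (int i = 0; i < (int)NQUAD; i++) {
    Eigen::Vector2d p = applyH(Homography, DSTQUAD(0, i), DSTQUAD(1, i));
    printf("corner %d: mapped (%f, %f) vs source (%f, %f)\n",
           i, p(0), p(1), SRCQUAD(0, i), SRCQUAD(1, i));
}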
After computing the homography, I'm warping the texture image with OpenCV. I also want to change the background of the output image from black to transparent.
cv::Mat sourceImage = imread("texture.png", cv::IMREAD_COLOR);
for (int i = 0; i < 9; i++) {
    printf(":- %.7f", homographyMatrix[i]);
}
// The Eigen data is column-major, so read it back transposed to get a row-major cv::Matx33d.
cv::Matx33d H(homographyMatrix[0], homographyMatrix[3], homographyMatrix[6],
              homographyMatrix[1], homographyMatrix[4], homographyMatrix[7],
              homographyMatrix[2], homographyMatrix[5], homographyMatrix[8]);
cv::Mat destImage;
cv::warpPerspective(sourceImage, destImage, H, cv::Size(width, height),
                    cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);
UIImage *deImage = MatToUIImage(destImage);
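For the transparent background, one option (a sketch under assumptions, not verified against the code above) is to warp a 4-channel BGRA image so the alpha channel follows the warp; everything outside the warped shape then keeps alpha 0:
// Load with any existing alpha preserved, then make sure we have 4 channels (BGRA).
cv::Mat sourceImage = cv::imread("texture.png", cv::IMREAD_UNCHANGED);
if (sourceImage.channels() == 3) {
    cv::cvtColor(sourceImage, sourceImage, cv::COLOR_BGR2BGRA);
}
cv::Mat destImage;
// BORDER_CONSTANT with Scalar(0,0,0,0) fills the outside black AND fully transparent.
cv::warpPerspective(sourceImage, destImage, H, cv::Size(width, height),
                    cv::INTER_LINEAR | cv::WARP_INVERSE_MAP,
                    cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0, 0));
// Depending on the OpenCV version, MatToUIImage may ignore or misorder the alpha
// channel, so converting BGRA -> RGBA first is a reasonable precaution.
cv::cvtColor(destImage, destImage, cv::COLOR_BGRA2RGBA);
UIImage *transparentImage = MatToUIImage(destImage);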
Here is the output for the hexagon and for the triangle (screenshots omitted).
Thanks
I'm trying to use the GPU delegate in TensorFlow Lite on iOS. My model's inputs and outputs are OpenCV BGR images ([258, 540, 3]). How can I set the inputs and outputs in the C++ TensorFlow Lite interpreter? I tried to use this code:
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
NSData* slicedData = [self inputDataFromCvMat:slicedImage];
uint8_t* in = (uint8_t*) slicedData.bytes;
ProcessInputWithFloatModel(in, out, WIDTH, HEIGHT, CHANNELS);
void ProcessInputWithFloatModel(uint8_t* input, float* buffer, int image_width, int image_height, int image_channels) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            const int in_x = (y * image_width) / wanted_input_width;
            const int in_y = (x * image_height) / wanted_input_height;
            uint8_t* input_pixel =
                input + (in_y * image_width * image_channels) + (in_x * image_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                out_pixel[c] = (input_pixel[c] - input_mean) / input_std;
            }
        }
    }
}
- (NSData *)inputDataFromCvMat:(Mat)image {
    NSMutableData *inputData = [[NSMutableData alloc] initWithCapacity:0];
    for (int row = 0; row < HEIGHT + 10; row++) {
        for (int col = 0; col < WIDTH + 10; col++) {
            Vec3b intensity = image.at<Vec3b>(row, col);
            int blue = intensity.val[0];
            int green = intensity.val[1];
            int red = intensity.val[2];
            // we need to put pixel values in BGR (model was trained with opencv)
            [inputData appendBytes:&blue length:sizeof(blue)];
            [inputData appendBytes:&green length:sizeof(green)];
            [inputData appendBytes:&red length:sizeof(red)];
        }
    }
    return inputData;
}
but I don't know what is wrong.
After some research, I managed to get it working:
const int wanted_input_width = 258;
const int wanted_input_height = 540;
const int wanted_input_channels = 3;
Mat image = ...
// write to input
int input = interpreter->inputs()[0];
float* out = interpreter->typed_tensor<float>(input);
uint8_t* in = image.ptr<uint8_t>(0);
ProcessInputWithFloatModel(in, out);
// run interpreter
if (interpreter->Invoke() != kTfLiteOk) {
    LOG(FATAL) << "Failed to invoke!";
}
// get output
int output_idx = interpreter->outputs()[0];
// typed_output_tensor expects a position in outputs() (i.e. 0 here), not a global
// tensor index, so use typed_tensor with the tensor index instead.
float* output = interpreter->typed_tensor<float>(output_idx);
Mat outputMat = ProcessOutputWithFloatModel(output);
/// Preprocess the input image and feed the TFLite interpreter buffer for a float model.
void ProcessInputWithFloatModel(uint8_t* input, float* buffer) {
    for (int y = 0; y < wanted_input_height; ++y) {
        float* out_row = buffer + (y * wanted_input_width * wanted_input_channels);
        for (int x = 0; x < wanted_input_width; ++x) {
            uint8_t* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            float* out_pixel = out_row + (x * wanted_input_channels);
            for (int c = 0; c < wanted_input_channels; ++c) {
                out_pixel[c] = input_pixel[c] / 255.0f; // normalize to [0, 1]
            }
        }
    }
}
Mat ProcessOutputWithFloatModel(float* input) {
    cv::Mat image = cv::Mat::zeros(wanted_input_height, wanted_input_width, CV_8UC3);
    for (int y = 0; y < wanted_input_height; ++y) {
        for (int x = 0; x < wanted_input_width; ++x) {
            float* input_pixel = input + (y * wanted_input_width * wanted_input_channels) + (x * wanted_input_channels);
            cv::Vec3b& color = image.at<cv::Vec3b>(cv::Point(x, y));
            color[0] = (uchar) floor(input_pixel[0] * 255.0f);
            color[1] = (uchar) floor(input_pixel[1] * 255.0f);
            color[2] = (uchar) floor(input_pixel[2] * 255.0f);
        }
    }
    return image;
}
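For completeness, attaching the actual GPU delegate looked roughly like this. This is a sketch based on the TFLite Metal delegate API; the exact header path and option struct depend on your TensorFlow version:
#include "tensorflow/lite/delegates/gpu/metal_delegate.h"

// Create the Metal (GPU) delegate with default options and hand the graph to it
// before allocating tensors / invoking.
TfLiteDelegate* delegate = TFLGpuDelegateCreate(nullptr);
if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
    LOG(FATAL) << "Failed to apply GPU delegate!";
}
// ... Invoke() and read outputs as above; call TFLGpuDelegateDelete(delegate)
// once the interpreter has been destroyed.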
I am using Windows 7 (64-bit), Visual Studio 2012 (C++), and OpenCV 2.4.11.
I wrote a seeded region growing (SRG) algorithm, but I don't know why the results are not as desired.
The goal is to extract the pectoral muscle (left corner) of the image with the SRG algorithm.
Input image: (not shown here)
Here is my code (the main function):
img = imread("C:/Users/Zimabi/Downloads/region_growing/jhgray.jpg", 0);
Mat J1 = Mat::zeros(img.rows, img.cols, CV_8UC1);
int x = 15, y = 15;
int reg_mean = img.at<uchar>(x, y);
int reg_size = 1;
int neg_pos = 0;
Mat neg_list = Mat::zeros(img.cols*img.rows, 3, CV_8UC1);
double pixdist = 0, reg_maxdist = 0.1;
int xn = 0, yn = 0;
Mat neigb = Mat::zeros(4, 2, CV_8UC1);
int nei1[4] = {-1, 1, 0, 0};
int nei2[4] = {0, 0, -1, 1};
while ((pixdist < reg_maxdist) && (reg_size < img.cols*img.rows))
{
    int inx, min1 = 1000;
    for (int r = 0; r < 4; r++)
    {
        xn = x + nei1[r];
        yn = y + nei2[r];
        bool ins = (xn >= 1) && (yn >= 1) && (xn <= img.rows) && (yn <= img.cols);
        if (ins && (J1.at<uchar>(xn, yn) == 0))
        {
            neg_pos = neg_pos + 1;
            neg_list.at<uchar>(neg_pos, 0) = xn;
            neg_list.at<uchar>(neg_pos, 1) = yn;
            neg_list.at<uchar>(neg_pos, 2) = img.at<uchar>(xn, yn);
            J1.at<uchar>(xn, yn) = 255;
        }
    }
    Mat x1 = Mat::zeros(neg_pos, 1, CV_8UC1);
    for (int i3 = 0; i3 < neg_pos; i3++)
    {
        x1.at<uchar>(i3, 0) = abs(neg_list.at<uchar>(i3, 2) - reg_mean);
        if (x1.at<uchar>(i3, 0) < min1)
        {
            min1 = x1.at<uchar>(i3, 0);
            inx = i3;
        }
    }
    pixdist = min1;
    J1.at<uchar>(x, y) = 255;
    reg_size = reg_size + 1;
    reg_mean = (reg_mean*reg_size + neg_list.at<uchar>(inx, 2)) / (reg_size + 1);
    x = neg_list.at<uchar>(inx, 0);
    y = neg_list.at<uchar>(inx, 1);
    neg_list.at<uchar>(inx, 0) = neg_list.at<uchar>(neg_pos, 0);
    neg_list.at<uchar>(inx, 1) = neg_list.at<uchar>(neg_pos, 1);
    neg_list.at<uchar>(inx, 2) = neg_list.at<uchar>(neg_pos, 2);
    neg_pos = neg_pos - 1;
}
imshow("J", J1);
waitKey(0);
return 0;
Regards
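For reference, the same candidate-list idea can be written more compactly with a std::vector and 0-based bounds checks. This is only a minimal sketch of seeded region growing; the seed and the maxDist threshold below are placeholders to tune, not values from the question:
#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

// Minimal seeded region growing: grow from `seed` while the closest candidate
// pixel stays within `maxDist` of the running region mean.
cv::Mat regionGrow(const cv::Mat& img, cv::Point seed, double maxDist)
{
    cv::Mat region = cv::Mat::zeros(img.size(), CV_8UC1);
    std::vector<cv::Point> candidates(1, seed);
    double mean = img.at<uchar>(seed);
    int size = 0;
    const int dx[4] = {-1, 1, 0, 0};
    const int dy[4] = {0, 0, -1, 1};
    while (!candidates.empty())
    {
        // Pick the candidate whose intensity is closest to the region mean.
        size_t best = 0;
        double bestDist = 1e9;
        for (size_t i = 0; i < candidates.size(); i++)
        {
            double d = std::abs((double)img.at<uchar>(candidates[i]) - mean);
            if (d < bestDist) { bestDist = d; best = i; }
        }
        if (bestDist > maxDist) break; // nothing similar enough left: stop growing
        cv::Point p = candidates[best];
        candidates.erase(candidates.begin() + best);
        if (region.at<uchar>(p)) continue; // may have been queued twice
        region.at<uchar>(p) = 255;
        mean = (mean * size + img.at<uchar>(p)) / (size + 1);
        size++;
        // Queue the unvisited 4-neighbours that lie inside the image (0-based bounds).
        for (int r = 0; r < 4; r++)
        {
            cv::Point q(p.x + dx[r], p.y + dy[r]);
            if (q.x >= 0 && q.y >= 0 && q.x < img.cols && q.y < img.rows &&
                region.at<uchar>(q) == 0)
                candidates.push_back(q);
        }
    }
    return region;
}
It would be called as, e.g., cv::Mat J1 = regionGrow(img, cv::Point(15, 15), 20);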
I'm doing some debugging and so I'm dumping image files to look at the predictions and transformations.
I can create a caffe::Datum from a cv::Mat:
cv::Mat matrix;
// ... initialize matrix
caffe::Datum datum;
caffe::CVMatToDatum(matrix, &datum);
but how do I create a cv::Mat from a caffe::Datum? The following code gives the fatal exception "Datum not encoded":
caffe::Datum datum;
// ... initialize datum
cv::Mat matrix;
matrix = DecodeDatumToCVMat(datum, true);
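For context: DecodeDatumToCVMat only handles datums that hold JPEG/PNG-encoded bytes (datum.encoded() == true), so a raw datum produced by CVMatToDatum trips this check. A sketch of guarding on that flag, where DatumToCVMat is the manual conversion shown in the answer below:
cv::Mat matrix;
if (datum.encoded()) {
    // compressed image bytes: let Caffe decode them
    matrix = DecodeDatumToCVMat(datum, true);
} else {
    // raw pixel data: convert by hand
    matrix = DatumToCVMat(datum);
}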
You can use the following function:
cv::Mat DatumToCVMat(const Datum& datum) {
    int datum_channels = datum.channels();
    int datum_height = datum.height();
    int datum_width = datum.width();
    string strData = datum.data();
    cv::Mat cv_img;
    if (strData.size() != 0)
    {
        cv_img.create(datum_height, datum_width, CV_8UC(datum_channels));
        const string& data = datum.data();
        std::vector<char> vec_data(data.c_str(), data.c_str() + data.size());
        for (int h = 0; h < datum_height; ++h) {
            uchar* ptr = cv_img.ptr<uchar>(h);
            int img_index = 0;
            for (int w = 0; w < datum_width; ++w) {
                for (int c = 0; c < datum_channels; ++c) {
                    // Datum stores planar CHW data; interleave it back to HWC.
                    int datum_index = (c * datum_height + h) * datum_width + w;
                    ptr[img_index++] = static_cast<uchar>(vec_data[datum_index]);
                }
            }
        }
    }
    else
    {
        cv_img.create(datum_height, datum_width, CV_32FC(datum_channels));
        for (int h = 0; h < datum_height; ++h) {
            float* ptr = cv_img.ptr<float>(h);
            int img_index = 0;
            for (int w = 0; w < datum_width; ++w) {
                for (int c = 0; c < datum_channels; ++c) {
                    ptr[img_index++] = static_cast<float>(datum.float_data(img_index));
                }
            }
        }
    }
    return cv_img;
}
The full code is here:
http://lab.deepaivision.com/2017/06/opencv-mat-caffe-datum-datum-mat.html
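Usage is straightforward; assuming a datum filled from an LMDB record or from CVMatToDatum:
caffe::Datum datum;
// ... fill datum ...
cv::Mat img = DatumToCVMat(datum);
cv::imwrite("dump.png", img); // dump for visual inspection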
As an addition to the answer by @ttagu99, in the float section there is a slight bug.
The line:
ptr[img_index++] = static_cast<float>(datum.float_data(img_index));
should in fact be:
int datum_index = (c * datum_height + h) * datum_width + w;
ptr[img_index++] = static_cast<float>(datum.float_data(datum_index));
This fixes the order of writing the data to the matrix. Otherwise, you only see lines of color.
According to this:
http://answers.opencv.org/question/15917/how-to-access-data-from-a-cvmat/
I have tried to create a 3D matrix:
void AutomaticMacbethDetection::DrawMacbethROI(ColorCheckerBatchRGB ColorCheckerBatchRGB, int*** raw_frame, int _width, int _height, int colorOrder)
{
    cv::Mat src;
    if (colorOrder == -1)
    {
        const int sizes[3] = {_height, _width, 3};
        src = cv::Mat::zeros(3, sizes, CV_32F);
    }
    else
    {
        const int sizes[3] = {_height, _width, 1};
        src = cv::Mat::zeros(3, sizes, CV_32F);
    }
    std::vector<float> channel;
    if (colorOrder == -1)
    {
        for (int w = 0; w < _width; w++)
        {
            for (int h = 0; h < _height; h++)
            {
                float temp = raw_frame[h][w][0];
                channel.push_back(temp);
                src.at<float>(h,w,0) = temp;
                src.at<float>(h,w,1) = raw_frame[h][w][1];
                src.at<float>(h,w,2) = raw_frame[h][w][2];
            }
        }
    }
    else
    {
        for (int w = 0; w < _width; w++)
        {
            for (int h = 0; h < _height; h++)
            {
                float temp = raw_frame[h][w][0];
                channel.push_back(temp);
                src.at<float>(h,w,0) = temp;
            }
        }
        float divider = Utilities::tprctile(channel, 99.2);
        src = src / divider;
    }
    cv::imshow("test", src);
    cv::waitKey(0);
}
My function supports both RGB and raw images, so I need to create either a 100x100x1 or a 100x100x3 matrix depending on the image type.
However, I get an exception in imshow():
OpenCV Error: Assertion failed (p[-1] <= 2) in cv::Mat::MSize::operator (), file C:\buildslave64\win64_amdocl\2_4_PackSlave-win64-vc11-shared\opencv\modules\core\include\opencv2/core/mat.hpp, line 712
Can you please explain what the problem is?
It looks to me like you are trying to create a 3-dimensional zero Mat in both cases:
const int sizes[3]={_height,_width,1};
src = cv::Mat::zeros(3, sizes, CV_32F);
C++: static MatExpr Mat::zeros(int ndims, const int* sz, int type) — the first argument is the number of dimensions, and here it is 3 in both cases, so even the "single-channel" branch builds a 3-dimensional Mat, which imshow() cannot display (hence the p[-1] <= 2 assertion).
The solution I have found is to use CV_32FC3, which means each cell (x, y) of the matrix holds 3 values.
This is how you initialize the matrix:
src = cv::Mat::zeros(_height,_width, CV_32FC3);
and now each cell has 3 channels, which you access like so:
src.at<cv::Vec3f>(h,w)[0]
src.at<cv::Vec3f>(h,w)[1]
src.at<cv::Vec3f>(h,w)[2]
Note that I'm accessing with cv::Vec3f here, not float as with CV_32F:
void AutomaticMacbethDetection::DrawMacbethROI(ColorCheckerBatchRGB ColorCheckerBatchRGB, int*** raw_frame, int _width, int _height, int colorOrder)
{
    cv::Mat src;
    std::vector<float> channel;
    if (colorOrder != -1)
    {
        src = cv::Mat::zeros(_height, _width, CV_32F);
        for (int w = 0; w < _width; w++)
        {
            for (int h = 0; h < _height; h++)
            {
                float temp = raw_frame[h][w][0];
                channel.push_back(temp);
                src.at<float>(h,w) = temp;
            }
        }
        float divider = Utilities::tprctile(channel, 99.2);
        src = src / divider;
    }
    else
    {
        src = cv::Mat::zeros(_height, _width, CV_32FC3);
        for (int w = 0; w < _width; w++)
        {
            for (int h = 0; h < _height; h++)
            {
                float temp = raw_frame[h][w][0];
                channel.push_back(temp);
                src.at<cv::Vec3f>(h,w)[0] = raw_frame[h][w][0];
                src.at<cv::Vec3f>(h,w)[1] = raw_frame[h][w][1];
                src.at<cv::Vec3f>(h,w)[2] = raw_frame[h][w][2];
            }
        }
        float divider = Utilities::tprctile(channel, 99.2);
        src = src / divider;
    }
    cv::resize(src, src, cv::Size(), 0.3, 0.3, cv::INTER_LINEAR);
    cv::imshow("detected", src);
    cv::waitKey(0);
}