I am trying to sort the four points returned by minAreaRect into a fixed order (top-left, top-right, bottom-left, bottom-right). Here's my code so far:
void sortPoints(Point2f* unsorted) {
Point2f sorted[4];
for (int i = 0; i < 4; i++) sorted[i] = Point(0, 0);
float middleX = (unsorted[0].x + unsorted[1].x + unsorted[2].x + unsorted[3].x) / 4;
float middleY = (unsorted[0].y + unsorted[1].y + unsorted[2].y + unsorted[3].y) / 4;
for (int i = 0; i < 4; i++) {
if (unsorted[i].x < middleX && unsorted[i].y < middleY) sorted[0] = unsorted[i];
if (unsorted[i].x > middleX && unsorted[i].y < middleY) sorted[1] = unsorted[i];
if (unsorted[i].x < middleX && unsorted[i].y > middleY) sorted[2] = unsorted[i];
if (unsorted[i].x > middleX && unsorted[i].y > middleY) sorted[3] = unsorted[i];
}
unsorted = sorted; /* assigns the local pointer, not the caller's array */
}
...
vector<RotatedRect> minRect( contours.size() );
for( int i = 0; i < contours.size(); i++ ) {
minRect[i] = minAreaRect( Mat(contours[i]) );
}
Point2f rect_points[4];
for( int i = 0; i < contours.size(); i++ ) {
minRect[i].points( rect_points );
sortPoints( rect_points ); /* ...they are not sorted after calling sortPoints?!? */
}
But it's not working: there is no compile error, but the points are not sorted. I think there's something wrong with the data types.
The algorithm you provided only works if the 4 points belong to a rectangle whose sides are parallel to the x and y axes. Also, the way you try to return the result will not work: unsorted = sorted; only reassigns the local pointer parameter. Copy the sorted array back into unsorted instead, like this: for ( int i = 0; i < 4; ++i ) unsorted[i] = sorted[i];
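For reference, here is a minimal sketch of the corrected function with the copy-back applied (still limited to axis-parallel rectangles, as noted above):

void sortPoints(Point2f* unsorted) {
    Point2f sorted[4];
    float middleX = (unsorted[0].x + unsorted[1].x + unsorted[2].x + unsorted[3].x) / 4;
    float middleY = (unsorted[0].y + unsorted[1].y + unsorted[2].y + unsorted[3].y) / 4;
    for (int i = 0; i < 4; i++) {
        if (unsorted[i].x < middleX && unsorted[i].y < middleY) sorted[0] = unsorted[i];
        if (unsorted[i].x > middleX && unsorted[i].y < middleY) sorted[1] = unsorted[i];
        if (unsorted[i].x < middleX && unsorted[i].y > middleY) sorted[2] = unsorted[i];
        if (unsorted[i].x > middleX && unsorted[i].y > middleY) sorted[3] = unsorted[i];
    }
    // copy the result back through the caller's pointer
    for (int i = 0; i < 4; i++) unsorted[i] = sorted[i];
}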
There is also a standard way to do the sorting with std::sort and a custom comparator:
#include <algorithm>
#include <opencv2/opencv.hpp>
using namespace cv;

struct str {
    bool operator()(Point2f a, Point2f b) {
        if (a.y != b.y)
            return a.y < b.y;
        return a.x < b.x; // strict '<': std::sort requires a strict weak ordering
    }
} comp;

int main()
{
    Point2f v[4];
    v[0] = Point2f(0, 1);
    v[1] = Point2f(-1, 1);
    v[2] = Point2f(2, 1);
    v[3] = Point2f(4, 1);
    std::sort(v, v + 4, comp);
}
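If you also need a consistent corner order for rotated rectangles, a common alternative (my own suggestion, not something OpenCV provides directly) is to sort the four points by their angle around the centroid. A sketch, assuming the points form a convex quadrilateral:

#include <algorithm>
#include <cmath>
#include <opencv2/opencv.hpp>
using namespace cv;

// Sort 4 corner points counter-clockwise around their centroid.
void sortByAngle(Point2f* pts) {
    Point2f center(0, 0);
    for (int i = 0; i < 4; i++) center += pts[i];
    center *= 0.25f;
    std::sort(pts, pts + 4, [center](const Point2f& a, const Point2f& b) {
        return std::atan2(a.y - center.y, a.x - center.x) <
               std::atan2(b.y - center.y, b.x - center.x);
    });
}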
I'm trying to use the code proposed here http://ivrlwww.epfl.ch/supplementary_material/RK_CVPR09/ for saliency detection on colored images. The proposed code comes with a GUI developed for Windows. In my case, I want to use it on Mac OS X with the OpenCV library for reading the initial image and writing the saliency map result. I therefore picked up the four main functions and modified the reading and writing blocks using OpenCV. I got the following results, which are a bit different from what the authors obtained:
[Images: the original image, the authors' saliency map, and the saliency map I obtained]
Here are the four functions. Is there something I did wrong? I was careful to account for the fact that in OpenCV, colors are stored as B-G-R rather than R-G-B.
#include <stdio.h>
#include <cfloat>   // DBL_MAX, used in Normalize
#include <cmath>    // pow
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
// forward declaration: Normalize is defined below but called from GetSaliencyMap
void Normalize(const vector<double>& input, const int& width, const int& height,
vector<double>& output, const int& normrange = 255);
void RGB2LAB2(
const vector<vector<uint> > &ubuff,
vector<double>& lvec,
vector<double>& avec,
vector<double>& bvec){
int sz = int(ubuff.size());
cout<<"sz "<<sz<<endl;
lvec.resize(sz);
avec.resize(sz);
bvec.resize(sz);
for( int j = 0; j < sz; j++ ){
int sR = ubuff[j][2];
int sG = ubuff[j][1];
int sB = ubuff[j][0];
//------------------------
// sRGB to XYZ conversion
// (D65 illuminant assumption)
//------------------------
double R = sR/255.0;
double G = sG/255.0;
double B = sB/255.0;
double r, g, b;
if(R <= 0.04045) r = R/12.92;
else r = pow((R+0.055)/1.055,2.4);
if(G <= 0.04045) g = G/12.92;
else g = pow((G+0.055)/1.055,2.4);
if(B <= 0.04045) b = B/12.92;
else b = pow((B+0.055)/1.055,2.4);
double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
//------------------------
// XYZ to LAB conversion
//------------------------
double epsilon = 0.008856; //actual CIE standard
double kappa = 903.3; //actual CIE standard
double Xr = 0.950456; //reference white
double Yr = 1.0; //reference white
double Zr = 1.088754; //reference white
double xr = X/Xr;
double yr = Y/Yr;
double zr = Z/Zr;
double fx, fy, fz;
if(xr > epsilon) fx = pow(xr, 1.0/3.0);
else fx = (kappa*xr + 16.0)/116.0;
if(yr > epsilon) fy = pow(yr, 1.0/3.0);
else fy = (kappa*yr + 16.0)/116.0;
if(zr > epsilon) fz = pow(zr, 1.0/3.0);
else fz = (kappa*zr + 16.0)/116.0;
lvec[j] = 116.0*fy-16.0;
avec[j] = 500.0*(fx-fy);
bvec[j] = 200.0*(fy-fz);
}
}
void GaussianSmooth(
const vector<double>& inputImg,
const int& width,
const int& height,
const vector<double>& kernel,
vector<double>& smoothImg){
int center = int(kernel.size())/2;
int sz = width*height;
smoothImg.clear();
smoothImg.resize(sz);
vector<double> tempim(sz);
int rows = height;
int cols = width;
int index(0);
// first pass: horizontal convolution
for( int r = 0; r < rows; r++ ){
for( int c = 0; c < cols; c++ ){
double kernelsum(0);
double sum(0);
for( int cc = (-center); cc <= center; cc++ ){
if(((c+cc) >= 0) && ((c+cc) < cols)){
sum += inputImg[r*cols+(c+cc)] * kernel[center+cc];
kernelsum += kernel[center+cc];
}
}
tempim[index] = sum/kernelsum;
index++;
}
}
// second pass: vertical convolution of the horizontally smoothed image
index = 0;
for( int r = 0; r < rows; r++ ){
for( int c = 0; c < cols; c++ ){
double kernelsum(0);
double sum(0);
for( int rr = (-center); rr <= center; rr++ ){
if(((r+rr) >= 0) && ((r+rr) < rows)){
sum += tempim[(r+rr)*cols+c] * kernel[center+rr];
kernelsum += kernel[center+rr];
}
}
smoothImg[index] = sum/kernelsum;
index++;
}
}
}
void GetSaliencyMap(
const vector<vector<uint> >&inputimg,
const int& width,
const int& height,
vector<double>& salmap,
const bool& normflag){
int sz = width*height;
salmap.clear();
salmap.resize(sz);
vector<double> lvec(0), avec(0), bvec(0);
RGB2LAB2(inputimg, lvec, avec, bvec);
double avgl(0), avga(0), avgb(0);
for( int i = 0; i < sz; i++ ){
avgl += lvec[i];
avga += avec[i];
avgb += bvec[i];
}
avgl /= sz;
avga /= sz;
avgb /= sz;
vector<double> slvec(0), savec(0), sbvec(0);
vector<double> kernel(0);
kernel.push_back(1.0);
kernel.push_back(2.0);
kernel.push_back(1.0);
GaussianSmooth(lvec, width, height, kernel, slvec);
GaussianSmooth(avec, width, height, kernel, savec);
GaussianSmooth(bvec, width, height, kernel, sbvec);
for( int i = 0; i < sz; i++ ){
salmap[i] = (slvec[i]-avgl)*(slvec[i]-avgl) +
(savec[i]-avga)*(savec[i]-avga) +
(sbvec[i]-avgb)*(sbvec[i]-avgb);
}
if( true == normflag ){
vector<double> normalized(0);
Normalize(salmap, width, height, normalized);
swap(salmap, normalized);
}
}
void Normalize(
const vector<double>& input,
const int& width,
const int& height,
vector<double>& output,
const int& normrange){ // default value (255) is supplied in the declaration above
double maxval(0);
double minval(DBL_MAX);
int i(0);
for( int y = 0; y < height; y++ ){
for( int x = 0; x < width; x++ ){
if( maxval < input[i] ) maxval = input[i];
if( minval > input[i] ) minval = input[i];
i++;
}
}
double range = maxval-minval;
if( 0 == range ) range = 1;
i = 0; // reset the running index for the output pass
output.clear();
output.resize(width*height);
for( int y = 0; y < height; y++ ){
for( int x = 0; x < width; x++ ){
output[i] = ((normrange*(input[i]-minval))/range);
i++;
}
}
}
int main(int argc, char** argv){
if( argc < 2 ){
printf("usage: %s image\n", argv[0]);
return -1;
}
Mat image;
image = imread( argv[1], 1 );
if ( !image.data ){
printf("No image data \n");
return -1;
}
std::vector<vector<uint> > array(image.cols*image.rows, vector<uint>(3,0));
for(int y=0;y<image.rows;y++){
for(int x=0;x<image.cols;x++){
Vec3b color= image.at<Vec3b>(Point(x,y));
array[image.cols*y+x][0]=color[0];
array[image.cols*y+x][1]=color[1];
array[image.cols*y+x][2]=color[2];
}
}
vector<double> salmap; bool normflag=true;
GetSaliencyMap(array, image.size().width, image.size().height, salmap, normflag);
Mat output;
output = Mat( image.rows, image.cols,CV_8UC1);
int k=0;
for(int y=0;y<image.rows;y++){
for(int x=0;x<image.cols;x++){
output.at<uchar>(Point(x,y)) = int(salmap[k]);
k++;
}
}
imwrite("test_saliency_blackAndWhite.jpg", output );
return 0;
}
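Not part of the original code, but one way to narrow the problem down would be to compare the hand-ported RGB2LAB2 against OpenCV's built-in Lab conversion: on CV_32F input scaled to [0,1], cvtColor produces L in [0,100] and a/b roughly in [-127,127], the same ranges as the code above. A sketch of such a check, which could go inside main:

Mat f32, lab;
image.convertTo(f32, CV_32FC3, 1.0/255.0); // cvtColor expects [0,1] for float input
cvtColor(f32, lab, COLOR_BGR2Lab);
Vec3f p = lab.at<Vec3f>(0, 0);             // compare against lvec[0], avec[0], bvec[0]
cout << "OpenCV Lab at (0,0): " << p << endl;

Any large disagreement would point at the RGB2LAB2 port rather than the saliency logic.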
Here is my code for creating the Hough accumulator for lines in an image:
void hough_lines_acc(cv::Mat img_a_edges, std::vector<std::vector<int> > &hough_acc) {
for (size_t r = 0; r < img_a_edges.rows; r++) {
for (size_t c = 0; c < img_a_edges.cols; c++) {
int theta = static_cast<int> (std::atan2(r, c) * 180 / M_PI);
int rho = static_cast<int> ((c * cos(theta)) + (r * sin(theta)));
if (theta < -90) theta = -90;
if (theta > 89) theta = 89;
++hough_acc[abs(rho)][theta];
}
}
cv::Mat img_mat(hough_acc.size(), hough_acc[0].size(), CV_8U);
std::cout << hough_acc.size() << " " << hough_acc[0].size() << std::endl;
for (size_t i = 0; i < hough_acc.size(); i++) {
for (size_t j = 0; j < hough_acc[0].size(); j++) {
img_mat.at<uchar> (i,j) = static_cast<uchar>(hough_acc[i][j]); // CV_8U elements are uchar
}
}
imwrite("../output/ps1-2-b-1.png", img_mat);
}
theta varies from -90 to 89. I am getting negative rho values. Right now I am just replacing the negative rho with a positive one, but I am not getting a correct answer. What do I do with the negative rho? Please explain.
theta = arctan (y / x)
rho = x * cos(theta) + y * sin(theta)
Edited code:
bool hough_lines_acc(cv::Mat img_a_edges, std::vector<std::vector<int> > &hough_acc,\
std::vector<double> thetas, std::vector<double> rhos, int rho_resolution, int theta_resolution) {
int img_w = img_a_edges.cols;
int img_h = img_a_edges.rows;
int max_votes = 0;
int min_votes = INT_MAX;
for (size_t r = 0; r < img_h; r++) {
for (size_t c = 0; c < img_w; c++) {
if(img_a_edges.at<int>(r, c) == 255) { // note: for a CV_8U edge image this should be at<uchar>
for (size_t i = 0; i < thetas.size(); i++) {
thetas[i] = (thetas[i] * M_PI / 180); // note: converts thetas[i] in place on every edge pixel, so the conversion compounds
double rho = ( (c * cos(thetas[i])) + (r * sin(thetas[i])) );
int buff = ++hough_acc[static_cast<int>(abs(rho))][static_cast<int>(i)];
if (buff > max_votes) {
max_votes = buff;
}
if (buff < min_votes) {
min_votes = buff;
}
}
}
}
}
double div = static_cast<double>(max_votes) / 255;
int threshold = 10;
int possible_edge = round(static_cast<double>(max_votes) / div) - threshold;
props({ // (custom debug-print helper)
{"max votes", max_votes},
{"min votes", min_votes},
{"scale", div}
});
// needed for scaling intensity for contrast
// not sure if I am doing it correctly
for (size_t r = 0; r < hough_acc.size(); r++) {
for (size_t c = 0; c < hough_acc[0].size(); c++) {
double val = hough_acc[r][c] / div;
if (val < 0) {
val = 0;
}
hough_acc[r][c] = static_cast<int>(val);
}
}
cv::Mat img_mat = cv::Mat(hough_acc.size(), hough_acc[0].size(), CV_8UC1, cv::Scalar(0));
for (size_t i = 0; i < hough_acc.size(); i++) {
for (size_t j = 0; j < hough_acc[0].size(); j++) {
img_mat.at<uint8_t> (i,j) = static_cast<uint8_t>(hough_acc[i][j]);
}
}
imwrite("../output/ps1-2-b-1.png", img_mat);
return true;
}
The output is still not correct. What is the error here?
atan2 of two positive numbers should not be giving you negative angles; it should only give you values in the range 0-90 degrees.
Also, for the Hough transform, I think you want everything relative to one point (i.e. (0,0) in this case). For that, I think you would actually want theta = 90 - atan2(r, c).
Admittedly, though, I am a bit confused, as I thought you had to encode line direction rather than just the edge point: at each edge point you provide a discrete array of guessed line orientations, calculate rho for each one, and add all of those votes to your accumulator. As written, I am not sure what you are calculating.
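To make that last point concrete, here is a minimal sketch of the usual accumulation scheme (my own illustration, assuming a CV_8U edge image, not the asker's exact code). Negative rho is handled by offsetting the row index by the largest possible rho (the image diagonal), so the accumulator covers [-rho_max, rho_max]:

#include <cmath>
#include <vector>
#include <opencv2/opencv.hpp>

// Sketch of standard Hough line accumulation.
// Row 0 of the accumulator corresponds to rho = -rho_max.
void hough_acc_sketch(const cv::Mat& edges, std::vector<std::vector<int> >& acc) {
    int rho_max = static_cast<int>(std::ceil(
        std::sqrt(double(edges.rows)*edges.rows + double(edges.cols)*edges.cols)));
    acc.assign(2*rho_max + 1, std::vector<int>(180, 0));
    for (int r = 0; r < edges.rows; r++) {
        for (int c = 0; c < edges.cols; c++) {
            if (edges.at<uchar>(r, c) != 255) continue;   // vote only at edge pixels
            for (int t = 0; t < 180; t++) {               // theta = -90..89 degrees
                double theta = (t - 90) * CV_PI / 180.0;
                double rho = c*std::cos(theta) + r*std::sin(theta);
                ++acc[static_cast<int>(std::round(rho)) + rho_max][t];
            }
        }
    }
}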
Is there any API to implement my own RANSAC model? I.e., does OpenCV have a generic RANSAC engine that I can inherit from, or where I can encode my own observation model?
If there is not, what would be the easiest way to re-use some of the RANSAC code in OpenCV?
Not as far as I know, but there's an implementation of RANSAC in the code for estimateRigidTransform here:
https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp
// RANSAC stuff:
// 1. find the consensus
for( k = 0; k < RANSAC_MAX_ITERS; k++ )
{
int idx[RANSAC_SIZE0];
Point2f a[RANSAC_SIZE0];
Point2f b[RANSAC_SIZE0];
// choose 3 random non-collinear points from A & B
for( i = 0; i < RANSAC_SIZE0; i++ )
{
for( k1 = 0; k1 < RANSAC_MAX_ITERS; k1++ )
{
idx[i] = rng.uniform(0, count);
for( j = 0; j < i; j++ )
{
if( idx[j] == idx[i] )
break;
// check that the points are not very close one each other
if( fabs(pA[idx[i]].x - pA[idx[j]].x) +
fabs(pA[idx[i]].y - pA[idx[j]].y) < FLT_EPSILON )
break;
if( fabs(pB[idx[i]].x - pB[idx[j]].x) +
fabs(pB[idx[i]].y - pB[idx[j]].y) < FLT_EPSILON )
break;
}
if( j < i )
continue;
if( i+1 == RANSAC_SIZE0 )
{
// additional check for non-collinear vectors
a[0] = pA[idx[0]];
a[1] = pA[idx[1]];
a[2] = pA[idx[2]];
b[0] = pB[idx[0]];
b[1] = pB[idx[1]];
b[2] = pB[idx[2]];
double dax1 = a[1].x - a[0].x, day1 = a[1].y - a[0].y;
double dax2 = a[2].x - a[0].x, day2 = a[2].y - a[0].y;
double dbx1 = b[1].x - b[0].x, dby1 = b[1].y - b[0].y;
double dbx2 = b[2].x - b[0].x, dby2 = b[2].y - b[0].y;
const double eps = 0.01;
if( fabs(dax1*day2 - day1*dax2) < eps*std::sqrt(dax1*dax1+day1*day1)*std::sqrt(dax2*dax2+day2*day2) ||
fabs(dbx1*dby2 - dby1*dbx2) < eps*std::sqrt(dbx1*dbx1+dby1*dby1)*std::sqrt(dbx2*dbx2+dby2*dby2) )
continue;
}
break;
}
if( k1 >= RANSAC_MAX_ITERS )
break;
}
if( i < RANSAC_SIZE0 )
continue;
// estimate the transformation using 3 points
getRTMatrix( a, b, 3, M, fullAffine );
const double* m = M.ptr<double>();
for( i = 0, good_count = 0; i < count; i++ )
{
if( std::abs( m[0]*pA[i].x + m[1]*pA[i].y + m[2] - pB[i].x ) +
std::abs( m[3]*pA[i].x + m[4]*pA[i].y + m[5] - pB[i].y ) < std::max(brect.width,brect.height)*0.05 )
good_idx[good_count++] = i;
}
if( good_count >= count*RANSAC_GOOD_RATIO )
break;
}
if( k >= RANSAC_MAX_ITERS )
return Mat();
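If you end up writing your own, the loop above is small enough to generalize. Here is a minimal generic skeleton, a sketch rather than an existing OpenCV API: the fit callback and the model's error method are placeholder names you would supply for your own observation model.

#include <cstdlib>
#include <vector>

// Generic RANSAC skeleton (placeholder interface, not OpenCV API).
// ModelT must be default-constructible and provide:
//   double error(const SampleT&) const   -- per-sample residual
// fit(subset) must return a ModelT estimated from a minimal sample.
template <typename SampleT, typename ModelT, typename FitFn>
ModelT ransac(const std::vector<SampleT>& data, FitFn fit,
              int minSamples, double inlierThresh, int maxIters,
              std::vector<int>& bestInliers) {
    ModelT bestModel;
    for (int it = 0; it < maxIters; ++it) {
        // 1. draw a random minimal subset (a real implementation would
        //    reject duplicate/degenerate picks, as the excerpt above does)
        std::vector<SampleT> subset;
        for (int i = 0; i < minSamples; ++i)
            subset.push_back(data[std::rand() % data.size()]);
        // 2. fit a candidate model to the subset
        ModelT model = fit(subset);
        // 3. collect the consensus set
        std::vector<int> inliers;
        for (size_t i = 0; i < data.size(); ++i)
            if (model.error(data[i]) < inlierThresh)
                inliers.push_back(static_cast<int>(i));
        // 4. keep the largest consensus so far
        if (inliers.size() > bestInliers.size()) {
            bestInliers = inliers;
            bestModel = model;
        }
    }
    return bestModel;
}

Since ModelT is not deducible from the arguments, a call would name it explicitly, e.g. ransac<cv::Point2f, LineModel>(points, fitLine2pts, 2, 1.0, 500, inliers), where LineModel and fitLine2pts are hypothetical types you would define.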
I'm attempting to convert a MATLAB .mat file to an OpenCV Mat and then apply several masks to those files. I am building from the cvmatio source code. I am receiving the following error:
OpenCV Error: Assertion failed (A.size == arrays[i0]->size) in init,
file /home/derek/Documents/Libraries/opencv-3.0.0-beta/modules/core/src/matrix.cpp, line 4279
terminate called after throwing an instance of 'cv::Exception'
what(): /home/derek/Documents/Libraries/opencv-3.0.0-beta/modules/core/src/matrix.cpp:4279:
error: (-215) A.size == arrays[i0]->size in function init
Here is the source file I've written. The error occurs at the line with mixChannels. Note that SrcImage is a 3-channel Mat; lower and upper are arrays of threshold values whose length equals the number of channels.
/*
* Mask.cpp
*
* Created on: Mar 16, 2015
* Author: derek
*/
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
Mat Mask(Mat SrcImage, double lower[], double upper[]){
int height=SrcImage.rows;
int width=SrcImage.cols;
int depth=SrcImage.depth();
Mat B2d = Mat::ones(height, width,depth);
Mat out(height, width, depth);
Mat outL(height, width, depth);
Mat outU(height,width, depth);
for (int i=1; i< SrcImage.channels(); i=i+1){
int from_to[]={i,1};
mixChannels(&SrcImage, 3, &out, 1, from_to, 1 ); // <-- the assertion fires here
threshold(out, outL, lower[i], 1, THRESH_BINARY);
threshold(out, outU, upper[i], 1, THRESH_BINARY);
bitwise_and(B2d, outL, B2d);
bitwise_and(B2d, outU, B2d);
}
return B2d;
}
Also, here is an excerpt of the actual CV_Assertion error location. As indicated in the error, it occurs at "(A.size == arrays[i0]->size)".
void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
{
CV_Assert( _arrays && (_ptrs || _planes) );
int i, j, d1=0, i0 = -1, d = -1;
arrays = _arrays;
ptrs = _ptrs;
planes = _planes;
narrays = _narrays;
nplanes = 0;
size = 0;
if( narrays < 0 )
{
for( i = 0; _arrays[i] != 0; i++ )
;
narrays = i;
CV_Assert(narrays <= 1000);
}
iterdepth = 0;
for( i = 0; i < narrays; i++ )
{
CV_Assert(arrays[i] != 0);
const Mat& A = *arrays[i];
if( ptrs )
ptrs[i] = A.data;
if( !A.data )
continue;
if( i0 < 0 )
{
i0 = i;
d = A.dims;
// find the first dimensionality which is different from 1;
// in any of the arrays the first "d1" step do not affect the continuity
for( d1 = 0; d1 < d; d1++ )
if( A.size[d1] > 1 )
break;
}
else
CV_Assert( A.size == arrays[i0]->size );
if( !A.isContinuous() )
{
CV_Assert( A.step[d-1] == A.elemSize() );
for( j = d-1; j > d1; j-- )
if( A.step[j]*A.size[j] < A.step[j-1] )
break;
iterdepth = std::max(iterdepth, j);
}
}
if( i0 >= 0 )
{
size = arrays[i0]->size[d-1];
for( j = d-1; j > iterdepth; j-- )
{
int64 total1 = (int64)size*arrays[i0]->size[j-1];
if( total1 != (int)total1 )
break;
size = (int)total1;
}
iterdepth = j;
if( iterdepth == d1 )
iterdepth = 0;
nplanes = 1;
for( j = iterdepth-1; j >= 0; j-- )
nplanes *= arrays[i0]->size[j];
}
else
iterdepth = 0;
idx = 0;
if( !planes )
return;
for( i = 0; i < narrays; i++ )
{
CV_Assert(arrays[i] != 0);
const Mat& A = *arrays[i];
if( !A.data )
{
planes[i] = Mat();
continue;
}
planes[i] = Mat(1, (int)size, A.type(), A.data);
}
}
Well, it's obviously too late for an answer, but here is the reason why your code failed:
Since your SrcImage is a single Mat with multiple channels, you should've written:
mixChannels(&SrcImage, 1, &out, 1, from_to, 1 );
(The assertion error was related to this: with nsrcs = 3, mixChannels expected an array of 3 Mats, i.e. three times more data than your single Mat provides.)
Also, OpenCV's mixChannels labels channels from 0; I'm not sure if starting the loop at i = 1 was intended or just a typo.
Cheers!
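Putting the fixes together, the loop might look like this (a sketch; note the destination channel must be 0 as well, since out is single-channel):

for (int i = 0; i < SrcImage.channels(); i++) {
    int from_to[] = { i, 0 };                   // copy source channel i into out's channel 0
    mixChannels(&SrcImage, 1, &out, 1, from_to, 1);
    threshold(out, outL, lower[i], 1, THRESH_BINARY);
    threshold(out, outU, upper[i], 1, THRESH_BINARY);
    bitwise_and(B2d, outL, B2d);
    bitwise_and(B2d, outU, B2d);
}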
I'm new at computer vision, but I need to make a little function in C++ that will detect a white paper sheet even if something is printed on it, and then retrieve the coordinates of its 4 corners, which is what I really need, so I can use those coordinates to cut another JPG file and use the cropped image as an OpenGL texture.
I don't know how to detect the paper.
I tried to search about computer vision and found that I have to threshold the image, do the labelling, and then use edge detection or Harris corner detection, but I didn't find any tutorial.
Can anyone help me with this, or show me some tutorial that can help?
I just found this:
int arDetectMarker( ARUint8 *dataPtr, int thresh,
ARMarkerInfo **marker_info, int *marker_num )
{
ARInt16 *limage;
int label_num;
int *area, *clip, *label_ref;
double *pos;
double rarea, rlen, rlenmin;
double diff, diffmin;
int cid, cdir;
int i, j, k;
*marker_num = 0;
limage = arLabeling( dataPtr, thresh,
&label_num, &area, &pos, &clip, &label_ref );
if( limage == 0 ) return -1;
marker_info2 = arDetectMarker2( limage, label_num, label_ref,
area, pos, clip, AR_AREA_MAX, AR_AREA_MIN,
1.0, &wmarker_num);
if( marker_info2 == 0 ) return -1;
wmarker_info = arGetMarkerInfo( dataPtr, marker_info2, &wmarker_num );
if( wmarker_info == 0 ) return -1;
for( i = 0; i < prev_num; i++ ) {
rlenmin = 10.0;
cid = -1;
for( j = 0; j < wmarker_num; j++ ) {
rarea = (double)prev_info[i].marker.area / (double)wmarker_info[j].area;
if( rarea < 0.7 || rarea > 1.43 ) continue;
rlen = ( (wmarker_info[j].pos[0] - prev_info[i].marker.pos[0])
* (wmarker_info[j].pos[0] - prev_info[i].marker.pos[0])
+ (wmarker_info[j].pos[1] - prev_info[i].marker.pos[1])
* (wmarker_info[j].pos[1] - prev_info[i].marker.pos[1]) ) / wmarker_info[j].area;
if( rlen < 0.5 && rlen < rlenmin ) {
rlenmin = rlen;
cid = j;
}
}
if( cid >= 0 && wmarker_info[cid].cf < prev_info[i].marker.cf ) {
wmarker_info[cid].cf = prev_info[i].marker.cf;
wmarker_info[cid].id = prev_info[i].marker.id;
diffmin = 10000.0 * 10000.0;
cdir = -1;
for( j = 0; j < 4; j++ ) {
diff = 0;
for( k = 0; k < 4; k++ ) {
diff += (prev_info[i].marker.vertex[k][0] - wmarker_info[cid].vertex[(j+k)%4][0])
* (prev_info[i].marker.vertex[k][0] - wmarker_info[cid].vertex[(j+k)%4][0])
+ (prev_info[i].marker.vertex[k][1] - wmarker_info[cid].vertex[(j+k)%4][1])
* (prev_info[i].marker.vertex[k][1] - wmarker_info[cid].vertex[(j+k)%4][1]);
}
if( diff < diffmin ) {
diffmin = diff;
cdir = (prev_info[i].marker.dir - j + 4) % 4;
}
}
wmarker_info[cid].dir = cdir;
}
}
for( i = 0; i < wmarker_num; i++ ) {
/*
printf("cf = %g\n", wmarker_info[i].cf);
*/
if( wmarker_info[i].cf < 0.5 ) wmarker_info[i].id = -1;
}
/*------------------------------------------------------------*/
for( i = j = 0; i < prev_num; i++ ) {
prev_info[i].count++;
if( prev_info[i].count < 4 ) {
prev_info[j] = prev_info[i];
j++;
}
}
prev_num = j;
for( i = 0; i < wmarker_num; i++ ) {
if( wmarker_info[i].id < 0 ) continue;
for( j = 0; j < prev_num; j++ ) {
if( prev_info[j].marker.id == wmarker_info[i].id ) break;
}
prev_info[j].marker = wmarker_info[i];
prev_info[j].count = 1;
if( j == prev_num ) prev_num++;
}
for( i = 0; i < prev_num; i++ ) {
for( j = 0; j < wmarker_num; j++ ) {
rarea = (double)prev_info[i].marker.area / (double)wmarker_info[j].area;
if( rarea < 0.7 || rarea > 1.43 ) continue;
rlen = ( (wmarker_info[j].pos[0] - prev_info[i].marker.pos[0])
* (wmarker_info[j].pos[0] - prev_info[i].marker.pos[0])
+ (wmarker_info[j].pos[1] - prev_info[i].marker.pos[1])
* (wmarker_info[j].pos[1] - prev_info[i].marker.pos[1]) ) / wmarker_info[j].area;
if( rlen < 0.5 ) break;
}
if( j == wmarker_num ) {
wmarker_info[wmarker_num] = prev_info[i].marker;
wmarker_num++;
}
}
*marker_num = wmarker_num;
*marker_info = wmarker_info;
return 0;
}
Is this what ARToolKit uses to detect a marker?
If I create an arDetectSheet( ARUint8 *dataPtr, int thresh, ARMarkerInfo **marker_info, int *marker_num ), treat the OpenCV webcam image as the ARUint8 *dataPtr, and follow the #karlPhilip example, will it work?
I want to detect the sheet of paper so I can get the corner coordinates and use them to cut a JPG file.
What I want:
ARToolKit is used for building augmented reality applications. It can't do what you described unless the piece of paper has something printed on it.
If you are considering some other framework to do this task, I suggest you invest in OpenCV.
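If you go the OpenCV route, the usual pipeline is: threshold the image so the bright sheet separates from the background, find the external contours, and approximate the largest one with a 4-point polygon. A minimal sketch (my own illustration, not code from either framework):

#include <cmath>
#include <vector>
#include <opencv2/opencv.hpp>
using namespace cv;

// Sketch: find the 4 corners of a bright paper sheet in an image.
// Returns an empty vector if no 4-corner candidate is found.
std::vector<Point> findSheetCorners(const Mat& bgr) {
    Mat gray, bw;
    cvtColor(bgr, gray, COLOR_BGR2GRAY);
    // paper is bright: Otsu thresholding separates it from the background
    threshold(gray, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);
    std::vector<std::vector<Point> > contours;
    findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    double bestArea = 0;
    std::vector<Point> best;
    for (size_t i = 0; i < contours.size(); i++) {
        std::vector<Point> approx;
        approxPolyDP(contours[i], approx, 0.02 * arcLength(contours[i], true), true);
        double area = std::fabs(contourArea(approx));
        // keep the largest convex quadrilateral
        if (approx.size() == 4 && isContourConvex(approx) && area > bestArea) {
            bestArea = area;
            best = approx;
        }
    }
    return best;
}

The four returned points could then be fed to getPerspectiveTransform/warpPerspective to crop the region for use as a texture.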