I'm trying to write some code that takes the colors from one picture and transfers them to another. For this I'm using OpenCV for segmentation and processing of the pictures.
I created a class named Shape that contains the contour of one shape (found with OpenCV's findContours(), which produces a std::vector<std::vector<cv::Point>>), a random string that serves as an id for the object, and a cv::Vec3b for the color of the shape.
But when I iterate over the contours found by OpenCV, create an object (Shape class) for each one, and try to add it to a list, it only adds some of them and then throws a segmentation fault on the console.
Here is my code:
#include <random>
#include <string>
#include <algorithm>
#include <opencv2/opencv.hpp>
using contourType = std::vector < std::vector < cv::Point > >;
std::string randomString ( )
{
auto randchar = [ ]( ) -> char {
const char charset[ ] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const size_t max_index = ( sizeof ( charset ) - 1);
return charset[ rand ( ) % max_index ];
};
std::string str ( 14, 0 );
std::generate_n ( str.begin ( ), 14, randchar );
return str;
};
class Shape {
public:
std::string idShape;
std::vector < cv::Point > contour;
cv::Vec3b color;
Shape () {
};
void setShape ( std::vector < cv::Point > cnt, cv::Vec3b clr ) {
idShape = randomString ( );
contour = cnt;
color = clr;
};
cv::Point centroid ( bool showImage ) {
cv::Moments m = cv::moments ( contour, true );
cv::Point centroid ( m.m10 / m.m00, m.m01 / m.m00 );
return centroid;
};
};
class Frame {
public:
std::string idFrame;
cv::Size size;
int type;
std::vector < Shape > shapes;
Frame ( cv::Size imSize, int tp ) {
idFrame = randomString ( );
size = imSize;
type = tp;
};
int addShape ( Shape shape ) {
shapes.push_back ( shape );
return 0;
};
};
contourType findShapes ( cv::Mat img, bool showImage ) {
//Threshold
cv::adaptiveThreshold ( img, img, 255, cv::ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 5, 1 );
//Erotion
cv::Mat kernel = cv::getStructuringElement ( cv::MORPH_RECT, cv::Size ( 2, 2 ), cv::Point ( 0, 0 ) );
cv::erode ( img, img, kernel );
//Find contours
contourType contours;
cv::Mat contourImg = img.clone ( );
cv::findContours ( contourImg, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE );
//Show image if true 2nd. argument
if ( showImage == true ) {
cv::Mat contourImage ( img.size ( ), CV_8UC3, cv::Scalar ( 0, 0, 0 ) );
cv::Scalar colors[ 3 ];
colors[ 0 ] = cv::Scalar ( 255, 0, 0 );
colors[ 1 ] = cv::Scalar ( 0, 255, 0 );
colors[ 2 ] = cv::Scalar ( 0, 0, 255 );
for ( size_t idx = 0; idx < contours.size ( ); idx++ ) {
cv::drawContours ( contourImage, contours, idx, colors[ idx % 3 ], CV_FILLED /*thick*/ );
std::cout << idx;
};
cv::imshow ( "Contour Image", contourImage );
cv::waitKey ( 0 );
};
return contours;
};
cv::Vec3b findColor ( cv::Mat img, std::vector < cv::Point > contour, bool print ) {
//Read pixel intensity
cv::Vec3b intensity;
cv::Point coordinate = contour[ 1 ];
intensity = img.at < cv::Vec3b > ( coordinate );
//Print pixel values
if ( print == true ) {
std::cout << intensity << std::endl;
};
return intensity;
};
int main ( int argc, char** argv ) {
std::string url = argv[1];
int type = 0;
cv::Mat img;
img = cv::imread ( url, 0 );
Frame frame ( img.size ( ), type );
contourType shapes;
shapes = findShapes ( img, false );
cv::Mat imColor;
imColor = cv::imread ( url, 1 );
std::list<Shape> te;
for ( size_t i = 0; i < shapes.size ( ); i++ ) {
Shape shape;
shape.setShape ( shapes[i], findColor ( imColor, shapes[i], false ) );
te.push_back( shape ); // Here is where it fails after adding a few objects. Running out of memory? The idea is to store each Shape object inside a Frame's shapes member, but I'm testing first like this, just adding them to a list.
};
return 0;
}
It adds only 29 shapes to the list, then the error "segmentation fault (core dumped)" appears in the console, but the image contains about 67 shapes.
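One thing that may be worth checking (an assumption on my part, since it depends on the contours your image produces): findColor() reads contour[ 1 ] unconditionally, but findContours() can return very short contours, and indexing past the end of the vector is undefined behaviour that can surface as exactly this kind of segmentation fault. A minimal guarded sketch of findColor():
// Sketch: guard against contours that are too short before indexing.
// The black fallback color for degenerate contours is an assumption.
cv::Vec3b findColor ( cv::Mat img, std::vector < cv::Point > contour, bool print ) {
    if ( contour.size ( ) < 2 )
        return cv::Vec3b ( 0, 0, 0 ); // too few points to read contour[ 1 ]
    cv::Vec3b intensity = img.at < cv::Vec3b > ( contour[ 1 ] );
    if ( print )
        std::cout << intensity << std::endl;
    return intensity;
}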
I have this C++ function to create an image from multiple images:
Mat imageCollage(vector<Mat> & array_of_images, int M, int N )
{
// All images should be the same size
const Size images_size = array_of_images[0].size();
// Create a black canvas
Mat image_collage( images_size.height * M, images_size.width * N, CV_8UC3, Scalar( 0, 0, 0 ) );
for( int i = 0; i < M; ++i )
{
for( int j = 0; j < N; ++j )
{
if( ( ( i * N ) + j ) >= array_of_images.size() )
break;
Rect roi( images_size.width * j, images_size.height * i, images_size.width, images_size.height );
array_of_images[ ( i * N ) + j ].copyTo( image_collage( roi ) );
}
}
return image_collage;
}
My program is supposed to create an image collage from multiple images. It works for RGB images but not when I convert them to grayscale. I tested the functions separately and I think this is the one that causes the problem. This is the error I get:
OpenCV(4.5.0-dev) /home/csimage/Documents/opencv-repos/opencv/modules/core/src/copy.cpp:254: error: (-215:Assertion failed) channels() == CV_MAT_CN(dtype) in function 'copyTo'
Aborted (core dumped)
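A guess at a fix, based on the assertion text (channels() == CV_MAT_CN(dtype)): the canvas is hard-coded to CV_8UC3, while the converted grayscale images are CV_8UC1, so copyTo sees mismatched channel counts. A minimal sketch that builds the canvas from the input images' own type:
// Sketch: create the black canvas with the inputs' own type, so channel
// counts match in copyTo (CV_8UC1 for grayscale, CV_8UC3 for color)
Mat imageCollage( vector<Mat> & array_of_images, int M, int N )
{
    // All images should be the same size AND the same type
    const Size images_size = array_of_images[0].size();
    Mat image_collage( images_size.height * M, images_size.width * N,
                       array_of_images[0].type(), Scalar::all( 0 ) );
    for( int i = 0; i < M; ++i )
    {
        for( int j = 0; j < N; ++j )
        {
            if( ( ( i * N ) + j ) >= (int)array_of_images.size() )
                break;
            Rect roi( images_size.width * j, images_size.height * i,
                      images_size.width, images_size.height );
            array_of_images[ ( i * N ) + j ].copyTo( image_collage( roi ) );
        }
    }
    return image_collage;
}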
In the OpenCV CamShift implementation there is a line of code (just before the switch statement in the onMouse() function in the code below) which goes like this:
selection &= Rect(0, 0, image.cols, image.rows);
where selection is a Rect of user-defined dimensions (selected by a mouse callback) and image is a frame from a video. The dimensions of selection and image do not match. What I don't understand is the purpose of this &= operator. I commented out this line of code and couldn't see any difference; the code compiles just fine. I am not sure what happens when two Rect objects are AND-ed, and I could not find any resource that addresses this particular issue.
This is the original code:
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Mat image;
bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 256, smin = 30;
static void onMouse( int event, int x, int y, int, void* )
{
if( selectObject )
{
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
selection &= Rect(0, 0, image.cols, image.rows);
}
switch( event )
{
case CV_EVENT_LBUTTONDOWN:
origin = Point(x,y);
selection = Rect(x,y,0,0);
selectObject = true;
break;
case CV_EVENT_LBUTTONUP:
selectObject = false;
if( selection.width > 0 && selection.height > 0 )
trackObject = -1;
break;
}
}
static void help()
{
cout << "\nThis is a demo that shows mean-shift based tracking\n"
"You select a color objects such as your face and it tracks it.\n"
"This reads from video camera (0 by default, or the camera number the user enters\n"
"Usage: \n"
" ./camshiftdemo [camera number]\n";
cout << "\n\nHot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"\tp - pause video\n"
"To initialize tracking, select the object with mouse\n";
}
const char* keys =
{
"{1| | 0 | camera number}"
};
int main( int argc, const char* argv[] )
{
help();
VideoCapture cap;
Rect trackWindow;
int hsize = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
CommandLineParser parser(argc, argv, keys);
int camNum = parser.get<int>("1");
cap.open(argv[1]);
/* if( !cap.isOpened() )
{
help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
parser.printParams();
return -1;
}*/
namedWindow( "Histogram", 0 );
namedWindow( "CamShift Demo", 0 );
namedWindow( "ROI", 0 );
setMouseCallback( "CamShift Demo", onMouse, 0 );
createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
bool paused = false;
for(;;)
{
if( !paused )
{
cap >> frame;
if( frame.empty() )
break;
}
frame.copyTo(image);
if( !paused )
{
cvtColor(image, hsv, COLOR_BGR2HSV);
if( trackObject )
{
int _vmin = vmin, _vmax = vmax;
inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
Scalar(180, 256, MAX(_vmin, _vmax)), mask);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);
if( trackObject < 0 )
{
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, CV_MINMAX);
trackWindow = selection;
trackObject = 1;
histimg = Scalar::all(0);
int binW = histimg.cols / hsize;
Mat buf(1, hsize, CV_8UC3);
for( int i = 0; i < hsize; i++ )
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
cvtColor(buf, buf, CV_HSV2BGR);
for( int i = 0; i < hsize; i++ )
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
rectangle( histimg, Point(i*binW,histimg.rows),
Point((i+1)*binW,histimg.rows - val),
Scalar(buf.at<Vec3b>(i)), -1, 8 );
}
imshow("ROI",roi);
}
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
backproj &= mask;
RotatedRect trackBox = CamShift(backproj, trackWindow,
TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
cout<<trackWindow.x<<" "<<trackWindow.y<<endl;
if( trackWindow.area() <= 1 )
{
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
Rect(0, 0, cols, rows);
}
if( backprojMode )
cvtColor( backproj, image, COLOR_GRAY2BGR );
ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
}
}
else if( trackObject < 0 )
paused = false;
if( selectObject && selection.width > 0 && selection.height > 0 )
{
Mat roi(image, selection);
bitwise_not(roi, roi);
}
imshow( "CamShift Demo", image );
//imshow( "Histogram", histimg );
char c = (char)waitKey(50);
if( c == 27 )
break;
switch(c)
{
case 'b':
backprojMode = !backprojMode;
break;
case 'c':
trackObject = 0;
histimg = Scalar::all(0);
break;
case 'h':
showHist = !showHist;
if( !showHist )
destroyWindow( "Histogram" );
else
namedWindow( "Histogram", 1 );
break;
case 'p':
paused = !paused;
break;
default:
int a=1;
//continue;
}
}
return 0;
}
Any comments or suggestions will be appreciated.
The & operator of two Rect objects results in a new Rect object, which is their intersection. Here they intersect the selection with the entire image frame. I suspect this is merely a precaution, done so that selection does not accidentally exceed the boundaries of the image.
The & and | operators are overloaded for cv::Rect: & is the intersection and | the union of two Rects.
See for yourself:
Rect a(2,2,5,5);
Rect b(4,4,5,5);
Rect c = a & b;
Rect d = a | b;
cerr << a << endl << b << endl << c << endl << d << endl ;
[5 x 5 from (2, 2)]
[5 x 5 from (4, 4)]
[3 x 3 from (4, 4)] // intersection
[7 x 7 from (2, 2)] // union
In your code example above,
selection &= Rect(0, 0, image.cols, image.rows);
selection gets cropped to the valid image borders.
I found this code and tried to compile it, but it does not work.
The error occurs at this line:
Q_matrix_tilde += q_tilde * q_tilde_transposed;
It told me that there is an error related to memory, but I could not understand why.
Could you help me?
// Alessandro Gentilini
#include <cv.h>
#include <cxcore.h>
#include <vector>
using namespace std;
using namespace cv;
// Estimation of an affine 2D transformation by means of least squares method.
// Reference:
// SPÄTH, Helmuth. Fitting affine and orthogonal transformations between two sets of points. Mathematical Communications, 2004, 9.1: 27-34.
// http://hrcak.srce.hr/file/1425
template < typename point2D_t, typename point3D_t >
class LeastSquare2DAffineTransformationEstimator
{
public:
// Solves the linear systems described by formula (17)
static cv::Mat estimate( const std::vector<point2D_t>& P, const std::vector<point2D_t>& Q )
{
Mat Q_tilde = Q_set_to_Q_matrix_tilde(P);
Mat c_tilde_0 = c_j_tilde(0,P,Q);
Mat c_tilde_1 = c_j_tilde(1,P,Q);
Mat Q_tilde_inv = Q_tilde.inv();
Mat a_tilde_0 = Q_tilde_inv * c_tilde_0;
Mat a_tilde_1 = Q_tilde_inv * c_tilde_1;
cv::Mat t = cv::Mat::zeros( 3, 2, cv::DataType<point2D_t::value_type>::type );
cv::Mat(a_tilde_0).copyTo(t.col(0));
cv::Mat(a_tilde_1).copyTo(t.col(1));
cv::transpose(t,t);
return t;
}
private:
// Implements the formula (12)
static cv::Mat q_to_q_tilde( const point2D_t& q )
{
vector<point2D_t> v;
v.push_back(point2D_t(q.x));
v.push_back(point2D_t(q.y));
v.push_back(point2D_t(1));
return cv::Mat(v,true);
}
// Implements the formula (14)
static cv::Mat Q_set_to_Q_matrix_tilde( const std::vector<point2D_t>& Q_set )
{
size_t m = Q_set.size();
cv::Mat Q_matrix_tilde = cv::Mat::zeros( 3, 3, cv::DataType<point2D_t::value_type>::type );
cv::Mat temp= cv::Mat::zeros( 3, 3, cv::DataType<point2D_t::value_type>::type );
cv::Mat temp1= cv::Mat::zeros( 3, 3, cv::DataType<point2D_t::value_type>::type );
cv::Mat q_tilde = cv::Mat::zeros( 3, 1, cv::DataType<point2D_t::value_type>::type );
cv::Mat q_tilde_transposed = cv::Mat::zeros( 1, 3, cv::DataType<point2D_t::value_type>::type );
for ( size_t i = 0; i < m; i++ ) {
q_tilde = q_to_q_tilde(Q_set[i]);
cv::transpose( q_tilde, q_tilde_transposed );
/*cout<<q_tilde_transposed.col<<" "<<q_tilde_transposed.row<<endl;*/
temp = q_tilde * q_tilde_transposed;
cv::add(temp,Q_matrix_tilde,temp1);
}
return Q_matrix_tilde;
}
// Implements the formula (16)
static cv::Mat c_j_tilde( const size_t& j, const std::vector<point2D_t>& Q_set, const std::vector<point2D_t>& P_set )
{
if ( Q_set.size() != P_set.size() ) {
throw 0;
}
if ( j > 2 ) {
throw 1;
}
size_t m = Q_set.size();
point2D_t::value_type p_ji;
point2D_t::value_type c_j0 = 0;
for ( size_t i = 0; i < m; i++ ) {
switch( j ) {
case 0: p_ji = P_set[i].x; break;
case 1: p_ji = P_set[i].y; break;
}
c_j0 += Q_set[i].x * p_ji;
}
point2D_t::value_type c_j1 = 0;
for ( size_t i = 0; i < m; i++ ) {
switch( j ) {
case 0: p_ji = P_set[i].x; break;
case 1: p_ji = P_set[i].y; break;
}
c_j1 += Q_set[i].y * p_ji;
}
point2D_t::value_type c_j2 = 0;
for ( size_t i = 0; i < m; i++ ) {
switch( j ) {
case 0: p_ji = P_set[i].x; break;
case 1: p_ji = P_set[i].y; break;
}
c_j2 += 1 * p_ji;
}
vector<point2D_t> v;
v.push_back(point2D_t(c_j0));
v.push_back(point2D_t(c_j1));
v.push_back(point2D_t(c_j2));
cv::Mat vv = Mat(v,true);
return vv;
}
};
#include <vector>
#include <iostream>
int main( int argc, char** argv )
{
std::vector<cv::Point2f> P,Q;
P.push_back(cv::Point2f( 1, 0));
P.push_back(cv::Point2f( 0, 1));
P.push_back(cv::Point2f(-1, 0));
P.push_back(cv::Point2f( 0,-1));
Q.push_back(cv::Point2f(1+sqrtf(2)/2, 1+sqrtf(2)/2));
Q.push_back(cv::Point2f(1-sqrtf(2)/2, 1+sqrtf(2)/2));
Q.push_back(cv::Point2f(1-sqrtf(2)/2, 1-sqrtf(2)/2));
Q.push_back(cv::Point2f(1+sqrtf(2)/2, 1-sqrtf(2)/2));
//std::cout <<
LeastSquare2DAffineTransformationEstimator<cv::Point2f,cv::Point3f>::estimate(P,Q);
return 0;
}
Your code works perfectly. I have run it and got this result:
[0.70710683, -0.70710683, 1;
 0.70710683, 0.70710683, 1]
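For anyone still hitting the memory error on the Q_matrix_tilde += q_tilde * q_tilde_transposed; line, one possible cause (an assumption based on the code as posted): a cv::Mat built from a std::vector<cv::Point2f> is a 3x1 two-channel (CV_32FC2) matrix, while Q_matrix_tilde is single-channel (CV_32FC1), so the product and the sum mix incompatible types. Note also that in the posted loop the sum lands in temp1 and is never written back to Q_matrix_tilde, so the accumulation is lost. A sketch of formulas (12) and (14) with q_tilde built as an explicit single-channel column vector:
// Sketch (assumptions above): single-channel q_tilde, and an accumulation
// that actually updates the returned matrix.
static cv::Mat q_to_q_tilde( const cv::Point2f& q )
{
    cv::Mat q_tilde( 3, 1, CV_32FC1 );
    q_tilde.at<float>(0) = q.x;
    q_tilde.at<float>(1) = q.y;
    q_tilde.at<float>(2) = 1.f;
    return q_tilde;
}
static cv::Mat Q_set_to_Q_matrix_tilde( const std::vector<cv::Point2f>& Q_set )
{
    cv::Mat Q_matrix_tilde = cv::Mat::zeros( 3, 3, CV_32FC1 );
    for ( size_t i = 0; i < Q_set.size(); i++ ) {
        cv::Mat q_tilde = q_to_q_tilde( Q_set[i] );
        Q_matrix_tilde += q_tilde * q_tilde.t(); // accumulate the outer products
    }
    return Q_matrix_tilde;
}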
I'm implementing video capture rotation with bilinear interpolation, like warpAffine() does in the OpenCV library. But so far I have run into some problems:
1. I'm getting some artifacts during rotation. Here are samples of the border, 90 degree, and 360 degree artifacts:
https://www.dropbox.com/sh/oe51ty0cy695i3o/hcAzwmAk6z
2. I can't change the resolution of my capture using
capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280 )
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720 )
Both calls return false.
I use LifeCam Cinema.
Here is my code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
#include <ctime>
#include <iostream>
#include <cstdlib>
using namespace cv;
using namespace std;
const double M_PI = 3.14159265359;
void print_help( const char* prg );
Mat rotate( Mat& in, int angle , Point2f rotationCenter );
inline uchar int2uchar( int color ) {
return (uchar)( color < 0 ? 0 : color > 255 ? 255 : color );
}
void print_help( const char* prg ) {
cout << "Report:" << endl;
cout << "Application : " << prg << endl;
cout << "Can't access capture device" << endl;
}
// rotation with bilinear interpolation
Mat rotate( Mat& in, int angle , Point2f rotationCenter ) {
// Note : added Scalar(0) for unused pixels to be black
Mat out( in.size(), in.type(), Scalar(0) );
float in_radians = (float)( angle * M_PI / 180 );
float sinAngle = (float)( sin( in_radians ) );
float cosAngle = (float)( cos( in_radians ) );
for ( int col(0); col < in.cols; ++col ) {
for ( int row(0); row < in.rows; ++row ) {
// already around rotationCenter
// x' = x * cos(angle) - y * sin(angle)
float temp_columns( ( col - rotationCenter.x ) * (cosAngle) -
( row - rotationCenter.y ) * (sinAngle) +
rotationCenter.x );
// y' = x * sin(angle) + y * cos(angle)
float temp_rows ( ( col - rotationCenter.x ) * (sinAngle) +
( row - rotationCenter.y ) * (cosAngle) +
rotationCenter.y );
float max_col( ceil (temp_columns) );
float min_col( floor(temp_columns) );
float max_row( ceil (temp_rows) );
float min_row( floor(temp_rows) );
// clip all irrelevant parts
if ( max_col >= in.cols || max_row >= in.rows ||
min_col < 0 || min_row < 0 ) {
// don't draw
continue;
}
float deltaCol( temp_columns - min_col );
float deltaRow( temp_rows - min_row );
// left top, right top, left bottom and right bottom
Vec3b q12( in.at < Vec3b >( (int)min_row, (int)min_col ) );
Vec3b q22( in.at < Vec3b >( (int)min_row, (int)max_col ) );
Vec3b q11( in.at < Vec3b >( (int)max_row, (int)min_col ) );
Vec3b q21( in.at < Vec3b >( (int)max_row, (int)max_col ) );
// R1 - linear interpolation of bottom neighborhoods
double blueR1 ( ( 1 - deltaCol ) * q11[0] + deltaCol * q21[0] );
double greenR1 ( ( 1 - deltaCol ) * q11[1] + deltaCol * q21[1] );
double redR1 ( ( 1 - deltaCol ) * q11[2] + deltaCol * q21[2] );
// R2 - linear interpolation of top neighborhoods
double blueR2 ( ( 1 - deltaCol ) * q12[0] + deltaCol * q22[0] );
double greenR2 ( ( 1 - deltaCol ) * q12[1] + deltaCol * q22[1] );
double redR2 ( ( 1 - deltaCol ) * q12[2] + deltaCol * q22[2] );
// P - linear interpolation of R1 and R2
int blue ( (int)ceil( ( 1 - deltaRow ) * blueR2 + deltaRow * blueR1 ) );
int green( (int)ceil( ( 1 - deltaRow ) * greenR2 + deltaRow * greenR1 ) );
int red ( (int)ceil( ( 1 - deltaRow ) * redR2 + deltaRow * redR1 ) );
// Vec3b stands for 3-channel value, each channel is a byte
out.at < Vec3b >( row, col )[ 0 ] = int2uchar(blue);
out.at < Vec3b >( row, col )[ 1 ] = int2uchar(green);
out.at < Vec3b >( row, col )[ 2 ] = int2uchar(red);
}
}
return out;
}
int main( int ac, char ** av ) {
if ( ac < 2 ) {
print_help( av[ 0 ] );
return -1;
}
// In degrees
int step = 1, angle = 90;
VideoCapture capture;
// doesn't work properly
if ( capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280 ) &&
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720 ) ) {
cout << "Resolution : "
<< capture.get(CV_CAP_PROP_FRAME_WIDTH )
<< " x "
<< capture.get(CV_CAP_PROP_FRAME_HEIGHT )
<< endl;
} else {
cout << "There's some problem with VideoCapture::set()" << endl;
}
capture.open( atoi( av[ 1 ] ) );
while ( !capture.isOpened( ) ) {
print_help( av[ 0 ] );
cout << "Capture device " << atoi( av[ 1 ] ) << " failed to open!" << endl;
cout << "Connect capture device to PC\a" << endl;
system("pause");
cout << endl;
capture.open( atoi( av[ 1 ] ) );
}
cout << "Device " << atoi( av[ 1 ] ) << " is connected" << endl;
string original("Original");
string withInterpolation("With Bilinear Interpolation");
namedWindow( original, CV_WINDOW_AUTOSIZE );
namedWindow( withInterpolation, CV_WINDOW_AUTOSIZE);
Mat frame;
for ( ;; ) {
capture >> frame;
if ( frame.empty( ) )
break;
createTrackbar("Rotate", withInterpolation, &angle, 360, 0);
imshow( original, frame );
char key = ( char ) waitKey( 2 );
switch ( key ) {
case '+':
angle += step;
break;
case '-':
angle -= step;
break;
case 27:
case 'q':
return 0;
break;
}
Mat result;
Point2f rotationCenter( (float)( frame.cols / 2.0 ),
(float)( frame.rows / 2.0 ) );
result = rotate( frame, angle, rotationCenter );
// Note : mirror effect
// 1 says, that given frame will be flipped horizontally
flip(result,result, 1);
imshow( withInterpolation, result );
// test to compare my bilinear interpolation and of OpenCV
Mat temp;
warpAffine( frame, temp,
getRotationMatrix2D( rotationCenter, angle, (double)(1.0) ),
frame.size(), 1, 0 );
string openCVInterpolation("OpenCV Bilinear Interpolation");
namedWindow( openCVInterpolation, CV_WINDOW_AUTOSIZE );
createTrackbar("Rotate", openCVInterpolation, &angle, 360, 0);
flip(temp,temp, 1);
imshow( openCVInterpolation, temp );
}
return 0;
}
Addressing your second issue - setting the Lifecam resolution using OpenCV:
I found that the Lifecam dashboard was perhaps interfering with the OpenCV VideoCapture calls. If you uninstall Lifecam using Programs & Features in the control panel, the calls
capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280)
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720)
will work fine.
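One more observation about the code as posted (separate from the Lifecam driver issue): in main() the capture.set() calls run before capture.open(), and set() on a capture that has not been opened yet returns false regardless of the camera. A sketch of the reordered sequence:
VideoCapture capture;
capture.open( atoi( av[ 1 ] ) ); // open the device first
if ( capture.isOpened( ) ) {
    // request the resolution only after open() succeeds;
    // set() on an unopened capture always returns false
    capture.set( CV_CAP_PROP_FRAME_WIDTH, 1280 );
    capture.set( CV_CAP_PROP_FRAME_HEIGHT, 720 );
}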
I have OpenCV code for detecting squares, and now I want the code to run another command after a square is detected.
Here is the code:
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include "math.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <sstream>
#include <unistd.h> // for usleep()
#include <stdlib.h> // for system()
using namespace std;
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
} // angle
IplImage* findSquares4( IplImage* img, CvMemStorage* storage )
{
double s = 0, t = 0;
int sdetect =0, sqdetect = 0,sqt = 0;
CvSeq* contours;
CvSeq* result;
CvSeq* squares = cvCreateSeq( 0, sizeof( CvSeq), sizeof( CvPoint), storage );
IplImage* cny = cvCreateImage(cvGetSize(img), 8, 1);
cvCanny(img, cny, 5, 100, 3);
cvNamedWindow("canny",CV_WINDOW_AUTOSIZE);
cvShowImage("canny",cny);
cvFindContours( cny, storage, &contours, sizeof( CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint( 0, 0) );
while( contours )
{
result = cvApproxPoly( contours,
sizeof( CvContour),
storage,
CV_POLY_APPROX_DP,
cvContourPerimeter( contours)*0.02, 0 );
if( result->total == 4 &&
fabs( cvContourArea( result, CV_WHOLE_SEQ)) > 1000 &&
cvCheckContourConvexity( result) )
{
s = 0;
for( int i = 2; i < 5; i++ )
{
{
t = fabs( angle(
( CvPoint*)cvGetSeqElem( result, i ),
( CvPoint*)cvGetSeqElem( result, i-2 ),
( CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
cout << "s = " << s<< endl;
cout << "t = " << t<< endl;
}
} // for
if( s < 0.3 )
for( int i = 0; i < 4; i++ )
cvSeqPush( squares,
( CvPoint*)cvGetSeqElem( result, i ));
} // if
contours = contours->h_next;
} // while
if ((squares->total/4) == 1)
{
sdetect = 1;
} // if
else
{
sdetect = 2;
usleep(500000); // sleep() takes whole seconds, so use usleep() for 0.5 s
} // else
if (sqdetect != sdetect)
{
sqdetect=sdetect;
switch(sqdetect)
{
case 0 : system(NULL) ; break;
case 2 : cout<< "no "<< endl; break;
case 1 : system("./ambil1.sh"); break;
} // switch
} // if
sdetect = 0;
cout<<"Persegi : "<< squares->total/4 <<endl;
cvClearSeq(squares);
cvReleaseMemStorage(&storage); // release the storage only after clearing the seq that lives in it
return img; // the function is declared to return an IplImage*
} // findSquares4
void drawSquares(IplImage *img, CvSeq* squares )
{
CvFont font;
cvInitFont( &font, CV_FONT_HERSHEY_SIMPLEX, 0.4f, 0.4f, 0,1, 8 );
int i, j = 0, sdetect, sqdetect = 0;
CvSeqReader reader;
cvStartReadSeq( squares, &reader, 0 );
for( i = 0; i < squares->total; i += 4 )
{
j++;
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
memcpy( pt, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 1, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 2, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 3, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
cvPutText( img, "SQUARE", pt[0], &font, CV_RGB(20,255,0)); // pt[i] would run past the 4 stored vertices
cvPolyLine( img, &rect, &count, 1, 1, CV_RGB(200,0,0), 4, CV_AA, 0 );
} // for
cvClearSeq(squares);
} // drawSquares
But what I get is that the program becomes laggy. So what is the right way to place the condition for executing system("./ambil1.sh")?
The program waits until the subordinate command finishes. During this time it will not respond.
If that's what you need, system is adequate.
If you want to fire and forget, you can either use the fork/exec pair, or just use the shell & operator to run stuff in the background.
If you need to fire the command, change your program's behaviour, and then change it back when the subordinate command finishes, you almost have to use the fork/exec/SIGCHLD/waitpid combination.
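A minimal fire-and-forget sketch of the fork/exec pair mentioned above (POSIX, with error handling kept to a minimum); the quick alternative is simply system("./ambil1.sh &") so the shell backgrounds the command:
#include <unistd.h> // fork, execl, _exit
#include <signal.h> // signal, SIG_IGN

// Run a command in the background without blocking the caller.
void fireAndForget( const char* path )
{
    signal( SIGCHLD, SIG_IGN ); // reap children automatically, no zombies
    pid_t pid = fork( );
    if ( pid == 0 ) { // child process
        execl( path, path, (char*)NULL );
        _exit( 127 ); // only reached if execl fails
    }
    // the parent returns immediately and keeps processing frames
}

// usage inside the detection code:
// if ( sqdetect == 1 ) fireAndForget( "./ambil1.sh" );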