My project takes a bitmap snapshot of the client area of a given window and converts this data into an IplImage instance. Then, after grayscale conversion, thresholding, etc., bounding boxes are drawn around any contours that exceed a given minimum area.
The result is then shown in a cvNamedWindow.
All I need to do now is allow the user to click within these rects to effectively "choose" that contour, so that the application can then extract the sub-rect as a new image and save it to disk.
How can this be achieved using OpenCV in C++?
If you store your bounding boxes, you can loop over them in a mouse event handler to check whether (and which) box was clicked. The code for registering a mouse callback:
cvNamedWindow("MyWindow", CV_WINDOW_NORMAL);
cvSetMouseCallback("MyWindow", mouseEvent, 0);
imshow("MyWindow", image);
void mouseEvent(int evt, int x, int y, int flags, void* param) {
    if (evt == CV_EVENT_LBUTTONDOWN) {
        printf("%d %d\n", x, y);
    }
}
You can probably work out yourself how to check whether these coordinates fall inside a bounding box. I would also recommend using the C++ API, as stated in the comments, since it is much easier once you get the hang of it.
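For example, with the C++ API the hit test becomes a one-liner via cv::Rect::contains. A minimal sketch (the boxes vector and the window name are assumptions, not part of the code above):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <vector>

std::vector<cv::Rect> boxes;   // filled elsewhere, e.g. with cv::boundingRect(contour)

void onMouse(int evt, int x, int y, int flags, void* /*param*/)
{
    if (evt != cv::EVENT_LBUTTONDOWN)
        return;
    for (size_t i = 0; i < boxes.size(); ++i)
    {
        if (boxes[i].contains(cv::Point(x, y)))
        {
            std::printf("clicked box %d\n", (int)i);
            break;
        }
    }
}

// registered the same way: cv::setMouseCallback("MyWindow", onMouse, 0);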
In case anyone else is interested in how to actually extract the chosen image, here's how I was able to do it:
inline IplImage* getSubImage(IplImage* image, CvRect region)
{
    cvSetImageROI(image, region);
    IplImage* imgRet = cvCreateImage(cvSize(region.width, region.height), image->depth, image->nChannels);
    cvCopy(image, imgRet);
    cvResetImageROI(image);
    return imgRet;
}
inline bool pointInRect(const int x, const int y, const CvRect& r)
{
    return (x > r.x) && (x < (r.x + r.width)) &&
           (y > r.y) && (y < (r.y + r.height));
}
void onMouseEvent(int evt, int x, int y, int flags, void* param)
{
    if (evt == CV_EVENT_LBUTTONDOWN) {
        // boundingBoxes is declared as a vector of CvRects and
        // filled in the main loop using cvBoundingRect(contour)
        for (size_t i = 0; i < boundingBoxes.size(); i++)
        {
            CvRect rect = boundingBoxes[i];
            if (pointInRect(x, y, rect))
            {
                IplImage* img = getSubImage(imgSource, rect);
                // Do whatever you want with the sub-image here
                cvNamedWindow("Selection");
                cvShowImage("Selection", img);
                cvReleaseImage(&img);
                break;
            }
        }
    }
}
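To also save the chosen sub-image to disk, as the original question asks, a single call can go where the "Do whatever you want" comment is; the file name here is just an example:

// Hypothetical example: write the selected sub-image out before releasing it
cvSaveImage("selection.png", img);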
I have this function:
void Texture::render(int x, int y, int w, int h, SDL_Renderer *&renderer, double angle, SDL_Point* center, SDL_RendererFlip flip)
{
// Set a destination value to -1 to keep the current value
if (x < 0) { x = rect.x; }
if (y < 0) { y = rect.y; }
if (w < 0) { w = rect.w; }
if (h < 0) { h = rect.h; }
// Create destination rectangle
SDL_Rect dstRect = { x, y, w, h };
// Render to screen
SDL_RenderCopyEx(renderer, texture, &rect, &dstRect, angle, center, flip);
}
It works. It creates an image of the correct size at the location I want. But I want to add a chunk of code that resizes the texture itself to the size given in the dstRect.
So, anyone who finds this and reads the conversation I had with Nelfeal in the comments will see that I had a misunderstanding of how SDL_RenderCopyEx works. There's no need to resize the texture. If you need to do something like that, you can just use the dstRect when you copy it.
Actually, as far as I can find, there isn't a method to resize the actual texture itself. I'm sure one exists, but it's definitely not something people commonly use, which is usually a sign that it's a bad idea.
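For completeness: if someone really does need a texture with different pixel dimensions, one option in SDL2 is to render the existing texture into a new render-target texture of the desired size. A rough sketch, assuming the renderer supports render targets (see SDL_RenderTargetSupported); resizeTexture is a hypothetical helper, not part of my class:

SDL_Texture* resizeTexture(SDL_Renderer* renderer, SDL_Texture* source, int w, int h)
{
    Uint32 format;
    int access, oldW, oldH;
    SDL_QueryTexture(source, &format, &access, &oldW, &oldH);

    // New texture we can draw into
    SDL_Texture* resized = SDL_CreateTexture(renderer, format, SDL_TEXTUREACCESS_TARGET, w, h);
    if (resized == NULL)
        return NULL;

    SDL_SetRenderTarget(renderer, resized);        // draw into the new texture
    SDL_RenderCopy(renderer, source, NULL, NULL);  // stretch the old texture to fill it
    SDL_SetRenderTarget(renderer, NULL);           // restore the default target

    return resized;  // caller owns it; destroy the old texture if it's no longer needed
}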
I've tweaked my code to try to simplify it, for anybody who's trying to do something similar:
void render(SDL_Renderer *&renderer, SDL_Rect *dstRect=NULL, SDL_Rect *srcRect=NULL, double angle=0.0, SDL_Point* center=NULL, SDL_RendererFlip flip=SDL_FLIP_NONE);
void Texture::render(SDL_Renderer *&renderer, SDL_Rect *dstRect, SDL_Rect *srcRect, double angle, SDL_Point* center, SDL_RendererFlip flip)
{
// Check to see if a destination was provided
bool check = false;
if (dstRect == NULL)
{
check = true;
dstRect = new SDL_Rect();
dstRect->x = 0;
dstRect->y = 0;
dstRect->w = SCREEN_WIDTH;
dstRect->h = SCREEN_HEIGHT;
}
// Check to see if the entire texture is being copied
if (srcRect == NULL) { srcRect = &rect; }
// Render to screen
SDL_RenderCopyEx(renderer, texture, srcRect, dstRect, angle, center, flip);
// Free dstRect
if (check) delete dstRect;
}
And it looks like this when using the function:
bgTex.render(renderer);
blobTex.render(renderer, &blobDstRect);
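A small aside on the version above: the new/delete pair for the default destination could probably be avoided with a local rectangle, for example:

SDL_Rect fullScreen = { 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT };
if (dstRect == NULL) { dstRect = &fullScreen; }   // no heap allocation or 'check' flag needed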
I have a simple window that contains a simple black image with a small solid circle inside it. I have written simple code to be able to drag and drop this circle, and it works. Inside the mouse event function:
void on_mouse_event(int event_type, int x, int y, int flags, void*){
    if (event_type == cv::EVENT_RBUTTONDOWN){
        //Catch the circle
    }
    else if (event_type == cv::EVENT_RBUTTONUP){
        //Release the point
    }
    else if (event_type == cv::EVENT_MOUSEMOVE){
        //Change circle position according to cursor movement
        //redraw the circle
        //show the new image
    }
}
The main function:
while (true){
    //show image code (simple cv::imshow);
    if (cv::waitKey(1) == 27){
        break;
    }
}
The problem is that if I drag the circle and start to move it fast, the image does not change until I stop. However, if I move slowly, it updates as I go. What is the reason for this problem?
P.S. I don't suspect slow hardware at all. I am working on a workstation and I am monitoring processor utilization: only one of its 8 cores reaches around 50%, and the memory is almost free.
I am using Windows 10, if that helps.
You could test the following code (adapted from opencv_annotation.cpp). Note that it does all of the redrawing and calls imshow() inside the mouse callback itself, while the main loop only waits for key presses with waitKey(0).
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
// Function prototypes
void on_mouse(int, int, int, int, void*);
// Public parameters
Mat image(600, 800, CV_8UC3, Scalar(220, 220, 220));
Mat current_view;
int circle_center_x = image.cols / 2, circle_center_y = image.rows / 2, radius = 40;
bool dragging = false;
const string window_name = "OpenCV Mouse Event Demo";
void on_mouse(int event, int x, int y, int, void *)
{
// Action when left button is clicked
if (event == EVENT_LBUTTONDOWN && (abs(circle_center_x - x) < 20) && (abs(circle_center_y - y) < 20))
{
dragging = true;
}
if (event == EVENT_LBUTTONUP)
{
dragging = false;
}
// Action when mouse is moving
if ((event == EVENT_MOUSEMOVE) && dragging)
{
image.copyTo(current_view);
circle_center_x = x;
circle_center_y = y;
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
}
}
int main(int argc, const char** argv)
{
// Init window interface and couple mouse actions
namedWindow(window_name, WINDOW_AUTOSIZE);
setMouseCallback(window_name, on_mouse);
image.copyTo(current_view);
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
int key_pressed = 0;
do
{
// Keys for processing
// Based on the universal ASCII code of the keystroke: http://www.asciitable.com/
// <SPACE> = 32 add circle to current image
// <ESC> = 27 exit program
key_pressed = 0xFF & waitKey(0);
if (key_pressed==32)
{
// draw a circle on the image
circle(image, Point(circle_center_x, circle_center_y), radius, Scalar(0, 0, 255), -1);
image.copyTo(current_view);
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
}
}
// Continue as long as the <ESC> key has not been pressed
while (key_pressed != 27);
// Close down the window
destroyWindow(window_name);
return 0;
}
Using OpenCV 2.4.3, I am trying to draw a circle on top of an image, centered on the mouse (x, y) position while the user is moving the mouse, and only that one circle should remain once the mouse stops moving (just the original image with a single circle drawn should be shown at that moment). I thought it was going to be easy; however, I've been researching and trying for a couple of hours and can't make it work the way I described.
I'm attaching my code below. If anyone could help out I'd be really grateful.
void my_mouse_callback( int event, int x, int y, int flags, void* param );
bool moving_mouse = false;
int main()
{
const char* name = "Circle Example";
IplImage* image_circle = cvLoadImage( "../data/lena.png" );
IplImage* image = cvLoadImage( "../data/lena.png" );
namedWindow(name, CV_WINDOW_AUTOSIZE );
// Set up the callback
cvSetMouseCallback( name, my_mouse_callback, (void*) image_circle);
//Main Loop
while(cvWaitKey(15) != 27){
//If mouse is moving draw circle on top of image
if(moving_mouse){
cvShowImage(name, image_circle);
moving_mouse = false;
}
//If mouse stops moving draw original image and reset image_cicle
else{
cvShowImage(name, image);
image_circle = cvCloneImage(image);
}
}
cvReleaseImage(&image_circle);
cvReleaseImage(&image);
cvDestroyWindow(name);
return 0;
}
// Mouse callback
void my_mouse_callback( int event, int x, int y, int flags, void* param ){
switch( event ){
case CV_EVENT_MOUSEMOVE:
//Drawing a Circle
cvCircle(param,cvPoint(x,y),25,CV_RGB(0,255,0),1);
moving_mouse = true;
break;
}
}
Here is one approach. I explain it below and then point out a difficulty you will run into:
static int mouse_x = -1;
static int mouse_y = -1;
void my_mouse_callback( int event, int x, int y, int flags, void* param )
{
if(event == CV_EVENT_MOUSEMOVE)
{
mouse_x = x;
mouse_y = y;
}
}
int main()
{
IplImage* image;
IplImage* image_circle = NULL;
... // load image, create window, initiate callback, etc
int x = -1;
int y = -1;
while(cvWaitKey(15) != 27)
{
if(x != mouse_x || y != mouse_y)
{
x = mouse_x;
y = mouse_y;
cvReleaseImage(&image_circle);
image_circle = cvCloneImage(image);
cvCircle(image_circle,cvPoint(x,y),25,CV_RGB(0,255,0),1);
cvShowImage(name, image_circle);
}
}
... // destroy image
}
Explanation
Here, the mouse callback just stores the coordinates of the mouse pointer. In the main loop, the program checks whether the mouse has moved and, if so, redraws the full image. Since you want to erase the previous circle, you must copy the original image first and then draw the new circle on the copy. You could make this smarter by copying only the part of the original image where the previous circle was, instead of the entire image; see the sketch below.
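That partial-copy optimization could look roughly like this with the C API. It assumes image_circle is created once from image and kept between iterations (instead of being re-cloned), and that prev_x/prev_y hold the previous circle position; clamp the dirty rectangle to the image bounds in real code:

// Restore only the patch under the previous circle, then draw the new one
CvRect dirty = cvRect(prev_x - 26, prev_y - 26, 52, 52);  // old circle: radius 25 plus 1px outline
cvSetImageROI(image, dirty);
cvSetImageROI(image_circle, dirty);
cvCopy(image, image_circle);
cvResetImageROI(image);
cvResetImageROI(image_circle);
cvCircle(image_circle, cvPoint(x, y), 25, CV_RGB(0, 255, 0), 1);
cvShowImage(name, image_circle);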
Problem
A problem when doing something like this in OpenCV is that you can't detect when the mouse leaves your window, so you will always have a circle drawn on your image. I don't think you can solve this with OpenCV alone, since as far as I know there is no kind of MOUSE_OUT event. You would need to look for a Qt callback or a system function.
For a whole day I have been trying to get all the related matches (with the matchTemplate function) in a sub-image, which is an ROI I have already extracted from the original image with the mouse callback function. My code for the matching function is below:
////Matching Function
void CTemplate_MatchDlg::OnBnTemplatematch()
{
namedWindow("reference",CV_WINDOW_AUTOSIZE);
while(true)
{
Mat ref = imread("img.jpg"); // Original Image
mod_ref = cvCreateMat(ref.rows,ref.cols,CV_32F);// resizing the image to fit in picture box
resize(ref,mod_ref,Size(),0.5,0.5,CV_INTER_AREA);
Mat tpl =imread("Template.jpg"); // TEMPLATE IMAGE
cvSetMouseCallback("reference",find_mouseHandler,0);
Mat aim=roiImg1.clone(); // SUB_IMAGE FROM ORIGINALIMAGE
// aim variable contains the ROI matrix
// next, want to perform template matching in that ROI // and display results on original image
if(select_flag1 == 1)
{
// imshow("ref",aim);
Mat res(aim.rows-tpl.rows+1, aim.cols-tpl.cols+1,CV_32FC1);
matchTemplate(aim, tpl, res, CV_TM_CCOEFF_NORMED);
threshold(res, res, 0.8, 1., CV_THRESH_TOZERO);
while (1)
{
double minval, maxval, threshold = 0.8;
Point minloc, maxloc;
minMaxLoc(res, &minval, &maxval, &minloc, &maxloc);
//// Draw Bound boxes for detected templates in sub matrix
if (maxval >= threshold)
{
rectangle(
aim,
maxloc,
Point(maxloc.x + tpl.cols, maxloc.y + tpl.rows),
CV_RGB(0,255,0), 1,8,0
);
floodFill(res, maxloc, cv::Scalar(0), 0, cv::Scalar(.1), cv::Scalar(1.));
}else
break;
}
}
////Bounding box for ROI selection with mouse
rectangle(mod_ref, rect2, CV_RGB(255, 0, 0), 1, 8, 0); // rect2 is ROI
// my idea is to get all the matches in ROI with bounding boxes
// no need to mark any matches outside the ROI
//Clearly i want to process only ROI
imshow("reference", mod_ref); // show the image with the results
waitKey(10);
}
//cvReleaseMat(&mod_ref);
destroyWindow("reference");
}
/// ImplementMouse Call Back
void find_mouseHandler(int event, int x, int y, int flags, void* param)
{
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/* left button clicked. ROI selection begins*/
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/* mouse dragged. ROI being selected*/
Mat img3 = mod_ref.clone();
point2 = Point(x, y);
rectangle(img3, point1, point2, CV_RGB(255, 0, 0), 1, 8, 0);
imshow("reference", img3);
//
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
Mat img4=mod_ref.clone();
point2 = Point(x, y);
rect1 = Rect(point1.x,point1.y,x-point1.x,y-point1.y);
drag = 0;
roiImg1 = mod_ref(rect1); //SUB_IMAGE MATRIX
imshow("reference", img4);
}
if (event == CV_EVENT_LBUTTONUP)
{
/* ROI selected */
select_flag1 = 1;
drag = 0;
}
}
The build and debugging process completes successfully. But when I click the Match button in the dialog, I get the error:
Unhandled exception at 0x74bf812f in Match.exe: Microsoft C++ exception: cv::Exception at memory location 0x001ae150..
My idea is to get all the matches in the sub-image when compared with the TEMPLATE IMAGE and to show the final result (matches with bounding boxes) on the ORIGINAL IMAGE itself.
Can anyone help me with this? Help would be greatly appreciated.
My code below is a modification of the original tutorial provided by OpenCV.
It loads an image from the command line and displays it on the screen so the user can draw a rectangle somewhere to select the sub-image to be the template. After that operation is done, the sub-image is shown inside a green rectangle.
Press any key to let the program perform the template matching. A new window titled "Template Match:" appears, displaying the original image plus a blue rectangle that shows the matched area.
#include <cv.h>
#include <highgui.h>
#include <iostream>
const char* ref_window = "Draw rectangle to select template";
std::vector<cv::Point> rect_points;
void mouse_callback(int event, int x, int y, int flags, void* param)
{
if (!param)
return;
cv::Mat* ref_img = (cv::Mat*) param;
// Upon LMB click, store the X,Y coordinates to define a rectangle.
// Later this info is used to set a ROI in the reference image.
switch (event)
{
case CV_EVENT_LBUTTONDOWN:
{
if (rect_points.size() == 0)
rect_points.push_back(cv::Point(x, y));
}
break;
case CV_EVENT_LBUTTONUP:
{
if (rect_points.size() == 1)
rect_points.push_back(cv::Point(x, y));
}
break;
default:
break;
}
if (rect_points.size() == 2)
{
cv::rectangle(*ref_img,
rect_points[0],
rect_points[1],
cv::Scalar(0, 255, 0),
2);
cv::imshow(ref_window, *ref_img);
}
}
int main(int argc, char* argv[])
{
if (argc < 2)
{
std::cout << "Usage: " << argv[0] << " <image>" << std::endl;
return -1;
}
cv::Mat source = cv::imread(argv[1]); // original image
if (source.empty())
{
std::cout << "!!! Failed to load source image." << std::endl;
return -1;
}
// For testing purposes, our template image will be a copy of the original.
// Later we will present it in a window to the user, and he will select a region
// as a template, and then we'll try to match that to the original image.
cv::Mat reference = source.clone();
cv::namedWindow(ref_window, CV_WINDOW_AUTOSIZE);
cv::setMouseCallback(ref_window, mouse_callback, (void*)&reference);
cv::imshow(ref_window, reference);
cv::waitKey(0);
if (rect_points.size() != 2)
{
std::cout << "!!! Oops! You forgot to draw a rectangle." << std::endl;
return -1;
}
// Create a cv::Rect with the dimensions of the selected area in the image
cv::Rect template_roi = cv::boundingRect(rect_points);
// Create THE TEMPLATE image using the ROI from the rectangle
cv::Mat template_img = cv::Mat(source, template_roi);
// Create the result matrix
int result_cols = source.cols - template_img.cols + 1;
int result_rows = source.rows - template_img.rows + 1;
cv::Mat result;
// Do the matching and normalize
cv::matchTemplate(source, template_img, result, CV_TM_CCORR_NORMED);
cv::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());
/// Localizing the best match with minMaxLoc
double min_val = 0, max_val = 0;
cv::Point min_loc, max_loc, match_loc;
int match_method = CV_TM_CCORR_NORMED;
cv::minMaxLoc(result, &min_val, &max_val, &min_loc, &max_loc, cv::Mat());
// When using CV_TM_CCORR_NORMED, max_loc holds the point with maximum
// correlation.
match_loc = max_loc;
// Draw a rectangle in the area that was matched
cv::rectangle(source,
match_loc,
cv::Point(match_loc.x + template_img.cols , match_loc.y + template_img.rows),
cv::Scalar(255, 0, 0), 2, 8, 0 );
imshow("Template Match:", source);
cv::waitKey(0);
return 0;
}
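As a follow-up for the original goal of marking every occurrence (not just the best one), the single minMaxLoc call could be replaced by a loop that suppresses each maximum before looking for the next. A rough sketch reusing source and template_img from above; the CV_TM_CCOEFF_NORMED method and the 0.8 threshold are arbitrary choices, and the normalize() step should be skipped so the threshold stays meaningful:

cv::Mat all_matches;
cv::matchTemplate(source, template_img, all_matches, CV_TM_CCOEFF_NORMED);
while (true)
{
    double min_val, max_val;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(all_matches, &min_val, &max_val, &min_loc, &max_loc);
    if (max_val < 0.8)
        break;  // no more matches above the threshold
    cv::rectangle(source, max_loc,
                  cv::Point(max_loc.x + template_img.cols, max_loc.y + template_img.rows),
                  cv::Scalar(255, 0, 0), 2);
    // Blank out this peak so minMaxLoc can find the next one
    cv::floodFill(all_matches, max_loc, cv::Scalar(0), 0, cv::Scalar(0.1), cv::Scalar(1.0));
}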
I want to draw/paint on a webcam screen using OpenCV. Since I'm reading from a cam, the frames are constantly changing, so I'm trying to figure out a way to keep or save the drawing on the current frame and use it for the next frame. The code below allows you to draw on the screen but when it gets the next frame, the drawing is gone and it starts over.
Could someone please help me ... Thanks.
CvCapture *input;
input = cvCaptureFromCAM( 0 );
cvSetMouseCallback("Demo",&on_mouse, 0);
for(;;)
{
frame = cvQueryFrame(input);
if(!image)
{
image = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
screenBuffer = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
}
cvCopy(frame, image, 0);
if(drawing) //drawing is a global variable
{
cvCircle(image, cvPoint(last_x,last_y), 10,CV_RGB(red,green,blue), -1, CV_AA, 0);
cvCopy(image, screenBuffer, 0);
}
cvShowImage( "Demo", screenBuffer );
}
void on_mouse( int event, int x, int y, int flags, void* param )
{
last_x = x;
last_y = y;
if(event==CV_EVENT_LBUTTONDOWN)
{
drawing = 1;
}
}
Draw into a separate image and then cvAdd() that to the video image immediately before displaying it.
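In terms of the code from the question, that could look roughly like this (drawing_layer is a new, initially black image of the same size as frame; the other names come from the question):

// inside the mouse handler / main loop, while drawing:
cvCircle(drawing_layer, cvPoint(last_x, last_y), 10, CV_RGB(red, green, blue), -1, CV_AA, 0);

// every frame, before displaying:
cvCopy(frame, screenBuffer, 0);                       // fresh camera frame
cvAdd(screenBuffer, drawing_layer, screenBuffer, 0);  // overlay the persistent strokes
cvShowImage("Demo", screenBuffer);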
I will not go into all the details of why your approach is problematic, but keep in mind that creating two extra frames for drawing is a bit too much.
It's important to realize that all of this work is being done on the same thread used to capture new frames. What does that mean? It means the extra code you add inside the loop slows down the process of capturing and displaying new frames. In other words, you are sabotaging yourself by lowering the framerate of your application. If you don't care, that's OK. If you do, my tip is to stack the captured frames into a buffer and have another thread read, process and display them.
OK, so you REALLY want to draw over the window that's displaying the captured frames. The obvious thing you can't do (and you discovered this yourself) is draw on the captured frame itself, because the frame is replaced with new data on every iteration of the loop. So what do you do? You create a second frame to do the drawing on. Let's call it the drawing_frame.
The only things on the drawing_frame are the circles that appear when the mouse moves over the window while drawing is enabled with the left mouse button (a second click toggles it ON/OFF).
After a circle is drawn, the drawing_frame is overlaid on top of the frame captured by the camera. This process is a little expensive on the CPU, and since we are doing it in the main thread of the application, it will also decrease the framerate.
I strongly suggest that everyone interested in adding/merging/overlaying transparent frames with OpenCV take a look at Transparent image overlays in OpenCV.
By the way, I'm using cvCaptureFromCAM(-1) because I'm on Linux. You should probably change that to whatever works for you. According to your post, it's cvCaptureFromCAM(0).
#include <stdio.h>
#include <unistd.h>  // for usleep()
#include <cv.h>
#include <highgui.h>
int drawing = 0;
int last_x = 0;
int last_y = 0;
void on_mouse(int event, int x, int y, int flags, void* param)
{
last_x = x;
last_y = y;
if (event == CV_EVENT_LBUTTONDOWN)
{
// switches between On and Off
if (drawing)
drawing = 0;
else
drawing = 1;
}
}
int main()
{
CvCapture* capture = NULL;
if ((capture = cvCaptureFromCAM(-1)) == NULL)
{
fprintf(stderr, "ERROR: capture is NULL \n");
return -1;
}
cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);
cvQueryFrame(capture); // Sometimes needed to get correct data
cvSetMouseCallback("mywindow",&on_mouse, 0);
IplImage* frame = NULL;
IplImage* drawing_frame = NULL;
while (1)
{
if ((frame = cvQueryFrame(capture)) == NULL)
{
fprintf( stderr, "ERROR: cvQueryFrame failed\n");
break;
}
if (frame == NULL)
{
fprintf( stderr, "WARNING: cvQueryFrame returned NULL, sleeping..\n");
usleep(100000);
continue;
}
if (!drawing_frame) // This frame is created only once
{
drawing_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
cvZero(drawing_frame);
}
if (drawing)
{
cvCircle(drawing_frame, cvPoint(last_x,last_y), 10,CV_RGB(0, 255, 0), -1, CV_AA, 0);
// For overlaying (copying transparent images) in OpenCV
// http://www.aishack.in/2010/07/transparent-image-overlays-in-opencv/
for (int x = 0; x < frame->width; x++)
{
for (int y = 0; y < frame->height; y++)
{
CvScalar source = cvGet2D(frame, y, x);
CvScalar over = cvGet2D(drawing_frame, y, x);
CvScalar merged;
CvScalar S = { 1,1,1,1 };
CvScalar D = { 1,1,1,1 };
for(int i = 0; i < 4; i++)
merged.val[i] = (S.val[i] * source.val[i] + D.val[i] * over.val[i]);
cvSet2D(frame, y, x, merged);
}
}
}
cvShowImage("mywindow", frame);
int key = cvWaitKey(10);
if (key == 113) // q was pressed on the keyboard
break;
}
// Note: frames returned by cvQueryFrame() are owned by the capture and must not be released manually
cvReleaseImage(&drawing_frame);
cvReleaseCapture(&capture);
cvDestroyWindow("mywindow");
return 0;
}
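As a side note, the per-pixel cvGet2D/cvSet2D loop above is quite slow; with S = D = 1 it is just a saturating add, which could probably be done in a single call:

// Equivalent of the S = D = 1 case of the loop above, done in one shot
cvAddWeighted(frame, 1.0, drawing_frame, 1.0, 0.0, frame);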
You will usually have problems when adding images (they eventually saturate), so I guess that's why you start over. I see you have color images... If you use more powerful tools like OpenGL for your drawing, you could use an overlay for your drawings. Otherwise, check this out:
http://aishack.in/tutorials/transparent-image-overlays-in-opencv/