I have learnt how to draw a line on an image in OpenCV using line( frame, Point( 15, 20 ), Point( 70, 50), 'r', 2, 8 );
I have also learnt how to draw a line on an image using mouse clicks. For example, the following code draws a line connecting two points the user clicks on an image:
using namespace cv;
using namespace std;
void onMouse(int evt, int x, int y, int flags, void* param) {
if(evt == CV_EVENT_LBUTTONDOWN) {
std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
ptPtr->push_back(cv::Point(x,y));
}
}
int main()
{
std::vector<Point> points;
cv::namedWindow("Output Window");
Mat frame = cv::imread("chhha.png");
cv::setMouseCallback("Output Window", onMouse, (void*)&points);
int X1=0, Y1=0, X2=0, Y2=0;
while(1)
{
cv::imshow("Output Window", frame);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
// Now let us draw a line on the image
line( frame, points[0], points[1], 'r', 2, 8 );
cv::imshow("Output Window", frame);
waitKey( 10 );
getch();
return 0;
}
Now basically what I want is to keep drawing lines until I right click or maybe some character is entered.
What I have tried so far is using do-while loop:
char m;
do{
while(1)
{
cv::imshow("Output Window", frame);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
// Draw a line
line( frame, points[0], points[1], 'r', 2, 8 );
cv::imshow("Output Window", frame);
cout<<"do you want more lines, if so , press 'y'"<<endl;
cin>>m;
// instead of this a right click check would be much better
if(m!='y')
{
break;
}
}while(m=='y');
But the problem is that this way not even one line is drawn, and after a few clicks and entering 'y', the application stops responding.
Please help me solve this issue.
There are probably several issues with your code that prevent it from doing what you want. The first thing that comes to mind is that you're not clearing the vector used for capturing the coordinates. After drawing a line
line( frame, points[0], points[1], 'r', 2, 8 );
you should reset the vector like this
points.clear();
so that the next mouse click's coordinate goes to points[0]. Otherwise it would append to the vector and you would keep drawing lines between the first two mouse coordinates over and over again.
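Putting both pieces together, a minimal sketch of how the loop could look is below. It reuses your onMouse callback, extended with a global stopDrawing flag that is set on a right click; the flag name and the colour value are my own choices, not taken from your code (note that line() expects a cv::Scalar colour, so passing 'r' does not give you red).
// Hypothetical extension of the callback above: a global flag set on right click.
bool stopDrawing = false;

void onMouse(int evt, int x, int y, int flags, void* param) {
    std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
    if (evt == CV_EVENT_LBUTTONDOWN)
        ptPtr->push_back(cv::Point(x, y));
    else if (evt == CV_EVENT_RBUTTONDOWN)
        stopDrawing = true;           // right click ends the drawing session
}

// Inside main(), after setMouseCallback():
while (!stopDrawing)
{
    cv::imshow("Output Window", frame);
    if (points.size() > 1)            // we have 2 points
    {
        cv::line(frame, points[0], points[1], cv::Scalar(0, 0, 255), 2, 8);
        points.clear();               // so the next two clicks start a new line
    }
    if (cv::waitKey(10) == 'q')       // optional keyboard exit as well
        break;
}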
I have a simple window that contains a simple black image with a small solid circle inside it. I have written simple code to be able to drag and drop this circle, and I could do it correctly. Inside the mouse event function:
void on_mouse_event(int event_type, int x, int y, int flags, void*){
if (event_type == cv::EVENT_RBUTTONDOWN){
//Catch the circle
}
else if (event_type == cv::EVENT_RBUTTONUP){
//Release the point
}
else if (event_type == cv::EVENT_MOUSEMOVE){
//Change circle position according to curser moving
//re draw the circle again
//show the new image
}
}
The main function:
while (true){
//show image code (simple cv::imshow);
if (cv::waitKey(1) == 27){
break;
}
}
The problem is that if I drag the circle and start to move fast, the image does not change until I stop; however, if I move slowly it changes according to the movement. What is the reason for this problem?
P.S. I do not suspect slow hardware at all. I am working on a workstation, and monitoring processor utilization shows that just one of its 8 cores reaches around 50% while the memory is almost free.
I am using Windows 10, if that helps.
You could test the following code (adapted from opencv_annotation.cpp):
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
// Function prototypes
void on_mouse(int, int, int, int, void*);
// Public parameters
Mat image(600, 800, CV_8UC3, Scalar(220, 220, 220));
Mat current_view;
int circle_center_x = image.cols / 2, circle_center_y = image.rows / 2, radius = 40;
bool dragging = false;
const string window_name = "OpenCV Mouse Event Demo";
void on_mouse(int event, int x, int y, int, void *)
{
// Action when left button is clicked
if (event == EVENT_LBUTTONDOWN && (abs(circle_center_x - x) < 20) && (abs(circle_center_y - y) < 20))
{
dragging = true;
}
if (event == EVENT_LBUTTONUP)
{
dragging = false;
}
// Action when mouse is moving
if ((event == EVENT_MOUSEMOVE) && dragging)
{
image.copyTo(current_view);
circle_center_x = x;
circle_center_y = y;
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
}
}
int main(int argc, const char** argv)
{
// Init window interface and couple mouse actions
namedWindow(window_name, WINDOW_AUTOSIZE);
setMouseCallback(window_name, on_mouse);
image.copyTo(current_view);
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
int key_pressed = 0;
do
{
// Keys for processing
// Based on the universal ASCII code of the keystroke: http://www.asciitable.com/
// <SPACE> = 32 add circle to current image
// <ESC> = 27 exit program
key_pressed = 0xFF & waitKey(0);
if (key_pressed==32)
{
// draw a circle on the image
circle(image, Point(circle_center_x, circle_center_y), radius, Scalar(0, 0, 255), -1);
image.copyTo(current_view);
circle(current_view, Point(circle_center_x, circle_center_y), radius, Scalar(255, 0, 0), 5);
imshow(window_name, current_view);
}
}
// Continue as long as the <ESC> key has not been pressed
while (key_pressed != 27);
// Close down the window
destroyWindow(window_name);
return 0;
}
The following code draws a line connecting two points the user clicks on an image:
using namespace cv;
using namespace std;
void onMouse(int evt, int x, int y, int flags, void* param) {
if(evt == CV_EVENT_LBUTTONDOWN) {
std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
ptPtr->push_back(cv::Point(x,y));
}
}
int main()
{
std::vector<Point> points;
cv::namedWindow("Output Window");
Mat frame = cv::imread("chhha.png");
cv::setMouseCallback("Output Window", onMouse, (void*)&points);
int X1=0, Y1=0, X2=0, Y2=0;
while(1)
{
cv::imshow("Output Window", frame);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
//just for testing that we are getting pixel values
X1=points[0].x;
X2=points[1].x;
Y1=points[0].y;
Y2=points[1].y;
cout<<"First and second X coordinates are given below"<<endl;
cout<<X1<<'\t'<<X2<<endl;
cout<<"First and second Y coordinates are given below"<<endl;
cout<<Y1<<'\t'<<Y2<<endl;
// Now let us draw a line on the image
line( frame, points[0], points[1], 'r', 2, 8 );
cv::imshow("Output Window", frame);
waitKey( 10 );
getch();
return 0;
}
The problem is that the program does not exit if I close the "Output Window" (by clicking the cross button at its top right); instead it hangs and says "Not responding".
How do I fix this problem?
The reason that your application does not exit is that you have an infinite loop, and clicking on the cross to close a window does nothing to break that loop.
One way to exit is to test for a key being pressed, e.g.
while(true)
{
...
char c = cv::waitKey(10);
if(c == 'q')
break;
}
BTW, assuming you are on Windows: if a window is destroyed, waitKey() intercepts a WM_DESTROY message. In this case waitKey() returns message.wParam, but the Windows documentation for WM_DESTROY says "This parameter is not used". It looks like a bug to me, but it might be worth investigating whether waitKey() returns a consistent value when the window is closed; normally -1 is returned if no key is pressed.
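If you also want to stop when the user closes the window with the cross, one option worth trying (a sketch only; cv::getWindowProperty() has been around for a long time, but the WND_PROP_VISIBLE constant is only available in newer OpenCV releases and its behaviour depends on the HighGUI backend) is to poll the window state on every iteration:
while (true)
{
    cv::imshow("Output Window", frame);
    char c = cv::waitKey(10);
    if (c == 'q')
        break;
    // getWindowProperty() returns a value < 1 once the window has been closed
    // (treat this as a sketch; behaviour differs between backends/versions).
    if (cv::getWindowProperty("Output Window", cv::WND_PROP_VISIBLE) < 1)
        break;
}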
How can I properly get a feed from the camera at one resolution in OpenCV (640x480), but crop it and display only part of the frame (320x240)? So not to scale down, but to actually crop. I am using OpenCV 2.4.5, VS2010 and C++.
This fairly standard code gets a 640x480 input resolution, and I made some changes to crop it to 320x240. Should I use Mat instead of IplImage, and if so, what would be the best way?
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
char key;
int main()
{
cvNamedWindow("Camera_Output", 1); //Create window
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 640 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 480 );
while(1){ //Create loop for live streaming
IplImage* framein = cvQueryFrame(capture); //Create image frames from capture
/* sets the Region of Interest - rectangle area has to be __INSIDE__ the image */
cvSetImageROI(framein, cvRect(0, 0, 320, 240));
/* create destination image - cvGetSize will return the width and the height of ROI */
IplImage *frameout = cvCreateImage(cvGetSize(framein), framein->depth, framein->nChannels);
/* copy subimage */
cvCopy(framein, frameout, NULL);
/* always reset the Region of Interest */
cvResetImageROI(framein);
cvShowImage("Camera_Output", frameout); //Show image frames on created window
key = cvWaitKey(10); //Capture Keyboard stroke
if (char(key) == 27){
break; //ESC key loop will break.
}
}
cvReleaseCapture(&capture); //Release capture.
cvDestroyWindow("Camera_Output"); //Destroy Window
return 0;
}
I think you don't check whether you are actually getting a CvCapture. On my system, with only one camera, your code doesn't work because you query camera 1, but the first camera should be 0. Thus change this code:
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
to this (note I changed 1 to 0):
CvCapture* capture = cvCaptureFromCAM(0); //Capture using camera 0 (the first camera) connected to the system
if (! capture ){
/*your error handling*/
}
Apart from that, your code seems to be working for me. You might also check the other pointer values to make sure you are not getting NULL.
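For example, a small sketch of guarding the frame pointer inside the loop, using the same C API as your code:
IplImage* framein = cvQueryFrame(capture); // may return NULL if the grab fails
if (!framein)
{
    std::cerr << "ERROR: cvQueryFrame returned NULL" << std::endl;
    break;
}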
You can easily crop a video by selecting a region of interest (ROI) with the mouse. Register a mouse callback like this:
cvSetMouseCallback("image", mouseHandler, NULL);
The mouseHandler function looks like this:
void mouseHandler(int event, int x, int y, int flags, void* param){
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/* left button clicked. ROI selection begins */
select_flag=0;
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/* mouse dragged. ROI being selected */
Mat img1 = img.clone();
point2 = Point(x, y);
rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
imshow("image", img1);
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
point2 = Point(x, y);
rect = Rect(point1.x,point1.y,x-point1.x,y-point1.y);
drag = 0;
roiImg = img(rect);
}
if (event == CV_EVENT_LBUTTONUP)
{
/* ROI selected */
select_flag = 1;
drag = 0;
}
}
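Note that the handler relies on several globals that are not shown in the snippet; a minimal set of declarations it assumes (the names come from the code above, the types are my guess) would be:
int drag = 0, select_flag = 0;   // drag / selection state
cv::Point point1, point2;        // corners of the selection rectangle
cv::Rect rect;                   // the selected ROI
cv::Mat img, roiImg;             // full frame and the cropped region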
For the details you can visit the following link: How to Crop Video from Webcam using OpenCV
This is easy in Python: the key idea is that cv2 images are numpy arrays, which can be referenced and sliced, so all you need is a slice of framein.
The following code takes a slice from (0,0) to (320,240). Note that numpy arrays are indexed [row, column], i.e. img[y, x], which is why the crop is written img[yMin:yMax, xMin:xMax].
# Required modules
import cv2
# Constants for the crop size
xMin = 0
yMin = 0
xMax = 320
yMax = 240
# Open cam, decode image, show in window
cap = cv2.VideoCapture(0) # use 1 or 2 or ... for other camera
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")
key = -1
while key < 0:
    success, img = cap.read()
    cropImg = img[yMin:yMax, xMin:xMax]  # this is all there is to cropping
    cv2.imshow("Original", img)
    cv2.imshow("Cropped", cropImg)
    key = cv2.waitKey(1)
cv2.destroyAllWindows()
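For completeness, the same slice-style crop with the C++ Mat/VideoCapture API that the question asked about might look roughly like this (a sketch, not tested against the asker's exact camera setup):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture cap(0);                 // first camera; use 1, 2, ... for others
    if (!cap.isOpened())
        return -1;
    cv::Mat frame;
    while (true)
    {
        if (!cap.read(frame) || frame.empty())
            break;
        // This creates a header onto the ROI, no pixels are copied;
        // call .clone() if you need an independent copy.
        cv::Mat cropped = frame(cv::Rect(0, 0, 320, 240));
        cv::imshow("Camera_Output", cropped);
        if (cv::waitKey(10) == 27)           // ESC
            break;
    }
    return 0;
}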
A working example of cropping faces from a live camera:
void CropFaces::DetectAndCropFaces(Mat frame, string locationToSaveFaces) {
std::vector<Rect> faces;
Mat frame_gray;
// Convert to gray scale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
// Equalize histogram
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3,
0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Iterate over all of the faces
for (size_t i = 0; i < faces.size(); i++) {
// Find center of faces
Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
Mat face = frame_gray(faces[i]);
std::vector<Rect> eyes;
Mat croppedRef(frame, faces[i]);
cv::Mat cropped;
// Copy the data into new matrix
croppedRef.copyTo(cropped);
string fileName = locationToSaveFaces+ "\\face_" + to_string(faces[i].x) + ".jpg";
resize(cropped, cropped, Size(65, 65));
imwrite(fileName, cropped);
}
// Display frame
imshow("DetectAndSave", frame);
}
void CropFaces::PlayVideoForCropFaces(string locationToSaveFaces) {
VideoCapture cap(0); // Open default camera
Mat frame;
face_cascade.load("haarcascade_frontalface_alt.xml"); // load faces
while (cap.read(frame)) {
DetectAndCropFaces(frame, locationToSaveFaces); // Call function to detect faces
if (waitKey(30) >= 0) // pause
break;
}
}
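The snippet assumes a CropFaces class with a CascadeClassifier member; a minimal declaration it could belong to (my guess at the surrounding class, not from the original post) would be:
#include <opencv2/opencv.hpp>
#include <string>

class CropFaces {
public:
    void PlayVideoForCropFaces(std::string locationToSaveFaces);
    void DetectAndCropFaces(cv::Mat frame, std::string locationToSaveFaces);
private:
    cv::CascadeClassifier face_cascade;   // loaded in PlayVideoForCropFaces
};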
For one whole day I have tried a lot to get all the related matches (with the matchTemplate function) in a sub-image, which is an ROI I have already extracted from the original image with the mouse callback function. My code for the matching function is below.
////Matching Function
void CTemplate_MatchDlg::OnBnTemplatematch()
{
namedWindow("reference",CV_WINDOW_AUTOSIZE);
while(true)
{
Mat ref = imread("img.jpg"); // Original Image
mod_ref = cvCreateMat(ref.rows,ref.cols,CV_32F);// resizing the image to fit in picture box
resize(ref,mod_ref,Size(),0.5,0.5,CV_INTER_AREA);
Mat tpl =imread("Template.jpg"); // TEMPLATE IMAGE
cvSetMouseCallback("reference",find_mouseHandler,0);
Mat aim=roiImg1.clone(); // SUB_IMAGE FROM ORIGINALIMAGE
// aim variable contains the ROI matrix
// next, want to perform template matching in that ROI // and display results on original image
if(select_flag1 == 1)
{
// imshow("ref",aim);
Mat res(aim.rows-tpl.rows+1, aim.cols-tpl.cols+1,CV_32FC1);
matchTemplate(aim, tpl, res, CV_TM_CCOEFF_NORMED);
threshold(res, res, 0.8, 1., CV_THRESH_TOZERO);
while (1)
{
double minval, maxval, threshold = 0.8;
Point minloc, maxloc;
minMaxLoc(res, &minval, &maxval, &minloc, &maxloc);
//// Draw Bound boxes for detected templates in sub matrix
if (maxval >= threshold)
{
rectangle(
aim,
maxloc,
Point(maxloc.x + tpl.cols, maxloc.y + tpl.rows),
CV_RGB(0,255,0), 1,8,0
);
floodFill(res, maxloc, cv::Scalar(0), 0, cv::Scalar(.1), cv::Scalar(1.));
}else
break;
}
}
////Bounding box for ROI selection with mouse
rectangle(mod_ref, rect2, CV_RGB(255, 0, 0), 1, 8, 0); // rect2 is ROI
// my idea is to get all the matches in ROI with bounding boxes
// no need to mark any matches outside the ROI
//Clearly i want to process only ROI
imshow("reference", mod_ref); // show the image with the results
waitKey(10);
}
//cvReleaseMat(&mod_ref);
destroyWindow("reference");
}
/// ImplementMouse Call Back
void find_mouseHandler(int event, int x, int y, int flags, void* param)
{
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/* left button clicked. ROI selection begins*/
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/* mouse dragged. ROI being selected*/
Mat img3 = mod_ref.clone();
point2 = Point(x, y);
rectangle(img3, point1, point2, CV_RGB(255, 0, 0), 1, 8, 0);
imshow("reference", img3);
//
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
Mat img4=mod_ref.clone();
point2 = Point(x, y);
rect1 = Rect(point1.x,point1.y,x-point1.x,y-point1.y);
drag = 0;
roiImg1 = mod_ref(rect1); //SUB_IMAGE MATRIX
imshow("reference", img4);
}
if (event == CV_EVENT_LBUTTONUP)
{
/* ROI selected */
select_flag1 = 1;
drag = 0;
}
}
The build and debugging process completes successfully. But when I click the Match button in the dialog, I get the error:
Unhandled exception at 0x74bf812f in Match.exe: Microsoft C++ exception: cv::Exception at memory location 0x001ae150..
So my idea is to get all the matches in the sub-image when compared with the TEMPLATE IMAGE and show the final result (matches with bounding boxes) in the ORIGINAL IMAGE itself.
Can anyone help me in this regard? Help would be greatly appreciated!
My code below is a modification of the original tutorial provided by OpenCV.
It loads an image from the command-line and displays it on the screen so the user can draw a rectangle somewhere to select the sub-image to be the template. After that operation is done, the sub-image will be inside a green rectangle.
Press any key to let the program perform the template matching. A new window titled "Template Match:" appears, displaying the original image plus a blue rectangle that shows the matched area.
#include <cv.h>
#include <highgui.h>
#include <iostream>
const char* ref_window = "Draw rectangle to select template";
std::vector<cv::Point> rect_points;
void mouse_callback(int event, int x, int y, int flags, void* param)
{
if (!param)
return;
cv::Mat* ref_img = (cv::Mat*) param;
// Upon LMB click, store the X,Y coordinates to define a rectangle.
// Later this info is used to set a ROI in the reference image.
switch (event)
{
case CV_EVENT_LBUTTONDOWN:
{
if (rect_points.size() == 0)
rect_points.push_back(cv::Point(x, y));
}
break;
case CV_EVENT_LBUTTONUP:
{
if (rect_points.size() == 1)
rect_points.push_back(cv::Point(x, y));
}
break;
default:
break;
}
if (rect_points.size() == 2)
{
cv::rectangle(*ref_img,
rect_points[0],
rect_points[1],
cv::Scalar(0, 255, 0),
2);
cv::imshow(ref_window, *ref_img);
}
}
int main(int argc, char* argv[])
{
if (argc < 2)
{
std::cout << "Usage: " << argv[0] << " <image>" << std::endl;
return -1;
}
cv::Mat source = cv::imread(argv[1]); // original image
if (source.empty())
{
std::cout << "!!! Failed to load source image." << std::endl;
return -1;
}
// For testing purposes, our template image will be a copy of the original.
// Later we will present it in a window to the user, and he will select a region
// as a template, and then we'll try to match that to the original image.
cv::Mat reference = source.clone();
cv::namedWindow(ref_window, CV_WINDOW_AUTOSIZE);
cv::setMouseCallback(ref_window, mouse_callback, (void*)&reference);
cv::imshow(ref_window, reference);
cv::waitKey(0);
if (rect_points.size() != 2)
{
std::cout << "!!! Oops! You forgot to draw a rectangle." << std::endl;
return -1;
}
// Create a cv::Rect with the dimensions of the selected area in the image
cv::Rect template_roi = cv::boundingRect(rect_points);
// Create THE TEMPLATE image using the ROI from the rectangle
cv::Mat template_img = cv::Mat(source, template_roi);
// Create the result matrix
int result_cols = source.cols - template_img.cols + 1;
int result_rows = source.rows - template_img.rows + 1;
cv::Mat result;
// Do the matching and normalize
cv::matchTemplate(source, template_img, result, CV_TM_CCORR_NORMED);
cv::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());
/// Localizing the best match with minMaxLoc
double min_val = 0, max_val = 0;
cv::Point min_loc, max_loc, match_loc;
int match_method = CV_TM_CCORR_NORMED;
cv::minMaxLoc(result, &min_val, &max_val, &min_loc, &max_loc, cv::Mat());
// When using CV_TM_CCORR_NORMED, max_loc holds the point with maximum
// correlation.
match_loc = max_loc;
// Draw a rectangle in the area that was matched
cv::rectangle(source,
match_loc,
cv::Point(match_loc.x + template_img.cols , match_loc.y + template_img.rows),
cv::Scalar(255, 0, 0), 2, 8, 0 );
imshow("Template Match:", source);
cv::waitKey(0);
return 0;
}
I want to draw/paint on a webcam screen using OpenCV. Since I'm reading from a cam, the frames are constantly changing, so I'm trying to figure out a way to keep or save the drawing on the current frame and use it for the next frame. The code below allows you to draw on the screen but when it gets the next frame, the drawing is gone and it starts over.
Could someone please help me ... Thanks.
CvCapture *input;
input = cvCaptureFromCAM( 0 );
cvSetMouseCallback("Demo",&on_mouse, 0);
for(;;)
{
frame = cvQueryFrame(input);
if(!image)
{
image = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
screenBuffer = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
}
cvCopy(frame, image, 0);
if(drawing) //drawing is a global variable
{
cvCircle(image, cvPoint(last_x,last_y), 10,CV_RGB(red,green,blue), -1, CV_AA, 0);
cvCopy(image, screenBuffer, 0);
}
cvShowImage( "Demo", screenBuffer );
}
void on_mouse( int event, int x, int y, int flags, void* param )
{
last_x = x;
last_y = y;
if(event==CV_EVENT_LBUTTONDOWN)
{
drawing = 1;
}
}
Draw into a separate image and then cvAdd() that to the video image immediately before displaying it.
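A minimal sketch of that idea, using the same C API as the question (drawingLayer is a hypothetical extra image that persists across frames):
// Created once, same size/format as the camera frames, initially black.
IplImage* drawingLayer = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
cvZero(drawingLayer);

// In the mouse callback, draw into drawingLayer instead of the frame, e.g.:
// cvCircle(drawingLayer, cvPoint(last_x, last_y), 10, CV_RGB(red, green, blue), -1, CV_AA, 0);

// In the capture loop, just before displaying:
cvAdd(frame, drawingLayer, screenBuffer, NULL); // the drawing persists across frames
cvShowImage("Demo", screenBuffer);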
I will not go into all the details of why your approach is problematic, but keep in mind that creating two extra frames for drawing is a bit too much.
It's important that you realize that all this extra work is being done on the same thread used to capture new frames. What does this mean exactly? It means that the extra code you add inside the loop will slow down the process of capturing and displaying new frames. In other words, you are sabotaging yourself by lowering the framerate of your application. If you don't care, that's OK. If you do, my tip is to stack the captured frames into a buffer and have another thread read, process and display them.
OK, so you REALLY want to draw over the window that's displaying the captured frames. Well, the obvious thing you can't do (and you discovered this yourself) is draw on the captured frame itself, because the frame is replaced with new data on every loop iteration. So what do you do? You create a second frame to do the drawing on. Let's call it the drawing_frame.
The only things on the drawing_frame are the circles that appear where the mouse moves over the window while drawing is enabled by a left click (a second click toggles it ON/OFF).
After a circle is drawn, the drawing_frame is overlaid on top of the frame captured by the camera. This process is a little expensive on the CPU, and since we are doing it in the main thread of the application, it will also decrease the framerate.
I strongly suggest that everyone interested in adding/merging/overlaying transparent frames with OpenCV take a look at Transparent image overlays in OpenCV.
By the way, I'm using cvCaptureFromCAM(-1) because I'm on Linux. You should probably change that to whatever works for you; according to your post that would be cvCaptureFromCAM(0).
#include <stdio.h>
#include <unistd.h>   // for usleep()
#include <cv.h>
#include <highgui.h>
int drawing = 0;
int last_x = 0;
int last_y = 0;
void on_mouse(int event, int x, int y, int flags, void* param)
{
last_x = x;
last_y = y;
if (event == CV_EVENT_LBUTTONDOWN)
{
// switches between On and Off
if (drawing)
drawing = 0;
else
drawing = 1;
}
}
int main()
{
CvCapture* capture = NULL;
if ((capture = cvCaptureFromCAM(-1)) == NULL)
{
fprintf(stderr, "ERROR: capture is NULL \n");
return -1;
}
cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);
cvQueryFrame(capture); // Sometimes needed to get correct data
cvSetMouseCallback("mywindow",&on_mouse, 0);
IplImage* frame = NULL;
IplImage* drawing_frame = NULL;
while (1)
{
if ((frame = cvQueryFrame(capture)) == NULL)
{
fprintf( stderr, "ERROR: cvQueryFrame failed\n");
break;
}
if (frame == NULL)
{
fprintf( stderr, "WARNING: cvQueryFrame returned NULL, sleeping..\n");
usleep(100000);
continue;
}
if (!drawing_frame) // This frame is created only once
{
drawing_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
cvZero(drawing_frame);
}
if (drawing)
{
cvCircle(drawing_frame, cvPoint(last_x,last_y), 10,CV_RGB(0, 255, 0), -1, CV_AA, 0);
// For overlaying (copying transparent images) in OpenCV
// http://www.aishack.in/2010/07/transparent-image-overlays-in-opencv/
for (int x = 0; x < frame->width; x++)
{
for (int y = 0; y < frame->height; y++)
{
CvScalar source = cvGet2D(frame, y, x);
CvScalar over = cvGet2D(drawing_frame, y, x);
CvScalar merged;
CvScalar S = { 1,1,1,1 };
CvScalar D = { 1,1,1,1 };
for(int i = 0; i < 4; i++)
merged.val[i] = (S.val[i] * source.val[i] + D.val[i] * over.val[i]);
cvSet2D(frame, y, x, merged);
}
}
}
cvShowImage("mywindow", frame);
int key = cvWaitKey(10);
if (key == 113) // q was pressed on the keyboard
break;
}
// Note: the frame returned by cvQueryFrame() is owned by the capture and must not be released here.
cvReleaseImage(&drawing_frame);
cvReleaseCapture(&capture);
cvDestroyWindow("mywindow");
return 0;
}
You will usually have problems when adding images (they will eventually saturate), so I guess that's why you start over. I see you have color images... if you use more powerful tools like OpenGL for your drawing, you could use an overlay for your drawings. Otherwise check this out:
http://aishack.in/tutorials/transparent-image-overlays-in-opencv/
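If the saturation mentioned above becomes an issue, one common alternative is to blend instead of add, e.g. with cv::addWeighted (a sketch assuming two same-sized BGR Mat images named frame and overlay, which are my names):
cv::Mat blended;
// The weights are arbitrary; as long as they sum to at most 1.0 the result
// cannot exceed 255 per channel, so the blend does not saturate the frame.
cv::addWeighted(frame, 0.7, overlay, 0.3, 0.0, blended);
cv::imshow("Demo", blended);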