OpenCV unhandled exception when using cvCreateImage() - C++

I have the code below. It is an OpenCV real-time edge detection program, but I get an error on this line: pProcessedFrame = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);
"Unhandled exception at 0x00007FF6CAF1284C in opencv2.exe: 0xC0000005: Access violation reading location 0x000000000000002C."
Can anybody resolve this issue?
My configuration is Visual Studio 2013 and OpenCV 2.4.10.
#include <iostream>
#include "opencv/cv.h"
#include "opencv/highgui.h"

using namespace std;

// Define the IplImage pointers we're going to use as globals
IplImage* pFrame;
IplImage* pProcessedFrame;
IplImage* tempFrame;

// Slider for the low threshold value of our edge detection
int maxLowThreshold = 1024;
int lowSliderPosition = 150;

// Slider for the high threshold value of our edge detection
int maxHighThreshold = 1024;
int highSliderPosition = 250;

// Function to find the edges of a given IplImage object
IplImage* findEdges(IplImage* sourceFrame, double theLowThreshold, double theHighThreshold, double theAperture)
{
    // Convert source frame to greyscale version (tempFrame has already been initialised to use greyscale colour settings)
    cvCvtColor(sourceFrame, tempFrame, CV_RGB2GRAY);

    // Perform Canny edge finding on tempFrame, and push the result back into itself!
    cvCanny(tempFrame, tempFrame, theLowThreshold, theHighThreshold, theAperture);

    // Pass back our now processed frame!
    return tempFrame;
}

// Callback function to adjust the low threshold on slider movement
void onLowThresholdSlide(int theSliderValue)
{
    lowSliderPosition = theSliderValue;
}

// Callback function to adjust the high threshold on slider movement
void onHighThresholdSlide(int theSliderValue)
{
    highSliderPosition = theSliderValue;
}

int main(int argc, char** argv)
{
    // Create two windows
    cvNamedWindow("WebCam", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Processed WebCam", CV_WINDOW_AUTOSIZE);

    // Create the low threshold slider
    // Format: Slider name, window name, reference to variable for slider, max value of slider, callback function
    cvCreateTrackbar("Low Threshold", "Processed WebCam", &lowSliderPosition, maxLowThreshold, onLowThresholdSlide);

    // Create the high threshold slider
    cvCreateTrackbar("High Threshold", "Processed WebCam", &highSliderPosition, maxHighThreshold, onHighThresholdSlide);

    // Create CvCapture object to grab data from the webcam
    CvCapture* pCapture;

    // Start capturing data from the webcam
    pCapture = cvCaptureFromCAM(CV_CAP_V4L2);

    // Display image properties
    cout << "Width of frame: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH) << endl;   // Width of the frames in the video stream
    cout << "Height of frame: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT) << endl; // Height of the frames in the video stream
    cout << "Image brightness: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_BRIGHTNESS) << endl;  // Brightness of the image (only for cameras)
    cout << "Image contrast: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_CONTRAST) << endl;      // Contrast of the image (only for cameras)
    cout << "Image saturation: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_SATURATION) << endl;  // Saturation of the image (only for cameras)
    cout << "Image hue: " << cvGetCaptureProperty(pCapture, CV_CAP_PROP_HUE) << endl;                // Hue of the image (only for cameras)

    // Create an image from the frame capture
    pFrame = cvQueryFrame(pCapture);

    // Create a greyscale image which is the size of our captured image
    pProcessedFrame = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);

    // Create a frame to use as our temporary copy of the current frame but in grayscale mode
    tempFrame = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);

    // Loop control vars
    char keypress;
    bool quit = false;

    while (quit == false)
    {
        // Make an image from the raw capture data
        // Note: cvQueryFrame is a combination of cvGrabFrame and cvRetrieveFrame
        pFrame = cvQueryFrame(pCapture);

        // Draw the original frame in our window
        cvShowImage("WebCam", pFrame);

        // Process the frame to find the edges
        pProcessedFrame = findEdges(pFrame, lowSliderPosition, highSliderPosition, 3);

        // Show the processed output in our other window
        cvShowImage("Processed WebCam", pProcessedFrame);

        // Wait 20 milliseconds
        keypress = cvWaitKey(20);

        // Set the flag to quit if escape was pressed
        if (keypress == 27)
        {
            quit = true;
        }
    } // End of while loop

    // Release our stream capture object to free up any resources it has been using and release any file/device handles
    cvReleaseCapture(&pCapture);

    // Release our images
    cvReleaseImage(&pFrame);
    cvReleaseImage(&pProcessedFrame);

    // This causes errors if you don't set it to NULL before releasing it. Maybe because we assign
    // it to pProcessedFrame as the end result of the findEdges function, and we've already released pProcessedFrame!!
    tempFrame = NULL;
    cvReleaseImage(&tempFrame);

    // Destroy all windows
    cvDestroyAllWindows();
}

Thank you all. I found the solution: my camera was not capturing an image, so cvQueryFrame returned NULL and the crash came from dereferencing the null pFrame pointer. I changed to another camera and now the code runs fine.
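For anyone who hits the same crash: cvQueryFrame() returns NULL when the camera delivers no frame, and the code above dereferences that result unchecked (the fault address 0x2C is just a member offset into a NULL IplImage). A minimal defensive sketch, using the same legacy C API as the question, might look like this:

#include "opencv/cv.h"
#include "opencv/highgui.h"
#include <cstdio>

int main()
{
    // CV_CAP_ANY (0) picks the first available camera backend
    CvCapture* pCapture = cvCaptureFromCAM(CV_CAP_ANY);
    if (!pCapture)
    {
        fprintf(stderr, "Failed to open the camera.\n");
        return -1;
    }

    IplImage* pFrame = cvQueryFrame(pCapture);
    if (!pFrame) // NULL means the camera opened but is not capturing; bail out instead of crashing
    {
        fprintf(stderr, "Camera returned no frame.\n");
        cvReleaseCapture(&pCapture);
        return -1;
    }

    // Only now is it safe to touch pFrame->width and pFrame->height
    printf("Frame size: %d x %d\n", pFrame->width, pFrame->height);

    cvReleaseCapture(&pCapture);
    return 0;
}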

Related

Overlaying a video sequence onto another video in OpenCV

How can I add a small video sequence to another video using OpenCV?
To elaborate: let's say I have an interactive video playing, where the user viewing the video gestures something and a short sequence plays at the bottom or in a corner of the existing video.
For each frame, you need to copy an image with the content you want into the video frame. The steps are:
Define the size of the overlay frame
Define where to show the overlay frame
For each frame:
Fill the overlay frame with some content
Copy the overlay frame into the defined position in the original frame.
This small snippet shows a random-noise overlay window on the bottom right of the camera feed:
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
    // Video capture frame
    Mat3b frame;

    // Overlay frame
    Mat3b overlayFrame(100, 200);

    // Init VideoCapture
    VideoCapture cap(0);

    // check if we succeeded
    if (!cap.isOpened()) {
        cerr << "ERROR! Unable to open camera\n";
        return -1;
    }

    // Get video size
    int w = cap.get(CAP_PROP_FRAME_WIDTH);
    int h = cap.get(CAP_PROP_FRAME_HEIGHT);

    // Define where to show the overlay frame
    Rect roi(w - overlayFrame.cols, h - overlayFrame.rows, overlayFrame.cols, overlayFrame.rows);

    //--- GRAB AND WRITE LOOP
    cout << "Start grabbing" << endl
         << "Press any key to terminate" << endl;
    for (;;)
    {
        // wait for a new frame from camera and store it into 'frame'
        cap.read(frame);

        // check if we succeeded (before touching the frame's pixels)
        if (frame.empty()) {
            cerr << "ERROR! blank frame grabbed\n";
            break;
        }

        // Fill overlayFrame with something meaningful (here random noise)
        randu(overlayFrame, Scalar(0, 0, 0), Scalar(256, 256, 256));

        // Overlay
        overlayFrame.copyTo(frame(roi));

        // show live and wait for a key with timeout long enough to show images
        imshow("Live", frame);
        if (waitKey(5) >= 0)
            break;
    }

    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}

How to read more than one image using highgui (OpenCV)

I am developing a program to display a batch of images and record the position clicked in each image.
I would like to load a set of images (named in incremental order)
and open each one after the previous one is closed.
What I want the program to do, step by step:
A folder with a batch of images named in order (including JPEG, TIFF and PNG formats),
e.g. IMG_00000001.JPG to IMG_00000003.JPG...
When I run my program, it displays the first image (IMG_00000001.JPG);
then I click the image and the cmd shows the position that I clicked.
After closing the window, the next image is displayed (IMG_00000002.JPG).
Continue until the last image in the folder.
Thanks a lot! I have been searching the internet for the past few weeks; there are examples, but I get errors every single time while running them, and I was so frustrated and desperate for an answer!
Here is my code
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
if ( event == EVENT_LBUTTONDOWN )
{
cout << "Clicked position is: (" << x << ", " << y << ")" << endl;
}
}
int main(int argc, char** argv)
{
// Read image from file
Mat img = imread("cube_0.JPG");
Mat img1 = imread("cube_1.JPG");
//if fail to read the image
if ( img.empty() )
{
cout << "Error loading the image" << endl;
return -1;
}
//Create a window
namedWindow("My Window", 1);
//set the callback function for any mouse event
setMouseCallback("My Window", CallBackFunc, NULL);
//show the image
imshow("My Window", img);
// Wait until user press some key
waitKey(0);
return 0;
}

Why does the function cv::subtract() return the error "size of input arguments do not match"?

I want to subtract two successive images taken from the webcam.
As you can see, I am doing this inside a while loop. In the last line of the while loop I set frame2 = frame so I can subtract them in the next iteration. But the function cv::subtract returns the above error in the terminal.
What am I doing wrong?
#include <iostream>
#include "core.hpp"
#include "highgui.hpp"
#include "imgcodecs.hpp"
#include "cv.h"

using namespace std;
using namespace cv;

int main(int argc, char* argv[])
{
    VideoCapture cap(0); ///open the video camera no. 0 (laptop's default camera)

    ///make a writer object:
    cv::VideoWriter writer;

    if (!cap.isOpened()) /// if not success, exit program
    {
        cout << "ERROR INITIALIZING VIDEO CAPTURE" << endl;
        return -1;
    }

    char* windowName = "Webcam Feed(diff image)";
    namedWindow(windowName, WINDOW_NORMAL); ///create a window to display our webcam feed

    ///we need to define 4 arguments for initializing the writer object:
    //filename string:
    string filename = "C:\\Users\\PEYMAN\\Desktop\\ForegroundExtraction\\openCV_tutorial\\2.writing from video to file\\Payman.avi";
    //fourcc integer:
    int fcc = CV_FOURCC('D','I','V','3');
    //frame per sec integer:
    int fps = 10;
    //frame size:
    cv::Size framesize(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    ///initialize the writer object:
    writer = VideoWriter(filename, fcc, fps, framesize);
    if (!writer.isOpened()) {
        cout << "Error opening the file" << endl;
        getchar();
        return -1;
    }

    int counter = 0;
    while (1) {
        Mat frame, frame2, diff_frame;

        ///read a new frame from camera feed and save it to the variable frame:
        bool bSuccess = cap.read(frame);
        if (!bSuccess) ///test if frame successfully read
        {
            cout << "ERROR READING FRAME FROM CAMERA FEED" << endl;
            break;
        }

        /// now the last read frame is stored in the variable frame and here it is written to the file:
        writer.write(frame);

        if (counter > 0) {
            cv::subtract(frame2, frame, diff_frame);
            imshow(windowName, diff_frame); ///show the frame in "MyVideo" window
        }

        ///wait for 10ms for a key to be pressed
        switch (waitKey(1)) {
        ///the writing from webcam feed will go on until the user presses "esc":
        case 27:
            ///'esc' has been pressed (ASCII value for 'esc' is 27)
            ///exit program.
            return 0;
        }

        frame2 = frame;
        counter++;
    }
    return 0;
}
Every time you execute the while loop, frame2 is created and default-initialized. When you call
cv::subtract(frame2, frame, diff_frame);
you are trying to subtract a default-constructed Mat from a Mat that holds an image. These two Mats will not be the same size, so you get the error.
You need to move the declarations of frame and frame2 outside of the while loop if you want them to retain their values across iterations. You also need to initialize frame2 to the same size, or capture a second image into it, so you can subtract the first time through.
You need to declare frame2 outside the scope of the while loop, like you did with counter. Right now, you get a fresh, empty frame2 with each iteration of the loop.
You might as well move all the Mats outside the while loop so that memory doesn't have to be de-allocated at the end of each iteration and re-allocated the next, although this isn't an error and you likely won't see the performance penalty in this case.
Also, #rhcpfan is right in that you need to be careful about shallow vs. deep copies: frame2 = frame only copies the Mat header, so both variables end up pointing at the same pixel buffer. Use frame.copyTo(frame2) or cv::swap(frame, frame2).
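Putting those fixes together, a minimal sketch of the corrected loop (the VideoWriter is omitted for brevity; only the capture-and-subtract logic from the question is kept):

#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    namedWindow("Webcam Feed(diff image)", WINDOW_NORMAL);

    Mat frame, frame2, diff_frame; // declared outside the loop so frame2 persists
    int counter = 0;
    while (true) {
        if (!cap.read(frame)) break;
        if (counter > 0) {
            subtract(frame2, frame, diff_frame); // frame2 holds the previous frame
            imshow("Webcam Feed(diff image)", diff_frame);
        }
        if (waitKey(1) == 27) break;  // 'esc' quits
        frame.copyTo(frame2);         // deep copy; 'frame2 = frame' would alias the same buffer
        counter++;
    }
    return 0;
}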

Writing video with OpenCV - no key frame set for track 0

I'm trying to modify and write some video using OpenCV 2.4.6.1 with the following code:

cv::VideoCapture capture( video_filename );

// Check if the capture object successfully initialized
if ( !capture.isOpened() )
{
    printf( "Failed to load video, exiting.\n" );
    return -1;
}

cv::Mat frame, cropped_img;
cv::Rect ROI( OFFSET_X, OFFSET_Y, WIDTH, HEIGHT );
int fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
double fps = 30;
cv::Size frame_size( RADIUS, (int) 2*PI*RADIUS );
video_filename = "test.avi";
cv::VideoWriter writer( video_filename, fourcc, fps, frame_size );

if ( !writer.isOpened() && save )
{
    printf("Failed to initialize video writer, unable to save video!\n");
}

while(true)
{
    if ( !capture.read(frame) )
    {
        printf("Failed to read next frame, exiting.\n");
        break;
    }

    // select the region of interest in the frame
    cropped_img = frame( ROI );

    // display the image and wait
    imshow("cropped", cropped_img);

    // if we are saving video, write the unwrapped image
    if (save)
    {
        writer.write( cropped_img );
    }

    char key = cv::waitKey(30);
}
When I try to run the output video 'test.avi' with VLC I get the following error: avidemux error: no key frame set for track 0. I'm using Ubuntu 13.04, and I've tried using videos encoded with MPEG-4 and libx264. I think the fix should be straightforward but can't find any guidance. The actual code is available at https://github.com/benselby/robot_nav/tree/master/video_unwrap. Thanks in advance!
[PYTHON] Apart from the resolution mismatch, there can also be a frames-per-second mismatch. In my case, the resolution was set correctly, but the problem was with the fps. Checking the frames per second at which the VideoCapture object was reading showed 30.0, but if I set the fps of the VideoWriter object to 30.0, the same error was thrown in VLC. Instead of setting it to 30.0, you can avoid the error by setting it to 30.
P.S. You can check the resolution and the fps at which you are recording by using the cap.get(3) for width, cap.get(4) for height and cap.get(5) for fps inside the capturing while/for loop.
The full code is as follows:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Define Codec and create a VideoWriter Object
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')

# 30.0 in the below line doesn't work while 30 does work.
out = cv2.VideoWriter('output.mp4', fourcc, 30, (640, 480))

while True:
    ret, frame = cap.read()
    colored_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    print('Width = ', cap.get(3), ' Height = ', cap.get(4), ' fps = ', cap.get(5))
    out.write(colored_frame)
    cv2.imshow('frame', colored_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
The full documentation (C++) for all the properties that can be checked is available here: propId OpenCV Documentation
This appears to be an issue of size mismatch between the frames written and the VideoWriter object opened. I was running into this issue when trying to write a series of resized images from my webcam into a video output. When I removed the resizing step and just grabbed the size from an initial test frame, everything worked perfectly.
To fix my resizing code, I essentially ran a single test frame through my processing and then pulled its size when creating the VideoWriter object:
#include <cassert>
#include <iostream>
#include <time.h>
#include "opencv2/opencv.hpp"

using namespace cv;

int main()
{
    VideoCapture cap(0);
    assert(cap.isOpened());

    Mat testFrame;
    cap >> testFrame;

    Mat testDown;
    resize(testFrame, testDown, Size(), 0.5, 0.5, INTER_NEAREST);
    bool ret = imwrite("test.png", testDown);
    assert(ret);

    Size outSize = Size(testDown.cols, testDown.rows);
    VideoWriter outVid("test.avi", CV_FOURCC('M','P','4','2'), 1, outSize, true);
    assert(outVid.isOpened());

    for (int i = 0; i < 10; ++i) {
        Mat frame;
        cap >> frame;
        std::cout << "Grabbed frame" << std::endl;

        Mat down;
        resize(frame, down, Size(), 0.5, 0.5, INTER_NEAREST);
        //bool ret = imwrite("test.png", down);
        //assert(ret);
        outVid << down;
        std::cout << "Wrote frame" << std::endl;

        struct timespec tim, tim2;
        tim.tv_sec = 1;
        tim.tv_nsec = 0;
        nanosleep(&tim, &tim2);
    }
}
My guess is that your problem is in the size calculation:
cv::Size frame_size( RADIUS, (int) 2*PI*RADIUS );
I'm not sure where your frames are coming from (i.e. how the capture is set up), but your size likely gets messed up in rounding or somewhere else. I would suggest doing something similar to my solution above.
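Applied to the question's code, that could look like the following minimal sketch; the camera input, ROI values, codec, and fps here are placeholder assumptions, the point is only that the writer's frame size comes from an actual cropped frame:

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture capture(0);      // stands in for the question's video file
    if (!capture.isOpened()) return -1;

    cv::Rect ROI(0, 0, 320, 240);     // assumed offsets/size for illustration

    // Grab one frame, crop it, and let the crop define the writer's frame size
    cv::Mat first_frame;
    if (!capture.read(first_frame)) return -1;
    cv::Mat first_crop = first_frame(ROI);

    cv::VideoWriter writer("test.avi", CV_FOURCC('M','P','4','2'), 30,
                           first_crop.size());
    if (!writer.isOpened()) return -1;

    writer.write(first_crop);         // every written frame must match this size
    return 0;
}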

Background subtraction in OpenCV (C++)

I want to implement a background averaging method. I have 50 frames of images taken in one second, and some of the frames contain lightning, which I want to extract as the foreground. The frames are taken with a stationary camera and are grayscale. What I want to do is:
Get the background model
Then, compare each frame to the background model to determine whether there is lightning in that frame or not.
I read some documents on how this can possibly be done using cvAcc(), but I am having difficulty understanding it. I would appreciate a piece of code to guide me, and links to documents that can help me understand how to implement this.
Thanking you in advance.
We had the same task in one of our projects.
To get the background model, we simply create a class BackgroundModel, capture the first (let's say) 50 frames, and calculate the average frame to avoid pixel errors in the background model.
For example, if you get an 8-bit greyscale image (CV_8UC1) from your camera, you initialize your model with CV_16UC1 to avoid clipping.
cv::Mat model = cv::Mat(HEIGHT, WIDTH, CV_16UC1, cv::Scalar(0));
Now, while waiting for the first frames to build your model, just add every frame to the model and count the number of received frames.
void addFrame(cv::Mat frame) {
    cv::Mat convertedFrame;
    frame.convertTo(convertedFrame, CV_16UC1);
    cv::add(convertedFrame, model, model);
    if (++learnedFrames >= FRAMES_TO_LEARN) { // FRAMES_TO_LEARN = 50
        createMask();
    }
}
The createMask() function calculates the average frame which we use for the model.
void createMask() {
    cv::convertScaleAbs(model, mask, 1.0 / learnedFrames);
    mask.convertTo(mask, CV_8UC1);
}
Now, you just send all the frames through the BackgroundModel class to a function subtract(). If the result is an empty cv::Mat, the mask is still being calculated. Otherwise, you get a subtracted frame.
cv::Mat subtract(cv::Mat frame) {
    cv::Mat result;
    if (learnedFrames >= FRAMES_TO_LEARN) { // FRAMES_TO_LEARN = 50
        cv::subtract(frame, mask, result);
    }
    else {
        addFrame(frame); // addFrame() does the counting, so don't increment here too
    }
    return result;
}
Last but not least, you can use
Scalar sum(const Mat& mtx)
to calculate the pixel sum and decide whether it's a frame with lightning on it.
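For reference, here are the fragments above assembled into one minimal sketch of the BackgroundModel class; the member layout and the FRAMES_TO_LEARN constant are assumptions reconstructed from the snippets:

#include <opencv2/opencv.hpp>

// Minimal sketch of the BackgroundModel class described above.
class BackgroundModel {
public:
    BackgroundModel(int width, int height)
        : model(height, width, CV_16UC1, cv::Scalar(0)), learnedFrames(0) {}

    cv::Mat subtract(cv::Mat frame) {
        cv::Mat result;
        if (learnedFrames >= FRAMES_TO_LEARN)
            cv::subtract(frame, mask, result);
        else
            addFrame(frame);
        return result;            // empty while the model is still learning
    }

private:
    static const int FRAMES_TO_LEARN = 50;

    void addFrame(cv::Mat frame) {
        cv::Mat convertedFrame;
        frame.convertTo(convertedFrame, CV_16UC1); // 16-bit sum avoids 8-bit clipping
        cv::add(convertedFrame, model, model);
        if (++learnedFrames >= FRAMES_TO_LEARN)
            createMask();
    }

    void createMask() {
        cv::convertScaleAbs(model, mask, 1.0 / learnedFrames); // average frame
        mask.convertTo(mask, CV_8UC1);
    }

    cv::Mat model;
    cv::Mat mask;
    int learnedFrames;
};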
The MyPolygon function masks the ROI; after that, the code calculates the absolute pixel difference and counts the number of white pixels.
srcImage : Reference image.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <random>

using namespace std;
using namespace cv;

cv::Mat MyPolygon( Mat img )
{
    int lineType = 8;

    // [(892, 145), (965, 150), (933, 199), (935, 238), (970, 248), (1219, 715), (836, 709), (864, 204)]
    /** Create some points */
    Point rook_points[1][8];
    rook_points[0][0] = Point(892, 145);
    rook_points[0][1] = Point(965, 150);
    rook_points[0][2] = Point(933, 199);
    rook_points[0][3] = Point(935, 238);
    rook_points[0][4] = Point(970, 248);
    rook_points[0][5] = Point(1219, 715);
    rook_points[0][6] = Point(836, 709);
    rook_points[0][7] = Point(864, 204);

    const Point* ppt[1] = { rook_points[0] };
    int npt[] = { 8 };

    cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
    fillPoly( mask,
        ppt,
        npt,
        1,
        Scalar( 255, 0, 0 ),
        lineType
    );
    cv::bitwise_and(mask, img, img);
    return img;
}

int main() {
    /* code */
    cv::Mat srcImage = cv::imread("/home/gourav/Pictures/L1 Image.png", cv::IMREAD_GRAYSCALE);
    if (srcImage.empty()){ // check before resizing; resize on an empty Mat would throw
        std::cerr << "Ref Image not found\n";
        return 1;
    }
    resize(srcImage, srcImage, Size(1280, 720));
    // cout << " Width : " << srcImage.cols << endl;
    // cout << " Height: " << srcImage.rows << endl;

    cv::Mat img = MyPolygon(srcImage);

    Mat grayBlur;
    GaussianBlur(srcImage, grayBlur, Size(5, 5), 0);

    VideoCapture cap("/home/gourav/GenralCode/LD3LF1_stream1.mp4");

    Mat frames;
    if (!cap.isOpened()){
        std::cout << "Error opening video stream or file" << endl;
        return -1;
    }

    while (1)
    {
        cap >> frames;
        if (frames.empty())
            break;

        // Convert current frame to grayscale
        cvtColor(frames, frames, COLOR_BGR2GRAY);
        // cout << "Frame Width : " << frames.cols << endl;
        // cout << "Frame Height: " << frames.rows << endl;

        Mat imageBlure;
        GaussianBlur(frames, imageBlure, Size(5, 5), 0);

        cv::Mat frame = MyPolygon(imageBlure);

        Mat dframe;
        absdiff(frame, grayBlur, dframe);
        // imshow("grayBlur", grayBlur);

        // Threshold to binarize
        threshold(dframe, dframe, 30, 255, THRESH_BINARY);

        //White Pixels
        int number = cv::countNonZero(dframe);
        cout << "Count: " << number << "\n";
        if (number > 3000)
        {
            cout << "generate Alert ";
        }

        // Display Image
        imshow("dframe", dframe);
        char c = (char)waitKey(25);
        if (c == 27)
            break;
    }
    cap.release();
    return 0;
}