I'm trying to write a VLC video filter that uses OpenCV to normalize video.
I'm having trouble converting a VLC picture_t to an IplImage and back to a picture_t. The IplImage is needed to obtain a Mat and proceed with OpenCV tools. I couldn't find a solution elsewhere, so I'm asking for help here.
The main function for transforming images is:
/**
 * FilterNormalizeOpenCV: bridge a VLC picture_t into OpenCV.
 *
 * Converts the incoming picture to BGRA via image_Convert(), wraps each
 * plane of the converted picture in an IplImage header (no pixel copy),
 * and exposes plane 0 as a cv::Mat for OpenCV processing.
 *
 * \param p_filter  the VLC filter instance (holds filter_sys_t state)
 * \param p_pic     input picture; may be NULL
 * \return p_pic on success, NULL on error
 */
static picture_t *FilterNormalizeOpenCV( filter_t *p_filter, picture_t *p_pic )
{
    Mat frame1;
    IplImage* p_img1;
    video_format_t fmt_out;
    CvSize sz;
    filter_sys_t *p_sys = p_filter->p_sys;

    /* Validate the input BEFORE touching it.  (The previous version read
     * p_pic->i_planes and p_pic->format ahead of this NULL check, which
     * would dereference a NULL pointer.) */
    if( !p_pic )
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    if( p_pic->i_planes < 1 )
    {
        msg_Err( p_filter, "no image planes" );
        return NULL;
    }

    /* Ask VLC's image converter for a BGRA copy scaled by f_scale;
     * OpenCV works with packed BGR(A) data. */
    fmt_out = p_pic->format;
    fmt_out.i_width = p_pic->format.i_width * p_sys->f_scale;
    fmt_out.i_height = p_pic->format.i_height * p_sys->f_scale;
    fmt_out.i_chroma = VLC_CODEC_BGRA;
    p_sys->p_proc_image = image_Convert( p_sys->p_image, p_pic,
                                         &(p_pic->format), &fmt_out );
    if( !p_sys->p_proc_image )
    {
        /* image_Convert() can fail (e.g. unsupported chroma); the previous
         * version dereferenced the result unchecked. */
        msg_Err( p_filter, "image conversion failed" );
        return NULL;
    }
    p_sys->p_to_be_freed = p_sys->p_proc_image;

    /* Wrap every plane of the converted picture in an IplImage header.
     * cvSetData() only points the header at VLC's pixel buffer; nothing is
     * copied, so these headers stay valid only while p_proc_image lives. */
    int i_planes = p_sys->p_proc_image->i_planes;
    for( int i = 0; i < i_planes; i++ )
    {
        sz = cvSize( abs( p_sys->p_proc_image->p[i].i_visible_pitch /
                          p_sys->p_proc_image->p[i].i_pixel_pitch ),
                     abs( p_sys->p_proc_image->p[i].i_visible_lines ) );
        p_sys->p_cv_image[i] = cvCreateImageHeader( sz, IPL_DEPTH_8U,
                                   p_sys->p_proc_image->p[i].i_pixel_pitch );
        cvSetData( p_sys->p_cv_image[i],
                   (char*)(p_sys->p_proc_image->p[i].p_pixels),
                   p_sys->p_proc_image->p[i].i_pitch );
    }

    p_img1 = p_sys->p_cv_image[0];
    frame1 = cvarrToMat( p_img1 );
    // Here we'll process the Mat frame with OpenCV
    // and convert it back to an IplImage
    //p_img1 = new IplImage(frame1);

    /* NOTE(review): this only stores the BGRA buffer pointer into the output
     * picture's first plane; it does not convert the pixels back to the
     * picture's original chroma — a reverse image_Convert() is needed here,
     * which is the likely cause of the garbled output described above. */
    cvGetRawData( p_img1, (uchar **)&p_pic->p[0].p_pixels, NULL, &sz );
    return p_pic;
}
So, for now, it only receives a VLC picture_t, converts it to BGRA (for OpenCV purposes), converts it to an IplImage, and then back to a picture_t.
But the result comes out garbled — I can't upload a screenshot here because of my low reputation. I would appreciate any help.
Related
I'm using OpenCV to do Canny Edge Detection on images obtained by a camera over LAN.
This works, but I keep getting Corrupt JPEG data: premature end of data segment errors at run time once every few frames when I imshow("recv", canny), but I don't get that if I do imshow("recv", frame)
// Block until receive message from a client
// Header handshake: spin until a datagram no larger than an int arrives —
// that header datagram carries only the packet count.
do {
recvMsgSize = sock.recvFrom(buffer, BUF_LEN, sourceAddress, sourcePort);
} while (recvMsgSize > sizeof(int));
// First int of the header: how many PACK_SIZE chunks make up this frame.
int total_pack = ((int*)buffer)[0];
// NOTE(review): the double(...) in the new[] size converts back to an
// integral size; the cast is redundant but harmless.
char* longbuf = new char[double(PACK_SIZE * total_pack)];
// Reassemble the JPEG from the chunks. A chunk of unexpected size is
// skipped, leaving that PACK_SIZE region of longbuf uninitialized — a
// likely cause of the "Corrupt JPEG data" warnings mentioned above.
for (int i = 0; i < total_pack; i++) {
recvMsgSize = sock.recvFrom(buffer, BUF_LEN, sourceAddress, sourcePort);
if (recvMsgSize != PACK_SIZE) {
continue;
}
memcpy(&longbuf[i * PACK_SIZE], buffer, PACK_SIZE);
}
// Wrap the raw bytes as a 1-row 8-bit matrix and decode as a color image.
Mat rawData = Mat(1, PACK_SIZE * total_pack, CV_8UC1, longbuf);
Mat frame = imdecode(rawData, IMREAD_COLOR);
// imdecode() returns an empty Mat on decode failure; skip this frame.
if (frame.size().width == 0) {
continue;
}
Mat canny = CannyThreshold(frame);
imshow("recv", canny);
delete[] longbuf;
if you can get "frame" in mat form, then probably there is something wrong about this line:
Mat canny = CannyThreshold(frame);
Maybe you can change this line with something like this:
// Fixed Canny hysteresis thresholds; tune these for your input.
int minCannyThreshold = 190;
int maxCannyThreshold = 230;
// In-place Canny: aperture size 5, L2 gradient norm enabled.
Canny(frame, frame, minCannyThreshold, maxCannyThreshold, 5, true);
imshow("recv", frame);
I ran into an oddity with cv::imdecode(): when I tried to load an image with a non-standard size, I always got an image with a NULL buffer and rows = 0.
For example, here is a .jpg with size 236x402:
My C++ code:
FILE* pf = NULL;
// BUG (identified later in this thread): L"r" opens the file in TEXT mode
// on Windows, which mangles binary JPEG data; it must be L"rb".
pf = _wfopen(strFileName, L"r");
if (!pf)
{
return false;
}
// Read the whole file into the pre-allocated buffer, then close it.
fread(pFileBuf, 1, nFileSize, pf);
fclose(pf);
// Wrap the raw bytes as a 1-row 8-bit matrix and decode as a color image.
cv::Mat matRaw = cv::Mat( 1, nFileSize, CV_8UC1, pFileBuf);
cv::Mat matImage = cv::imdecode(matRaw, CV_LOAD_IMAGE_COLOR);
delete [] pFileBuf;
// On decode failure matImage is empty: cols == rows == 0, data == NULL.
SIZE size;
size.cx = matImage.cols;
size.cy = matImage.rows;
After run matImage.rows = 0 and matImage.data = NULL.
But for an jpeg image with 1280x720 it works well.
Ideas?
The bug is in
pf = _wfopen(strFileName, L"r");
It must be
pf = _wfopen(strFileName, L"rb");
Thanks for the help
I have recently been working with OpenCV and C++ for a project, and I found something weird:
when I try to access a single pixel value in an IplImage and assign it another value, the code runs properly, but it only operates on part of the whole image.
relevant code:
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
// NOTE(review): cvQueryFrame() below returns its own internal buffer, so
// the image allocated here for current_frame is leaked, and the queried
// frame's header (widthStep, nChannels) can differ from output_frame's.
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while((current_frame = cvQueryFrame(video_gray)) != 0 )
{
for (int row=0;row<height;row++)
{
// Advance to the row start using each image's own widthStep (byte stride).
uchar* ptr_current_frame = (uchar *)(current_frame->imageData+current_frame->widthStep*row);
uchar* ptr_output_frame = (uchar *)(output_frame->imageData+output_frame->widthStep*row);
for (int cols=0;cols<width;cols++)
{
//other codes...
// Copies byte `cols` of each row; if current_frame has more than one
// channel, this walks only the first `width` BYTES of the row, i.e.
// only the left part of the image (see the channel fix further down).
ptr_output_frame[cols]=ptr_current_frame[cols];
}
}
}
The result is the left part of the image was copied to the output_frame. And when I run the following code:
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
// NOTE(review): as above, cvQueryFrame() replaces this pointer, so the
// allocation for current_frame is leaked.
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while((current_frame = cvQueryFrame(video_gray)) != 0 )
{
for (int row=0;row<height;row++)
{
// Uses ->width (pixels per row) as the stride instead of ->widthStep
// (bytes per row incl. padding/channels), so successive "rows" here
// drift backwards through the buffer — only the top part comes out.
uchar* ptr_current_frame = (uchar *)current_frame->imageData+current_frame->width*row;
uchar* ptr_output_frame = (uchar *)output_frame->imageData+output_frame->width*row;
for (int cols=0;cols<width;cols++)
{
//other codes...
ptr_output_frame[cols]=ptr_current_frame[cols];
}
}
}
I got the upside part of the image in the output_frame.
I cannot get a copy of the whole image into output_frame either way. Could anybody help me with this? Thanks a lot!
[updates] 05/16/2015
I have found out that output_frame->widthStep differs from current_frame->widthStep after current_frame is assigned in the following code:
current_frame = cvQueryFrame(video_gray);
That explains why the first version of the code wouldn't work. But I still don't know why the second version doesn't work.
I have now updated my whole code below and hope you can help me get this right. I really appreciate your help.
// Open the AVI and read its basic properties.
CvCapture* video_gray = cvCreateFileCapture("test_gray.avi");
const double fps = cvGetCaptureProperty(video_gray, CV_CAP_PROP_FPS);
const int width = (int)cvGetCaptureProperty(video_gray, CV_CAP_PROP_FRAME_WIDTH);
const int height = (int)cvGetCaptureProperty(video_gray, CV_CAP_PROP_FRAME_HEIGHT);
const CvSize size = cvSize(width, height);
// NOTE(review): cvQueryFrame() in the loop replaces current_frame with an
// internal buffer, so this allocation is leaked and its header may not
// match output_frame's (see the widthStep observation above).
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* output_frame=cvCreateImage(size, IPL_DEPTH_8U, 1);
int flag = 0;
cvNamedWindow("test",1);
cvNamedWindow("test2",1);
while((current_frame = cvQueryFrame(video_gray)) != 0 )
{
// Print the frame counter and skip the first 499 frames.
cout<<flag++<<endl;
if(flag<500) continue;
// Byte-wise row copy using each image's own widthStep stride.
for (int row=0;row<height;row++)
{
uchar* ptr_current_frame = (uchar *)(current_frame->imageData+current_frame->widthStep*row);
uchar* ptr_output_frame = (uchar *)(output_frame->imageData+output_frame->widthStep*row);
for (int cols=0;cols<width;cols++)
{
ptr_output_frame[cols]= ptr_current_frame[cols];
}
}
// Show the copy next to the source frame.
cvShowImage("test",output_frame);
cvShowImage("test2",current_frame);
cvWaitKey(10);
}
You don't handle the number of channels...
please try
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while((current_frame = cvQueryFrame(video_gray)) != 0 )
{
for (int row=0;row<height;row++)
{
// Row starts computed with each image's own widthStep (byte stride).
uchar* ptr_current_frame = (uchar *)(current_frame->imageData+current_frame->widthStep*row);
uchar* ptr_output_frame = (uchar *)(output_frame->imageData+output_frame->widthStep*row);
for (int cols=0;cols<width;cols++)
{
//other codes...
// Take care of the channels here: advance by nChannels per pixel so
// only one channel (the first) is written to the single-channel output.
ptr_output_frame[cols]=ptr_current_frame[cols*current_frame->nChannels];
}
}
}
but you should really try to switch to C++ API: try something like this:
// Sketch (not compilable as-is): same copy loop using the C++ API.
cv::VideoCapture video = cv::VideoCapture("test_gray.avi");
// Placeholders — fill in from video.get(...) capture properties.
int width = ...
int height = ...
cv::Mat image_captured;
cv::Mat image_gray; // if wanted
cv::Mat image_output1 = cv::Mat(height, width, CV_8UC1);
cv::Mat image_output2 = cv::Mat(height, width, CV_8UC1);
while(video.read(image_captured))
{
// if you want to convert the image to
// grayscale first — only needed when the capture is multi-channel:
if(image_captured.channels() > 1)
{
cv::cvtColor(image_captured, image_gray, CV_BGR2GRAY);
}
else
{
// Single-channel already: share the data, no conversion.
image_gray = image_captured;
}
[...]
// Per-pixel copy via .at<> — simpler than row pointers, slower in hot loops.
for(int j=0; j<height; ++j)
{
for(int i=0; i<width; ++i)
{
image_output1.at<uchar>(j,i) = image_gray.at<uchar>(j,i); // read from single channel image
// NOTE(review): this also writes image_output1 — presumably the second
// assignment was meant for image_output2 (which is shown below but
// never written); confirm the intent.
image_output1.at<uchar>(j,i) = image_captured.at<cv::Vec3b>(j,i)[0]; // only read the first channel of a multi-channel image
}
}
}
cv::imshow("output1", image_output1);
cv::imshow("output2", image_output2);
// NOTE(review): typo — "image_capured" should be "image_captured".
cv::imshow("input", image_capured);
cv::waitKey(0);
}
much easier to use, and you can still make it more efficient by using row-pointers in each iteration etc
Hey, I'm trying to work out OpenCV's optical-flow functions, but for some reason I'm getting an exception in Visual Studio:
Unhandled exception at 0x772615de in Optical_flow.exe: Microsoft C++ exception: cv::Exception at memory location 0x0036f334..
With breakpoints I found out that the error lies within the cvCalcOpticalFlowHS function.
I'm using openCV 2.1
#include <cv.h>
#include <highgui.h>
using namespace cv;
// Unused initialization stub.
int init() {
return 0;
}
int main(int argc, char **args) {
CvCapture* capture = cvCaptureFromFile("Video/Wildlife.wmv");
double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
CvSize size;
size.width = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
size.height = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
// Writer with FOURCC 0 (uncompressed/prompt) and isColor = 1.
CvVideoWriter* writer = cvCreateVideoWriter("result.avi", 0, fps,size, 1);
IplImage* curFrame = cvQueryFrame(capture);
// NOTE(review): per the answer below, cvCalcOpticalFlowHS expects 8-bit
// single-channel inputs and CvArr* velocity buffers; these are C++
// cv::Mat objects of type CV_32FC2 — confirm against the OpenCV 2.1 docs.
Mat u = Mat(size, CV_32FC2);
Mat v = Mat(size, CV_32FC2);
CvTermCriteria IterCriteria;
IterCriteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
IterCriteria.max_iter = 500;
IterCriteria.epsilon = 0.01;
while(1) {
IplImage* nextFrame = cvQueryFrame(capture);
if(!nextFrame) break;
u = Mat::zeros(size, CV_32FC2);
v = Mat::zeros(size, CV_32FC2);
/* Do optical flow computation */
// BUG(likely): &curFrame/&nextFrame pass IplImage** where the C API
// expects CvArr* (i.e. the IplImage* itself), and &u/&v pass cv::Mat*,
// not CvArr* — a plausible source of the cv::Exception reported above.
cvCalcOpticalFlowHS(&curFrame, &nextFrame, 0, &u, &v, 0.01, IterCriteria);
cvWriteFrame(writer, curFrame);
// NOTE: cvQueryFrame() reuses an internal buffer, so curFrame and
// nextFrame may alias the same memory on the next iteration.
curFrame = nextFrame;
}
cvReleaseVideoWriter(&writer);
cvReleaseCapture(&capture);
return 0;
}
Anyone has seen this problem before or sees the mistake I made?
Best Regards
Remco
From the documentation, curFrame and nextFrame should be 8-bit single channel. You are currently just pulling these from the loaded file without checking/converting them as necessary. Can you confirm that the input is of the right type?
Also you have a nasty mix of C++ style cv::Mat with C style IplImage*. I'd suggest you upgrade to a more recent version of OpenCV (2.4 has recently been released), and try to stick with the one or other of the C++ or C style methods.
Note also that this optical flow method is classed as obsolete with a recommendation to use either calcOpticalFlowPyrLK() for sparse features or calcOpticalFlowFarneback() for dense features.
Below is some example code demonstrating calcOpticalFlowFarneback(), which is what I believe you are trying to achieve. It takes data from the webcam rather than a file.
#include <opencv2/opencv.hpp>
using namespace cv;
/// Overlay a sparse visualization of a dense optical-flow field on `cflowmap`.
/// For every `step`-th pixel, draws a line from that pixel to where its flow
/// vector points, plus a small filled dot marking the pixel itself.
void drawOptFlowMap(const cv::Mat& flow,
cv::Mat& cflowmap,
int step,
const cv::Scalar& color
)
{
for (int row = 0; row < cflowmap.rows; row += step)
{
for (int col = 0; col < cflowmap.cols; col += step)
{
// Flow at this pixel: (dx, dy) displacement in pixels.
const cv::Point2f& displacement = flow.at<cv::Point2f>(row, col);
const cv::Point origin(col, row);
const cv::Point tip(cvRound(col + displacement.x),
cvRound(row + displacement.y));
cv::line(cflowmap, origin, tip, color);
cv::circle(cflowmap, origin, 2, color, -1);
}
}
}
int main(int argc, char **args) {
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat newFrame, newGray, prevGray;
// Prime the loop: grab one frame so prevGray is valid on iteration 1.
cap >> newFrame; // get a new frame from camera
cvtColor(newFrame, newGray, CV_BGR2GRAY);
prevGray = newGray.clone();
// Farneback parameters (see the calcOpticalFlowFarneback documentation).
double pyr_scale = 0.5;
int levels = 3;
int winsize = 5;
int iterations = 5;
int poly_n = 5;
double poly_sigma = 1.1;
int flags = 0;
while(1) {
cap >> newFrame;
if(newFrame.empty()) break;
cvtColor(newFrame, newGray, CV_BGR2GRAY);
// Dense flow output: one (dx, dy) float pair per pixel.
Mat flow = Mat(newGray.size(), CV_32FC2);
/* Do optical flow computation */
calcOpticalFlowFarneback(
prevGray,
newGray,
flow,
pyr_scale,
levels,
winsize,
iterations,
poly_n,
poly_sigma,
flags
);
// Draw every 20th flow vector on the color frame and display it.
drawOptFlowMap(flow, newFrame, 20, CV_RGB(0,255,0));
namedWindow("Output",1);
imshow("Output", newFrame);
waitKey(1);
// Current frame becomes the reference for the next iteration.
prevGray = newGray.clone();
}
return 0;
}
The above code is pretty similar to the fback.cpp sample code which comes with OpenCV.
I have a strange problem. If I use cvCvtColor on an image it works but if I want to modify that image and use cvCvtColor on it there is an error:
OpenCV Error: Sizes of input arguments
do not match () in cvCvtColor, file
/build/buildd-opencv_2.1.0-3-i386-PaiiLK/opencv-2.1.0/src/cv/cvcolor.cpp,
line 2208 terminate called after
throwing an instance of
'cv::Exception'
There shouldn't be this error because I have as output:
targetImage->width =300,
targetImage->height =300 cap->width
=300, cap->height =300
that is: the size is the same. So it's nonsense..
Any idea of a possible solution?
The relevant code is here:
printf("\ntargetImage->width =%d, targetImage->height =%d ",targetImage->width,targetImage->height );
cap = cvCreateImage(cvSize(targetImage->width,targetImage->height), IPL_DEPTH_8U, 1);
cvCvtColor(targetImage, cap, CV_BGR2GRAY);//HERE NO PROBLEM
// Remap targetImage's header to a 300x300 window inside showImg; subImage()
// (defined below) shares the pixel buffer and keeps the source's widthStep.
CvRect xargetRect = cvRect(0,0,300,300);
subImage(targetImage, &showImg, xargetRect);
// NOTE(review): the previous cap is leaked when reassigned here.
cap = cvCreateImage(cvSize(targetImage->width,targetImage->height), IPL_DEPTH_8U, 1);
printf("\ntargetImage->width =%d, targetImage->height =%d ",targetImage->width,targetImage->height );
printf("\ncap->width =%d, cap->height =%d ",cap->width,cap->height );
// NOTE(review): after subImage the header's width no longer matches its
// widthStep/imageSize — a plausible cause of OpenCV 2.1's "Sizes of input
// arguments do not match" error despite the equal width/height printed above.
cvCvtColor(targetImage, cap, CV_BGR2GRAY); //HERE THE PROBLEM
Thanks
This is the subimage code:
/// Modifies an already allocated image header to map
/// a subwindow inside another image.
/// No pixel data is copied: dest->imageData points into orig's buffer at
/// offset (r.y, r.x), and dest keeps orig's widthStep, so the subwindow's
/// rows are NOT contiguous in memory.
inline void subImage(IplImage *dest, const IplImage *orig, const CvRect &r) {
dest->width = r.width;
dest->height = r.height;
// Byte size counted in full source strides, not in r.width pixels.
dest->imageSize = r.height * orig->widthStep;
// Top-left corner of the subwindow inside orig's buffer.
dest->imageData = orig->imageData + r.y * orig->widthStep + r.x * orig->nChannels;
dest->widthStep = orig->widthStep;
dest->roi = NULL;
dest->nSize = sizeof(IplImage);
dest->depth = orig->depth;
dest->nChannels = orig->nChannels;
dest->dataOrder = IPL_DATA_ORDER_PIXEL;
// NOTE(review): header fields not assigned here (ID, origin, align, ...)
// keep whatever values dest already had — dest must be a valid IplImage.
}
I now have a working dev environment, so I should post some code.
The error message in your question shows that you are using OpenCV 2.1. I tried the code sample in OpenCV 2.2 and it works just fine, your subImage seems to be working as expected. Though the CvRect &r parameter works as X,Y with width, height (as opposed to P1 to p2). Below is the code I tried (minor modifications, but very same subImage):
#include "cv.h"
#include "highgui.h"
/// Modifies an already allocated image header to map
/// a subwindow inside another image.
/// Shares orig's pixel buffer rather than copying: imageData is offset to
/// (r.y, r.x) and widthStep stays equal to orig's stride, so each subwindow
/// row is followed by the remainder of the original row in memory.
inline void subImage(IplImage *dest, const IplImage *orig, const CvRect &r)
{
dest->width = r.width;
dest->height = r.height;
// Counted in whole source strides (r.height rows of orig->widthStep bytes).
dest->imageSize = r.height * orig->widthStep;
dest->imageData = orig->imageData + r.y * orig->widthStep + r.x * orig->nChannels;
dest->widthStep = orig->widthStep;
dest->roi = NULL;
dest->nSize = sizeof(IplImage);
dest->depth = orig->depth;
dest->nChannels = orig->nChannels;
dest->dataOrder = IPL_DATA_ORDER_PIXEL;
}
int _tmain(int argc, _TCHAR* argv[])
{
// Stack-allocated IplImage used purely as a header; subImage() fills the
// fields it needs. NOTE(review): any field subImage does not set (ID,
// origin, align, ...) stays uninitialized — confirm OpenCV ignores them
// for the calls below.
IplImage targetImage;
IplImage* showImg = cvLoadImage("c:\\image11.bmp");
//printf("\ntargetImage->width =%d, targetImage->height =%d ", targetImage->width, targetImage->height );
//IplImage* cap = cvCreateImage(cvSize(targetImage->width, targetImage->height), IPL_DEPTH_8U, 1);
//cvCvtColor(targetImage, cap, CV_BGR2GRAY);//HERE NO PROBLEM
// Map a 100x100 window at (100,100) of the loaded image onto targetImage.
CvRect xargetRect = cvRect(100, 100, 100, 100);
subImage(&targetImage, showImg, xargetRect);
// Single-channel destination for the grayscale conversion.
IplImage* cap = cvCreateImage(cvSize(targetImage.width, targetImage.height), IPL_DEPTH_8U, 1);
printf("\ntargetImage->width =%d, targetImage->height =%d ", targetImage.width, targetImage.height );
printf("\ncap->width =%d, cap->height =%d ", cap->width, cap->height );
cvCvtColor(&targetImage, cap, CV_BGR2GRAY); //HERE THE PROBLEM
// Save the subwindow view to a separate output file.
int result = cvSaveImage("c:\\image11.output.bmp", &targetImage);
return 0;
}