I have recently been working with OpenCV and C++ on a project, and I found something strange: when I access a single pixel value in an IplImage and assign it a new value, the code runs without errors, but it only operates on part of the image.
Relevant code:
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while ((current_frame = cvQueryFrame(video_gray)) != 0)
{
    for (int row = 0; row < height; row++)
    {
        uchar* ptr_current_frame = (uchar *)(current_frame->imageData + current_frame->widthStep * row);
        uchar* ptr_output_frame = (uchar *)(output_frame->imageData + output_frame->widthStep * row);
        for (int cols = 0; cols < width; cols++)
        {
            //other codes...
            ptr_output_frame[cols] = ptr_current_frame[cols];
        }
    }
}
The result is that only the left part of the image is copied to output_frame. And when I run the following code:
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while ((current_frame = cvQueryFrame(video_gray)) != 0)
{
    for (int row = 0; row < height; row++)
    {
        uchar* ptr_current_frame = (uchar *)current_frame->imageData + current_frame->width * row;
        uchar* ptr_output_frame = (uchar *)output_frame->imageData + output_frame->width * row;
        for (int cols = 0; cols < width; cols++)
        {
            //other codes...
            ptr_output_frame[cols] = ptr_current_frame[cols];
        }
    }
}
I get only the upper part of the image in output_frame.
I cannot get the whole image copied into output_frame either way. Could anybody help me with this? Thanks a lot!
[Update] 05/16/2015
I have found out that output_frame->widthStep is different from current_frame->widthStep after the following line executes:
current_frame = cvQueryFrame(video_gray);
That explains why the first version wouldn't work, but I still don't know why the second version doesn't work either. I have now posted my whole code below and hope you can help me get this right. I really appreciate your help.
CvCapture* video_gray = cvCreateFileCapture("test_gray.avi");
const double fps = cvGetCaptureProperty(video_gray, CV_CAP_PROP_FPS);
const int width = (int)cvGetCaptureProperty(video_gray, CV_CAP_PROP_FRAME_WIDTH);
const int height = (int)cvGetCaptureProperty(video_gray, CV_CAP_PROP_FRAME_HEIGHT);
const CvSize size = cvSize(width, height);
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
int flag = 0;
cvNamedWindow("test", 1);
cvNamedWindow("test2", 1);
while ((current_frame = cvQueryFrame(video_gray)) != 0)
{
    cout << flag++ << endl;
    if (flag < 500) continue;
    for (int row = 0; row < height; row++)
    {
        uchar* ptr_current_frame = (uchar *)(current_frame->imageData + current_frame->widthStep * row);
        uchar* ptr_output_frame = (uchar *)(output_frame->imageData + output_frame->widthStep * row);
        for (int cols = 0; cols < width; cols++)
        {
            ptr_output_frame[cols] = ptr_current_frame[cols];
        }
    }
    cvShowImage("test", output_frame);
    cvShowImage("test2", current_frame);
    cvWaitKey(10);
}
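For reference, a quick way to see how the two layouts differ is to print the relevant IplImage fields right after cvQueryFrame (a sketch): if the decoded frame comes back as 3-channel, its widthStep will be roughly three times its width plus padding, which would explain why only part of the image gets copied.
cout << "current: width=" << current_frame->width
     << " widthStep=" << current_frame->widthStep
     << " nChannels=" << current_frame->nChannels << endl;
cout << "output:  width=" << output_frame->width
     << " widthStep=" << output_frame->widthStep
     << " nChannels=" << output_frame->nChannels << endl;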
You don't handle the number of channels...
Please try:
IplImage* output_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* current_frame = cvCreateImage(size, IPL_DEPTH_8U, 1);
while ((current_frame = cvQueryFrame(video_gray)) != 0)
{
    for (int row = 0; row < height; row++)
    {
        uchar* ptr_current_frame = (uchar *)(current_frame->imageData + current_frame->widthStep * row);
        uchar* ptr_output_frame = (uchar *)(output_frame->imageData + output_frame->widthStep * row);
        for (int cols = 0; cols < width; cols++)
        {
            //other codes...
            // here instead take care of the channels too: only write one channel to the output
            ptr_output_frame[cols] = ptr_current_frame[cols * current_frame->nChannels];
        }
    }
}
But you should really try to switch to the C++ API. Try something like this:
cv::VideoCapture video = cv::VideoCapture("test_gray.avi");
int width = ...
int height = ...
cv::Mat image_captured;
cv::Mat image_gray; // if wanted
cv::Mat image_output1 = cv::Mat(height, width, CV_8UC1);
cv::Mat image_output2 = cv::Mat(height, width, CV_8UC1);
while (video.read(image_captured))
{
    // if you want to convert the image to grayscale
    if (image_captured.channels() > 1)
    {
        cv::cvtColor(image_captured, image_gray, CV_BGR2GRAY);
    }
    else
    {
        image_gray = image_captured;
    }
    [...]
    for (int j = 0; j < height; ++j)
    {
        for (int i = 0; i < width; ++i)
        {
            image_output1.at<uchar>(j, i) = image_gray.at<uchar>(j, i); // read from a single-channel image
            image_output2.at<uchar>(j, i) = image_captured.at<cv::Vec3b>(j, i)[0]; // only read the first channel of a multi-channel image
        }
    }
    cv::imshow("output1", image_output1);
    cv::imshow("output2", image_output2);
    cv::imshow("input", image_captured);
    cv::waitKey(0);
}
It is much easier to use, and you can still make it more efficient by using row pointers in each iteration, as sketched below.
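A minimal sketch of that row-pointer idea, assuming the same single-channel Mats as above:
for (int j = 0; j < image_gray.rows; ++j)
{
    const uchar* src = image_gray.ptr<uchar>(j); // fetch each row pointer once
    uchar* dst = image_output1.ptr<uchar>(j);
    for (int i = 0; i < image_gray.cols; ++i)
        dst[i] = src[i]; // no per-pixel offset arithmetic or bounds-checked at<>()
}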
Related
How to get the brightness and contrast of a range of pixels and apply it to set a gradient on another image in OpenCV C++?
I've tried to do that with the code below, but I didn't get a good result.
I want to apply the brightness and contrast of the second part of image1 to the first part of image2 to create a gradient; the final goal is to stitch the two images.
Mat image = imread("1.jpg");
Mat image2 = imread("2.jpg");
const int darkness_threshold = 128;
cv::Mat hsv;
cvtColor(image, hsv, COLOR_BGR2HSV);
const auto result = cv::mean(hsv);
cv::Mat hsv2;
cvtColor(image2, hsv2, COLOR_BGR2HSV);
const auto result2 = cv::mean(hsv2);
cout << "resultat1: " << result << endl;
cout << "resultat2: " << result2 << endl;
Mat new_image = Mat::zeros(image.size(), image.type());
double alpha = result2[0] - result[0];
double beta = result2[2] - result[2];
for (int y = 0; y < image.rows; y++) {
    for (int x = 0; x < image.cols; x++) {
        for (int c = 0; c < image.channels(); c++) {
            new_image.at<Vec3b>(y, x)[c] =
                saturate_cast<uchar>(alpha * image.at<Vec3b>(y, x)[c] + beta);
        }
    }
}
namedWindow("New Image", WINDOW_NORMAL);
imshow("New Image", new_image);
namedWindow("Image2", WINDOW_NORMAL);
imshow("Image2", image2);
imwrite("imgs1.jpg", new_image);
imwrite("imgs2.jpg", image2);
waitKey();
return 0;
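For what it's worth, here is a hedged sketch of one common way to match brightness and contrast. It is not the approach above: instead of differencing the H and V channel means, it matches the mean and standard deviation of the V (brightness) channel:
cv::Scalar mean1, std1, mean2, std2;
std::vector<cv::Mat> ch1, ch2;
cv::split(hsv, ch1);
cv::split(hsv2, ch2);
cv::meanStdDev(ch1[2], mean1, std1); // V channel of image
cv::meanStdDev(ch2[2], mean2, std2); // V channel of image2
double gain = std2[0] / std1[0];          // contrast
double bias = mean2[0] - gain * mean1[0]; // brightness
image.convertTo(new_image, -1, gain, bias); // new = gain*old + bias, saturated per pixel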
I am currently programming with a PixeLINK USB3 machine vision camera along with OpenCV in C++. I have had some success passing camera images around in Mat format with the following code:
PXL_RETURN_CODE rc = PxLInitialize(0, &hCamera);
if (!API_SUCCESS(rc))
{
    printf("Error: Unable to initialize a camera. \n");
    return EXIT_FAILURE;
}
vector<U8> frameBuffer(3000 * 3000 * 2);
FRAME_DESC frameDesc;
if (API_SUCCESS(PxLSetStreamState(hCamera, START_STREAM)))
{
    while (true)
    {
        frameDesc.uSize = sizeof(frameDesc);
        rc = GetNextFrame(hCamera, (U32)frameBuffer.size(), &frameBuffer[0],
                          &frameDesc, 5);
        Mat image(2592, 2048, CV_8UC1);
        Mat imageCopy;
        // Where passing of image data occurs
        int k = 0;
        for (int row = 0; row < 2048; row++)
        {
            for (int col = 0; col < 2592; col++)
            {
                image.at<uchar>(row, col) = frameBuffer[k];
                k++;
            }
        }...
As I mentioned, this works, but it seems very sloppy. I have looked online but haven't found much detail.
I have tried:
Mat image(2592, 2048, CV_8UC1, &frameBuffer, size_t step=AUTO_STEP);
as well as
Mat image(2592, 2048, CV_8UC1, frameBuffer, size_t step=AUTO_STEP).
The former is the only one that compiles successfully, but it displays gibberish rather than forming an image.
Have you tried switching the rows and cols of your Mat?
You initialized your Mat with rows = 2592 and cols = 2048, but you're using them the other way around in your for() loops.
I think this code should work properly:
Mat image(2048, 2592, CV_8UC1, &frameBuffer[0]);
Or, if you're using C++11,
Mat image(2048, 2592, CV_8UC1, frameBuffer.data());
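One caveat worth stating: constructing a Mat from an external buffer like this does not copy the pixels, so the Mat is only valid while frameBuffer still holds that frame. A small sketch:
cv::Mat view(2048, 2592, CV_8UC1, frameBuffer.data()); // shares the buffer, no copy
cv::Mat owned = view.clone(); // deep copy, safe to keep after the next GetNextFrame()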
I'm trying to write a VLC video filter that uses OpenCV to normalize video.
I'm having trouble converting VLC's picture_t to IplImage and back to picture_t; the IplImage is needed to transform to Mat and proceed with OpenCV tools. I couldn't find a solution elsewhere, so I'm asking for help here.
The main function for transforming images is:
static picture_t *FilterNormalizeOpenCV( filter_t *p_filter, picture_t *p_pic )
{
    Mat frame1;
    IplImage* p_img1;
    video_format_t fmt_out, fmt_in;
    int i_planes = p_pic->i_planes;
    CvSize sz = cvSize(abs(p_pic->format.i_width), abs(p_pic->format.i_height));
    CvPoint pt1, pt2;
    filter_sys_t *p_sys = p_filter->p_sys;
    if (!p_pic) {
        msg_Err( p_filter, "no image array" );
        printf("\n\n %s \n\n", "No p pic!!!");
        return NULL;
    }
    i_planes = p_pic->i_planes;
    if (i_planes < 1) {
        printf("\n\n %s \n\n", "No image planes!!!");
        msg_Err( p_filter, "no image planes" );
        return NULL;
    }
    fmt_in = p_pic->format;
    fmt_out = p_pic->format;
    fmt_out.i_width = p_pic->format.i_width * p_sys->f_scale;
    fmt_out.i_height = p_pic->format.i_height * p_sys->f_scale;
    fmt_out.i_chroma = VLC_CODEC_BGRA;
    p_sys->p_proc_image = image_Convert( p_sys->p_image, p_pic,
                                         &(p_pic->format), &fmt_out );
    p_sys->p_to_be_freed = p_sys->p_proc_image;
    i_planes = p_sys->p_proc_image->i_planes;
    for( int i = 0; i < i_planes; i++ )
    {
        sz = cvSize(abs(p_sys->p_proc_image->p[i].i_visible_pitch /
                        p_sys->p_proc_image->p[i].i_pixel_pitch),
                    abs(p_sys->p_proc_image->p[i].i_visible_lines));
        p_sys->p_cv_image[i] = cvCreateImageHeader(sz, IPL_DEPTH_8U,
                                                   p_sys->p_proc_image->p[i].i_pixel_pitch);
        cvSetData( p_sys->p_cv_image[i],
                   (char*)(p_sys->p_proc_image->p[i].p_pixels),
                   p_sys->p_proc_image->p[i].i_pitch );
    }
    p_img1 = p_sys->p_cv_image[0];
    frame1 = cvarrToMat(p_img1);
    // Here we'll process the Mat frame with OpenCV
    // and convert it back to IplImage
    //p_img1 = new IplImage(frame1);
    cvGetRawData(p_img1, (uchar **)&p_pic->p[0].p_pixels, NULL, &sz);
    return p_pic;
}
So, for now, it only receives a VLC picture_t, converts it to BGRA (for OpenCV purposes), converts that to an IplImage, and converts it back to picture_t.
But the result is something messy, which I can't upload here because of my low reputation (screenshot). I would appreciate any help.
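In case it helps, here is a hedged sketch of the Mat-back-to-picture_t leg. In the OpenCV 2.x API a Mat converts to an IplImage header without copying; the sketch also assumes the destination picture really has the same BGRA layout as frame1, which may not hold in your filter:
IplImage ipl = frame1; // header only, shares frame1's pixels
for (int y = 0; y < ipl.height; y++)
    memcpy(p_pic->p[0].p_pixels + y * p_pic->p[0].i_pitch,
           ipl.imageData + y * ipl.widthStep,
           ipl.width * ipl.nChannels); // copy one row, respecting both pitches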
When I use the Canny edge algorithm, it produces two edges on either side of a thick colored line, as expected, but I want only one edge to be displayed, to make my line and curve detection algorithm much less complicated. Any ideas on how I can make that happen?
Here is the code:
bool CannyEdgeDetection(DataStructure& col)
{
    Mat src, src_gray;
    Mat dst, detected_edges, fin;
    int WhiteCount = 0, BCount = 0;
    char szFil1[32] = "ocv.bmp";
    char szFil2[32] = "dst.bmp";
    src = imread(szFil1);
    dst = imread(szFil1);
    cvtColor(src, src_gray, CV_BGR2GRAY); // src_gray was empty before; fill it before blurring
    blur(src_gray, detected_edges, Size(3, 3));
    Canny(src, dst, 100, 200, 3);
    imwrite(szFil2, dst);
    IplImage* img = cvLoadImage(szFil2);
    int height = img->height;
    int width = img->width;
    int step = img->widthStep;
    int channels = img->nChannels;
    uchar* datau = (uchar *)img->imageData;
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            for (int k = 0; k < channels; k++) {
                datau[i*step + j*channels + k] = 255 - datau[i*step + j*channels + k];
                if (datau[i*step + j*channels + k] == 0) {
                    WhiteCount++;
                    col.pixel_col[i][j] = 2;
                }
                else {
                    BCount++;
                    col.pixel_col[i][j] = 0;
                }
            }
        }
    }
    cvSaveImage("img.bmp", img);
    return 0;
}
This is not the original image, but it is similar:
Which part do I comment out to be able to read black images on white backgrounds, or any colored image?
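(For context, the loop below assumes declarations along these lines; a sketch with names matching the code, where "dst.bmp" is the Canny output the earlier code writes:)
cv::Mat img = cv::imread("dst.bmp", 0); // load as single-channel
cv::Mat skel = cv::Mat::zeros(img.size(), CV_8UC1); // accumulates the skeleton
cv::Mat temp;
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));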
bool done;
do
{
    cv::morphologyEx(img, temp, cv::MORPH_OPEN, element);
    cv::bitwise_not(temp, temp);
    cv::bitwise_and(img, temp, temp);
    cv::bitwise_or(skel, temp, skel);
    cv::erode(img, img, element);
    double max;
    cv::minMaxLoc(img, 0, &max);
    done = (max == 0);
} while (!done);
That process is called skeletonization or thinning. You can google for it.
Here is a simple method for skeletonization: skeletonization OpenCV in C#
Below is the output I got when applying the above method to your image (the image is inverted before skeletonization, because the method works on white shapes on a black background, the opposite of your input image).
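A minimal sketch of that inversion step, assuming img holds the single-channel input from above:
cv::bitwise_not(img, img); // black-on-white becomes white-on-black, as the method expects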
Hey, I'm trying to sort out OpenCV's optical flow functions, but for some reason I'm getting an exception in Visual Studio:
Unhandled exception at 0x772615de in Optical_flow.exe: Microsoft C++ exception: cv::Exception at memory location 0x0036f334..
With breakpoints I found out that the error lies within the cvCalcOpticalFlowHS function.
I'm using OpenCV 2.1.
#include <cv.h>
#include <highgui.h>

using namespace cv;

int init() {
    return 0;
}

int main(int argc, char **args) {
    CvCapture* capture = cvCaptureFromFile("Video/Wildlife.wmv");
    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    CvSize size;
    size.width = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    size.height = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    CvVideoWriter* writer = cvCreateVideoWriter("result.avi", 0, fps, size, 1);
    IplImage* curFrame = cvQueryFrame(capture);
    Mat u = Mat(size, CV_32FC2);
    Mat v = Mat(size, CV_32FC2);
    CvTermCriteria IterCriteria;
    IterCriteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
    IterCriteria.max_iter = 500;
    IterCriteria.epsilon = 0.01;
    while(1) {
        IplImage* nextFrame = cvQueryFrame(capture);
        if(!nextFrame) break;
        u = Mat::zeros(size, CV_32FC2);
        v = Mat::zeros(size, CV_32FC2);
        /* Do optical flow computation */
        cvCalcOpticalFlowHS(&curFrame, &nextFrame, 0, &u, &v, 0.01, IterCriteria);
        cvWriteFrame(writer, curFrame);
        curFrame = nextFrame;
    }
    cvReleaseVideoWriter(&writer);
    cvReleaseCapture(&capture);
    return 0;
}
Has anyone seen this problem before, or can you spot the mistake I made?
Best regards,
Remco
From the documentation, curFrame and nextFrame should be 8-bit, single-channel images. You are currently just pulling these from the loaded file without checking or converting them as necessary. Can you confirm that the input is of the right type?
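For example, a conversion along these lines (a sketch using the C API the question already uses, assuming the capture delivers 8-bit BGR frames):
IplImage* curGray = cvCreateImage(cvGetSize(curFrame), IPL_DEPTH_8U, 1);
cvCvtColor(curFrame, curGray, CV_BGR2GRAY);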
Also, you have a nasty mix of C++-style cv::Mat with C-style IplImage*. I'd suggest you upgrade to a more recent version of OpenCV (2.4 has recently been released), and try to stick with either the C++-style or the C-style methods.
Note also that this optical flow method is classed as obsolete, with a recommendation to use either calcOpticalFlowPyrLK() for sparse features or calcOpticalFlowFarneback() for dense features.
Below is some example code demonstrating calcOpticalFlowFarneback(), which is what I believe you are trying to achieve. It takes data from the webcam rather than a file.
#include <opencv2/opencv.hpp>

using namespace cv;

void drawOptFlowMap(const cv::Mat& flow,
                    cv::Mat& cflowmap,
                    int step,
                    const cv::Scalar& color)
{
    for(int y = 0; y < cflowmap.rows; y += step)
        for(int x = 0; x < cflowmap.cols; x += step)
        {
            const cv::Point2f& fxy = flow.at<cv::Point2f>(y, x);
            cv::line(cflowmap,
                     cv::Point(x, y),
                     cv::Point(cvRound(x + fxy.x), cvRound(y + fxy.y)),
                     color);
            cv::circle(cflowmap, cv::Point(x, y), 2, color, -1);
        }
}

int main(int argc, char **args) {
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened()) // check if we succeeded
        return -1;
    Mat newFrame, newGray, prevGray;
    cap >> newFrame; // get a new frame from camera
    cvtColor(newFrame, newGray, CV_BGR2GRAY);
    prevGray = newGray.clone();
    double pyr_scale = 0.5;
    int levels = 3;
    int winsize = 5;
    int iterations = 5;
    int poly_n = 5;
    double poly_sigma = 1.1;
    int flags = 0;
    while(1) {
        cap >> newFrame;
        if(newFrame.empty()) break;
        cvtColor(newFrame, newGray, CV_BGR2GRAY);
        Mat flow = Mat(newGray.size(), CV_32FC2);
        /* Do optical flow computation */
        calcOpticalFlowFarneback(
            prevGray,
            newGray,
            flow,
            pyr_scale,
            levels,
            winsize,
            iterations,
            poly_n,
            poly_sigma,
            flags
        );
        drawOptFlowMap(flow, newFrame, 20, CV_RGB(0, 255, 0));
        namedWindow("Output", 1);
        imshow("Output", newFrame);
        waitKey(1);
        prevGray = newGray.clone();
    }
    return 0;
}
The above code is pretty similar to the fback.cpp sample code which comes with OpenCV.