manual access/construction to OpenCV Mat Data - c++

I'm new to OpenCV, I have created a simple program for learning purposes, here is the code:
cv::Mat image;
std::string imageName = "test.jpg";
image = cv::imread(imageName, cv::IMREAD_COLOR); // Read the file
if(! image.data ) // Check for invalid input
{
std::cout << "Could not open or find the image" << std::endl ;
return false;
}
uchar* mData = image.data;//input buffer data
uchar* nData = (uchar*)malloc(image.rows * image.cols * image.channels() * sizeof(uchar));//output buffer data
std::cout << image.rows << " " << image.cols << " " << image.channels() << " " << sizeof(uchar) << std::endl;
std::cout << image.step << std::endl;
//int r,g,b;
for(int i = 0;i < image.rows ;i++)
{
for(int j = 0;j < image.step ;j++)
{
nData[image.step * i + j ] = mData[image.step * i + j ] / 3;
nData[image.step * i + j + 1] = mData[image.step * i + j + 1] / 3;
nData[image.step * i + j + 2] = mData[image.step * i + j + 2] / 3;
}
}
cv::Mat outputImage(image.rows, image.cols, CV_8UC3, (void*)nData);
if(! outputImage.data ) // Check
{
std::cout << "output mat failure" << std::endl ;
return false;
}
cv::namedWindow( "Input image window", cv::WINDOW_AUTOSIZE ); // Create a window for display.
cv::imshow( "Input image window", image ); // Show our image inside it.
cv::namedWindow( "Output image window", cv::WINDOW_AUTOSIZE ); // Create a window for display.
cv::imshow( "Output image window", outputImage ); // Show our image inside it.
cv::waitKey(0); // Wait for a keystroke in the window
free(nData);
1- is it safe to access the input Mat Data like this? what is the need of isContinuous()?
2- is this the correct way to do such a manual copy?
3- free(nData) do I have to manually handle my own memory?
4- I will use this with GPU, so I need to make sure that the input pointer "is safe" so I can use memcpy without worrying

Related

how to capture images per second from video files by using opencv and c++?

I have encountered on designing program to allow capturing images every second from video files (avi, mp4, etc...).
First, I was able to capture images frame by frame from video file.
Second, I was able to analyze pixel color values from images in the same folder at the same time and saved pixel value in the txt file.
And here I have some problem. I am now trying to combine these two codes at once, but I have strange results. I refer the code below.
int main(){
VideoCapture cap("D:\\data\\extra\\video200ul.avi");
if (!cap.isOpened())
return -1;
Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2(20, 16, true);
Mat fg_mask;
Mat frame;
int count = 0;
String name, folder;
for (;;) {
// Get frame
cap >> frame; // get a new frame from video
++count;
// Update counter
// Background subtraction
if (count % 2 == 0) {
pMOG2->apply(frame, fg_mask, 0.001);
cout << count << endl;
if (!frame.empty()) {
imshow("frame", frame);
// imshow("fg_mask", fg_mask);
}
// Save foreground mask
name = "mask" + std::to_string(count) + ".png";
// string name = "mask_" + std::to_string(static_cast<long long>(count) + ".png";
folder = imwrite("D:\\data\\extra\\" + name, frame);
}
anal(folder);
}
waitKey(0);
return 0;
}
First, the code above captures images frame by frame from the video file. However, if I save a picture for every frame, I will have far too many pictures in my folder, so I would like to capture only one image per second from the video file. I have tried to use CV_CAP_PROP_POS_MSEC instead of reading with cap >> frame, but it did not work for me.
Second, when I merge this code to another code what I wrote below, it showed some error messages like, "libpng warning image width, length, data are zero in ihdr."
// Analyse every PNG in the folder: draw a marker rectangle and print the
// R/G/B percentages of each image. Returns 0 on success, -1 if a file
// could not be read.
// NOTE(review): the parameter is immediately overwritten with a hard-coded
// glob pattern, so the caller's value is ignored — kept for call-site
// compatibility, but consider building the pattern from `folder` instead.
int anal(String folder) {
    folder = "D:\\data\\extra\\*.png";
    vector<String> filenames;
    glob(folder, filenames);
    cv::Mat ori_image;
    for (size_t i = 0; i < filenames.size(); ++i) {
        ori_image = imread(filenames[i], IMREAD_COLOR);
        if (ori_image.empty()) {
            cout << "Check your file again." << std::endl;
            return -1;
        }
        rectangle(ori_image, Point(215, 98), Point(245, 110), Scalar(0, 255, 255), 1);
        imshow("Original Image", ori_image);
        waitKey(1); // FIX: without a waitKey the window never refreshes
        cv::Scalar sums = cv::sum(ori_image); // per-channel sums: [0]=B, [1]=G, [2]=R
        double totalSum = sums[0] + sums[1] + sums[2];
        if (totalSum <= 0) {
            // Completely black image: report an even three-way split.
            cout << "$$ RGB percentage $$" << " \n\n";
            cout << "R: " << 100.0 / 3 << " % \n";
            cout << "G: " << 100.0 / 3 << " % \n";
            cout << "B: " << 100.0 / 3 << " % \n\n";
        }
        else {
            cout << "$$ RGB percentage $$" << " \n\n"; // red value
            cout << "R: " << sums[2] / totalSum * 100 << " % \n"; // red value
            cout << "G: " << sums[1] / totalSum * 100 << " % \n"; // green value
            cout << "B: " << sums[0] / totalSum * 100 << " % \n\n"; // blue value
        }
    }
    return 0; // FIX: original fell off the end of a non-void function (undefined behaviour)
}
as I prepared the code above, I tried to calculate red, blue, green percentages of all the captured images from the video. However, when I separate these two code and run them, they worked fine, but if I merge them together, It showed error messages.
I would like to combine these two code for analysis for color values from the captured images at video every second.
Please help me out this problem.
Thank you in advance.
-----------Edited part----------------------
I used your revised version and applied to my updated code,
// Show the current frame with a marker rectangle, then analyse every PNG
// in `folder`: mask out hue==100 pixels and average H/S/V over a fixed
// region of interest, appending the V value to a result file.
void imageAnalysis(std::string folder, cv::Mat frame){
    cv::Mat ori_image = frame.clone();
    std::string path = folder;
    cv::rectangle(ori_image, Point(215, 105), Point(245, 120), Scalar(0, 255, 255), 1);
    cv::imshow("Original Image", ori_image);
    cv::waitKey(1);
    // FIX: the original redeclared `folder` here (String folder = "...";) which
    // shadows/redeclares the parameter and does not compile; the variable was
    // unused anyway because glob() below uses `path`.
    vector<String> filenames;
    cv::glob(path, filenames);
    // Open the result file once in append mode instead of reopening it for
    // every image (same file contents, less work).
    std::ofstream file("D:\\data\\dfdf\\result_4.txt", std::ios_base::app);
    for (size_t t = 0; t < filenames.size(); t++) {
        ori_image = imread(filenames[t], IMREAD_COLOR);
        if (ori_image.empty()) {
            cout << "Check your file again." << "\n";
            break;
        }
        rectangle(ori_image, Point(215, 105), Point(245, 120), Scalar(0, 255, 255), 1);
        imshow("Original Image", ori_image);
        cv::waitKey(1);
        Mat image_HSV;
        cvtColor(ori_image, image_HSV, CV_BGR2HSV);
        double h = 0.0;
        double s = 0.0;
        double v = 0.0;
        int col = image_HSV.cols; // e.g. 480
        int row = image_HSV.rows; // e.g. 272
        // Pixel count of the ROI iterated below: x in [215, col-235), y in [108, row-152).
        int corow = ((col - 235) - 215) * ((row - 152) - 108);
        Mat mask;
        inRange(image_HSV, Scalar(100, 0, 0), Scalar(100, 255, 255), mask); // pixels with hue exactly 100
        image_HSV.setTo(Scalar(0, 0, 0), mask); // zero them before averaging
        for (int i = 108; i < row - 152; i++) {
            for (int j = 215; j < col - 235; j++) {
                // NOTE(fix): `hsv` is a copy of the pixel, so the original
                // if-block that wrote hsv[0..2] = 0 never modified the image;
                // it was dead code (inRange/setTo above already zeroes the
                // hue==100 pixels) and has been removed.
                Vec3b hsv = image_HSV.at<cv::Vec3b>(i, j);
                h += (int)(hsv.val[0]);
                s += (int)(hsv.val[1]);
                v += (int)(hsv.val[2]);
            }
        }
        cout << "$$ Hue(H), Saturation(S), Brightness(V) $$" << filenames[t] << " !! \n\n";
        cout << "H: " << h / corow * 360 / 180 << " % \n"; // OpenCV hue range is 0..180
        cout << "S: " << s / corow * 100 / 255 << " % \n";
        cout << "V: " << v / corow * 100 / 255 << " % \n\n";
        file << v / corow * 100 / 255 << " \n"; // v value
    }
}
As you can see the imageAnalysis() function, I added std::string folder for the path of extracted images from video clip. However, when I applied this code, I have really weird results like below..
enter image description here
I thought I am supposed to get color value from every 24th image but as you see the results above, I got color values from all images in random order.
Thank you in advance.
It was really nice to learn how to code in efficient way!!
Just to clear the error you mentioned about CV_CAP_PROP_POS_MSEC in your comments:
when I apply CV_CAP_PROP_POS_MSEC to my code, I found some error
messages like "CV_CAP_PROP_POS_MSEC is not defined."
A lot of the constant values are scoped in OpenCV. That means the unscoped CV_CAP_PROP_POS_MSEC is not defined, but cv::CAP_PROP_POS_MSEC is (inside the cv namespace the CV_ prefix is dropped). You can also obtain the FPS with cv::CAP_PROP_FPS.
Now to your code, I would actually do something that does not require to save and load the image, but rather pass the images to be processed, like this:
#include "opencv2/opencv.hpp"
#include <iostream>
// Read a video, process one frame per second (every fps-th frame):
// background-subtract it, save it to disk, and run imageAnalysis on it.
int main(){
    cv::VideoCapture cap("D:\\data\\extra\\video200ul.avi");
    if (!cap.isOpened())
    {
        std::cout << "Could not open video" << std::endl;
        return -1;
    }
    cv::Ptr<cv::BackgroundSubtractor> pMOG2 = cv::createBackgroundSubtractorMOG2(20, 16, true);
    cv::Mat fg_mask, frame;
    int count = 0;
    const int fps = 24; // you may set here the fps or get them from the video (cv::CAP_PROP_FPS)
    std::string name; // FIX: removed unused `folder` local
    // with cap.read you can check already if the video ended
    while (cap.read(frame)) {
        // Background subtraction
        if (count % fps == 0) {
            pMOG2->apply(frame, fg_mask, 0.001);
            // Save foreground mask
            name = "mask" + std::to_string(count) + ".png";
            // FIX: report a failed write instead of silently discarding the result
            if (!cv::imwrite("D:\\data\\extra\\" + name, frame))
                std::cout << "Could not write " << name << std::endl;
            imageAnalysis(frame, count);
        }
        // at the end of the loop so that the first image is used
        ++count;
    }
    cv::waitKey(0);
    return 0;
}
And the imageAnalysis function is defined as:
// You can pass cv::Mat as value, it is almost like a smart pointer
void imageAnalysis(cv::Mat frame, int count)
{
cv::Mat ori_image = frame.clone();
cv::rectangle(ori_image, Point(215, 98), Point(245, 110), Scalar(0, 255, 255), 1);
// each imshow needs a waitKey to update the window in which it is being shown
cv::imshow("Original Image", ori_image);
cv::waitKey(1);
cv::Scalar sums;
sums = cv::sum(ori_image);
double totalSum = sums[0] + sums[1] + sums[2];
std::ofstream output("D:\\data\\extra\\mask" + std::to_string(count) + ".txt");
if (totalSum <= 0)
{
std::cout << "$$ RGB percentage $$" << std::endl << std::endl;
std::cout << "R: " << 100.0 / 3 << std::endl;
std::cout << "G: " << 100.0 / 3 << std::endl;
std::cout << "B: " << 100.0 / 3 << std::endl << std::endl;
output << "$$ RGB percentage $$" << std::endl << std::endl;
output << "R: " << 100.0 / 3 << std::endl;
output << "G: " << 100.0 / 3 << std::endl;
output << "B: " << 100.0 / 3 << std::endl << std::endl;
}
else {
std::cout << "$$ RGB percentage $$" << std::endl << std::endl;
std::cout << "R: " << sums[2] / totalSum * 100 << std::endl; // red value
std::cout << "G: " << sums[1] / totalSum * 100 << std::endl; // green value
std::cout << "B: " << sums[0] / totalSum * 100 << std::endl << std::endl; // blue value
output << "$$ RGB percentage $$" << std::endl << std::endl;
output << "R: " << sums[2] / totalSum * 100 << std::endl; // red value
output << "G: " << sums[1] / totalSum * 100 << std::endl; // green value
output << "B: " << sums[0] / totalSum * 100 << std::endl << std::endl; // blue value
}
}
Some comments on the code above: I replaced cap >> frame with cap.read(frame). It is the same functionality, but the latter returns a bool result that is false if it could not grab the image, for example when the video is over. I moved the count increment to the end of the loop, so you process frames 0, 24, 48, ...; this way the first frame is used as well. Finally, you should use the namespaces cv::, std:: etc. This is just best practice; it avoids ambiguities and problems that may arise with certain libraries.
If you do not need the image in disk, but only the analysis, then remove the saving part and pass every frame to the imageAnalysis function, this way you may have more data for your statistics. Also, consider returning the cv:Scalar of sums in the function and then you can do some statistics of the whole second or the whole video.
If you have any question, feel free to ask in the comments.

Processing Video as a string of images

I have a project where part of it is processing a video as a batch of images. I found a snippet online which helped me process a batch of images(the one attached below)
// Fragment: builds glob patterns / printf-style path formats for a list of
// image sequences. NOTE(review): this snippet is not self-contained —
// imgpath, folder, filenames, imgInPath, imgOutPath, startFrame, keyboard
// are declared elsewhere, and the `if (argc >= 3) {` below is never closed
// within this excerpt.
string Img_seq[] = {"flying_plane"}; // one entry per image sequence (folder name)
int size_of_Img_seq = sizeof(Img_seq) / sizeof(Img_seq[0]); // element count of a C array
imgpath = argv[1];
if (argc >= 3)
{
TrackingResPath = argv[2];
cout << "The image path is: " << imgpath << endl;
int Seg_num = 1; //for OPE
for (int j = 0; j < size_of_Img_seq; ++j)
{
// Glob pattern matching every jpg of sequence j.
folder = imgpath + Img_seq[j] + "/*.jpg";
string imgInFormat, imgOutFormat, imgOutFormat1, imgOutFormat2;
ofstream outBB;
// printf-style template: frame 7 -> <imgpath><seq>/00007.jpg
imgInFormat = imgpath + Img_seq[j] + "/%05d.jpg";
if (argc >= 3)
{
//add process here
}
}
// get the total number of images in folder
// NOTE(review): glob runs AFTER the j-loop, so it only sees the LAST
// sequence's pattern — presumably it should be inside the loop; confirm.
glob(folder, filenames);
int Frames_per_seg = filenames.size(); // for OPE
cout << "Frames_per_seg = " << Frames_per_seg << endl;
for (int k = 1; k <= Seg_num; ++k)
{
// NOTE(review): imgInFormat/imgOutFormat were declared inside the j-loop
// above, so they are out of scope here (and imgOutFormat was never
// assigned) — this will not compile as excerpted.
sprintf(imgInPath, imgInFormat.c_str(), startFrame);// read image path
sprintf(imgOutPath, imgOutFormat.c_str(), startFrame);
}
I need to use this procedure to process a video from either a directory or from the webcam. I tried the one way which I've given below but it is spitting out errors
// Read frames from the video until 'q'/ESC is pressed, drawing a frame
// counter background and recording the current frame index.
// (videoFilename, frame, keyboard are declared elsewhere in the program.)
VideoCapture capture(videoFilename);
if(!capture.isOpened()){
    cerr << "Unable to open video file:" << videoFilename << endl;
    exit(EXIT_FAILURE);
}
while( (char)keyboard != 'q' && (char)keyboard != 27){
    if(!capture.read(frame)){
        cerr << "Unable to read next frame" << endl;
        cerr << "Exiting" << endl;
        exit(EXIT_FAILURE);
    }
    // FIX: the original closed the while-loop right after the read (`}}`),
    // so everything below ran only once, after the loop ended. The per-frame
    // work belongs inside the loop.
    stringstream ss;
    rectangle(frame, cv::Point(10, 2), cv::Point(100, 20),
              cv::Scalar(255, 255, 255), -1);
    ss << capture.get(CV_CAP_PROP_POS_FRAMES);
    // FIX: an array of string cannot be initialised from a single string
    // (`string Img_seq[] = ss.str();` does not compile); brace-enclose it
    // as a one-element sequence, matching the batch-processing code.
    string Img_seq[] = { ss.str() };
    keyboard = waitKey( 30 );
}
The errors are basically because Img_seq[] is not linked properly between the above two codes.
Could anyone help me understand how I should proceed in using video for a batch processing logic.

c++ opencv find center of object and add circle

Hi, I am trying to learn OpenCV. With this code I am trying to put a circle at the center of each object. When I try to write the labels 1, 2, 3, etc. it is okay, but when I try to add a circle at the center of each object there is a problem.
My code:
// Threshold the seed image, find external contours, and draw each one with
// its index label and a circle at its centre of mass.
// (src, tmp, shapeMoments, shapeHumoments, centerofmass are file-scope
// variables declared elsewhere.)
void findcontours(){
    cv::namedWindow("contourdemo",CV_GUI_EXPANDED);
    src = cv::imread("/home/zugurtaga/Desktop/project/opencv/imgs/seed.jpg");
    if(src.data){
        if(src.channels() > 1){
            cv::cvtColor(src,src,CV_RGB2GRAY);
        }
        // (no else needed: a single-channel image is already grey; the
        // original `src = src;` was a no-op)
        cv::Mat cImc = src.clone();
        cv::threshold(cImc,cImc,150,255,CV_THRESH_BINARY_INV | CV_THRESH_OTSU);
        cv::imshow("contourdemo",cImc);
        cv::waitKey(0);
        vector<vector <cv::Point> > contours;
        vector<cv::Vec4i> hierarchy;
        cv::findContours(cImc,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
        cout << "Number of counts: " << contours.size() << endl;
        tmp = cv::Mat::zeros(cImc.size(),CV_8UC3);
        cv::RotatedRect rRect;
        for (size_t i = 0; i < contours.size(); i++) {
            // FIX: cv::moments takes ONE contour, not the whole vector of
            // contours — compute the centre of mass per contour, inside the
            // loop, so each object gets its own circle.
            shapeMoments = cv::moments(contours[i]);
            cv::HuMoments(shapeMoments,shapeHumoments);
            centerofmass.x = (shapeMoments.m10 / shapeMoments.m00);
            centerofmass.y = (shapeMoments.m01 / shapeMoments.m00);
            cout << "centerofmass.x: " << centerofmass.x << " centerofmass.y: " << centerofmass.y << endl;
            cv::drawContours(tmp,contours,i,cv::Scalar(120,0,0),CV_FILLED,8,hierarchy,0,cv::Point());
            rRect = cv::minAreaRect(contours[i]);
            cv::putText(tmp,cv::format("%d",i+1),rRect.center,1,1,cv::Scalar(255,255,255));
            cv::circle(tmp,centerofmass,3,cv::Scalar(255,255,255),-1,8,0);
        }
        cv::imshow("contourdemo",tmp);
        cv::waitKey(0);
        cv::destroyAllWindows();
    }else{
        cout << "No file.." << endl;
    }
}
Thanks for any help.
I found the problem after some trying; the following version is working.
// Working version: moments are computed PER CONTOUR inside the loop, so each
// object gets its own centre-of-mass circle and label.
// (contours, hierarchy, tmp, shapeMoments, shapeHumoments, centerofmass,
// rRect are declared in the surrounding function/file.)
for (size_t i = 0; i < contours.size(); i++) {
// Moments of this single contour (cv::moments must not receive the whole vector).
shapeMoments = cv::moments(contours[i]);
cv::HuMoments(shapeMoments,shapeHumoments);
// Centre of mass: (m10/m00, m01/m00). NOTE(review): m00 is 0 for
// degenerate contours, which would divide by zero — confirm inputs.
centerofmass.x = (shapeMoments.m10 / shapeMoments.m00);
centerofmass.y = (shapeMoments.m01 / shapeMoments.m00);
cout << "centerofmass.x: " << centerofmass.x << " centerofmass.y: " << centerofmass.y << endl;
cv::drawContours(tmp,contours,i,cv::Scalar(120,0,0),CV_FILLED,8,hierarchy,0,cv::Point());
rRect = cv::minAreaRect(contours[i]);
// Label drawn at the centre of mass; white filled circle marks it.
cv::putText(tmp,cv::format("%d",i+1),centerofmass,1,1,cv::Scalar(255,255,255));
cv::circle(tmp,centerofmass,(int)3,cv::Scalar(255,255,255),-1,8,0);
}

QT QImage, how to extract RGB?

I want to extract RGB from each pixel in QImage. Ideally, I want to use the img.bits() function.
// Dump the first 12 raw bytes of the image's pixel buffer.
QImage img;
if( img.load("Red.jpg") )
{
// bits() returns a pointer to the raw pixel data (same as scanLine(0)).
// NOTE(review): the byte layout depends on img.format() and platform byte
// order, so these values are NOT guaranteed to be R,G,B triplets — convert
// with convertToFormat(QImage::Format_RGB888) first if that is what you expect.
uchar *bits = img.bits();
for (int i = 0; i < 12; i++)
{
std::cout << (int) bits[i] << std::endl;
}
}
How to manipulate the returned bits? I expected all red because the picture is a pure red image created in Paint. However, I get 36, 27, 237, 255, 36 etc...
// Print every entry of the image's colour table (palette).
QImage img( "Red.jpg" );
if ( false == img.isNull() )
{
// NOTE(review): only indexed formats (e.g. Format_Indexed8) have a colour
// table; for a typical loaded JPG this vector is empty and the loop body
// never runs — use pixel()/scanLine() for per-pixel colours instead.
QVector<QRgb> v = img.colorTable(); // returns a list of colors contained in the image's color table.
for ( QVector<QRgb>::const_iterator it = v.begin(), itE = v.end(); it != itE; ++it )
{
QColor clrCurrent( *it );
std::cout << "Red: " << clrCurrent.red()
<< " Green: " << clrCurrent.green()
<< " Blue: " << clrCurrent.blue()
<< " Alpha: " << clrCurrent.alpha()
<< std::endl;
}
}
However, the example above only returns the color table. The color table does not include the same color twice; each color is added once, in order of appearance.
If you want to get each pixels color, you can use next lines:
// Print the RGBA of every pixel via QImage::pixel().
// FIX 1: coordinates are 0-based; the original looped 1..height/width
// inclusive, reading one row and one column past the image.
// FIX 2: QImage::pixel(x, y) takes x (column) first; the original passed
// (row, col), transposing the image and going out of range for
// non-square images.
for ( int row = 0; row < img.height(); ++row )
    for ( int col = 0; col < img.width(); ++col )
    {
        QColor clrCurrent( img.pixel( col, row ) );
        std::cout << "Pixel at [" << row << "," << col << "] contains color ("
                  << clrCurrent.red() << ", "
                  << clrCurrent.green() << ", "
                  << clrCurrent.blue() << ", "
                  << clrCurrent.alpha() << ")."
                  << std::endl;
    }
Reference for bits() says:
Returns a pointer to the first pixel data. This is equivalent to scanLine(0).
So if you check reference for scanLine()
If you are accessing 32-bpp image data, cast the returned pointer to QRgb* (QRgb has a 32-bit size) and use it to read/write the pixel value. You cannot use the uchar* pointer directly, because the pixel format depends on the byte order on the underlying platform. Use qRed(), qGreen(), qBlue(), and qAlpha() to access the pixels.
One other option would probably be pixel() member function.
Hope that helps.
One of the problems with using the bits() function is that you need to know the format of the original image. You should convert it to RGB by using convertToFormat.
// Normalise to 24-bit RGB so the pixel data is stored as R,G,B byte triplets
// regardless of the source image's original format.
img = img.convertToFormat(QImage::Format_RGB888);
Now, when you call bits(), the data will be in the RGB format with the proper data alignment.
uchar *bits = img.bits();
for (int i = 0; i < (img.width() * img.height() * 3); i++)
{
std::cout << (int) bits[i] << std::endl;
}
In Qt 5.6 was introduced QColor QImage::pixelColor(int x, int y) method, so you can directly get color information from image pixel.

Getting pixel color with Magick++?

I've already asked this question, but that was about FreeImage. Now I'm trying to do the same thing with ImageMagick (to be more correct, with Magick++).
All I need is to get the RGB value of pixels in an image with the ability to print it out onto the screen. I asked this in the ImageMagick forum, but it seems there is nobody there. :-( Can anybody help, please?
Version 6 API
Given an "Image" object, you have to request a "pixel cache", then work with it. Documentation is here and here:
// load an image
Magick::Image image("test.jpg");
int w = image.columns();
int h = image.rows();
// get a "pixel cache" for the entire image
Magick::PixelPacket *pixels = image.getPixels(0, 0, w, h);
// now you can access single pixels like a vector
int row = 0;
int column = 0;
// Row-major layout: pixel (row, column) lives at index w * row + column.
Magick::Color color = pixels[w * row + column];
// if you make changes, don't forget to save them to the underlying image
// NOTE(review): Color's channel range depends on the library's quantum
// depth — on Q16 builds the maximum is 65535, so (255, 0, 0) would be a
// very dark red; use ColorRGB or QuantumRange to be depth-independent.
pixels[0] = Magick::Color(255, 0, 0);
image.syncPixels();
// ...and maybe write the image to file.
image.write("test_modified.jpg");
Version 7 API
Access to pixels has changed in version 7 (see: porting), but low-level access is still present:
// Version 7: pixels are a flat array of Quantum values, image.channels()
// per pixel (e.g. R,G,B interleaved). `image`, `w` come from the
// surrounding example.
MagickCore::Quantum *pixels = image.getPixels(0, 0, w, h);
int row = 0;
int column = 0;
// Offset of pixel (row, column): channels * (w * row + column).
unsigned offset = image.channels() * (w * row + column);
// NOTE(review): 255 assumes a Q8 quantum depth; on Q16 the full-scale
// value is 65535 (QuantumRange) — confirm the build's depth.
pixels[offset + 0] = 255; // red
pixels[offset + 1] = 0; // green
pixels[offset + 2] = 0; // blue
#Sga's answer didn't work for me, I'm using the ImageMagick-7.0.7-Q8 (8 bit depth) library.
Here's how I did it, to scan an image pixel by pixel and output each one's RGB value:
// "InitializeMagick" called beforehand!
void processImage()
{
std::ifstream fin;
std::stringstream fs;
fs << "/img.png";
std::cout << "Opening image \"" << fs.str() << "\".." << std::endl;
try
{
Image img;
img.read( fs.str() );
int imgWidth = img.columns();
int imgHeight = img.rows();
std::cout << "Image width: " << imgWidth << std::endl;
std::cout << "Image height: " << imgHeight << std::endl;
std::cout << "Image channels: " << img.channels() << std::endl;
img.modifyImage();
for ( int row = 0; row <= imgHeight; row++ )
{
for ( int column = 0; column <= imgWidth; column++ )
{
ColorRGB px = img.pixelColor( column, row );
std::cout << "Pixel " << column << "," << row << " R: " << px.red() << " G: " << px.green() <<
" B: " << px.blue() << std::endl;
}
}
}
catch ( Magick::Exception & error )
{
std::cerr << "Caught Magick++ exception: " << error.what() << std::endl;
}
fin.close(); // Close the file
}