Visualizing a pixel value with OpenCV Mat::at - C++

I don't understand why the output is a strange value when I run this code:
int main(int argc, char** argv)
{
Mat im;
im = imread("lena.png", CV_LOAD_IMAGE_GRAYSCALE);
cout << im.at<uchar>(0, 0) << endl;
waitKey(0);
}
If I display the image, it looks correct.
Where am I going wrong?

That's because it prints the character with that code, just like cout << char(123) << endl; would.
You have to use int cast:
cout << (int) im.at<uchar>(0, 0) << endl;

As stated in the official documentation, you don't get the actual intensity value directly but a Scalar.
Try this:
Scalar intensity = im.at<uchar>(0, 0);
cout << intensity.val[0] << endl;
and for images with more than one channel you can use:
Vec3b intensity = im.at<Vec3b>(0, 0);
cout << (int)intensity.val[0] << " " << (int)intensity.val[1] << " " << (int)intensity.val[2] << endl;
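Putting the two answers together, here is a minimal self-contained sketch (my own example, assuming a lena.png in the working directory; IMREAD_GRAYSCALE / IMREAD_COLOR are the newer names of the CV_LOAD_IMAGE_* flags):
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    // Grayscale: at<uchar> returns an unsigned char, so cast to int before printing
    Mat gray = imread("lena.png", IMREAD_GRAYSCALE);
    if (gray.empty()) { cout << "could not load lena.png" << endl; return -1; }
    cout << "gray pixel (0,0): " << (int)gray.at<uchar>(0, 0) << endl;
    // Color: at<Vec3b> gives three uchar channels in B, G, R order; cast each one too
    Mat color = imread("lena.png", IMREAD_COLOR);
    Vec3b px = color.at<Vec3b>(0, 0);
    cout << "B=" << (int)px[0] << " G=" << (int)px[1] << " R=" << (int)px[2] << endl;
    return 0;
}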

Related

OpenCV Mat object is not correctly initialized from char array but works from imread(..)

In my application the JPEG image arrives as a char* array. I need to convert this to an OpenCV cv::Mat object. To test behavior I create another cv::Mat from cv::imread.
The Mat created from imread works fine when I convert it from BGR to RGB format, but the Mat created from the raw char array seg-faults when I try to convert it.
Code is attached below:
//read_image -> get the char* array
//convert_image -> convert char* array to Mat object
//mat_process -> convert the Matrix to the RGB format of the correct size
//When the Matrix is created from cv::imread and I call mat_process it returns correctly
//When the Matrix is created from read_image -> convert_image, it seg faults on mat_process
typedef cv::Point3_<float> Pixel;
char* read_image(string image_name){
cout << "Reading image " << endl;
std::ifstream file(image_name, ios::binary);
file.seekg(0, std::ios::end);
size_t filesize = (int)file.tellg();
file.seekg(0);
char* output = new char[filesize];
file.read(output, filesize);
cout << "IMAGE SIZE IS " << filesize << endl;
return output;
}
cv::Mat convert_image(char* img){
cv::Mat* raw_data= new cv::Mat(1080,1920,CV_8UC3,img,cv::Mat::AUTO_STEP);
cout << " Got raw data " << raw_data->cols << " rows " << raw_data->rows << endl;
//Mat decoded_mat = imdecode(raw_data,IMREAD_UNCHANGED);
//cout << "Decoded the matrix " << decoded_mat.cols << " rows are " << decoded_mat.rows << endl;
//return decoded_mat;
return *raw_data;
}
auto mat_process(cv::Mat src, uint width, uint height) -> cv::Mat{
// convert to float; BGR -> RGB
cv::Mat dst;
cout << "Creating dst" << endl;
src.convertTo(dst, CV_32FC3); // SEGMENTATION FAULT OCCURS HERE WITH RAW CHAR ARRAY
cout << "Creating dst2" << endl;
cv::cvtColor(dst, dst, cv::COLOR_BGR2RGB);
cout << "Creating dst3" << endl;
// normalize to -1 & 1
Pixel* pixel = dst.ptr<Pixel>(0,0);
cout << "Creating dst4" << endl;
const Pixel* endPixel = pixel + dst.cols * dst.rows;
cout << "Creating dst5" << endl;
for (; pixel != endPixel; pixel++)
normalize(*pixel);
// resize image as model input
cout << "Creating dst6" << endl;
cv::resize(dst, dst, cv::Size(width, height));
cout << "Creating dst7" << endl;
return dst;
}
char * img1 = read_image(imgf1);
cv::Mat img1_mat = convert_image(img1);
cv::Mat img1_comp = imread(imgf1);
cout << " Start process " <<endl;
cv::Mat conv1 = mat_process(img1_comp,320,320); //This code executes fine
cout << " Done 1 " << endl;
cv::Mat conv2 = mat_process(img1_mat,320,320); //Segmentation Fault appears here
cout << " Done process " <<endl;

Convert images from Pylon to OpenCV in C++

I want to convert stereo images captured by Basler cameras to OpenCV (Mat) format. In the code below I convert the images to OpenCV format, but at the display stage I cannot show the images. Please guide me.
Thanks
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
PylonInitialize();
Pylon::PylonAutoInitTerm autoInitTerm;//me
try
{
// Get the transport layer factory.
CTlFactory& tlFactory = CTlFactory::GetInstance();
// Get all attached devices and exit application if no device is found.
DeviceInfoList_t devices;
if (tlFactory.EnumerateDevices(devices) == 0)
{
throw RUNTIME_EXCEPTION("No camera present.");
}
CInstantCameraArray cameras(min(devices.size(), c_maxCamerasToUse));
// Create and attach all Pylon Devices.
for (size_t i = 0; i < cameras.GetSize(); ++i)
{
cameras[i].Attach(tlFactory.CreateDevice(devices[i]));
// Print the model name of the camera.
cout << "Using device " << cameras[i].GetDeviceInfo().GetModelName() << endl;
}
CGrabResultPtr ptrGrabResult;
CImageFormatConverter formatConverter;//me
formatConverter.OutputPixelFormat = PixelType_BGR8packed;//me
CPylonImage pylonImage;//me
// Create an OpenCV image
Mat openCvImage;//me
for (int i = 0; i < c_countOfImagesToGrab && cameras.IsGrabbing(); ++i)
{
cameras.RetrieveResult(5000, ptrGrabResult, TimeoutHandling_ThrowException);
intptr_t cameraContextValue = ptrGrabResult->GetCameraContext();
#ifdef PYLON_WIN_BUILD
#endif
// Print the index and the model name of the camera.
cout << "Camera " << cameraContextValue << ": " << cameras[cameraContextValue].GetDeviceInfo().GetModelName() << endl;
// Now, the image data can be processed.
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *)ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t)pImageBuffer[0] << endl << endl;
formatConverter.Convert(pylonImage, ptrGrabResult);//me
// Create an OpenCV image out of pylon image
openCvImage = cv::Mat(ptrGrabResult->GetHeight(), ptrGrabResult->GetWidth(), CV_8UC3, (uint8_t *)pylonImage.GetBuffer());//me
if (cameraContextValue == 0)
{
imshow("left camera", openCvImage);
imwrite("right_img.png", openCvImage);
}
else if (cameraContextValue == 1)
{
imshow("right camera", openCvImage);
imwrite("right_img.png", openCvImage);
}
Sleep(3000);
}
}
catch (const GenericException &e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
You need to create a window to display an OpenCV image in; use:
namedWindow("left camera", CV_WINDOW_NORMAL);
imshow("left camera", openCvImage);
There are also a few mistakes in your code: I guess the first "right_img.png" should be changed to "left_img.png", otherwise you will save only one of the images.
This code is also redundant:
PylonInitialize();
Pylon::PylonAutoInitTerm autoInitTerm;
autoInitTerm automatically calls PylonInitialize() and PylonTerminate(), so you should remove it, or remove the explicit PylonInitialize() and PylonTerminate() calls.
I think a waitKey(0) is required after the imshow to display the image.
Add the piece of code below after the for (size_t i = 0; i < cameras.GetSize(); ++i) loop completes:
cameras.StartGrabbing(GrabStrategy_LatestImageOnly, GrabLoop_ProvidedByUser);
Add this to your code, and, as noted in the comments above, remove the unnecessary code.
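Putting the suggestions together, the display part of the grab loop might look roughly like this (a sketch only, reusing the names from the question; I have not run it against the Pylon SDK):
// after attaching the cameras:
cameras.StartGrabbing(GrabStrategy_LatestImageOnly, GrabLoop_ProvidedByUser);
// inside the grab loop, once pylonImage holds the converted frame:
openCvImage = cv::Mat(ptrGrabResult->GetHeight(), ptrGrabResult->GetWidth(),
                      CV_8UC3, (uint8_t *)pylonImage.GetBuffer());
if (cameraContextValue == 0)
{
    namedWindow("left camera", CV_WINDOW_NORMAL);
    imshow("left camera", openCvImage);
    imwrite("left_img.png", openCvImage);
}
else if (cameraContextValue == 1)
{
    namedWindow("right camera", CV_WINDOW_NORMAL);
    imshow("right camera", openCvImage);
    imwrite("right_img.png", openCvImage);
}
waitKey(1); // give HighGUI a chance to actually draw; use waitKey(0) to pause until a key press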

Functions in OpenCV

Hi, I have written the following code in OpenCV. Basically it reads a video from a file. Now I want to create a function to resize the video, but I am unsure how to pass the VideoCapture object to a function from main. I wrote a sample function to see if it would print anything; the program compiles fine and shows the output from main, but nothing from the newly created function. Any help? P.S. I'm not very experienced, bear with me LOL.
using namespace cv;
using namespace std;
void resize_video(VideoCapture capture);
int main(int argc, char** argv)
{
VideoCapture capture; //the C++ API class to capture the video from file
if(argc == 2)
capture.open(argv[1]);
else
capture.open(0);
if(!capture.isOpened())
{
cout << "Cannot open video file " << endl;
return -1;
}
Mat frame;
namedWindow("display", CV_WINDOW_AUTOSIZE);
cout << "Get the video dimensions " << endl;
int fps = capture.get((int)CV_CAP_PROP_FPS);
int height = capture.get((int)CV_CAP_PROP_FRAME_HEIGHT);
int width = capture.get((int)CV_CAP_PROP_FRAME_WIDTH);
int noF = capture.get((int)CV_CAP_PROP_FRAME_COUNT);
CvSize size = cvSize(width , height);
cout << "Dimensions: " << width << height << endl;
cout << "Number of frames: " << noF << endl;
cout << "Frames per second: " << fps << endl;
while(true)
{
capture >> frame;
if(frame.empty())
break;
imshow("display", frame);
if (waitKey(30)== 'i')
break;
}
//resize_video();
}
void resize_video(VideoCapture capture)
{
cout << "Begin resizing video " << endl;
//return 0;
}
You want to call your function INSIDE the while loop, not after it (by then it is too late; the program is over).
So it might look like this:
void resize_video( Mat & image )
{
//
// do your processing
//
cout << "Begin resizing video " << endl;
}
and call it like:
while(true)
{
capture >> frame;
if(frame.empty())
break;
resize_video(frame);
imshow("display", frame);
if (waitKey(30)== 'i')
break;
}
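If the eventual goal really is resizing each frame, the placeholder body could call cv::resize; a small sketch (the 320x240 target is just an example value I picked):
void resize_video( Mat & image )
{
    cout << "Begin resizing video " << endl;
    // resize the frame in place to an example target size
    cv::resize(image, image, cv::Size(320, 240));
}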

OpenCV - image multiplication

Hi, I'm trying to play around a little with the Mat class.
I want to do an element-wise product of two images, the C++/OpenCV equivalent of MATLAB's immultiply.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat imgA, imgB;
Mat imgAB;
Mat product;
void printMinMax(Mat m, string s) {
double minVal;
double maxVal;
Point minLoc;
Point maxLoc;
minMaxLoc( m, &minVal, &maxVal, &minLoc, &maxLoc );
cout << "min val in " << s << ": " << minVal << endl;
cout << "max val in " << s << ": " << maxVal << endl;
}
int main(int /*argc*/, char** /*argv*/) {
cout << "OpenCV version: " << CV_MAJOR_VERSION << " " << CV_MINOR_VERSION << endl;
imgA = imread("test1.jpg");
cout << "original image size: " << imgA.rows << " " << imgA.cols << endl;
cout << "original type: " << imgA.type() << endl;
cvtColor(imgA, imgA, CV_BGR2GRAY);
printMinMax(imgA, "imgA");
imgB = imread("test2.jpg");
cout << "original image size: " << imgB.rows << " " << imgB.cols << endl;
cout << "original type: " << imgB.type() << endl;
cvtColor(imgB, imgB, CV_BGR2GRAY);
printMinMax(imgB, "imgB");
namedWindow("originals", CV_WINDOW_AUTOSIZE);
namedWindow("product", CV_WINDOW_AUTOSIZE);
imgAB = Mat( max(imgA.rows,imgB.rows), imgA.cols+imgB.cols, imgA.type());
imgA.copyTo(imgAB(Rect(0, 0, imgA.cols, imgA.rows)));
imgB.copyTo(imgAB(Rect(imgA.cols, 0, imgB.cols, imgB.rows)));
product = imgA.mul(imgB);
printMinMax(product, "product");
while( true )
{
char c = (char)waitKey(10);
if( c == 27 )
{ break; }
imshow( "originals", imgAB );
imshow( "product", product );
}
return 0;
}
Here is the result:
OpenCV version: 2 4
original image size: 500 500
original type: 16
min val in imgA: 99
max val in imgA: 255
original image size: 500 500
original type: 16
min val in imgB: 0
max val in imgB: 255
init done
opengl support available
min val in product: 0
max val in product: 255
I think the max value in the product should be greater than 255, but it is truncated to 255 because the type of the two matrices is 16.
I have tried converting the matrices to CV_32F, but then the maxVal of the product is 64009 (a number that I don't understand).
Thanks to Wajih's comment I did some basic testing and debugging, and got it working perfectly. I think this could become a mini tutorial on alpha blending and image multiplication, but for now it is only a few lines of commented code.
Note that the two images must be the same size, and of course some error checking should be added for solid code.
Hope it helps someone! And, of course, if you have hints to make this code more readable, more compact (one-liners are very much appreciated!) or more efficient, just comment. Thank you a lot!
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void printMinMax(Mat m, string name) {
double minVal;
double maxVal;
Point minLoc;
Point maxLoc;
if(m.channels() >1) {
cout << "ERROR: matrix "<<name<<" must have 1 channel for calling minMaxLoc" << endl;
}
minMaxLoc( m, &minVal, &maxVal, &minLoc, &maxLoc );
cout << "min val in " << name << ": " << minVal << " in loc: " << minLoc << endl;
cout << "max val in " << name << ": " << maxVal << " in loc: " << maxLoc << endl;
}
int main(int /*argc*/, char** /*argv*/) {
cout << "OpenCV version: " << CV_MAJOR_VERSION << " " << CV_MINOR_VERSION << endl; // 2 4
Mat imgA, imgB;
Mat imgAB;
Mat product;
// fast matrix creation, comma-separated initializer
// example1: create a matrix with value from 0 to 255
imgA = Mat(3, 3, CV_8UC1);
imgA = (Mat_<uchar>(3,3) << 0,1,2,3,4,5,6,7,255);
cout << "test Mat 3x3" << endl << imgA << endl;
// note that if a value exceeds 255 it is truncated to value % 256
imgA = (Mat_<uchar>(3,3) << 0,1, 258 ,3,4,5,6,7,255);
cout << "test Mat 3x3 with last element truncated to 258%256=2" << endl << imgA << endl;
// create a second matrix
imgB = Mat(3, 3, CV_8UC1);
imgB = (Mat_<uchar>(3,3) << 0,1,2,3,4,5,6,7,8);
// now the matrix product: we are multiplying a value that can go from 0-255 with another 0-255 value..
// the edge cases are "min * min" and "max * max",
// that means: our product is a function that return a value in the domain 0*0-255*255 ; 0-65025
// aha! this number exceeds the Mat 8UC1 domain, so we need a different data type.
// we need a bigger one.. let's say 32FC1
Mat imgA_32FC1 = imgA.clone();
imgA_32FC1.convertTo(imgA_32FC1, CV_32FC1);
Mat imgB_32FC1 = imgB.clone();
imgB_32FC1.convertTo(imgB_32FC1, CV_32FC1);
// after conversion.. are the values scaled?
cout << "imgA after conversion:" << endl << imgA_32FC1 << endl;
cout << "imgB after conversion:" << endl << imgB_32FC1 << endl;
product = imgA_32FC1.mul( imgB_32FC1 );
// note: the product values are in the range 0-65025
cout << "the product:" << endl << product << endl;
// now, this does not make much sense, because we started from a 0-255 range Mat and now we have a 0-65025 range..
// it is not the uchar range and it is not the full float range (which is a lot bigger than that)
// so, we can normalize back to 0-255
// what do I mean by 'normalize' here?
// I mean: scale all values by a constant that maps 0 to 0 and 65025 to 255..
product.convertTo(product, CV_32FC1, 1.0f/65025.0f * 255);
// but it is still 32FC1.. not like the starting matrix..
cout << "the product, normalized back to 0-255, still in 32FC1:" << endl << product << endl;
product.convertTo(product, CV_8UC1);
cout << "the product, normalized back to 0-255, now int 8UC1:" << endl << product << endl;
cout << "-----------------------------------------------------------" << endl;
// the real stuff now.
imgA = imread("test1.jpg");
cvtColor(imgA, imgA, CV_BGR2GRAY);
imgB = imread("test2.jpg");
cvtColor(imgB, imgB, CV_BGR2GRAY);
imgA_32FC1 = imgA.clone();
imgA_32FC1.convertTo(imgA_32FC1, CV_32FC1);
imgB_32FC1 = imgB.clone();
imgB_32FC1.convertTo(imgB_32FC1, CV_32FC1);
product = imgA_32FC1.mul( imgB_32FC1 );
printMinMax(product, "product");
product.convertTo(product, CV_32FC1, 1.0f/65025.0f * 255);
product.convertTo(product, CV_8UC1);
// concat two images in one big image
imgAB = Mat( max(imgA.rows,imgB.rows), imgA.cols+imgB.cols, imgA.type());
imgA.copyTo(imgAB(Rect(0, 0, imgA.cols, imgA.rows)));
imgB.copyTo(imgAB(Rect(imgA.cols, 0, imgB.cols, imgB.rows)));
namedWindow("originals", CV_WINDOW_AUTOSIZE);
namedWindow("product", CV_WINDOW_AUTOSIZE);
while( true )
{
char c = (char)waitKey(10);
if( c == 27 )
{ break; }
imshow( "originals", imgAB );
imshow( "product", product );
}
return 0;
}
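As a side note on the request for more compact code: cv::normalize with NORM_MINMAX can replace the two convertTo calls, with the caveat that it stretches whatever min/max the product actually has to 0-255 instead of always dividing by 65025, so it is not byte-for-byte identical (just a sketch):
// map the product's actual min..max to 0..255 and convert to 8-bit in one call
Mat product_8u;
normalize(product, product_8u, 0, 255, NORM_MINMAX, CV_8U);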
You are right: you should convert your matrices imgA, imgB to, say, CV_32FC1. Since the max value in these matrices is 255, the maximum possible product value is 65025. However, the maxima of imgA and imgB may not be at the same location, so 64009 is quite possible (for instance, 253 x 253 = 64009).

Problem displaying an image using OpenCV in C++

I wish to display an image using C++ OpenCV code. Below is the code fragment:
//Load image
Mat img = imread("Example1.jpg");
//Display in window
namedWindow("Skin");
//display image
imshow("Skin",img);
//returns the matrix size
cout<<img.size()<<endl; // wrong
Please help with how to display the size. I have tried many times but failed.
//Load image
Mat img = imread("Example1.jpg");
//Display in window
namedWindow("Skin");
//display image
imshow("Skin",img);
//returns the matrix size
cout << "width= " << img.cols << " height= " << img.rows<< endl;
//OR if you wanna use the size() getter.
cout << "width= " << img.size().width << " height= " << img.size().height << endl;