Converting a Float to RGB data in opencv - c++

I have a 2D vector of float values from which I need to create an image.
The code that I have is as follows:
inline cv::Mat ConvertToMat(vector<vector<float>> inputData)
{
int Red, Blue, Green;
float maxValue = GetMaxValue(inputData); // find the max value in the input data
cv::Mat output(inputData.size(), inputData[0].size(), CV_8UC3, cv::Scalar::all(0));
// if the max value is equal to or less than 0, there is no data in the vector to convert.
if (maxValue > 0)
{
for (int x = 0; x < inputData.size(); x++)
{
for (int y = 0; y < inputData[x].size(); y++)
{
auto Value = inputData[x][y];
Green = 0;
Red = Value * 255 / maxValue;
Blue = (maxValue - Value) * 255 / maxValue;
cv::Vec3b xyzBuffer;
xyzBuffer[0] = Blue;
xyzBuffer[1] = Red;
xyzBuffer[2] = Green;
output.at<cv::Vec3b>(x, y) = xyzBuffer;
}
}
}
return output;
}
But this method doesn't generate suitable results when there is one pixel with a very high value and many pixels with small values: all the small values become indistinguishable in the output.
For example, let's look at this set of data as input:
int main()
{
vector<vector<float>> inputData =
{
{1,2,3,4,5,6,7,8,9,10},
{1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5},
{1,2,3,4,5,6,7,8,9,10},
{1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5},
{1,2,3,4,2000,6,7,8,9,10},
{1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5},
{1,2,3,4,5,6,7,8,9,10},
{1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5},
{1,2,3,4,5,6,7,8,9,10},
{1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5}
};
cv::Mat image=ConvertToMat(inputData);
cv::imwrite("c://tmp//myimage.jpg", image);
return 0;
}
The generated output is as follows (the value of each pixel is shown on the pixel):
Since we have 3 bytes of colour data per pixel, we should have enough dynamic range to show each pixel's value as a different colour, but the above algorithm converts the values 1, 2 and 3 into the same colour (254, 0, 0).
How can I convert a float into three colour channels so that all pixels are visually distinguishable (pixels with nearby values get similar, but not identical, colours)?
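One way to get the dynamic range back, given a single huge outlier, is to compress the values with a logarithm and let OpenCV assign the colours via cv::applyColorMap (in the main imgproc module since OpenCV 3). The following is only a sketch of that idea, not the code from the question; ConvertToMatLog is a hypothetical name and the inline scan stands in for GetMaxValue:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>
#include <vector>

// Sketch: log-scale the floats into 0-255, then map to a false-colour image.
inline cv::Mat ConvertToMatLog(const std::vector<std::vector<float>>& inputData)
{
    int rows = (int)inputData.size();
    int cols = (int)inputData[0].size();
    cv::Mat gray(rows, cols, CV_8UC1, cv::Scalar(0));

    float maxValue = 0.0f;
    for (const auto& row : inputData)
        for (float v : row)
            maxValue = std::max(maxValue, v);

    if (maxValue > 0)
    {
        float logMax = std::log1p(maxValue);
        for (int x = 0; x < rows; x++)
            for (int y = 0; y < cols; y++)
                gray.at<uchar>(x, y) = cv::saturate_cast<uchar>(
                    std::log1p(std::max(0.0f, inputData[x][y])) * 255.0f / logMax);
    }

    cv::Mat colour;
    cv::applyColorMap(gray, colour, cv::COLORMAP_JET); // CV_8UC3 output
    return colour;
}

With the sample data above, the values 1, 2 and 3 then land on distinct 8-bit codes instead of collapsing into one colour, while the 2000 outlier still maps to 255.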

Related

My C++ code is not detecting objects correctly with yolov5

I have a yolov5 ONNX file that I trained on apples and bananas. I was using Python until today, but I decided to switch to C++ to gain some speed. I get correct results when I use yolov5's own ONNX files and image in the code I added below, but when I use my own ONNX file and my test image it gives me the wrong result. You can also find the attached image. What is the problem here?
// Include Libraries.
#include <opencv2/opencv.hpp>
#include <fstream>
// Namespaces.
using namespace cv;
using namespace std;
using namespace cv::dnn;
// Constants.
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.3;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.65;
// Text parameters.
const float FONT_SCALE = 0.7;
const int FONT_FACE = FONT_HERSHEY_SIMPLEX;
const int THICKNESS = 1;
// Colors.
Scalar BLACK = Scalar(0,0,0);
Scalar BLUE = Scalar(255, 178, 50);
Scalar YELLOW = Scalar(0, 255, 255);
Scalar RED = Scalar(0,0,255);
// Draw the predicted bounding box.
void draw_label(Mat& input_image, string label, int left, int top)
{
// Display the label at the top of the bounding box.
int baseLine;
Size label_size = getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS, &baseLine);
top = max(top, label_size.height);
// Top left corner.
Point tlc = Point(left, top);
// Bottom right corner.
Point brc = Point(left + label_size.width, top + label_size.height + baseLine);
// Draw black rectangle.
rectangle(input_image, tlc, brc, BLACK, FILLED);
// Put the label on the black rectangle.
putText(input_image, label, Point(left, top + label_size.height), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS);
}
vector<Mat> pre_process(Mat &input_image, Net &net)
{
// Convert to blob.
Mat blob;
blobFromImage(input_image, blob, 1./255., Size(INPUT_WIDTH, INPUT_HEIGHT), Scalar(), true, false);
net.setInput(blob);
// Forward propagate.
vector<Mat> outputs;
net.forward(outputs, net.getUnconnectedOutLayersNames());
return outputs;
}
Mat post_process(Mat &input_image, vector<Mat> &outputs, const vector<string> &class_name)
{
// Initialize vectors to hold respective outputs while unwrapping detections.
vector<int> class_ids;
vector<float> confidences;
vector<Rect> boxes;
// Resizing factor.
float x_factor = input_image.cols / INPUT_WIDTH;
float y_factor = input_image.rows / INPUT_HEIGHT;
float *data = (float *)outputs[0].data;
const int dimensions = 85;
const int rows = 25200;
// Iterate through 25200 detections.
for (int i = 0; i < rows; ++i)
{
float confidence = data[4];
// Discard bad detections and continue.
if (confidence >= CONFIDENCE_THRESHOLD)
{
float * classes_scores = data + 5;
// Create a 1x85 Mat and store class scores of 80 classes.
Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
// Perform minMaxLoc and acquire index of best class score.
Point class_id;
double max_class_score;
minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
// Continue if the class score is above the threshold.
if (max_class_score > SCORE_THRESHOLD)
{
// Store class ID and confidence in the pre-defined respective vectors.
confidences.push_back(confidence);
class_ids.push_back(class_id.x);
// Center.
float cx = data[0];
float cy = data[1];
// Box dimension.
float w = data[2];
float h = data[3];
// Bounding box coordinates.
int left = int((cx - 0.5 * w) * x_factor);
int top = int((cy - 0.5 * h) * y_factor);
int width = int(w * x_factor);
int height = int(h * y_factor);
// Store good detections in the boxes vector.
boxes.push_back(Rect(left, top, width, height));
}
}
// Jump to the next detection (85 floats per row).
data += 85;
}
// Perform Non Maximum Suppression and draw predictions.
vector<int> indices;
NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
for (int i = 0; i < indices.size(); i++)
{
int idx = indices[i];
Rect box = boxes[idx];
int left = box.x;
int top = box.y;
int width = box.width;
int height = box.height;
// Draw bounding box.
rectangle(input_image, Point(left, top), Point(left + width, top + height), BLUE, 3*THICKNESS);
// Get the label for the class name and its confidence.
string label = format("%.2f", confidences[idx]);
label = class_name[class_ids[idx]] + ":" + label;
// Draw class labels.
draw_label(input_image, label, left, top);
//cout<<"The Value is "<<label;
//cout<<endl;
}
return input_image;
}
int main()
{
vector<string> class_list;
ifstream ifs("/Users/admin/Documents/C++/First/obj.names");
string line;
while (getline(ifs, line))
{
class_list.push_back(line);
}
// Load image.
Mat frame;
frame = imread("/Users/admin/Documents/C++/First/test.jpg");
// Load model.
Net net;
net = readNet("/Users/admin/Documents/C++/First/my.onnx");
vector<Mat> detections;
detections = pre_process(frame, net);
Mat img = post_process(frame, detections, class_list);
//Mat img = post_process(frame.clone(), detections, class_list);
// Put efficiency information.
// The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
string label = format("Inference time : %.2f ms", t);
putText(img, label, Point(20, 40), FONT_FACE, FONT_SCALE, RED);
imshow("Output", img);
waitKey(0);
return 0;
}
The photos I use are 640x480. I played around with the size of the photo, thinking it might be related, but the same problem persisted.
The Yolov5 output format is xyxy, as can be seen here:
https://github.com/ultralytics/yolov5/blob/bfa1f23045c7c4136a9b8ced9d6be8249ed72692/detect.py#L161
not xywh, as you are assuming in your code.
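If that is indeed the layout your export produces, the box decoding in post_process would change along these lines (a sketch under that assumption, not verified against your model):

// Hypothetical decoding if data[0..3] were x1,y1,x2,y2 rather than cx,cy,w,h.
float x1 = data[0];
float y1 = data[1];
float x2 = data[2];
float y2 = data[3];
int left = int(x1 * x_factor);
int top = int(y1 * y_factor);
int width = int((x2 - x1) * x_factor);
int height = int((y2 - y1) * y_factor);
boxes.push_back(Rect(left, top, width, height));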

Setting pixel color of 8-bit grayscale image using pointer

I have this code:
QImage grayImage = image.convertToFormat(QImage::Format_Grayscale8);
int size = grayImage.width() * grayImage.height();
QRgb *data = new QRgb[size];
memmove(data, grayImage.constBits(), size * sizeof(QRgb));
QRgb *ptr = data;
QRgb *end = ptr + size;
for (; ptr < end; ++ptr) {
int gray = qGray(*ptr);
}
delete[] data;
It is based on this: https://stackoverflow.com/a/40740985/8257882
How can I set the color of a pixel using that pointer?
In addition, using qGray() and loading a "bigger" image seems to crash this.
This works:
int width = image.width();
int height = image.height();
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
image.setPixel(x, y, qRgba(0, 0, 0, 255));
}
}
But it is slow when compared to explicitly manipulating the image data.
Edit
Ok, I have this code now:
for (int y = 0; y < height; ++y) {
uchar *line = grayImage.scanLine(y);
for (int x = 0; x < width; ++x) {
int gray = qGray(line[x]);
*(line + x) = uchar(gray);
qInfo() << gray;
}
}
And it seems to work. However, when I use an image that has only black and white colors and print the gray value, black color gives me 0 and white gives 39. How can I get the gray value in a range of 0-255?
First of all, you are copying too much data in this line:
memmove(data, grayImage.constBits(), size * sizeof(QRgb));
The size of QRgb is 4 bytes, but according to the documentation, the size of a Format_Grayscale8 pixel is only 8 bits, or 1 byte. If you remove sizeof(QRgb) you should be copying the correct number of bytes, assuming all the lines in the bitmap are consecutive (which, according to the documentation, they are not -- they are aligned to a minimum of 32 bits, so you would have to account for that in size). The array data should not be of type QRgb[size] but uchar[size]. You can then modify data as you like. Finally, you will probably have to create a new QImage with one of the constructors that accept image bits as uchar, and assign the new image to the old image:
auto newImage = QImage( data, image.width(), image.height(), QImage::Format_Grayscale8, ...);
grayImage = std::move( newImage );
But instead of copying image data, you could probably just modify grayImage directly by accessing its data through bits(), or even better, through scanLine(), maybe something like this:
int line, column;
auto pLine = grayImage.scanLine(line);
*(pLine + column) = uchar(grayValue);
EDIT:
According to scanLine documentation, the image is at least 32-bit aligned. So if your 8-bit grayScale image is 3 pixels wide, a new scan line will start every 4 bytes. If you have a 3x3 image, the total size of the memory required to hold the image pixels will be 12. The following code shows the required memory size:
#include <QImage>
#include <iostream>

int main() {
auto image = QImage(3, 3, QImage::Format_Grayscale8);
std::cout << image.bytesPerLine() * image.height() << "\n";
return 0;
}
The fill method (setting all gray values to 0xC0) could be implemented like this:
auto image = QImage(3, 3, QImage::Format_Grayscale8);
uchar gray = 0xc0;
for ( int i = 0; i < image.height(); ++i ) {
auto pLine = image.scanLine( i );
for ( int j = 0; j < image.width(); ++j )
*pLine++ = gray;
}
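As an aside on the 0-39 question from the edit above (my reading, not part of the original answer): qGray() expects a QRgb, so feeding it a raw grayscale byte treats that byte as the blue channel only, and Qt's formula (r * 11 + g * 16 + b * 5) / 32 turns white (255) into 39. In Format_Grayscale8 each byte already is the gray value, so it can be read and written directly:

// Sketch: access Format_Grayscale8 pixels without qGray().
for (int y = 0; y < grayImage.height(); ++y) {
    uchar *line = grayImage.scanLine(y);
    for (int x = 0; x < grayImage.width(); ++x) {
        int gray = line[x];          // already in the 0-255 range
        line[x] = uchar(255 - gray); // e.g. invert the pixel
    }
}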

Converting Depth Image into open cv Mat format

I am using a PMD camera to capture a depth image in the format below:
struct DepthData
{
int version;
std::chrono::microseconds timeStamp;
uint16_t width;
uint16_t height;
std::vector<uint16_t> exposureTimes; //!< vector of exposure times
std::vector<DepthPoint> points; //!< array of points
};
Depth point structure looks like this
struct DepthPoint
{
float x; //!< X coordinate [meters]
float y; //!< Y coordinate [meters]
float z; //!< Z coordinate [meters]
float noise; //!< noise value [meters]
uint16_t grayValue; //!< 16-bit gray value
uint8_t depthConfidence; //!< value 0 = bad, 255 = good
};
And I am trying to convert it into an OpenCV Mat data structure. Below is the code,
but it is throwing an exception. Kindly help.
const int imageSize = w * h;
Mat out = cv::Mat(h, w, CV_16UC3, Scalar(0, 0, 0));
const Scalar S;
for (int h = 0; h < out.rows; h++)
{
//printf("%" PRIu64 "\n", point.at(h).grayValue);
for (int w = 0; w < out.cols; w++)
{
//printf("%" PRIu64 "\n", point.at(h).grayValue);
out.at<cv::Vec3f>(h,w)[0] = point[w].x;
out.at<cv::Vec3f>(h, w)[1] = point[w].y;
out.at<cv::Vec3f>(h, w)[2] = point[w].z;
}
}
imwrite("E:/softwares/1.8.0.71/bin/depthImage1.png", out);
You seem to be doing a couple of things wrong.
You are creating an image of type CV_16UC3, which is a 3-channel 16-bit unsigned integer image. Then in out.at<cv::Vec3f>(h,w)[0] you try to read part of it as a vector of 3 floats. You should probably create your image as a float image instead.
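As an illustration of the float-image suggestion, a minimal sketch (assuming points is a row-major std::vector<DepthPoint> with width * height entries, which is my reading of the struct above):

// Sketch: pack x/y/z into a 3-channel float image.
cv::Mat out(h, w, CV_32FC3);
for (int row = 0; row < h; ++row)
{
    for (int col = 0; col < w; ++col)
    {
        const DepthPoint &p = points[row * w + col];
        out.at<cv::Vec3f>(row, col) = cv::Vec3f(p.x, p.y, p.z);
    }
}
// Note: imwrite cannot store CV_32FC3 as PNG; convert to 8- or 16-bit first.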
For further details please provide the exception. It will be easier to help.
UPD: If you just want a depth image, create an image like this:
Mat out = cv::Mat::zeros(h, w, CV_16UC1);
Then in every pixel:
out.at<uint16_t>(h, w) = depth_point.grayValue;
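Written out as a complete loop (again a sketch assuming row-major points), and noting that imwrite can store 16-bit single-channel PNGs directly:

// Sketch: copy the 16-bit gray values and save them as a 16-bit PNG.
cv::Mat out = cv::Mat::zeros(h, w, CV_16UC1);
for (int row = 0; row < h; ++row)
{
    for (int col = 0; col < w; ++col)
    {
        out.at<uint16_t>(row, col) = points[row * w + col].grayValue;
    }
}
cv::imwrite("depthImage1.png", out);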

c++ and opencv get and set pixel color to Mat

I'm trying to set a new color value for some pixels in a cv::Mat image. My code is below:
Mat image = img;
for(int y=0;y<img.rows;y++)
{
for(int x=0;x<img.cols;x++)
{
Vec3b color = image.at<Vec3b>(Point(x,y));
if(color[0] > 150 && color[1] > 150 && color[2] > 150)
{
color[0] = 0;
color[1] = 0;
color[2] = 0;
cout << "Pixel >200 :" << x << "," << y << endl;
}
else
{
color.val[0] = 255;
color.val[1] = 255;
color.val[2] = 255;
}
}
}
imwrite("../images/imgopti"+to_string(i)+".tiff",image);
It seems to find the right pixels (the cout output is correct), but in the output image (written with imwrite) the pixels concerned aren't modified. I have already tried using color.val[0]... I still can't figure out why the pixel colors in the output image don't change.
Thanks
You did everything except copying the new pixel value back to the image.
This line takes a copy of the pixel into a local variable:
Vec3b color = image.at<Vec3b>(Point(x,y));
So, after changing color as you require, just set it back like this:
image.at<Vec3b>(Point(x,y)) = color;
So, in full, something like this:
Mat image = img;
for(int y=0;y<img.rows;y++)
{
for(int x=0;x<img.cols;x++)
{
// get pixel
Vec3b & color = image.at<Vec3b>(y,x);
// ... do something to the color ....
color[0] = 13;
color[1] = 13;
color[2] = 13;
// set pixel (only needed if you took a copy instead of a reference)
//image.at<Vec3b>(Point(x,y)) = color;
}
}
just use a reference:
Vec3b & color = image.at<Vec3b>(y,x);
color[2] = 13;
I would not use .at for performance reasons.
Define a struct:
//#pragma pack(push, 2) // not useful (see comments below)
struct BGR {
uchar blue;
uchar green;
uchar red;
};
And then use it like this on your cv::Mat image:
BGR& bgr = image.ptr<BGR>(y)[x];
image.ptr(y) gives you a pointer to scanline y; iterate over the pixels with loops over x and y.
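A possible usage of that struct view, as a sketch (assuming a CV_8UC3 image):

// Sketch: walk each scanline through the BGR view and halve the red channel.
for (int y = 0; y < image.rows; ++y)
{
    BGR* row = image.ptr<BGR>(y);
    for (int x = 0; x < image.cols; ++x)
    {
        row[x].red /= 2;
    }
}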

Average values of a MAT channel

I want to obtain the average values of a Mat and a MatND variable, just to estimate the sharpness and brightness. However, I keep getting values that make no sense to me. I have tried my best but am still confused, and I really do not know if I am doing the right thing.
calcHist(&src_yuv,1,channels,Mat(),hist,1,histSize,ranges,true,false);
Size d = hist.size();
rows = d.height;
cols = d.width;
for(int k=0;k<hbins;k++)
{
for(int l=0;l<sbins;l++)
{
total = total + hist.at<float>(k,l);
}
}
brightness = total/(rows*cols);
Here, I am trying to calculate the histogram of the luma channel of src_yuv, which is in YUV format, and then average the values. Am I doing it the right way? If I change the data type within <' '>, ranging from uchar to long int, I end up with different values, which is understandable, but I don't know which is the right data type to use. Moreover, should I loop over hbins and sbins, or over rows and cols? Please help me; I have been stuck on this for a long time.
Laplacian(src_gray,dst,ddepth,kernel_size,scale,delta,BORDER_DEFAULT);
Size s = dst.size();
rows = s.height;
cols = s.width;
total = 0;
max = 0;
for(int k=0;k<rows;k++)
{
for(int l=0;l<cols;l++)
{
total = total + dst.at<>(k,l);
}
}
average = total/(rows*cols);
What is the exact way to compute the average in the above case? Could you please help me here? I tried different data types, starting from int for the Mat and long int for the totals and averages. It's a grayscale image, and the result is the Laplacian-convolved image.
Convert the input src_yuv to BGR before calcHist and you will get the desired output with the same code.
EDIT: for YUV:
out = imread("Lena.jpg");
cvtColor(out, out, CV_BGR2YCrCb); // note: convertTo() changes the depth, not the colour space
MatND hist;
int hbins = 30, sbins = 32;
int histSize[] = {hbins, sbins};
float hranges[] = { 0, 180 };
float sranges[] = { 0, 256 };
int channels[] = {0,1,2};
const float* ranges[] = { hranges, sranges };
calcHist( &out, 1, channels, Mat(),
hist, 2, histSize, ranges,
true,
false );
Size d = hist.size();
int rows = d.height;
int cols = d.width;
float total = 0;
float brightness;
for(int k=0;k<hbins;k++)
{
for(int l=0;l<sbins;l++)
{
total = total + hist.at<float>(k,l);
}
}
brightness = total/(rows*cols);
This gives me a brightness of 246.895.
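For what it's worth, both averages can also be computed directly with cv::mean, skipping the histogram entirely. A sketch, assuming a BGR image src and a grayscale src_gray as in the question:

// Sketch: mean luma as a brightness estimate.
Mat yuv;
cvtColor(src, yuv, CV_BGR2YCrCb);
Scalar brightness = mean(yuv); // brightness[0] is the average Y value (0-255)

// Sketch: mean absolute Laplacian response as a sharpness estimate.
Mat lap;
Laplacian(src_gray, lap, CV_32F);
Scalar sharpness = mean(abs(lap));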