#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <QtGui>
//make QImage point to the contents of cv::Mat
inline QImage const mat_to_qimage_ref(cv::Mat &mat)
{
return QImage((unsigned char*)(mat.data), mat.cols, mat.rows, mat.step1(), QImage::Format_RGB32);
}
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
QImage img("lena2.jpg");
cv::Mat mat(img.height(), img.width(), CV_8UC4, img.bits(), img.bytesPerLine());
QImage img2 = mat_to_qimage_ref(mat); //#1
//QImage img2((unsigned char*)(mat.data), mat.cols, mat.rows, mat.step, QImage::Format_RGB32); #2
QLabel label;
label.setPixmap(QPixmap::fromImage(img2)); // crashes here
label.show();
return a.exec();
}
#2 is OK, but does #1 cause undefined behavior? (In my case it crashes.)
Besides, if you use it as in the code below, it works fine:
cv::Mat img = cv::imread("lena2.jpg");
QLabel label;
label.setPixmap(QPixmap::fromImage(mat_to_qimage_ref(img)));
label.show();
I don't know what is happening; is it something related to a cyclic dependency?
Your function should look like this:
QImage mat_to_qimage_ref(const cv::Mat3b &src) {
QImage dest(src.cols, src.rows, QImage::Format_ARGB32);
for (int y = 0; y < src.rows; ++y) {
const cv::Vec3b *srcrow = src[y];
QRgb *destrow = (QRgb*)dest.scanLine(y);
for (int x = 0; x < src.cols; ++x) {
destrow[x] = qRgba(srcrow[x][2], srcrow[x][1], srcrow[x][0], 255);
}
}
return dest;
}
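A quick usage sketch (the file name is just a placeholder): since this version copies the pixels into the QImage, the result stays valid even after the Mat goes out of scope.
cv::Mat mat = cv::imread("lena2.jpg");    // CV_8UC3, BGR order
QImage img = mat_to_qimage_ref(mat);      // deep copy into ARGB32; BGR handled by the qRgba() call
QLabel label;
label.setPixmap(QPixmap::fromImage(img));
label.show();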
If you don't want to copy the image data, but just create a new QImage header for your data, try this:
Mat mat = Mat(...);
QImage qImage = QImage(
(const uchar*)(mat.data),
mat.cols,
mat.rows,
mat.step1(),
QImage::Format_ARGB32); // if you have no alpha channel (CV_8UC3),
// you can use Format_RGB888
Also note that OpenCV normally uses BGR channel order; you can use rgbSwapped() to fix this, but I don't know whether the data gets copied by that call.
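One more caveat about the non-copying variant: the QImage only references the Mat's buffer, so the Mat must outlive it. A minimal sketch (assuming CV_8UC3 input) that produces an owning copy when you need one:
cv::Mat mat = cv::imread("lena2.jpg");                     // CV_8UC3, BGR
QImage view((const uchar*)mat.data, mat.cols, mat.rows,
            (int)mat.step, QImage::Format_RGB888);         // shares mat's buffer, no copy
QImage owned = view.rgbSwapped();                          // swaps BGR -> RGB and allocates new pixel data
// 'owned' is safe to keep around; 'view' must not outlive 'mat'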
I made an Image Editor in Qt / OpenCV where you can load an image from the file explorer and grayscale / adaptive-threshold / resize it afterwards.
Bug 1: When I resize the loaded image to (for example) 600x600 pixels using my ImageProcessor::Resize(int, int) method, it works fine. But when I change it to something like 546x750 pixels, the image gets a weird grayscale.
Bug 2: When I want to resize my grayscaled/thresholded image, it always gets a weird grayscale similar to Bug 1.
Code:
mainwindow.cpp
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "resizer.h"
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
, ui(new Ui::MainWindow)
{
ui->setupUi(this);
}
MainWindow::~MainWindow()
{
delete ui;
}
void MainWindow::Display(cv::Mat inputImage)
{
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888);
scene->addPixmap(QPixmap::fromImage(image));
ui->graphicsView->setScene(scene);
ui->graphicsView->show();
}
void MainWindow::on_actionOpen_triggered()
{
QString file = QFileDialog::getOpenFileName(this, "Open", "", "Images (*.jpg *.png)");
std::string filename = file.toStdString();
inputImage = cv::imread(filename);
Display(inputImage);
imgProc = new ImageProcessor(inputImage);
}
void MainWindow::on_pushButton_clicked() // Grayscale
{
scene->clear();
imgProc->mode = 1;
inputImage = imgProc->Grayscale();
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
scene->addPixmap(QPixmap::fromImage(image));
ui->graphicsView->setScene(scene);
ui->graphicsView->show();
}
void MainWindow::on_pushButton_2_clicked() // ADT
{
scene->clear();
imgProc->mode = 2;
inputImage = imgProc->AdaptiveThreshold();
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
scene->addPixmap(QPixmap::fromImage(image));
ui->graphicsView->setScene(scene);
ui->graphicsView->show();
}
void MainWindow::on_pushButton_3_clicked() // Resize
{
scene->clear();
Resizer resizer;
resizer.exec();
int newWidth = resizer.GetWidth();
int newHeight = resizer.GetHeight();
inputImage = imgProc->Resize(newWidth, newHeight);
if(imgProc->mode == 1 || imgProc->mode == 2)
{
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
scene->addPixmap(QPixmap::fromImage(image));
ui->graphicsView->setScene(scene);
ui->graphicsView->show();
}
else
{
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888);
scene->addPixmap(QPixmap::fromImage(image));
ui->graphicsView->setScene(scene);
ui->graphicsView->show();
}
}
imageprocessor.cpp
#include "imageprocessor.h"
ImageProcessor::ImageProcessor(cv::Mat inputImage)
{
this->inputImage = inputImage;
}
cv::Mat ImageProcessor::Resize(int width, int height)
{
cv::Mat resized;
cv::resize(inputImage, resized, cv::Size(width, height), cv::INTER_LINEAR);
return resized;
}
cv::Mat ImageProcessor::Grayscale()
{
cv::Mat grayscaled;
cv::cvtColor(inputImage, grayscaled, cv::COLOR_RGB2GRAY);
return grayscaled;
}
cv::Mat ImageProcessor::AdaptiveThreshold()
{
cv::Mat binarized, grayscaled;
cv::cvtColor(inputImage, grayscaled, cv::COLOR_RGB2GRAY);
cv::adaptiveThreshold(grayscaled, binarized, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 15, 11);
return binarized;
}
QImage::Format_RGB888, the format type you chose, means that:
The image is stored using a 24-bit RGB format (8-8-8).
If your image has 3 channels, then your approach is correct, except that you should add this:
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888).rgbSwapped();
You need to append rgbSwapped() at the end because Qt expects RGB order while OpenCV delivers BGR.
If you want to show a grayscale image in the GUI, then you need to use the QImage::Format_Grayscale8 format type, which means:
The image is stored using an 8-bit grayscale format.
Here is the documentation for the formats.
Note: How do you resize your image? With an OpenCV function? Share resizer.h and I will update the answer accordingly.
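In the meantime, a hedged sketch of a Display() that covers both the color and the grayscale case. It picks the format from the channel count and also passes the Mat's row stride explicitly, because the QImage(data, width, height, format) constructor assumes 32-bit-aligned scanlines, which does not hold for a width like 546 with 3 channels (adjust the declaration in mainwindow.h to match):
void MainWindow::Display(const cv::Mat &m)
{
    QImage image;
    if (m.channels() == 1)
        image = QImage(m.data, m.cols, m.rows,
                       static_cast<int>(m.step), QImage::Format_Grayscale8);
    else
        image = QImage(m.data, m.cols, m.rows,
                       static_cast<int>(m.step), QImage::Format_RGB888).rgbSwapped();
    scene->addPixmap(QPixmap::fromImage(image));
    ui->graphicsView->setScene(scene);
    ui->graphicsView->show();
}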
I have started learning Qt and am trying to make a simple video player that loads a video and plays it. That worked perfectly fine. Now I have added thresholding functionality to it. The threshold value is obtained from a spinBox.
The code is written so that the thresholding operation is applied with the value from the spinBox, except at value 0 (where the normal video is displayed).
So this is my function for that:
void Player::run()
{
while(!stop )
{
if(!capture.read(frame))
stop = true;
// convert RGB to gray
if(frame.channels() == 3)
{
if(thresh == 0)
{
cvtColor(frame, RGBframe, CV_BGR2RGB);
img = QImage((const unsigned char*)(RGBframe.data),
RGBframe.cols,RGBframe.rows,QImage::Format_RGB888);
}
else
{
Mat temp;
cvtColor(frame, temp, CV_BGR2GRAY);
threshold(temp, binary, thresh, 255, 0);
img = QImage((const unsigned char*)(binary.data),
binary.cols, binary.rows, QImage::Format_Indexed8);
bool save = img.save("/home/user/binary.png");
cout<<"threshold value = "<<thresh<<endl;
//imshow("Binary", binary);
}
}
else
{
if(thresh == 0) // original Image
{
img = QImage((const unsigned char*)(frame.data),
frame.cols,frame.rows,QImage::Format_Indexed8);
}
else // convert to Binary Image
{
threshold(frame, binary, thresh, 255, 0);
img = QImage((const unsigned char*)(binary.data),
binary.cols, binary.rows, QImage::Format_Indexed8);
}
}
emit processedImage(img);
this->msleep(delay);
}
}
For a spinBox value of 0 it runs fine, but when the spinBox value is incremented I get only a black screen. I tried imshow(cv::Mat binary) and it shows the correct binary image, but when I save QImage img it is just random black and white pixels (though of the same size as the original frame).
It seems that you're missing the color table for your indexed image. You need to add a color table (before the while loop):
QVector<QRgb> sColorTable(256);
for (int i = 0; i < 256; ++i){ sColorTable[i] = qRgb(i, i, i); }
and after you create the QImage from the binary Mat you need to add
img.setColorTable(sColorTable);
Or, as pointed out by @KubaOber, from Qt 5.5 on you can also use the format QImage::Format_Grayscale8:
// From Qt 5.5
QImage image(inMat.data, inMat.cols, inMat.rows,
static_cast<int>(inMat.step),
QImage::Format_Grayscale8);
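In the question's run() loop that amounts to something like this for the threshold branch (a sketch that keeps the question's variable names; the final copy() is optional but detaches the QImage from binary before the next frame overwrites it):
Mat temp;
cvtColor(frame, temp, CV_BGR2GRAY);
threshold(temp, binary, thresh, 255, 0);
img = QImage((const unsigned char*)(binary.data),
             binary.cols, binary.rows,
             static_cast<int>(binary.step),
             QImage::Format_Grayscale8).copy();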
In general, you can wrap all Mat-to-QImage conversions in a function. Below is a bug-corrected and updated version of cvMatToQImage, originally found here.
You can then remove all the conversions to QImage from your code and use this function instead.
QImage cvMatToQImage(const cv::Mat &inMat)
{
switch (inMat.type())
{
// 8-bit, 4 channel
case CV_8UC4:
{
QImage image(inMat.data,
inMat.cols, inMat.rows,
static_cast<int>(inMat.step),
QImage::Format_ARGB32);
return image;
}
// 8-bit, 3 channel
case CV_8UC3:
{
QImage image(inMat.data,
inMat.cols, inMat.rows,
static_cast<int>(inMat.step),
QImage::Format_RGB888);
return image.rgbSwapped();
}
// 8-bit, 1 channel
case CV_8UC1:
{
#if QT_VERSION >= 0x050500
// From Qt 5.5
QImage image(inMat.data, inMat.cols, inMat.rows,
static_cast<int>(inMat.step),
QImage::Format_Grayscale8);
#else
static QVector<QRgb> sColorTable;
// only create our color table the first time
if (sColorTable.isEmpty())
{
sColorTable.resize(256);
for (int i = 0; i < 256; ++i)
{
sColorTable[i] = qRgb(i, i, i);
}
}
QImage image(inMat.data,
inMat.cols, inMat.rows,
static_cast<int>(inMat.step),
QImage::Format_Indexed8);
image.setColorTable(sColorTable);
#endif
return image;
}
default:
qWarning() << "cvMatToQImage() - cv::Mat image type not handled in switch:" << inMat.type();
break;
}
return QImage();
}
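With that helper, the body of Player::run() shrinks to something like this for the 3-channel case (a sketch keeping the question's member names; cvMatToQImage() already handles the BGR-to-RGB swap):
if (thresh == 0)
{
    img = cvMatToQImage(frame);
}
else
{
    Mat temp;
    cvtColor(frame, temp, CV_BGR2GRAY);
    threshold(temp, binary, thresh, 255, 0);
    img = cvMatToQImage(binary);
}
emit processedImage(img);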
I'm trying to make a polygonal hole in a QImage's alpha channel.
My current implementation uses the deprecated alphaChannel() method and is slow (because it calls containsPoint() for every image pixel instead of drawing the polygon).
QImage makeImageWithHole(const QImage & image, const std::vector<QPoint> & hole_points)
{
QImage newImage = image.convertToFormat(QImage::Format_ARGB32);
QImage alpha = newImage.alphaChannel();
QPolygon hole(QVector<QPoint>::fromStdVector(hole_points));
for (int x = 0; x < image.width(); x++)
{
for (int y = 0; y < image.height(); y++)
{
if (hole.containsPoint(QPoint(x, y), Qt::OddEvenFill))
{
alpha.setPixel(x, y, 0);
}
}
}
newImage.setAlphaChannel(alpha);
return newImage;
}
I also tried to implement it using a painter and a suitable composition mode, but the result has white artifacts on the polygon borders.
QImage makeImageWithHole(const QImage & image, const std::vector<QPoint> & hole)
{
QImage newImage = image.convertToFormat(QImage::Format_ARGB32);
QPainter p(&newImage);
p.setCompositionMode(QPainter::CompositionMode_SourceOut);
p.setPen(QColor(255, 255, 255, 255));
p.setBrush(QBrush(QColor(255, 255, 255, 255)));
p.drawPolygon(hole.data(), hole.size());
p.end();
return newImage;
}
What is the proper way to do this?
I think you should enable antialiasing like this:
QPainter p(&newImage);
p.setRenderHints(QPainter::Antialiasing);
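Applied to the painter-based version from the question, that looks like this (a sketch; everything except the added render hint is unchanged from the question's code):
QImage makeImageWithHole(const QImage &image, const std::vector<QPoint> &hole)
{
    QImage newImage = image.convertToFormat(QImage::Format_ARGB32);
    QPainter p(&newImage);
    p.setRenderHints(QPainter::Antialiasing);   // smooths the polygon edge
    p.setCompositionMode(QPainter::CompositionMode_SourceOut);
    p.setPen(QColor(255, 255, 255, 255));
    p.setBrush(QBrush(QColor(255, 255, 255, 255)));
    p.drawPolygon(hole.data(), static_cast<int>(hole.size()));
    p.end();
    return newImage;
}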
I'm starting to integrate OpenCV into a Qt application, so I have the following program structure:
QGraphicsView
|
|->QGraphicsPixmapItem (where the captured Image will be)
|
|
|->QGraphicsRectItem (a rectangle that define the roi)
I have the following function to process an image:
void Inspection::Process()
{
IplImage* m_CapureImage= Capture()->GetImage(); //cvLoadImage("e:\\Desert.jpg");
IplImage* m_ProcessingImage= cvCreateImage(cvGetSize(m_CapureImage), IPL_DEPTH_8U, 1);
cvCvtColor(m_CapureImage,m_ProcessingImage,CV_BGR2GRAY);
// Process all ROI's in inspection
for (int var = 0; var < ROIs()->rowCount(QModelIndex()); ++var) {
ROI* roi=ROIs()->data(ROIs()->index(var,0),Qt::UserRole).value<ROI*>();
if(roi!=0)
roi->Process(m_ProcessingImage);
}
QImage qImg = IplImage2QImage(m_ProcessingImage);
m_BackgroundItem->setPixmap(QPixmap::fromImage(qImg));
}
///
QImage IplImage2QImage(const IplImage *iplImage)
{
int height = iplImage->height;
int width = iplImage->width;
if (iplImage->depth == IPL_DEPTH_8U && iplImage->nChannels == 3)
{
const uchar *qImageBuffer = (const uchar*)iplImage->imageData;
QImage img(qImageBuffer, width, height, QImage::Format_RGB888);
return img.rgbSwapped();
} else if (iplImage->depth == IPL_DEPTH_8U && iplImage->nChannels == 1){
const uchar *qImageBuffer = (const uchar*)iplImage->imageData;
QImage img(qImageBuffer, width, height, QImage::Format_Indexed8);
QVector<QRgb> colorTable;
for (int i = 0; i < 256; i++){
colorTable.push_back(qRgb(i, i, i));
}
img.setColorTable(colorTable);
return img;
}else{
qWarning() << "Image cannot be converted.";
return QImage();
}
}
So, my question is:
I change the position of the ROI and make some changes in a region of the IplImage. What I'm doing now is calling again:
QImage qImg = IplImage2QImage(m_ProcessingImage);
m_BackgroundItem->setPixmap(QPixmap::fromImage(qImg));
which reloads the whole IplImage. Is there a way to update only the specific ROI of the IplImage in the pixmap?
Thanks
EDIT 1:
I changed the image display implementation, so now the QGraphicsPixmapItem only displays the original captured image; then I will create a custom QGraphicsRectItem and override its paint method to draw the processed ROI.
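A minimal sketch of that idea (the class and member names here are hypothetical): a rect item that holds a QPixmap of just the processed ROI and repaints only its own rectangle, so the full frame never has to be reconverted:
#include <QGraphicsRectItem>
#include <QPainter>
#include <QPixmap>
#include <QStyleOptionGraphicsItem>

class RoiItem : public QGraphicsRectItem
{
public:
    explicit RoiItem(const QRectF &roi) : QGraphicsRectItem(roi) {}

    // Call after the ROI has been (re)processed; only the ROI pixels are converted.
    void setProcessedRoi(const QImage &roiImage)
    {
        m_pixmap = QPixmap::fromImage(roiImage);
        update();                        // schedules a repaint of this item only
    }

    void paint(QPainter *painter, const QStyleOptionGraphicsItem *, QWidget *)
    {
        painter->drawPixmap(rect().toRect(), m_pixmap);
    }

private:
    QPixmap m_pixmap;
};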
I have created a QImage of format QImage::Format_Mono. When I try to display the image in a QGraphicsView through a QGraphicsScene, the view is unchanged. The QImage is loaded into the scene using a QPixmap produced with the QPixmap::fromImage() function. I also tried saving the QPixmap as a PNG/JPG/BMP using the save function, to no avail. The basic code structure is as follows:
QGraphicsView *view = new QGraphicsView();
QGraphicsScene *scene = new QGraphicsScene();
view->setScene(scene);
QImage img(size,QImage::Format_Mono);
QVector<QRgb> v;
v.append(Qt::color0); // I have tried using black and white
v.append(Qt::color1); // instead of color0 and 1 as well.
img.setColorTable(v);
// Do some stuff to populate the image using img.setPixel(c,r,bw)
// where bw is an index either 0 or 1 and c and r are within bounds
QPixmap p = QPixmap::fromImage(img);
p.save("mono.png");
scene->addPixmap(p);
// Code to display the view
If I instead make the image of QImage::Format_RGB888 and fill the pixels with either black or white the PNG/View displays appropriately.
How can I update my code to display the QImage in a QGraphicsView?
The error is that the Qt::GlobalColor constants (such as Qt::white or Qt::color0) are not of type QRgb as expected (QRgb is a typedef for unsigned int); they are enum values that convert to QColor.
You can convert a QColor to a QRgb with the method QColor::rgb(), or directly create a QRgb with the global function qRgb(r,g,b). Below is a complete working example that displays (and saves as PNG) exactly the same image whether mono is true or false.
#include <QApplication>
#include <QGraphicsScene>
#include <QGraphicsView>
int main(int argc, char **argv)
{
QApplication app(argc, argv);
QGraphicsView *view = new QGraphicsView();
QGraphicsScene *scene = new QGraphicsScene();
view->setScene(scene);
int W = 100;
int H = 100;
QImage img;
uint color0 = qRgb(255,0,0);
uint color1 = QColor(Qt::green).rgb();
bool mono = true;
if(mono)
{
img = QImage(QSize(W,H),QImage::Format_Mono);
QVector<QRgb> v; v << color0 << color1;
img.setColorTable(v);
for(int i=0; i<W; i++)
for(int j=0; j<H; j++)
{
uint index;
if(j-(j/10)*10 > 5)
index = 0;
else
index = 1;
img.setPixel(i,j,index);
}
}
else
{
img = QImage(QSize(W,H),QImage::Format_RGB888);
for(int i=0; i<W; i++)
for(int j=0; j<H; j++)
{
uint color;
if(j-(j/10)*10 > 5)
color = color0;
else
color = color1;
img.setPixel(i,j,color);
}
}
QPixmap p = QPixmap::fromImage(img);
p.save("mono.png");
scene->addPixmap(p);
view->show();
return app.exec();
}
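As a side note tying this back to the OpenCV conversions above: if the 1-bit image comes from a thresholded cv::Mat (CV_8UC1 holding 0/255), a simple (if slow) sketch is to set the indices pixel by pixel; the helper name is hypothetical, and for large images QImage::Format_Grayscale8 avoids the per-pixel loop:
QImage binaryMatToMono(const cv::Mat &m)     // expects CV_8UC1 with values 0 or 255
{
    QImage img(m.cols, m.rows, QImage::Format_Mono);
    QVector<QRgb> table;
    table << qRgb(0, 0, 0) << qRgb(255, 255, 255);   // index 0 = black, index 1 = white
    img.setColorTable(table);
    for (int y = 0; y < m.rows; ++y)
        for (int x = 0; x < m.cols; ++x)
            img.setPixel(x, y, m.at<uchar>(y, x) ? 1 : 0);
    return img;
}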