I want to render images from the webcam onto a QML view. For that I have to convert the frames from OpenCV's format to QImage, and I implement a QQuickPaintedItem singleton class to render the QImage.
If I don't use QImage::scaled() in my code, my program crashes when I start grabbing and invoking the rendering, and I don't know why.
image = cv::Mat(stImageInfo.nHeight, stImageInfo.nWidth, CV_8UC3, m_pBufForSaveImage);
//cv::Size dsize = cv::Size(round(0.33 * stImageInfo.nWidth), round(0.27 * stImageInfo.nHeight));
//cv::Mat shrink;
//resize(image, shrink, dsize, 0, 0, CV_INTER_AREA);
QImage Qimag = MatImageToQt(image);
Qimag = Qimag.scaled(image.cols * 0.33, image.rows * 0.27, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
MyImage *myimg = MyImage::instance();
myimg->setM_Image(Qimag);
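MatImageToQt() is referenced but not shown in the question. Here is a minimal sketch of such a helper for CV_8UC3 frames (my assumption: BGR channel order, as OpenCV usually delivers). Note that a QImage constructed over mat.data only aliases the Mat's buffer; rgbSwapped() returns a detached deep copy, so the result no longer depends on m_pBufForSaveImage. A QImage that keeps aliasing a reused grab buffer is one plausible reason the program only survives when QImage::scaled(), which also allocates a fresh image, is called.

// Hypothetical sketch of the conversion helper; assumes a CV_8UC3 Mat in BGR order.
QImage MatImageToQt(const cv::Mat &mat)
{
    // Wrap the Mat's pixel buffer; 'step' carries the row stride, padding included.
    QImage view(mat.data, mat.cols, mat.rows, static_cast<int>(mat.step),
                QImage::Format_RGB888);
    // Swap BGR -> RGB; this returns a deep copy, detaching from the grab buffer.
    return view.rgbSwapped();
}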
myimage.cpp (renders the QImage to QML):
#include "myimage.h"
MyImage::MyImage(QQuickPaintedItem *parent)
{
Q_UNUSED(parent)
}
MyImage* MyImage::myImage = new MyImage;
MyImage *MyImage::instance()
{
return myImage;
}
void MyImage::paint(QPainter *painter)
{
QRectF target(0.0, 0.0, 800.0, 550.0);//width*0.33 height*0.27
QRectF source(0.0, 0.0, 800.0, 550.0);
painter->setRenderHint(QPainter::Antialiasing, true);
painter->drawImage(target, this->m_Image, source);
}
const QImage &MyImage::getM_Image() const
{
return m_Image;
}
void MyImage::setM_Image(const QImage &mimage)
{
if (mimage != m_Image) {
m_Image = mimage;
emit m_ImageChanged();
}
}
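The registration that exposes MyImage to QML is not shown; here is a minimal sketch under the assumption that the type is registered for QML to instantiate (the module URI is hypothetical). Note that if QML creates the item itself, the on-screen instance is a different object from the MyImage::instance() singleton unless the registration hands that same instance to QML.

// Sketch (assumed setup, not from the question): make MyImage available to QML.
// A QML scene would then declare:  MyImage { anchors.fill: parent }
qmlRegisterType<MyImage>("App.Images", 1, 0, "MyImage");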
void MainWindow::paintEvent(QPaintEvent *event){
    QPainter chartPainter;
    QPointF qChart;
    QFont font;
    chartPainter.begin(this);
    qChart = QPointF(100, 100); // x position, y position
    chartPainter.drawText(qChart, "A", 0, 0);
}
How can I display the letter "A" as mirrored text in Qt? And how can I apply the mirroring to all text, not just "A"?
QImage has a mirrored() function that could be used, but even after searching I could not find an equivalent method on QPainter.
Is there a way to do this with QPainter, or some other way?
Yes, you can create a QImage and draw on it: you draw your text into the image with a QPainter, mirror the image, and then paint it onto the widget.
.h

............
protected:
    void paintEvent(QPaintEvent *);
private:
    void drawText(const QString &text);
private:
    Ui::Widget *ui;
    QImage image;
    bool drawing;
...........
.cpp

Widget::Widget(QWidget *parent)
    : QWidget(parent)
    , ui(new Ui::Widget)
{
    ui->setupUi(this);
    image = QImage(size(), QImage::Format_ARGB32_Premultiplied);
    drawing = true; // for updating once
}

void Widget::paintEvent(QPaintEvent *)
{
    image = image.mirrored(true, false);
    QPainter painter(this);
    painter.fillRect(rect(), QGradient(QGradient::SaintPetersburg));
    painter.drawImage(rect(), image);
    if (drawing)
        drawText("Mirrored");
}
void Widget::drawText(const QString &text)
{
    QPainter painter(&image);
    // set a font
    QFont font = painter.font();
    font.setFamily("Helvetica");
    font.setPixelSize(20);
    painter.setFont(font);
    // metrics for centerPos and size
    QFontMetrics metrics(painter.fontMetrics());
    painter.setPen(Qt::black);
    // keep it in the middle if possible (this is only for the optics; you don't have to do it like that)
    painter.drawText((width() / 2) - metrics.horizontalAdvance(text) - 10,
                     (height() / 2) - metrics.height(),
                     metrics.horizontalAdvance(text),
                     metrics.height(),
                     0,
                     text);
    update();
    drawing = false;
}
Just a small note: if you mirror and draw the text during paintEvent(), the picture will flicker. You can work around it by preparing the image once in the constructor instead:
Widget::Widget(QWidget *parent)
    : QWidget(parent)
    , ui(new Ui::Widget)
{
    ui->setupUi(this);
    image = QImage(size(), QImage::Format_ARGB32_Premultiplied);
    //image.fill(Qt::white);
    //drawing = true; // update once
    drawText("Mirrored");
    image = image.mirrored(true, false);
}
.........
void Widget::paintEvent(QPaintEvent *)
{
    QPainter painter(this);
    painter.fillRect(rect(), QGradient(QGradient::SaintPetersburg));
    painter.drawImage(rect(), image);
}
........
You need to set the layout direction (Qt::LayoutDirection) on the QPainter object.
Add these lines to your code:
painter.setLayoutDirection(Qt::RightToLeft);
painter.drawText(rect, Qt::AlignRight, tr("Text goes here"));
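As an alternative to both approaches above (my own sketch, not from the original answers): QPainter can mirror everything it draws, text included, through a world transform. Flipping the x-axis and translating the origin back into view mirrors every subsequent drawText() call:

// Sketch: mirror all painting horizontally with a QPainter world transform.
void Widget::paintEvent(QPaintEvent *)
{
    QPainter painter(this);
    painter.translate(width(), 0); // move the origin to the right edge...
    painter.scale(-1, 1);          // ...and flip the x-axis: everything now draws mirrored
    painter.drawText(rect(), Qt::AlignCenter, "A");
}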
I made an image editor in Qt / OpenCV where you can load an image from the file explorer and grayscale / adaptive-threshold / resize it afterwards.
Bug 1: When I resize the loaded image to (for example) 600x600 pixels using my ImageProcessor::Resize(int, int) method, it works fine. But when I change it to something like 546x750 pixels, the image gets a weird grayscale.
Bug 2: When I want to resize my grayscaled/thresholded image, it always gets a weird grayscale similar to Bug 1.
Code:
mainwindow.cpp
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "resizer.h"
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
, ui(new Ui::MainWindow)
{
ui->setupUi(this);
}
MainWindow::~MainWindow()
{
delete ui;
}
void MainWindow::Display(cv::Mat inputImage)
{
    QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888);
    scene->addPixmap(QPixmap::fromImage(image));
    ui->graphicsView->setScene(scene);
    ui->graphicsView->show();
}
void MainWindow::on_actionOpen_triggered()
{
    QString file = QFileDialog::getOpenFileName(this, "Open", "", "Images (*.jpg *.png)");
    std::string filename = file.toStdString();
    inputImage = cv::imread(filename);
    Display(inputImage);
    imgProc = new ImageProcessor(inputImage);
}

void MainWindow::on_pushButton_clicked() // Grayscale
{
    scene->clear();
    imgProc->mode = 1;
    inputImage = imgProc->Grayscale();
    QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
    scene->addPixmap(QPixmap::fromImage(image));
    ui->graphicsView->setScene(scene);
    ui->graphicsView->show();
}

void MainWindow::on_pushButton_2_clicked() // ADT
{
    scene->clear();
    imgProc->mode = 2;
    inputImage = imgProc->AdaptiveThreshold();
    QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
    scene->addPixmap(QPixmap::fromImage(image));
    ui->graphicsView->setScene(scene);
    ui->graphicsView->show();
}

void MainWindow::on_pushButton_3_clicked() // Resize
{
    scene->clear();
    Resizer resizer;
    resizer.exec();
    int newWidth = resizer.GetWidth();
    int newHeight = resizer.GetHeight();
    inputImage = imgProc->Resize(newWidth, newHeight);
    if (imgProc->mode == 1 || imgProc->mode == 2)
    {
        QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_Grayscale8);
        scene->addPixmap(QPixmap::fromImage(image));
        ui->graphicsView->setScene(scene);
        ui->graphicsView->show();
    }
    else
    {
        QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888);
        scene->addPixmap(QPixmap::fromImage(image));
        ui->graphicsView->setScene(scene);
        ui->graphicsView->show();
    }
}
imageprocessor.cpp
#include "imageprocessor.h"
ImageProcessor::ImageProcessor(cv::Mat inputImage)
{
this->inputImage = inputImage;
}
cv::Mat ImageProcessor::Resize(int width, int height)
{
cv::Mat resized;
cv::resize(inputImage, resized, cv::Size(width, height), cv::INTER_LINEAR);
return resized;
}
cv::Mat ImageProcessor::Grayscale()
{
cv::Mat grayscaled;
cv::cvtColor(inputImage, grayscaled, cv::COLOR_RGB2GRAY);
return grayscaled;
}
cv::Mat ImageProcessor::AdaptiveThreshold()
{
cv::Mat binarized, grayscaled;
cv::cvtColor(inputImage, grayscaled, cv::COLOR_RGB2GRAY);
cv::adaptiveThreshold(grayscaled, binarized, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 15, 11);
return binarized;
}
QImage::Format_RGB888, the format type you used, means that:
The image is stored using a 24-bit RGB format (8-8-8).
If your image has 3 channels, then your approach is correct, except that you should add this:
QImage image = QImage(inputImage.data, inputImage.cols, inputImage.rows, QImage::Format_RGB888).rgbSwapped();
You need rgbSwapped() at the end because Qt expects RGB channel order while OpenCV delivers BGR.
If you want to send a grayscale image to the GUI, then you need to use the QImage::Format_Grayscale8 format type, which means:
The image is stored using an 8-bit grayscale format.
Here is the clear documentation for the formats.
Note: How do you resize your image, by using an OpenCV function? Share resizer.h and I will update the answer accordingly.
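One more point worth checking, though this is my assumption rather than part of the answer above: the plain QImage(data, width, height, format) constructor assumes each scanline is 32-bit aligned. A row of 546 grayscale pixels (or 546*3 bytes in RGB) is not a multiple of 4, which would produce exactly the "weird grayscale" shearing at 546x750 while 600x600 stays intact. Passing the Mat's row stride avoids the mismatch:

// Sketch: pass the cv::Mat row stride explicitly so unaligned rows don't shear the image.
QImage image(inputImage.data, inputImage.cols, inputImage.rows,
             static_cast<int>(inputImage.step), // bytes per line, padding included
             QImage::Format_Grayscale8);        // or Format_RGB888 for the color path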
I have an OpenCV backend which retrieves the video frame from a camera device through cv::VideoCapture, does some processing and then passes the cv::Mat frame to a Qt5 application for display in a QML VideoOutput.
The problem is the frames drawn are empty/white.
The CameraService class receives a cv::Mat from a backend object that runs on its own thread, via a Qt::QueuedConnection signal. I then convert it to a QImage, which I use to initialize a QVideoFrame that I pass to a QAbstractVideoSurface obtained from a QML VideoOutput, after setting a pixel format on it.
I have checked that the cv::Mat has valid content before the conversion to QVideoFrame, so that is not the problem.
Or am I doing it completely wrong, and should I instead draw an image?
Relevant code:
CameraService.cpp
CameraService::CameraService(Video::Backend *backend)
    : QObject(),
      surface(nullptr),
      isFormatSet(false) {
    this->backend = backend;
    connect(
        backend, &Video::Backend::onFrameReady,
        this, &CameraService::onVideoFrameReady,
        Qt::QueuedConnection);
}

CameraService::~CameraService() {
    backend->deleteLater();
}

QAbstractVideoSurface *CameraService::getVideoSurface() const {
    return surface;
}

void CameraService::setVideoSurface(QAbstractVideoSurface *surface) {
    if (!this->surface && surface)
        backend->start();
    if (this->surface && this->surface != surface && this->surface->isActive())
        this->surface->stop();
    this->surface = surface;
    if (this->surface && format.isValid()) {
        format = this->surface->nearestFormat(format);
        this->surface->start(format);
    }
}

void CameraService::setFormat(
    int width,
    int height,
    QVideoFrame::PixelFormat frameFormat
){
    QSize size(width, height);
    QVideoSurfaceFormat format(size, frameFormat);
    this->format = format;
    if (surface) {
        if (surface->isActive())
            surface->stop();
        this->format = surface->nearestFormat(this->format);
        surface->start(this->format);
    }
}

void CameraService::onVideoFrameReady(cv::Mat currentFrame) {
    if (!surface || currentFrame.empty())
        return;
    cv::Mat continuousFrame;
    if (!currentFrame.isContinuous())
        continuousFrame = currentFrame.clone();
    else
        continuousFrame = currentFrame;
    if (!isFormatSet) {
        setFormat(
            continuousFrame.cols,
            continuousFrame.rows,
            QVideoFrame::PixelFormat::Format_BGR32);
        isFormatSet = true;
    }
    frame = QImage(
        (uchar *)continuousFrame.data,
        continuousFrame.cols,
        continuousFrame.rows,
        continuousFrame.step,
        QVideoFrame::imageFormatFromPixelFormat(
            QVideoFrame::PixelFormat::Format_BGR32));
    surface->present(QVideoFrame(frame));
}
QML object:
VideoOutput {
    objectName: "videoOutput";
    anchors.fill: parent;
    fillMode: VideoOutput.PreserveAspectCrop;
    source: CameraService;
}
The CameraService object is made available as a singleton to QML using this statement:
qmlRegisterSingletonInstance<Application::CameraService>("Application.CameraService", 1, 0, "CameraService", service);
Analyzing the code, I noticed that the conversion is not supported (I recommend you check whether the format is valid). To handle this I have made some changes:
#ifndef CAMERASERVICE_H
#define CAMERASERVICE_H

#include "backend.h"

#include <QObject>
#include <QPointer>
#include <QVideoFrame>
#include <QVideoSurfaceFormat>

#include <opencv2/core/mat.hpp>

class QAbstractVideoSurface;

class CameraService : public QObject
{
    Q_OBJECT
    Q_PROPERTY(QAbstractVideoSurface* videoSurface READ videoSurface WRITE setVideoSurface NOTIFY surfaceChanged)
public:
    explicit CameraService(Backend *backend, QObject *parent = nullptr);
    QAbstractVideoSurface* videoSurface() const;
public Q_SLOTS:
    void setVideoSurface(QAbstractVideoSurface* surface);
Q_SIGNALS:
    void surfaceChanged(QAbstractVideoSurface* surface);
private Q_SLOTS:
    void onVideoFrameReady(cv::Mat currentFrame);
private:
    void setFormat(int width, int height, QVideoFrame::PixelFormat frameFormat);

    QPointer<QAbstractVideoSurface> m_surface;
    QScopedPointer<Backend> m_backend;
    QVideoSurfaceFormat m_format;
    bool m_isFormatSet;
    QImage m_image;
};

#endif // CAMERASERVICE_H
#include "backend.h"
#include "cameraservice.h"
#include <QAbstractVideoSurface>
#include <iostream>
CameraService::CameraService(Backend *backend, QObject *parent)
: QObject(parent), m_backend(backend), m_isFormatSet(false)
{
connect(m_backend.data(), &Backend::frameReady, this, &CameraService::onVideoFrameReady);
}
QAbstractVideoSurface *CameraService::videoSurface() const
{
return m_surface;
}
void CameraService::setVideoSurface(QAbstractVideoSurface *surface){
if (m_surface == surface)
return;
if(m_surface && m_surface != surface && m_surface->isActive())
m_surface->stop();
m_surface = surface;
Q_EMIT surfaceChanged(m_surface);
m_backend->start();
if (m_surface && m_format.isValid()) {
m_format = m_surface->nearestFormat(m_format);
m_surface->start(m_format);
}
}
void CameraService::setFormat(
int width,
int height,
QVideoFrame::PixelFormat frameFormat
){
QSize size(width, height);
QVideoSurfaceFormat format(size, frameFormat);
m_format = format;
if (m_surface) {
if (m_surface->isActive())
m_surface->stop();
m_format = m_surface->nearestFormat(m_format);
m_surface->start(m_format);
}
}
void CameraService::onVideoFrameReady(cv::Mat currentFrame){
if (!m_surface || currentFrame.empty())
return;
cv::Mat continuousFrame;
if (!currentFrame.isContinuous())
continuousFrame = currentFrame.clone();
else
continuousFrame = currentFrame;
if (!m_isFormatSet) {
setFormat(continuousFrame.cols,
continuousFrame.rows,
QVideoFrame::Format_RGB32);
m_isFormatSet = true;
}
m_image = QImage(continuousFrame.data,
continuousFrame.cols,
continuousFrame.rows,
continuousFrame.step,
QImage::Format_RGB888);
m_image = m_image.rgbSwapped();
m_image.convertTo(QVideoFrame::imageFormatFromPixelFormat(QVideoFrame::Format_RGB32));
m_surface->present(QVideoFrame(m_image));
}
You can find the complete example here.
EDIT: The first answer solved my problem. Apart from that I had to set the ASI_BANDWIDTH_OVERLOAD value to 0.
I am programming a Linux application in C++/Qt 5.7 to track stars in my telescope. I use a camera (ZWO ASI 120MM with the according SDK v0.3) and grab its frames in a while loop in a separate thread. These are then emitted to a QOpenGLWidget to be displayed. I have the following problem: when the mouse is inside the QOpenGLWidget area, the displayed frames get corrupted, especially when the mouse is moved. The problem is worst with an exposure time of 50 ms and disappears for lower exposure times. When I feed the pipeline with alternating images from disk, the problem disappears. I assume this is some sort of thread-synchronization problem between the camera thread and the main thread, but I couldn't solve it. The same problem appears in the openastro software. Here are parts of the code:
MainWindow:
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent){
    mutex = new QMutex;
    camThread = new QThread(this);
    camera = new Camera(nullptr, mutex);
    display = new GLViewer(this, mutex);
    setCentralWidget(display);
    cameraHandle = camera->getHandle();
    connect(camThread, SIGNAL(started()), camera, SLOT(connect()));
    connect(camera, SIGNAL(exposureCompleted(const QImage)), display, SLOT(showImage(const QImage)), Qt::BlockingQueuedConnection);
    camera->moveToThread(camThread);
    camThread->start();
}
The routine that grabs the frames:
void Camera::captureFrame(){
    while( cameraIsReady && capturing ){
        mutex->lock();
        error = ASIGetVideoData(camID, buffer, bufferSize, int(exposure*2*1e-3)+500);
        if(error == ASI_SUCCESS){
            frame = QImage(buffer,width,height,QImage::Format_Indexed8).convertToFormat(QImage::Format_RGB32); //Indexed8 is for 8bit
            mutex->unlock();
            emit exposureCompleted(frame);
        }
        else {
            cameraStream << "timeout" << endl;
            mutex->unlock();
        }
    }
}
The slot that receives the image:
bool GLViewer::showImage(const QImage image)
{
    mutex->lock();
    mOrigImage = image;
    mRenderQtImg = mOrigImage;
    recalculatePosition();
    updateScene();
    mutex->unlock();
    return true;
}
And the GL function that sets the image:
void GLViewer::renderImage()
{
    makeCurrent();
    glClear(GL_COLOR_BUFFER_BIT);
    if (!mRenderQtImg.isNull())
    {
        glLoadIdentity();
        glPushMatrix();
        {
            if (mResizedImg.width() <= 0)
            {
                if (mRenderWidth == mRenderQtImg.width() && mRenderHeight == mRenderQtImg.height())
                    mResizedImg = mRenderQtImg;
                else
                    mResizedImg = mRenderQtImg.scaled(QSize(mRenderWidth, mRenderHeight),
                                                      Qt::IgnoreAspectRatio,
                                                      Qt::SmoothTransformation);
            }
            glRasterPos2i(mRenderPosX, mRenderPosY);
            glPixelZoom(1, -1);
            glDrawPixels(mResizedImg.width(), mResizedImg.height(), GL_RGBA, GL_UNSIGNED_BYTE, mResizedImg.bits());
        }
        glPopMatrix();
        glFlush();
    }
}
I stole this code from here: https://github.com/Myzhar/QtOpenCVViewerGl
And lastly, here is how my problem looks (screenshot of the corrupted frames omitted):
This looks awful.
The image producer should produce new images and emit them through a signal. Since QImage is implicitly shared, it will automatically recycle frames to avoid new allocations; see the snippet below the next paragraphs. Only when the producer thread out-runs the display thread will image copies be made.
Instead of using an explicit loop in the Camera object, you can run the capture using a zero-duration timer and have the event loop invoke it. That way the camera object can process events, e.g. timers, cross-thread slot invocations, etc.
There's no need for explicit mutexes, nor for a blocking connection: Qt's event loop provides cross-thread synchronization. Finally, the QtOpenCVViewerGl project performs image scaling on the CPU and is really an example of how not to do it. You can get image scaling for free by drawing the image on a quad; even though that's also an outdated technique from the fixed-pipeline days, it works just fine.
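As a minimal illustration of the implicit-sharing point (a standalone snippet, not part of the project):

QImage a(640, 480, QImage::Format_RGB888);
QImage b = a;      // shallow copy: both objects share one pixel buffer
b.fill(Qt::black); // copy-on-write: b detaches and deep-copies only here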
The ASICamera class would look roughly as follows:
// https://github.com/KubaO/stackoverflown/tree/master/questions/asi-astro-cam-39968889
#include <QtOpenGL>
#include <QOpenGLFunctions_2_0>
#include "ASICamera2.h"

class ASICamera : public QObject {
    Q_OBJECT
    ASI_ERROR_CODE m_error;
    ASI_CAMERA_INFO m_info;
    QImage m_frame{640, 480, QImage::Format_RGB888};
    QTimer m_timer{this};
    int m_exposure_ms = 0;
    inline int id() const { return m_info.CameraID; }
    void capture() {
        m_error = ASIGetVideoData(id(), m_frame.bits(), m_frame.byteCount(),
                                  m_exposure_ms*2 + 500);
        if (m_error == ASI_SUCCESS)
            emit newFrame(m_frame);
        else
            qDebug() << "capture error" << m_error;
    }
public:
    explicit ASICamera(QObject * parent = nullptr) : QObject{parent} {
        connect(&m_timer, &QTimer::timeout, this, &ASICamera::capture);
    }
    ASI_ERROR_CODE error() const { return m_error; }
    bool open(int index) {
        m_error = ASIGetCameraProperty(&m_info, index);
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASIOpenCamera(id());
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASIInitCamera(id());
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASISetROIFormat(id(), m_frame.width(), m_frame.height(), 1, ASI_IMG_RGB24);
        if (m_error != ASI_SUCCESS)
            return false;
        return true;
    }
    bool close() {
        m_error = ASICloseCamera(id());
        return m_error == ASI_SUCCESS;
    }
    Q_SIGNAL void newFrame(const QImage &);
    QImage frame() const { return m_frame; }
    Q_SLOT bool start() {
        m_error = ASIStartVideoCapture(id());
        if (m_error == ASI_SUCCESS)
            m_timer.start(0);
        return m_error == ASI_SUCCESS;
    }
    Q_SLOT bool stop() {
        m_timer.stop(); // stop the capture timer first (originally placed after the return, where it was unreachable)
        m_error = ASIStopVideoCapture(id());
        return m_error == ASI_SUCCESS;
    }
    ~ASICamera() {
        stop();
        close();
    }
};
Since I'm using a dummy ASI API implementation, the above is sufficient. Code for a real ASI camera would need to set appropriate controls, such as exposure.
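For instance, a hedged sketch of what setting the exposure could look like with the real SDK's control API (my assumption: ASISetControlValue with the ASI_EXPOSURE control, which takes microseconds per the SDK headers):

// Sketch (assumption): set a fixed 50 ms exposure via the ASI control API.
m_exposure_ms = 50;
m_error = ASISetControlValue(id(), ASI_EXPOSURE,
                             m_exposure_ms * 1000 /* ASI_EXPOSURE is in microseconds */,
                             ASI_FALSE /* no auto-exposure */);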
The OpenGL viewer is also fairly simple:
class GLViewer : public QOpenGLWidget, protected QOpenGLFunctions_2_0 {
    Q_OBJECT
    QImage m_image;
    void ck() {
        for (GLenum err; (err = glGetError()) != GL_NO_ERROR;) qDebug() << "gl error" << err;
    }
    void initializeGL() override {
        initializeOpenGLFunctions();
        glClearColor(0.2f, 0.2f, 0.25f, 1.f);
    }
    void resizeGL(int width, int height) override {
        glViewport(0, 0, width, height);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(0, width, height, 0, 0, 1);
        glMatrixMode(GL_MODELVIEW);
        update();
    }
    // From http://stackoverflow.com/a/8774580/1329652
    void paintGL() override {
        auto scaled = m_image.size().scaled(this->size(), Qt::KeepAspectRatio);
        GLuint texID;
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glGenTextures(1, &texID);
        glEnable(GL_TEXTURE_RECTANGLE);
        glBindTexture(GL_TEXTURE_RECTANGLE, texID);
        glTexImage2D(GL_TEXTURE_RECTANGLE, 0, GL_RGB, m_image.width(), m_image.height(), 0,
                     GL_RGB, GL_UNSIGNED_BYTE, m_image.constBits());
        glBegin(GL_QUADS);
        glTexCoord2f(0, 0);
        glVertex2f(0, 0);
        glTexCoord2f(m_image.width(), 0);
        glVertex2f(scaled.width(), 0);
        glTexCoord2f(m_image.width(), m_image.height());
        glVertex2f(scaled.width(), scaled.height());
        glTexCoord2f(0, m_image.height());
        glVertex2f(0, scaled.height());
        glEnd();
        glDisable(GL_TEXTURE_RECTANGLE);
        glDeleteTextures(1, &texID);
        ck();
    }
public:
    GLViewer(QWidget * parent = nullptr) : QOpenGLWidget{parent} {}
    void setImage(const QImage & image) {
        Q_ASSERT(image.format() == QImage::Format_RGB888);
        m_image = image;
        update();
    }
};
Finally, we hook the camera and the viewer together. Since the camera initialization may take some time, we perform it in the camera's thread.
The UI should emit signals that control the camera, e.g. to open it, start/stop acquisition, etc., and have slots that provide feedback from the camera (e.g. state changes). A free-standing function would take the two objects and hook them together, using functors as appropriate to adapt the UI to a particular camera. If adapter code would be extensive, you'd use a helper QObject for that, but usually a function should suffice (as it does below).
class Thread : public QThread { public: ~Thread() { quit(); wait(); } };

// See http://stackoverflow.com/q/21646467/1329652
template <typename F>
static void postToThread(F && fun, QObject * obj = qApp) {
    QObject src;
    QObject::connect(&src, &QObject::destroyed, obj, std::forward<F>(fun),
                     Qt::QueuedConnection);
}

int main(int argc, char ** argv) {
    QApplication app{argc, argv};
    GLViewer viewer;
    viewer.setMinimumSize(200, 200);
    ASICamera camera;
    Thread thread;
    QObject::connect(&camera, &ASICamera::newFrame, &viewer, &GLViewer::setImage);
    QObject::connect(&thread, &QThread::destroyed, [&]{ camera.moveToThread(app.thread()); });
    camera.moveToThread(&thread);
    thread.start();
    postToThread([&]{
        camera.open(0);
        camera.start();
    }, &camera);
    viewer.show();
    return app.exec();
}
#include "main.moc"
The GitHub project includes a very basic ASI camera API test harness and is complete: you can run it and see the test video rendered in real time.
I want to be able to use a rubber band to select an area of an image, then remove the parts of the image outside the rubber band and display the new image. However, when I do this at the moment, it doesn't crop the correct area and gives me the wrong image.
#include "mainresizewindow.h"
#include "ui_mainresizewindow.h"
QString fileName;
MainResizeWindow::MainResizeWindow(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::MainResizeWindow)
{
ui->setupUi(this);
ui->imageLabel->setScaledContents(true);
connect(ui->actionOpen, SIGNAL(triggered()), this, SLOT(open()));
}
MainResizeWindow::~MainResizeWindow()
{
delete ui;
}
void MainResizeWindow::open()
{
fileName = QFileDialog::getOpenFileName(this, tr("Open File"), QDir::currentPath());
if (!fileName.isEmpty()) {
QImage image(fileName);
if (image.isNull()) {
QMessageBox::information(this, tr("Image Viewer"),
tr("Cannot load %1.").arg(fileName));
return;
}
ui->imageLabel->setPixmap(QPixmap::fromImage(image));
ui->imageLabel->repaint();
}
}
void MainResizeWindow::mousePressEvent(QMouseEvent *event)
{
if(ui->imageLabel->underMouse()){
myPoint = event->pos();
rubberBand = new QRubberBand(QRubberBand::Rectangle, this);
rubberBand->show();
}
}
void MainResizeWindow::mouseMoveEvent(QMouseEvent *event)
{
rubberBand->setGeometry(QRect(myPoint, event->pos()).normalized());
}
void MainResizeWindow::mouseReleaseEvent(QMouseEvent *event)
{
QRect myRect(myPoint, event->pos());
rubberBand->hide();
QPixmap OriginalPix(*ui->imageLabel->pixmap());
QImage newImage;
newImage = OriginalPix.toImage();
QImage copyImage;
copyImage = copyImage.copy(myRect);
ui->imageLabel->setPixmap(QPixmap::fromImage(copyImage));
ui->imageLabel->repaint();
}
Any help appreciated.
There are two issues here - the position of the rect relative to the image and the fact that the image is (potentially) scaled in the label.
Position issue:
QRect myRect(myPoint, event->pos());
You should perhaps change this to:
QPoint a = mapToGlobal(myPoint);
QPoint b = event->globalPos();
a = ui->imageLabel->mapFromGlobal(a);
b = ui->imageLabel->mapFromGlobal(b);
Then, the label may be scaling the image because you used setScaledContents(). So you need to work out the actual coordinates on the unscaled image. Something like this, maybe (untested, not compiled):
QPixmap OriginalPix(*ui->imageLabel->pixmap());
double sx = ui->imageLabel->rect().width();
double sy = ui->imageLabel->rect().height();
sx = OriginalPix.width() / sx;
sy = OriginalPix.height() / sy;
a.setX(int(a.x() * sx)); // QPoint exposes x()/setX(), not a public 'x' member
b.setX(int(b.x() * sx));
a.setY(int(a.y() * sy));
b.setY(int(b.y() * sy));
QRect myRect(a, b);
...
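Putting both fixes together, a possible mouseReleaseEvent could look like this (an untested sketch using the question's member names; it also copies from the label's actual pixmap instead of the empty copyImage):

void MainResizeWindow::mouseReleaseEvent(QMouseEvent *event)
{
    rubberBand->hide();

    // Map both corners into the label's coordinate system.
    QPoint a = ui->imageLabel->mapFromGlobal(mapToGlobal(myPoint));
    QPoint b = ui->imageLabel->mapFromGlobal(event->globalPos());

    // Undo the label's scaling to get coordinates on the unscaled pixmap.
    QPixmap originalPix(*ui->imageLabel->pixmap());
    double sx = double(originalPix.width())  / ui->imageLabel->width();
    double sy = double(originalPix.height()) / ui->imageLabel->height();
    a = QPoint(int(a.x() * sx), int(a.y() * sy));
    b = QPoint(int(b.x() * sx), int(b.y() * sy));

    // Crop from the original image, not from a default-constructed one.
    QRect myRect = QRect(a, b).normalized();
    QImage cropped = originalPix.toImage().copy(myRect);
    ui->imageLabel->setPixmap(QPixmap::fromImage(cropped));
    ui->imageLabel->repaint();
}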