I need to synchronize some OpenGL drawing with a metronome. The metronome is built with libpd and played with RtAudio.
Both things work well separately, but I need to move an object (a triangle) in time with the pulse of the metronome. The application must play the clicks too. Both actions must happen in parallel (playing and drawing). I should add MIDI recording too. My application is in C++.
I tried to run that with one thread, but it doesn't work.
I tried to follow this explanation: How to make my metronome play at the same time as recording in my program?
The gui Library is WxWidgets. The threads are done with Poco::Runnable in this way:
// Poco worker that starts metronome playback and then kicks off the GL
// animation. Holds a non-owning pointer to the GL pane and shares ownership
// of the sound manager.
class MyThread : public Poco::Runnable {
public:
// pane: canvas to animate (not owned); man: shared audio engine.
MyThread(BasicGLPane *pane, std::shared_ptr<SoundManager> man);
virtual void run();
private:
BasicGLPane *_pane; // target canvas, owned by the GUI
std::shared_ptr<SoundManager> _man; // shared audio backend
};
// Store the collaborators; the pane is not owned, the manager is shared.
// Uses a member-initializer list instead of assignment in the body.
MyThread::MyThread(BasicGLPane *pane, std::shared_ptr<SoundManager> man)
    : _pane(pane), _man(man) {}
// Thread body: start audio playback, then trigger the first animation step.
// NOTE(review): play() blocks only until the RtAudio stream is started, so
// startAnimation() runs on this worker thread -- GUI/GL calls off the main
// thread are suspect; confirm thread affinity of BasicGLPane.
void MyThread::run() {
_man->play();
_pane->startAnimation();
}
BasicGLPane is a wxGLCanvas. The play function of the SoundManager class is the following:
// Initialize libpd, load the metronome patch, and start an RtAudio output
// stream that pulls audio from pd via audioCallback. Exits the process on
// unrecoverable errors (pd init failure, no devices, RtAudio exceptions).
void SoundManager::play() {
// Init pd: 0 input channels, 2 output channels.
if(!lpd->init(0, 2, sampleRate)) {
std::cerr << "Could not init pd" << std::endl;
exit(1);
}
// Receive messages from pd ("metro-bang" is the beat used for animation).
lpd->setReceiver(object.get());
lpd->subscribe("metro-bang");
lpd->subscribe("switch");
// send DSP 1 message to pd
lpd->computeAudio(true);
// load the patch
open_patch("metro-main.pd");
std::cout << patch << std::endl;
// Use the RtAudio API to connect to the default audio device.
if(audio->getDeviceCount()==0){
std::cout << "There are no available sound devices." << std::endl;
exit(1);
}
RtAudio::StreamParameters parameters;
parameters.deviceId = audio->getDefaultOutputDevice();
parameters.nChannels = 2;
RtAudio::StreamOptions options;
options.streamName = "Pd Metronome";
options.flags = RTAUDIO_SCHEDULE_REALTIME;
if ( audio->getCurrentApi() != RtAudio::MACOSX_CORE ) {
options.flags |= RTAUDIO_MINIMIZE_LATENCY; // CoreAudio doesn't seem to like this
}
try {
// NOTE(review): if a stream is already open this closes it and does NOT
// reopen -- confirm this toggle behavior is intended.
if(audio->isStreamOpen()) {
audio->closeStream();
}
else {
// BUG FIX: "&parameters" was garbled to "¶meters" (HTML-entity
// mangling of "&para;"), which does not compile.
audio->openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &audioCallback, lpd.get(), &options );
audio->startStream();
}
}
catch ( RtAudioError& e ) {
std::cerr << e.getMessage() << std::endl;
exit(1);
}
}
The OpenGL drawing methods are the following:
// Draw one animation step (the triangle at the current member points) and
// request a repaint of the canvas.
void BasicGLPane::startAnimation() {
std::cout<<"Start Animation"<<std::endl;
triangle_1(p1, p2, p3);
Refresh();
}
// Draw the metronome triangle in an orthographic projection.
// NOTE(review): the _p1.._p3 parameters are unused; the member points p1..p3
// are drawn instead -- confirm whether the parameters were meant to be used.
void BasicGLPane::triangle_1(std::shared_ptr<vertex2f> _p1, std::shared_ptr<vertex2f> _p2, std::shared_ptr<vertex2f> _p3) {
CGLContextObj ctx = CGLGetCurrentContext(); //enable multithreading (only apple)
CGLError err = CGLEnable( ctx, kCGLCEMPEngine);
// BUG FIX: the original drew only when CGLEnable FAILED
// (err != kCGLNoError). Draw on success instead, matching the
// "err == 0" convention used by the rest of this code.
if (err == kCGLNoError ) {
glEnable(GL_MULTISAMPLE);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, getWidth(), getHeight(),0 , -1, 1);
glShadeModel(GL_SMOOTH);
glBegin(GL_POLYGON); // Drawing Using Triangles
glColor3f (157.0/255.0, 44.0/255.0, 44.0/255.0);
glVertex3f( p1->x, p1->y, 0.0f); // Top left
glVertex3f(p2->x,p2->y, 0.0f); // Top Right
glVertex3f( p3->x,p3->y, 0.0f); //Bottom
glEnd();
glMatrixMode(GL_MODELVIEW);
glEnable (GL_BLEND);
glLoadIdentity();
glDisable(GL_MULTISAMPLE);
}
}
And the thread is started with the following function:
// Runs the worker until the receiver has counted 10 metronome bangs, then
// stops the audio.
// NOTE(review): thread.start(work) is invoked repeatedly inside a busy loop;
// Poco::Thread::start() throws if the thread is already running, so this
// likely needs to start once and poll the counter -- confirm intent.
void BasicGLPane::startThread() {
while (_object->getCounter()<10) { //this is only to test the functionality
thread.start(work);
}
thread.join();
manager->stop();
}
And after that, this function is called in render:
// Paint-event handler: runs the (blocking) thread driver, then swaps buffers.
// NOTE(review): blocking inside a paint handler stalls the wx event loop --
// this is the synchronization problem described in the question text.
void BasicGLPane::render( wxPaintEvent& evt ) {
//some code here, not important....
startThread();
SwapBuffers();
}
Maybe I'm going to change this object; that is not important now — my problem is the synchronization. I think RtAudio is causing problems, because I get an EXC_BAD_ACCESS in getDeviceCount() or with any other function from RtAudio. That occurs only in the thread context.
Would it be better to do this with PortAudio? It would be nice to know what I'm doing wrong, or whether there is another way to solve this problem.
I found a solution. The problem was in the interaction between the wxWidgets main loop and OpenGL. The solution is to create an idle event in the following way:
//on wxApp
// Keep the wxEVT_IDLE subscription in sync with the requested render state.
// Connecting makes onIdle drive continuous repaints; disconnecting stops them.
void MyApp::activateRenderLoop(bool on) {
    if (on == render_loop_on) {
        return; // already in the requested state, nothing to do
    }
    if (on) {
        Connect(wxID_ANY, wxEVT_IDLE, wxIdleEventHandler(MyApp::onIdle));
    } else {
        Disconnect(wxEVT_IDLE, wxIdleEventHandler(MyApp::onIdle));
    }
    render_loop_on = on;
}
// Idle handler forming the render loop: repaint the pane and ask wx for
// another idle event (RequestMore) while rendering is active.
void MyApp::onIdle(wxIdleEvent &evt) {
// Re-sync subscription in case the pane turned rendering off.
activateRenderLoop(glPane->render_on);
if(render_loop_on) {
std::cout<<"MyApp on Idle, render_loop_on"<<std::endl;
glPane->paint_now();
evt.RequestMore();
}
}
//on event table:
EVT_PAINT(BasicGLPane::paint_rt)
// Right-click starts the metronome and enables the idle-driven render loop.
// NOTE(review): SLEEP(2000) blocks the GUI thread for two seconds between
// init and play -- confirm this settling delay is required.
void BasicGLPane::rightClick(wxMouseEvent& event) {
render_on = true;
manager->init();
SLEEP(2000);
manager->play();
wxGetApp().activateRenderLoop(true);
}
// Paint-event path: render via a wxPaintDC (required inside EVT_PAINT).
void BasicGLPane::paint_rt(wxPaintEvent &evt) {
wxPaintDC dc(this);
render_rt(dc);
}
// On-demand path used by the idle loop: render via a wxClientDC, which is
// legal outside a paint event.
void BasicGLPane::paint_now(){
wxClientDC dc(this);
std::cout<<"paint now() "<<std::endl;
render_rt(dc);
}
// Shared render step: after 10 metronome bangs, shut the loop and the audio
// down; otherwise translate by the third vertex and request a repaint.
void BasicGLPane::render_rt(wxDC &dc) {
wxGLCanvas::SetCurrent(*m_context);
if(_object->getCounter()>=10) {
wxGetApp().activateRenderLoop(false);
manager->stop();
render_on = false;
}
else {
ctx = CGLGetCurrentContext(); //OSx only
err = CGLEnable( ctx, kCGLCEMPEngine); //OSX only
std::cout<<"render_rt CGLError: "<<err<<std::endl;
// Only draw when the CGL multithreading engine was enabled successfully.
if (err==0) {
glTranslatef(p3->x, p3->y, 0);
// Refresh(false): queue a repaint without erasing the background.
Refresh(false);
}
}
}
The synchronization works perfectly now.
Related
Sorry for the long title, but I have a very specific problem that can't really be expressed any more concisely. I'm programming a game engine (GitHub link: here) and I'm trying to let the client create windows on top of the main window which the application supplies automatically.
I've completely managed to get this to work but I'm bothered with the framerate of the main window when it enters full-screen mode (either on initialization or when the user presses alt+enter). I haven't benchmarked the performance, but it is visibly bad (so probably around 20-30 FPS) and the performance only drops when the user creates another window (it doesn't even have to be showing).
Since all of the windows the user creates are children of the main window, I have to hide them before entering full-screen mode.
I have a lot of code in my window class (over 1000 lines), so giving you a minimal example will be very difficult. If you must see the code, please visit the GitHub repo (under platform/windows you will find the code I'm referencing). I wonder if this is a strange artifact of having multiple windows open in the same process, or if I'm just missing some code.
That being said, here is some actual client code:
SandboxApp.h
#pragma once
#include<Infinity.h>
// Example client application: a full-screen main window plus one popup
// window, rendering a textured quad through Renderer2D with an ortho camera.
class SandboxApp : public Infinity::Application
{
private:
Infinity::Window *m_popup_window; // secondary window, owned (deleted in OnUserDestroy)
Infinity::Rasterizer *m_rasterizer; // owned rasterizer state
Infinity::OrthoCamera m_camera;
Infinity::Renderer2D m_renderer;
Infinity::Texture2D *m_texture; // owned texture
float m_aspect_ratio; // main-window width/height, drives the camera
public:
SandboxApp();
~SandboxApp();
// Engine event hooks, invoked by the Infinity framework.
void OnApplicationEntered(Infinity::ApplicationEnteredEvent *event) override;
void OnUserCreate(Infinity::UserCreateEvent *event) override;
void OnUserUpdate(Infinity::UserUpdateEvent *event) override;
void OnUserRender(Infinity::UserRenderEvent *event) override;
void OnUserDestroy(Infinity::UserDestroyEvent *event) override;
void OnWindowResized(Infinity::WindowResizedEvent *event) override;
// Log `message` and request application shutdown.
void Exit(const char *message);
};
SandboxApp.cpp
#define INFINITY_ENTRY_POINT
#include"SandboxApp.h"
// Zero-initialize all members.
// FIX: the initializer list is reordered to match the declaration order
// (members are always initialized in declaration order, and the mismatch
// triggers -Wreorder). Pointers are explicitly nullptr and the aspect
// ratio explicitly 0.0f.
SandboxApp::SandboxApp():
m_popup_window(nullptr),
m_rasterizer(nullptr),
m_camera(),
m_renderer(),
m_texture(nullptr),
m_aspect_ratio(0.0f)
{}
// Resources are released in OnUserDestroy, so the destructor is empty.
SandboxApp::~SandboxApp()
{}
// Log an error and ask the framework to shut the application down.
void SandboxApp::Exit(const char *message)
{
INFINITY_CLIENT_ERROR(message);
RequestExit();
}
// Configure the main window before it is created: full-screen, and do not
// show it automatically (we show it manually after setup in OnUserCreate).
void SandboxApp::OnApplicationEntered(Infinity::ApplicationEnteredEvent *event)
{
// BUG FIX: "&params" was garbled to "¶ms" (HTML-entity mangling of
// "&para;"); the reference is required so the assignments below stick.
Infinity::Window::WindowParams &params = event->GetMainWindowParams();
params.fullscreen = true;
params.auto_show = false;
}
// Create the popup window and all rendering resources, then show both
// windows. Any failure logs and requests exit via Exit().
void SandboxApp::OnUserCreate(Infinity::UserCreateEvent *event)
{
Infinity::Window *window = GetMainWindow();
m_popup_window = Infinity::Window::CreateWindow();
Infinity::Window::WindowParams window_params;
window_params.width = 300;
window_params.height = 300;
window_params.title = "Popup window!";
if (!m_popup_window->Init(window_params))
{
Exit("Error initializing popup window");
return;
}
// Set clear color
// NOTE(review): GetContext() appears to return the context of whichever
// window is current, hence the MakeContextCurrent() dance -- confirm.
Infinity::Context *context = Infinity::Window::GetContext();
context->SetClearColor(0.0f, 0.0f, 0.0f, 1.0f);
m_popup_window->MakeContextCurrent();
context = Infinity::Window::GetContext();
context->SetClearColor(0.0f, 0.0f, 1.0f, 1.0f);
window->MakeContextCurrent();
// Initialize other resources
m_rasterizer = Infinity::Rasterizer::CreateRasterizer();
if (!m_rasterizer->Init(Infinity::Rasterizer::CullMode::NONE, true))
{
Exit("Error initializing rasterizer");
return;
}
m_rasterizer->Bind();
if (!m_renderer.Init())
{
Exit("Error initializing Renderer2D");
return;
}
m_texture = Infinity::Texture2D::CreateTexture();
if (!m_texture->Init("assets/image.png"))
{
Exit("Error initializing texture");
return;
}
INFINITY_CLIENT_INFO("Client created");
window->Show();
m_popup_window->Show();
event->Consume();
}
// Per-frame input handling: Escape toggles the cursor; while the cursor is
// disabled, arrow keys pan, W/S zoom, and A/D roll the ortho camera.
// Movement is scaled by the frame delta time for frame-rate independence.
void SandboxApp::OnUserUpdate(Infinity::UserUpdateEvent *event)
{
Infinity::Window *window = GetMainWindow();
if (KeyPressed(Infinity::KeyCode::Escape))
{
if (window->CursorEnabled())
{
window->DisableCursor();
}
else
{
window->EnableCursor();
}
}
// With the cursor visible the camera is frozen: consume and bail out.
if (window->CursorEnabled())
{
event->Consume();
return;
}
float speed = (float)(3.0 * event->GetDT());
float r_speed = (float)(2.0 * event->GetDT());
float z_speed = (float)(1.0 * event->GetDT());
if (KeyDown(Infinity::KeyCode::Left)) { m_camera.MoveLeft(speed); }
if (KeyDown(Infinity::KeyCode::Right)) { m_camera.MoveRight(speed); }
if (KeyDown(Infinity::KeyCode::Down)) { m_camera.MoveBackward(speed); }
if (KeyDown(Infinity::KeyCode::Up)) { m_camera.MoveForward(speed); }
if (KeyDown(Infinity::KeyCode::W)) { m_camera.zoom += z_speed; }
if (KeyDown(Infinity::KeyCode::S)) { m_camera.zoom -= z_speed; }
if (KeyDown(Infinity::KeyCode::A)) { m_camera.roll -= r_speed; }
if (KeyDown(Infinity::KeyCode::D)) { m_camera.roll += r_speed; }
m_camera.Update(m_aspect_ratio);
event->Consume();
}
// Per-frame rendering: draw a red unit quad into the main window, clear the
// popup window, then leave the main window's context current.
void SandboxApp::OnUserRender(Infinity::UserRenderEvent *event)
{
Infinity::Window *window = GetMainWindow();
window->MakeContextCurrent();
Infinity::Context *context = Infinity::Window::GetContext();
context->Clear();
m_renderer.StartScene(&m_camera);
Infinity::Renderer2D::QuadParams quad;
quad.position = { 0.0f, 0.0f };
quad.size = { 1.0f, 1.0f };
quad.color = { 1.0f, 0.0f, 0.0f, 1.0f };
m_renderer.DrawQuad(quad);
m_renderer.EndScene();
// Popup window is only cleared (solid blue from OnUserCreate).
m_popup_window->MakeContextCurrent();
context = Infinity::Window::GetContext();
context->Clear();
window->MakeContextCurrent();
event->Consume();
}
// Tear down all owned resources in reverse order of creation.
// NOTE(review): pointers are deleted but not nulled; safe as long as this
// hook runs exactly once.
void SandboxApp::OnUserDestroy(Infinity::UserDestroyEvent *event)
{
m_renderer.Destroy();
if (m_rasterizer)
{
m_rasterizer->Destroy();
delete m_rasterizer;
}
if (m_texture)
{
m_texture->Destroy();
delete m_texture;
}
if (m_popup_window)
{
m_popup_window->Destroy();
delete m_popup_window;
}
INFINITY_CLIENT_INFO("Client destroyed");
event->Consume();
}
// Only the main window drives the camera's aspect ratio; resize events from
// other windows are left unconsumed.
void SandboxApp::OnWindowResized(Infinity::WindowResizedEvent *event)
{
	if (event->GetWindow() != GetMainWindow())
	{
		return;
	}
	const float width = (float)event->GetWidth();
	const float height = (float)event->GetHeight();
	m_aspect_ratio = width / height;
	m_camera.Update(m_aspect_ratio);
	event->Consume();
}
// Engine entry-point factory; the framework takes ownership of the returned
// application instance.
Infinity::Application *Infinity::CreateApplication()
{
return new SandboxApp;
}
If you need any other information, please just leave a comment.
Thanks in advance! :)
Update
I tried adding my executables to the Graphics Performance options list but it didn't change the low framerate of the full-screen window.
I did some more testing and found out that that I only need to create the sub-window for these inefficiencies to occur. Even if I don't show, update or render to the window, simply creating it slows down the frame rate of my full-screen main window.
Trying to do more research, I realized that MSDN does not have any documentation on using multiple DXGI swap chains. My hunch is that setting the full-screen state of one swap chain to true somehow interferes with the other swap chain causing inefficiencies (Although my ID3D11Device debug output doesn't mention inefficiencies anywhere)
There’re 2 kinds of full-screen in Windows.
True fullscreen is probably what you’re doing, calling IDXGISwapChain::SetFullscreenState. In this mode, the windows desktop compositor, dwm.exe, is disabled, giving your app exclusive access to the monitor. However, as you found out it has complications. There’re more of them BTW: alt+tab can be slow, popups from other applications may glitch, and also it’s tricky to implement correctly, I think you have to re-create a swap chain and all render targets. On the bright side, in some rare cases it may improve FPS by a single-digit number.
Borderless windowed is what you should be doing since you want multiple Win32 windows running along with your fullscreen content. In this mode the desktop compositor is up and running, updating your window along others. Switching in and out that mode is also way easier, example below.
// Toggle borderless-windowed "fullscreen" using Raymond Chen's technique
// (link below): save the window placement, strip WS_OVERLAPPEDWINDOW and
// size to the monitor; restoring reverses both steps.
// NOTE(review): GetWindowLong/SetWindowPos etc. are called without an HWND,
// so they are presumably member wrappers supplying m_hWnd -- confirm.
HRESULT WindowImpl::maximizeBorderless( bool max )
{
// https://devblogs.microsoft.com/oldnewthing/20100412-00/?p=14353
DWORD dwStyle = GetWindowLong( GWL_STYLE );
if( max )
{
MONITORINFO mi = { sizeof( mi ) };
if( GetWindowPlacement( &m_wpPrev ) && GetMonitorInfo( MonitorFromWindow( m_hWnd, MONITOR_DEFAULTTOPRIMARY ), &mi ) )
{
SetWindowLong( GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW );
// Cover the monitor's full rectangle.
SetWindowPos( HWND_TOP,
mi.rcMonitor.left, mi.rcMonitor.top,
mi.rcMonitor.right - mi.rcMonitor.left,
mi.rcMonitor.bottom - mi.rcMonitor.top,
SWP_NOOWNERZORDER | SWP_FRAMECHANGED );
}
}
else
{
// Restore the overlapped style and the placement saved above.
SetWindowLong( GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW );
SetWindowPlacement( &m_wpPrev );
SetWindowPos( NULL, 0, 0, 0, 0,
SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER |
SWP_NOOWNERZORDER | SWP_FRAMECHANGED );
}
return S_OK;
}
I have a project where I have multiple scenes, each representing a state, such as a splash screen, a main menu, a level, etc. There is a main Game class, and a SceneManager that handles all the scenes. I need to call some of the SceneManager functions from within the individual Scene classes, so I tried to implement passing the SceneManager into the Scene classes via a pointer. However, I keep getting the following error:
Thread 1: EXC_BAD_ACCESS (code=1, address=0x0)
I assume that I haven't initialized something correctly and that this is causing the error, but I am having a hard time finding the exact issue and understanding it. I have included the relevant code below:
SceneManager.cpp function that gets the error:
// Make `scene` the active scene.
// NOTE(review): the previous scene is not deleted here; confirm who owns
// scene lifetimes.
void SceneManager::SetCurrentScene(Scene* scene) {
currentScene = scene;
}
Game.cpp
// Static storage definitions for Game.
Game::GameState Game::GAME_STATE = Playing;
sf::RenderWindow Game::mainWindow;
// Zero-initialized to nullptr; it must point at a real SceneManager before
// SetCurrentScene is called, otherwise dereferencing it is the reported
// EXC_BAD_ACCESS at address 0x0.
SceneManager* Game::sceneManager;
void Game::Start(void) {
mainWindow.create(sf::VideoMode(GAME_SCREEN_WIDTH, GAME_SCREEN_HEIGHT,32), GAME_WINDOW_TITLE, sf::Style::Close);
//make a clock
sf::Clock deltaClock;
//load the screen manager
//screenManager = new ScreenManager();
sceneManager->SetCurrentScene(new SplashScene(sceneManager)); // <---- This line is the first use of SetCurrentScene()
while(mainWindow.isOpen()) {
//get the delta time
sf::Time deltaTime = deltaClock.restart();
float delta = deltaTime.asMilliseconds();
GameLoop(delta);
}
mainWindow.close();
}
/*
bool Game::IsExiting()
{
if(gameState == Game::Exiting)
return true;
else
return false;
}
*/
// Accessor for the single application window (static storage).
sf::RenderWindow& Game::GetWindow()
{
return mainWindow;
}
// One frame: poll a single event, handle window close, then update/draw the
// current scene and present the frame.
void Game::GameLoop(float delta) {
// BUG FIX: the return value of pollEvent was ignored, so on frames with
// no pending events the code read an uninitialized sf::Event. The event
// is value-initialized and only inspected when one was actually polled.
sf::Event currentEvent{};
// "close requested" event: we close the window
if (mainWindow.pollEvent(currentEvent) && currentEvent.type == sf::Event::Closed) {
mainWindow.close();
}
//graphics and rendering
mainWindow.clear(GAME_WINDOW_BUFFER_COLOR);
//update the current screen
sceneManager->currentScene->Update(currentEvent, delta);
sceneManager->currentScene->Draw(mainWindow);
//display the window
mainWindow.display();
}
Scene.cpp
// Default constructor.
// FIX: sceneManager was left uninitialized (indeterminate pointer); it is
// now explicitly nullptr.
// NOTE(review): Load() called from a constructor does not dispatch to
// derived overrides -- confirm that Scene::Load() is the intended target.
Scene::Scene() : sceneManager(nullptr) {
Load();
}
// Construct a scene attached to its managing SceneManager (not owned).
// Uses a member-initializer list instead of assignment in the body.
Scene::Scene(SceneManager* sceneManager) : sceneManager(sceneManager) {
Load();
}
Scene::~Scene() {
}
// Default no-op implementations; presumably overridden by concrete scenes
// (SplashScene etc.) -- confirm these are declared virtual in Scene.h.
void Scene::Load() {
}
void Scene::Draw(sf::RenderWindow& renderWindow) {
}
void Scene::Update(sf::Event event, float delta) {
}
Thank you for all of the help!
EDIT: The first answer solved my problem. Apart from that I had to set the ASI_BANDWIDTH_OVERLOAD value to 0.
I am programming a Linux application in C++/Qt 5.7 to track stars in my telescope. I use a camera (ZWO ASI 120MM with the corresponding SDK v0.3) and grab its frames in a while loop in a separate thread. These are then emitted to a QOpenGlWidget to be displayed. I have the following problem: when the mouse is inside the QOpenGlWidget area, the displayed frames get corrupted, especially when the mouse is moved. The problem is worst when I use an exposure time of 50ms and disappears for lower exposure times. When I feed the pipeline with alternating images from disk, the problem disappears. I assume that this is some sort of thread-synchronization problem between the camera thread and the main thread, but I couldn't solve it. The same problem appears in the openastro software. Here are parts of the code:
MainWindow:
// Wire the camera (worker thread) to the GL viewer (GUI thread).
// NOTE(review): `mutex` is heap-allocated and apparently never deleted --
// confirm ownership; a by-value QMutex member would avoid the leak.
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent){
mutex = new QMutex;
camThread = new QThread(this);
camera = new Camera(nullptr, mutex);
display = new GLViewer(this, mutex);
setCentralWidget(display);
cameraHandle = camera->getHandle();
// Start grabbing as soon as the worker thread spins up.
connect(camThread, SIGNAL(started()), camera, SLOT(connect()));
// BlockingQueuedConnection stalls the camera thread until the GUI thread
// has consumed each frame.
connect(camera, SIGNAL(exposureCompleted(const QImage)), display, SLOT(showImage(const QImage)), Qt::BlockingQueuedConnection );
camera->moveToThread(camThread);
camThread->start();
}
The routine that grabs the frames:
// Tight capture loop on the camera thread: grab a frame under the mutex,
// convert Indexed8 -> RGB32, unlock, then emit the copy.
void Camera::captureFrame(){
while( cameraIsReady && capturing ){
mutex->lock();
// Timeout follows the SDK recommendation: ~2x exposure plus 500ms.
error = ASIGetVideoData(camID, buffer, bufferSize, int(exposure*2*1e-3)+500);
if(error == ASI_SUCCESS){
frame = QImage(buffer,width,height,QImage::Format_Indexed8).convertToFormat(QImage::Format_RGB32); //Indexed8 is for 8bit
mutex->unlock();
// NOTE(review): emitted after unlock; with a blocking connection the
// copy is safe, but `buffer` itself has no protection while the driver
// writes into it -- prime suspect for the corrupted frames.
emit exposureCompleted(frame);
}
else {
cameraStream << "timeout" << endl;
mutex->unlock();
}
}
}
The slot that receives the image:
// GUI-thread slot: store the incoming frame under the shared mutex and
// trigger a scene update. Always returns true.
bool GLViewer::showImage(const QImage image)
{
mutex->lock();
mOrigImage = image;
mRenderQtImg = mOrigImage; // shallow copy (QImage is implicitly shared)
recalculatePosition();
updateScene();
mutex->unlock();
return true;
}
And the GL function that sets the image:
// Draw the current frame with glDrawPixels, scaling on the CPU when the
// widget size differs from the frame size (cached in mResizedImg).
void GLViewer::renderImage()
{
makeCurrent();
glClear(GL_COLOR_BUFFER_BIT);
if (!mRenderQtImg.isNull())
{
glLoadIdentity();
glPushMatrix();
{
// mResizedImg is invalidated elsewhere (width <= 0) to force rescale.
if (mResizedImg.width() <= 0)
{
if (mRenderWidth == mRenderQtImg.width() && mRenderHeight == mRenderQtImg.height())
mResizedImg = mRenderQtImg;
else
mResizedImg = mRenderQtImg.scaled(QSize(mRenderWidth, mRenderHeight),
Qt::IgnoreAspectRatio,
Qt::SmoothTransformation);
}
glRasterPos2i(mRenderPosX, mRenderPosY);
// Negative Y zoom flips the image vertically (GL origin is bottom-left).
glPixelZoom(1, -1);
glDrawPixels(mResizedImg.width(), mResizedImg.height(), GL_RGBA, GL_UNSIGNED_BYTE, mResizedImg.bits());
}
glPopMatrix();
glFlush();
}
}
I stole this code from here: https://github.com/Myzhar/QtOpenCVViewerGl
And lastly, here is how my problem looks:
This looks awful.
The image producer should produce new images and emit them through a signal. Since QImage is implicitly shared, it will automatically recycle frames to avoid new allocations. Only when the producer thread out-runs the display thread will image copies be made.
Instead of using an explicit loop in the Camera object, you can run the capture using a zero-duration timer, and having the event loop invoke it. That way the camera object can process events, e.g. timers, cross-thread slot invocations, etc.
There's no need for explicit mutexes, nor for a blocking connection. Qt's event loop provides cross-thread synchronization. Finally, the QtOpenCVViewerGl project performs image scaling on the CPU and is really an example of how not to do it. You can get image scaling for free by drawing the image on a quad, even though that's also an outdated technique from the fixed pipeline days - but it works just fine.
The ASICamera class would look roughly as follows:
// https://github.com/KubaO/stackoverflown/tree/master/questions/asi-astro-cam-39968889
#include <QtOpenGL>
#include <QOpenGLFunctions_2_0>
#include "ASICamera2.h"
// Qt wrapper around the ASI camera C API. Frames are captured into a
// pre-allocated, reused QImage by a zero-duration timer running on the
// object's event loop, and published via the newFrame() signal.
class ASICamera : public QObject {
Q_OBJECT
ASI_ERROR_CODE m_error; // last API status code
ASI_CAMERA_INFO m_info; // filled in by open()
QImage m_frame{640, 480, QImage::Format_RGB888}; // reused capture buffer
QTimer m_timer{this}; // zero-duration poll timer
int m_exposure_ms = 0; // used to derive the capture timeout
inline int id() const { return m_info.CameraID; }
// Grab one frame from the driver and emit it on success.
void capture() {
m_error = ASIGetVideoData(id(), m_frame.bits(), m_frame.byteCount(),
m_exposure_ms*2 + 500);
if (m_error == ASI_SUCCESS)
emit newFrame(m_frame);
else
qDebug() << "capture error" << m_error;
}
public:
explicit ASICamera(QObject * parent = nullptr) : QObject{parent} {
connect(&m_timer, &QTimer::timeout, this, &ASICamera::capture);
}
// Last error reported by the ASI API.
ASI_ERROR_CODE error() const { return m_error; }
// Open and initialize the camera at the given enumeration index;
// returns false (with error() set) on any API failure.
bool open(int index) {
m_error = ASIGetCameraProperty(&m_info, index);
if (m_error != ASI_SUCCESS)
return false;
m_error = ASIOpenCamera(id());
if (m_error != ASI_SUCCESS)
return false;
m_error = ASIInitCamera(id());
if (m_error != ASI_SUCCESS)
return false;
m_error = ASISetROIFormat(id(), m_frame.width(), m_frame.height(), 1, ASI_IMG_RGB24);
if (m_error != ASI_SUCCESS)
return false;
return true;
}
bool close() {
m_error = ASICloseCamera(id());
return m_error == ASI_SUCCESS;
}
Q_SIGNAL void newFrame(const QImage &);
QImage frame() const { return m_frame; }
// Start video capture and begin polling for frames on the event loop.
Q_SLOT bool start() {
m_error = ASIStartVideoCapture(id());
if (m_error == ASI_SUCCESS)
m_timer.start(0);
return m_error == ASI_SUCCESS;
}
// Stop polling and halt video capture.
// BUG FIX: the original placed m_timer.stop() AFTER the return statement,
// making it unreachable -- the zero-duration timer kept firing (and
// capture() kept erroring) after stop() was called.
Q_SLOT bool stop() {
m_timer.stop();
m_error = ASIStopVideoCapture(id());
return m_error == ASI_SUCCESS;
}
~ASICamera() {
stop();
close();
}
};
Since I'm using a dummy ASI API implementation, the above is sufficient. Code for a real ASI camera would need to set appropriate controls, such as exposure.
The OpenGL viewer is also fairly simple:
// Displays the most recent camera frame, GPU-scaled by texturing a quad
// (no CPU rescale). Expects RGB888 frames via setImage().
class GLViewer : public QOpenGLWidget, protected QOpenGLFunctions_2_0 {
Q_OBJECT
QImage m_image; // last frame received; implicitly shared, no copy on assign
// Drain and log any pending GL errors.
void ck() {
for(GLenum err; (err = glGetError()) != GL_NO_ERROR;) qDebug() << "gl error" << err;
}
void initializeGL() override {
initializeOpenGLFunctions();
glClearColor(0.2f, 0.2f, 0.25f, 1.f);
}
// Pixel-aligned orthographic projection with Y growing downward.
void resizeGL(int width, int height) override {
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, height, 0, 0, 1);
glMatrixMode(GL_MODELVIEW);
update();
}
// From http://stackoverflow.com/a/8774580/1329652
// Uploads the frame as a GL_TEXTURE_RECTANGLE (texel coordinates, no
// power-of-two constraint) and draws it on an aspect-preserving quad.
// NOTE(review): the texture is created and destroyed every paint -- simple
// but wasteful; a persistent texture would avoid the per-frame allocation.
void paintGL() override {
auto scaled = m_image.size().scaled(this->size(), Qt::KeepAspectRatio);
GLuint texID;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glGenTextures(1, &texID);
glEnable(GL_TEXTURE_RECTANGLE);
glBindTexture(GL_TEXTURE_RECTANGLE, texID);
glTexImage2D(GL_TEXTURE_RECTANGLE, 0, GL_RGB, m_image.width(), m_image.height(), 0,
GL_RGB, GL_UNSIGNED_BYTE, m_image.constBits());
glBegin(GL_QUADS);
glTexCoord2f(0, 0);
glVertex2f(0, 0);
glTexCoord2f(m_image.width(), 0);
glVertex2f(scaled.width(), 0);
glTexCoord2f(m_image.width(), m_image.height());
glVertex2f(scaled.width(), scaled.height());
glTexCoord2f(0, m_image.height());
glVertex2f(0, scaled.height());
glEnd();
glDisable(GL_TEXTURE_RECTANGLE);
glDeleteTextures(1, &texID);
ck();
}
public:
GLViewer(QWidget * parent = nullptr) : QOpenGLWidget{parent} {}
// GUI-thread slot: take the new frame and schedule a repaint.
void setImage(const QImage & image) {
Q_ASSERT(image.format() == QImage::Format_RGB888);
m_image = image;
update();
}
};
Finally, we hook the camera and the viewer together. Since the camera initialization may take some time, we perform it in the camera's thread.
The UI should emit signals that control the camera, e.g. to open it, start/stop acquisition, etc., and have slots that provide feedback from the camera (e.g. state changes). A free-standing function would take the two objects and hook them together, using functors as appropriate to adapt the UI to a particular camera. If adapter code would be extensive, you'd use a helper QObject for that, but usually a function should suffice (as it does below).
class Thread : public QThread { public: ~Thread() { quit(); wait(); } };
// See http://stackoverflow.com/q/21646467/1329652
// Run `fun` in the thread of `obj` (the application object by default):
// a temporary QObject is destroyed immediately, and the queued destroyed()
// connection delivers the functor in obj's thread.
template <typename F>
static void postToThread(F && fun, QObject * obj = qApp) {
QObject src;
QObject::connect(&src, &QObject::destroyed, obj, std::forward<F>(fun),
Qt::QueuedConnection);
}
// Hook the camera (running in a worker thread) to the viewer (GUI thread).
// Camera init is posted to the camera's own thread since it may take time.
int main(int argc, char ** argv) {
QApplication app{argc, argv};
GLViewer viewer;
viewer.setMinimumSize(200, 200);
ASICamera camera;
Thread thread;
QObject::connect(&camera, &ASICamera::newFrame, &viewer, &GLViewer::setImage);
// When the thread dies, pull the camera back to the main thread so its
// destructor runs in a thread with an event loop.
QObject::connect(&thread, &QThread::destroyed, [&]{ camera.moveToThread(app.thread()); });
camera.moveToThread(&thread);
thread.start();
postToThread([&]{
camera.open(0);
camera.start();
}, &camera);
viewer.show();
return app.exec();
}
#include "main.moc"
The GitHub project includes a very basic ASI camera API test harness and is complete: you can run it and see the test video rendered in real time.
I am getting a major (1-2 second) delay between key presses.
Here is main.cpp (the lagging input handling):
#include <iostream>
#include "src/Input/InputManager.h"
#include "src/Graphics/Display.h"
#define LOG(x) std::cout << x << std::endl;
using namespace Rambug;
// Creates the display and runs the event/render loop, logging key events.
// FIX: "arc" -> "argc" (typo in the parameter name).
// NOTE(review): Display::update() runs its own SDL_PollEvent loop every
// frame; two competing polling loops each steal events from the other,
// which is the likely cause of the delayed key events discussed below.
int main(int argc, char** argv)
{
Graphics::Display display(900, 600, "Rambug Engine Tester", true);
display.createDisplay();
SDL_Event event;
Input::InputManager inputManager;
// "Game" Loop
while (!display.isClosed())
{
display.update();
glClearColor(0.0f, 0.02f, 0.5f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Drain whatever events Display::update() left in the queue.
while (SDL_PollEvent(&event))
{
if (event.type == SDL_KEYDOWN)
{
std::cout << "Keydowner" << std::endl;
}
if (event.type == SDL_KEYUP)
{
std::cout << "Keyupper" << std::endl;
}
}
// inputManager.update();
}
display.destroyDisplay();
system("PAUSE");
return 0;
}
Here is Display.cpp, which runs PERFECTLY without any delay when I run the same code (SDL_KEYDOWN, SDL_KEYUP) I just run SDL_QUIT down there.
#include "Display.h"
namespace Rambug
{
namespace Graphics
{
// Store window settings; the window itself is created later in
// createDisplay(). Uses a member-initializer list instead of assignments.
// NOTE(review): keep this order in sync with the member declaration order
// in Display.h to avoid -Wreorder warnings.
Display::Display(int width, int height, std::string title, bool log)
: m_displayWidth(width),
m_displayHeight(height),
m_displayTitle(title),
m_log(log),
m_window(nullptr)
{
}
// Default constructor: leaves members unset; createDisplay() must not be
// used on a default-constructed Display without configuring it first.
Display::Display()
{
}
Display::~Display()
{
}
void Display::createDisplay()
{
// Initialize SDL
SDL_Init(SDL_INIT_EVERYTHING);
// Setting attributes to our window
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
// Create window
m_window = SDL_CreateWindow((m_displayTitle.c_str()), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, m_displayWidth, m_displayHeight, SDL_WINDOW_OPENGL);
// Error Check Window
if (m_window == nullptr)
{
if (m_log)
std::cerr << "Window could not be created!" << std::endl;
}
else
{
if (m_log)
std::cout << "Window Created Successfully With SDL!" << std::endl;
}
// Create OpenGL Context
m_glContext = SDL_GL_CreateContext(m_window);
// Initialize GLEW
glewExperimental = GL_TRUE;
GLenum status = glewInit();
if (glewExperimental)
{
if (m_log)
std::cout << "Glew Experimental: On" << std::endl;
}
// Error Check GLEW
if (status != GLEW_OK)
{
if (m_log)
{
std::cerr << "GLEW could not be initialized!" << std::endl;
}
}
else
{
if (m_log)
{
std::cout << "GLEW Was Initilized Successfully!" << std::endl;
}
}
// Log OpenGL Version Number
if (m_log)
{
std::cout << "Using OpenGL Version: " << glGetString(GL_VERSION) << std::endl;
}
m_closed = false;
}
// Tear down the GL context, the window, and SDL itself (reverse of creation).
void Display::destroyDisplay()
{
SDL_GL_DeleteContext(m_glContext);
SDL_DestroyWindow(m_window);
SDL_Quit();
}
// Present the back buffer, then drain the event queue looking for SDL_QUIT.
// NOTE(review): this polling loop consumes ALL pending events, so any other
// SDL_PollEvent loop in the program (e.g. in main) only sees events that
// happen to arrive between the two loops -- the cause of the reported
// delayed key events. Consider polling in exactly one place.
void Display::update()
{
SDL_GL_SwapWindow(m_window);
// Check for Input
while (SDL_PollEvent(&m_sdlEvent))
{
if (m_sdlEvent.type == SDL_QUIT)
{
m_closed = true;
}
}
}
// True once an SDL_QUIT event has been observed by update().
bool Display::isClosed()
{
return m_closed;
}
}
}
I also tried experimenting with an Input manager class, but that was the same deal: delays. The update method is what I would call in main.cpp (I believe that it is commented out)
#include "InputManager.h"
#include <iostream>
#define LOG(x) std::cout << x << std::endl;
namespace Rambug
{
namespace Input
{
// Nothing to set up/tear down: the key map default-constructs empty.
InputManager::InputManager()
{
}
InputManager::~InputManager()
{
}
// Record that `keyCode` is currently held down.
void InputManager::keyPressed(unsigned int keyCode)
{
m_keyMap[keyCode] = true;
}
// Record that `keyCode` has been released.
void InputManager::keyReleased(unsigned int keyCode)
{
m_keyMap[keyCode] = false;
}
// Report whether `keyCode` is currently held down. Keys that have never
// been seen are treated as up.
bool InputManager::isKeyDown(unsigned int keyCode)
{
	const auto entry = m_keyMap.find(keyCode);
	return entry != m_keyMap.end() && entry->second;
}
// Drain the SDL event queue, recording key state transitions.
// NOTE(review): this is a second SDL_PollEvent loop; it competes with
// Display::update() for the same queue (see the question's discussion).
void InputManager::update()
{
while (SDL_PollEvent(&m_event))
{
switch (m_event.type)
{
case SDL_KEYDOWN:
LOG("SDL_KEYDOWN");
keyPressed(m_event.key.keysym.sym);
break;
case SDL_KEYUP:
LOG("SDL_KEYUP");
keyReleased(m_event.key.keysym.sym);
break;
}
}
}
}
}
So InputManager and main.cpp have major delays, while Display.cpp runs perfectly. Is it because I cannot run SDL_PollEvents twice?
Is it because I cannot run SDL_PollEvents twice?
Your problem isn't what I'd expect, but, yes, it's a bad idea to run SDL_PollEvents twice. SDL keeps an event stack which is added to as your program runs. SDL_PollEvents pops events from the stack until it is empty. As a result, running two polling loops, one will remove events which the other will then not see. Blind luck (or execution bottlenecks) will determine which loop is more likely to see any particular event occur. (See http://wiki.libsdl.org/SDL_PollEvent).
If you really want to run two polling loops, you can store unhandled events in your default case, and push the list of events back after each loop with SDL_PushEvent: http://wiki.libsdl.org/SDL_PushEvent
This said, I'm surprised that your events "get through" after a delay: I would expect them to vanish. Are you holding the keys down? Then, your OS key-repeat delay might be what you're seeing, after which the event queue is being flooded between each loop. You might want to check the repeat flag of the key event: http://wiki.libsdl.org/SDL_KeyboardEvent
I would say this points to a design problem. You should ask yourself, why does the Display delegate the Game ending? Would it not be more sensible to inform the Display, along with everything else, of this fact?
SDL keeps an event stack which is added to as your program runs. SDL_PollEvents pops events from the stack until it is empty.
I am fairly sure that it is not a stack, but a queue. The SDL_PushEvent has a somewhat misleading name there; what it really does is shove the event back into the queue from the "wrong" end. (It might be implemented internally as a stack, but its behaviour is that of a queue.)
Still, Qualia's answer is the right way to go.
However, I don't agree that it is necessarily bad to have multiple event loops - they can be very useful. 2 scenarios:
1) You catch something like a resize event in your main event loop. If the ensuing operations are very time-consuming the event queue will be flooded with more resize events as long as the user keeps resizing the window.
In this case, on can have a separate event loop after the time-consuming repaint, which simply loops until it finds the first non-resize event, then pushes the last 2 events it saw back into the queue, and returns control to the main loop. That way you can discard the accumulated resize events. (It can be done more elegantly using the SDL_PeepEvents function, especially if there is a really huge pile-up of events in the queue.)
2) The action your program takes after catching a specific event will trigger other events, like when using SDL_RaiseWindow, which may trigger a multitude of focus and window related subsequent events, especially if you have more than one SDL window. Here, having a separate event loop can be used to deal with these triggered events, especially if the response to these events should be suppressed.
On the issue of delays, I have also encountered all sorts of strange behaviour with the SDL_KEYDOWN event, usually the event being triggered multiple times, and definitely not related to OS key repetition. This seems to occur only when using SDL_PollEvent; SDL_WaitEventTimeout, even with the timeout delay set to just '1', seems to inhibit this strange behaviour. Interestingly, the SDL_KEYUP event does not exhibit this behaviour.
So I've been trying to make my own mini-engine for a game using SDL and OpenGL, with the main game logic contained inside of a singleton class. But for the life of me I can't figure out how I'm getting a memory leak with this sample portion of the engine, although I'm pretty certain that it occurs whenever I call SDL_GL_SwapBuffers. The leak is about 4 K every few two or three seconds. I'm using SDL 1.3; please help me find the leak, it's been driving me crazy for the past week!
MainModule.h
#ifndef MAINMODULE_H
#define MAINMODULE_H
/// Includes
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
#include <string>
/// Define Statements (Screen elements)
#define MAINMODULE_WIDTH 800
#define MAINMODULE_HEIGHT 600
#define MAINMODULE_CAPTION "MainModule"
/// Define Statements (OpenGL memory usage)
#define MAINMODULE_RED_SIZE 8
#define MAINMODULE_GREEN_SIZE 8
#define MAINMODULE_BLUE_SIZE 8
#define MAINMODULE_ALPHA_SIZE 8
#define MAINMODULE_BUFFER_SIZE 32
#define MAINMODULE_DEPTH_SIZE 16
#define MAINMODULE_DOUBLEBUFFER 1 // 1 to Enable
#define MAINMODULE_FLAGS SDL_OPENGL
/// Define Statements (OpenGL elements)
#define MAINMODULE_CLEARCOLOR 1.0f, 1.0f, 1.0f, 1.0f
#define MAINMODULE_SHADEMODEL GL_SMOOTH
#define MAINMODULE_DEPTH_TEST 0 // 1 to Enable
class MainModule {
private: // Constructor/Destructor (private: instances are created only through ConstructInstance)
MainModule();
~MainModule(); // Releases _Event, then shuts SDL down via SDL_Quit.
private: // Class Variables (all static: the singleton holds no per-object state)
static MainModule* _Instance; // Singleton instance of the module.
static int _Width; // Width of the game screen.
static int _Height; // Height of the game screen.
static std::string _Caption; // Game screen caption/title.
static SDL_Surface* _ScreenSurface; // Game screen as represented by the window.
static SDL_Event* _Event; // Events such as mouse/key input (heap-allocated in InitializeModule).
static bool _IsInitialized; // Has the engine been initialized?
static bool _IsRunning; // Is the engine running?
public: // Get/Set Functions
static inline int Width() { return _Width; }
static inline int Height() { return _Height; }
static inline std::string Caption() { return _Caption; }
static inline bool IsInitialized() { return _IsInitialized; }
static inline bool IsRunning() { return _IsRunning; }
static void SetCaption(std::string caption); // Updates _Caption and, if initialized, the window title.
public: // Class Functions
static void ConstructInstance(); // Lazily allocates the singleton.
static void DeconstructInstance(); // Deletes the singleton (its destructor tears SDL down).
static void InitializeModule(); // Sets up SDL, GL attributes and the game window.
static void RunGameLogic(); // Updates and renders game information.
};
#endif // MAINMODULE_H
MainModule.cpp
/// Includes
#include "MainModule.h"
#include <iostream>
// Static Variable Definitions
// Out-of-class definitions for the singleton's static state; everything
// starts out in the "not yet initialized" configuration.
MainModule* MainModule::_Instance(0);
int MainModule::_Width(MAINMODULE_WIDTH);
int MainModule::_Height(MAINMODULE_HEIGHT);
std::string MainModule::_Caption(MAINMODULE_CAPTION);
SDL_Surface* MainModule::_ScreenSurface(0);
SDL_Event* MainModule::_Event(0);
bool MainModule::_IsInitialized(false);
bool MainModule::_IsRunning(false);
/// Constructor/Destructor
MainModule::MainModule() { }
// Tears down everything the module owns. Resets the static pointers so a
// later ConstructInstance/InitializeModule cycle starts from a clean slate.
MainModule::~MainModule() {
if (_Event != 0) { delete _Event; _Event = 0; }
// BUGFIX: do NOT SDL_FreeSurface the screen surface. The surface returned
// by SDL_SetVideoMode is owned by SDL and is released inside SDL_Quit();
// freeing it here double-frees it, which corrupts the heap.
_ScreenSurface = 0;
SDL_Quit();
}
/// Set Functions
// Stores the new caption; once the window exists, also pushes it to SDL.
void MainModule::SetCaption(std::string caption) {
    _Caption = caption;
    if (_IsInitialized) {
        SDL_WM_SetCaption(_Caption.c_str(), 0);
    }
}
/// Class Functions
// Lazily allocates the one-and-only MainModule instance; no-op if it exists.
void MainModule::ConstructInstance() {
    if (!_Instance) {
        _Instance = new MainModule();
    }
}
// Destroys the singleton; the destructor performs the actual SDL cleanup.
void MainModule::DeconstructInstance() {
    if (!_Instance) return;
    delete _Instance;
    _Instance = 0;
}
void MainModule::InitializeModule() {
ConstructInstance(); // Create an instance if the ConstructInstance function wasn't created before.
if (_Instance == 0) { printf("MainModule instance not created properly./n"); return; }
// Initialize SDL.
if (SDL_Init(SDL_INIT_EVERYTHING) != 0) { printf("SDL Initialization error: %s/n", SDL_GetError()); return; }
// Set OpenGL memory usage.
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, MAINMODULE_RED_SIZE);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, MAINMODULE_GREEN_SIZE);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, MAINMODULE_BLUE_SIZE);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, MAINMODULE_ALPHA_SIZE);
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, MAINMODULE_BUFFER_SIZE);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, MAINMODULE_DEPTH_SIZE);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, MAINMODULE_DOUBLEBUFFER);
// Creates the screen and window.
_ScreenSurface = SDL_SetVideoMode(MAINMODULE_WIDTH, MAINMODULE_HEIGHT, MAINMODULE_BUFFER_SIZE, MAINMODULE_FLAGS);
if (_ScreenSurface == 0) { printf("ScreenSurface not created properly./n"); return; }
SDL_WM_SetCaption(_Caption.c_str(), 0);
(MAINMODULE_DEPTH_TEST == 1) ? glEnable(GL_DEPTH_TEST) : glDisable(GL_DEPTH_TEST);
glClearColor(MAINMODULE_CLEARCOLOR);
glShadeModel(GL_SMOOTH);
_IsInitialized = true;
_IsRunning = true;
_Event = new SDL_Event();
}
// Runs one frame: drains the pending SDL event queue, then clears and
// presents the back buffer. Sets _IsRunning to false on SDL_QUIT.
void MainModule::RunGameLogic() {
    // Event handling: process everything queued since the previous frame.
    while (SDL_PollEvent(_Event)) {
        switch (_Event->type) {
            case SDL_QUIT: // Window close requested -- stop the main loop.
                _IsRunning = false;
                break;
            case SDL_ACTIVEEVENT:     // Focus change      -- not handled yet.
            case SDL_KEYDOWN:         // Keyboard press    -- not handled yet.
            case SDL_KEYUP:           // Keyboard release  -- not handled yet.
            case SDL_MOUSEMOTION:     // Mouse movement    -- not handled yet.
            case SDL_MOUSEBUTTONDOWN: // Mouse button down -- not handled yet.
            case SDL_MOUSEBUTTONUP:   // Mouse button up   -- not handled yet.
                break;
        }
    }
    // Rendering: clear the colour (and, when enabled, depth) buffer, then swap.
    if (MAINMODULE_DEPTH_TEST == 1) {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    } else {
        glClear(GL_COLOR_BUFFER_BIT);
    }
    SDL_GL_SwapBuffers();
}
/// Entry point for the program
int main(int argc, char** argv) {
    // Bring the engine up; the loop below only spins if initialization
    // succeeded (IsInitialized never changes while the loop runs).
    MainModule::InitializeModule();
    while (MainModule::IsInitialized() && MainModule::IsRunning()) {
        MainModule::RunGameLogic();
    }
    MainModule::DeconstructInstance();
    return 0;
}
Generally that sort of issue relates to internal issues with the underlying OpenGL libraries, the compiler, or something of that sort - graphics code like that is notorious for being a bit buggy and finicky.
If you give a few more details I may be able to give some more help - what operating system, compiler, graphics driver/version, specific SDL version down to build, etc.
In the mean time, consider compiling under another operating system and see what happens, or switch out SDL version.
But yeah, almost certain you're not doing anything wrong...