QAudioOutput - application size is continuously growing - c++

I'm trying to write into the buffer for QAudioOutput every 20 ms. When I execute this code I can see the size of the process increase by about 4-8 kB per second. I was trying to find some function to clear the internal buffer of QIODevice or QAudioOutput but had no luck.
I'm using Qt 5.2.1
In the example below only silence (zeros) is written, but it has the same effect:
#include <QLibraryInfo>
#include <QtCore/QCoreApplication>
#include <windows.h> // for Sleep
#include <QAudioOutput>
#include <QAudioDeviceInfo>
#include <QAudioFormat>
#include <array>
class QAudioOutput;
// Minimal reproducer for the reported leak: silence is pushed into
// QAudioOutput's push-mode QIODevice every 20 ms from a blocking loop.
// NOTE(review): the Qt event loop never runs inside the for(;;) loop, so any
// deferred cleanup the audio backend schedules via events can never execute —
// presumably the cause of the steady 4-8 kB/s growth; see the answer below.
int main(int argc, char *argv[])
{
// Create QApplication
QCoreApplication app(argc, argv);
app.setApplicationName("Audiotest");
//Initialize device
QIODevice * _output;
QAudioDeviceInfo _device = QAudioDeviceInfo::defaultOutputDevice();
QAudioFormat _format;
// 44.1 kHz, stereo, 16-bit signed little-endian PCM
_format.setSampleRate(44100);
_format.setChannelCount(2);
_format.setSampleSize(16);
_format.setCodec("audio/pcm"); // This codec should be supported on all platforms and plugin implementation
_format.setByteOrder(QAudioFormat::LittleEndian);
_format.setSampleType(QAudioFormat::SignedInt);
if (!_device.isFormatSupported(_format)) {
printf("Default format not supported - trying to use nearest.\n");
_format = _device.nearestFormat(_format);
}
QAudioOutput * _audioOutput = new QAudioOutput(_device, _format);
_output = _audioOutput->start(); // push mode: we write into the returned device
std::array<char, 32768> _buffer;
_buffer.fill(0); // 32 KiB of silence
for (;;) {
// Top up every free period with one period of silence.
const int periodSize = _audioOutput->periodSize();
const int chunks = _audioOutput->bytesFree() / periodSize;
for (int i = 0; i < chunks; ++i) {
const qint64 len = periodSize;
if (len && _output) {
_output->write(_buffer.data(), len);
}
// NOTE(review): len is assigned from periodSize and never updated, so this
// break is dead code — probably meant to test the write() return value.
if (len != periodSize) {
break;
}
}
Sleep(20); // blocks the thread; no Qt events are processed meanwhile
}
return 0;
}

When your loop runs, nothing else does. Your code should be asynchronous, and you should invert the control flow. React to a notification by the audio output device that it has processed a certain interval of samples.
To receive the first notification, you need to prime the device with some data.
// https://github.com/KubaO/stackoverflown/tree/master/questions/audio-37993427
#include <QtMultimedia>
#include <array>
int main(int argc, char ** argv) {
QCoreApplication app{argc, argv};
auto device = QAudioDeviceInfo::defaultOutputDevice();
QAudioFormat format;
format.setSampleRate(44100);
format.setChannelCount(2);
format.setSampleSize(16);
format.setCodec("audio/pcm");
format.setByteOrder(QAudioFormat::LittleEndian);
format.setSampleType(QAudioFormat::SignedInt);
if (!device.isFormatSupported(format))
qFatal("Default format not supported");
QAudioOutput audioOutput{device, format};
auto output = audioOutput.start();
qDebug() << audioOutput.state();
std::array<char, 32768> buffer;
buffer.fill(0);
auto write = [&]{
qDebug() << "notify";
auto periodSize = audioOutput.periodSize();
auto chunks = audioOutput.bytesFree() / periodSize;
for (int i = 0; i < chunks; ++i) {
if (periodSize && output) {
auto len = output->write(buffer.data(), periodSize);
if (len != periodSize)
break;
}
}
};
audioOutput.setNotifyInterval(20);
QObject::connect(&audioOutput, &QAudioOutput::notify, write);
write();
return app.exec();
}

Don't run your own event loop; instead, connect the QAudioOutput's notify signal to a slot in one of your QObjects, and have that slot call write() one time. The notify signal will be emitted whenever the QAudioOutput needs some more audio data to play.
All of this will happen inside QApplication::exec(), which you should call (near the end of main()) to run the Qt event loop for you, rather than your own for-loop.

Related

Handle threads between classes with C++, libvlc and ubuntu

I have an application in C++ with a GUI interface that needs to reproduce some mp3 depending on user's interactions. I need to reproduce the mp3 without blocking the program's flow.
In order to save code, I decided to write a class to handle the mp3 reproducing and to reproduce it in a new thread, but I'm having problems when I need to stop the playing.
I know that libvlc already has some locking functions, but the flow of the program stops while the mp3 is playing.
The mp3 starts correctly, but if I try to call the stop_mp3() function, I get a core dumped error.
The error is generated when I call the stop function from the secondpanel.cpp.
// replay.h
#include <vlc/vlc.h>
// Wrapper owning the libvlc objects for one playback session.
// NOTE(review): header has no include guard / #pragma once, and the three
// raw pointers below stay uninitialized until play_mp3() runs — calling
// stop_mp3() before play_mp3() dereferences garbage. TODO confirm lifecycle.
class rePlay
{
public:
rePlay();
virtual ~rePlay();
// Create a libvlc instance/media/player for the given path and start playing.
void play_mp3(const char*);
// Stop playback and release the player and instance created by play_mp3().
void stop_mp3();
protected:
libvlc_instance_t *inst;
libvlc_media_player_t *mp;
libvlc_media_t *m;
private:
};
// rePlay.cpp
#include "rePlay.h"
#include <vlc/vlc.h>
#include <mutex>
std::mutex mp3_mutex;
// Default constructor: intentionally empty; members are set up in play_mp3().
rePlay::rePlay()
{
//ctor
}
// Destructor: empty — NOTE(review): does not release inst/mp if stop_mp3()
// was never called, leaking the libvlc objects.
rePlay::~rePlay()
{
//dtor
}
// Builds a fresh libvlc pipeline for `path` and starts playback.
// NOTE(review): mp3_mutex.lock() is never unlocked in this function;
// stop_mp3() unlocks it from a different thread, which is undefined
// behaviour for std::mutex — the mutex is being (mis)used as a play/stop
// signal rather than for mutual exclusion.
void rePlay::play_mp3(const char* path){
mp3_mutex.lock();
// load the vlc engine
inst = libvlc_new(0, NULL);
printf("apro il file %d\n", inst); // NOTE(review): %d with a pointer is UB; use %p
// create a new item
m = libvlc_media_new_path(inst, path);
// create a media play playing environment
mp = libvlc_media_player_new_from_media(m);
// no need to keep the media now
libvlc_media_release(m);
// play the media_player
libvlc_media_player_play(mp);
printf("Done.\n");
}
// Stops playback and frees the player/instance created by play_mp3().
// NOTE(review): unlocking a std::mutex locked by another thread is undefined
// behaviour; and if play_mp3() never ran, `mp`/`inst` are uninitialized here —
// together with the uninitialized pointer in secondpanel this explains the
// reported core dump.
void rePlay::stop_mp3(){
mp3_mutex.unlock();
// stop playing
libvlc_media_player_stop(mp);
// free the media_player
libvlc_media_player_release(mp);
libvlc_release(inst);
}
// firstpanel.h
// wxWidgets panel that owns the player used for the "door open" mp3.
// NOTE(review): snippet is abridged ("..." and missing trailing semicolon as
// posted); the raw `new rePlay()` member initializer is never deleted.
class firstpanel: public wxPanel
{
public:
firstpanel(wxWindow* parent, Isola02Frame*, wxWindowID id=wxID_ANY,const wxPoint& pos=wxDefaultPosition,const wxSize& size=wxDefaultSize);
virtual ~firstpanel();
void checkValue(wxCommandEvent& event);
void check_cf(wxTimerEvent& event);
rePlay *mp3_apertura_porta = new rePlay(); // <-- I DECLARED THE pointer here
//(*Declarations(firstpanel)
wxStaticText* assistenza;
wxStaticText* first_panel;
wxStaticText* identificazione;
wxTextCtrl* tessera;
//*)
...
}
// firstpanel.cpp
// NOTE(review): spawning a thread and immediately join()ing it blocks the
// caller exactly like a direct call would — no concurrency is gained here.
std::thread second = std::thread([this]() noexcept {
this->mp3_apertura_porta->play_mp3("/home/robodyne/Project/audio/scegli-rifiuto.mp3"); });
second.join();
// secondpanel.cpp
// Button handler meant to stop the mp3 started by firstpanel.
// NOTE(review): `ptr` is an uninitialized local pointer — dereferencing it is
// undefined behaviour and the direct cause of the reported core dump. A
// pointer to the *existing* firstpanel instance must be passed in instead.
void secondpanel::OnBitmapButton2Click(wxCommandEvent& event)
{
firstpanel *ptr;
ptr->mp3_apertura_porta->stop_mp3();
}
EDIT1: Thanks to @Ted Lyngmo, I used the libvlcpp library which seems to be async somehow and it works fine. The only problem is that I do not know how to call mp.stopAsync() from stop_mp3() to stop the audio file, because the variable mp is not global.
#include "rePlay.h"
#include <vlc/vlc.h>
#include <mutex>
#include <unistd.h>
#include "vlcpp/vlc.hpp"
std::mutex mp3_mutex;
// EDIT1 version: constructor intentionally empty.
rePlay::rePlay()
{
//ctor
}
// Destructor: empty.
rePlay::~rePlay()
{
//dtor
}
// EDIT1 version using libvlcpp RAII wrappers. Starts playback of `path`.
// Fix: `auto mp.play();` was a syntax error — `auto` removed.
// NOTE(review): `instance`, `media` and `mp` are all locals, so the RAII
// wrappers are destroyed when this function returns and playback cannot be
// controlled afterwards; `mp` should become a class member so stop_mp3()
// can call mp.stopAsync()/mp.stop() on it.
void rePlay::play_mp3(const char* path){
auto instance = VLC::Instance(0, nullptr);
auto media = VLC::Media(instance, path, VLC::Media::FromPath);
auto mp = VLC::MediaPlayer(media);
mp.play();
#if LIBVLC_VERSION_INT >= LIBVLC_VERSION(4, 0, 0, 0)
#else
mp.stop(); // NOTE(review): stops immediately on libvlc < 4 — probably unintended
#endif
}
// NOTE(review): does not compile as posted — `mp` is a local inside
// play_mp3() and the trailing arrow is annotation text, not C++. Promoting
// `mp` to a class member fixes both problems.
void rePlay::stop_mp3(){
mp.stopAsync(); <-- variable mp is not global!
}
EDIT2:
I think the libvlcpp doesn't work well with GUI applications.
If I run it in a console application, I'm able to perform other operations in parallel, but when I execute it in the WxWidgets application, it blocks the flow.
This is the terminal console application:
#include "vlcpp/vlc.hpp"
#include <thread>
#include <iostream>
// Console demo: play the file named on the command line while the main
// thread keeps printing, sleep 10 s, then stop playback.
int main(int ac, char** av)
{
    if (ac < 2) {
        std::cerr << "usage: " << av[0] << " <file to play>" << std::endl;
        return 1;
    }
    auto vlcInstance = VLC::Instance(0, nullptr);
    auto track = VLC::Media(vlcInstance, av[1], VLC::Media::FromPath);
    auto player = VLC::MediaPlayer(track);
    player.play();
    // Busy output proving the main thread runs concurrently with playback.
    int counter = 0;
    while (counter < 10000000)
        printf("%d\n", counter++);
    std::this_thread::sleep_for(std::chrono::seconds(10));
#if LIBVLC_VERSION_INT >= LIBVLC_VERSION(4, 0, 0, 0)
    player.stopAsync();
#else
    player.stop();
#endif
}
the for() cycle works in parallel while the mp3 is playing.
The same doesn't happen if I use it with my application.

Google Speech Recognition doesn't work because of colliding threads Qt C++

I'm using Google's Speech-To-Text API in my Qt C++ application.
Google's C++ documentation is helpful but to an extent.
In my code below, if I uncomment
std::this_thread::sleep_for(std::chrono::seconds(1));
The speech recognition is working, but not properly - it's skipping some words. But without this line, it doesn't work at all. I think that's because the while loop of MicrophoneThreadMain() collides with the while loop of start_speech_to_text(). But I'm not sure.
I want these two functions to run side-by-side simultaneously, without interruptions, and with no delays.
I tried to use QThreads and Signal and Slots but couldn’t make it work.
speech_to_text.cpp
#include "speechtotext.h"
using google::cloud::speech::v1::StreamingRecognitionConfig;
using google::cloud::speech::v1::RecognitionConfig;
using google::cloud::speech::v1::Speech;
using google::cloud::speech::v1::StreamingRecognizeRequest;
using google::cloud::speech::v1::StreamingRecognizeResponse;
// Plain QObject; all real setup happens in initialize().
SpeechToText::SpeechToText(QObject *parent) : QObject(parent)
{
}
// Opens the default microphone at 16 kHz / 16-bit mono PCM (matching the
// RecognitionConfig built in start_speech_to_text()) and starts capture.
// Emits finished_initializing() once the input stream is live.
void SpeechToText::initialize()
{
QAudioFormat qtFormat;
// Get default audio input device
QAudioDeviceInfo qtInfo = QAudioDeviceInfo::defaultInputDevice();
// Set the audio format settings
qtFormat.setCodec("audio/pcm");
qtFormat.setByteOrder(QAudioFormat::Endian::LittleEndian);
qtFormat.setChannelCount(1);
qtFormat.setSampleRate(16000);
qtFormat.setSampleSize(16);
qtFormat.setSampleType(QAudioFormat::SignedInt);
// Check whether the format is supported
if (!qtInfo.isFormatSupported(qtFormat)) {
qWarning() << "Default format is not supported";
exit(3); // NOTE(review): hard process exit from a reusable class; consider an error signal
}
// Instantiate QAudioInput with the settings
audioInput = new QAudioInput(qtFormat);
// Start receiving data from audio input
ioDevice = audioInput->start();
emit finished_initializing();
}
// Streams captured microphone audio to the gRPC recognizer.
// Fix: the original never cleared audioDataBuffer, so every iteration re-sent
// ALL audio captured since the start and the buffer grew without bound (this
// is the word-skipping / sleep-dependency defect discussed in the answer).
// Each captured chunk is now sent exactly once and the buffer is emptied.
void SpeechToText::MicrophoneThreadMain(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *streamer)
{
    StreamingRecognizeRequest request;
    while (true)
    {
        audioDataBuffer.append(ioDevice->readAll());
        const std::size_t size_read = audioDataBuffer.size();
        if (size_read == 0)
        {
            // Nothing captured yet: avoid a busy spin and zero-byte writes.
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            continue;
        }
        // Ship the pending chunk to the stream, then drop it locally.
        request.set_audio_content(audioDataBuffer.constData(), size_read);
        std::cout << "Sending " << size_read / 1024 << "k bytes." << std::endl;
        streamer->Write(request);
        audioDataBuffer.clear();
    }
}
// Opens a bidirectional StreamingRecognize RPC: sends the configuration,
// spawns a thread that pushes microphone audio, and blocks reading
// transcription responses on the calling thread until the stream ends.
// NOTE(review): MicrophoneThreadMain() loops forever, so the join() below
// can never return; also, blocking here stalls whatever thread invoked this
// slot (the GUI thread unless moved to a worker).
void SpeechToText::start_speech_to_text()
{
StreamingRecognizeRequest request;
auto *streaming_config = request.mutable_streaming_config();
RecognitionConfig *recognition_config = new RecognitionConfig();
recognition_config->set_language_code("en-US");
recognition_config->set_sample_rate_hertz(16000); // must match QAudioInput format
recognition_config->set_encoding(RecognitionConfig::LINEAR16);
streaming_config->set_allocated_config(recognition_config); // config now owned by request
// Create a Speech Stub connected to the speech service.
auto creds = grpc::GoogleDefaultCredentials();
auto channel = grpc::CreateChannel("speech.googleapis.com", creds);
std::unique_ptr<Speech::Stub> speech(Speech::NewStub(channel));
// Begin a stream.
grpc::ClientContext context;
auto streamer = speech->StreamingRecognize(&context);
// Write the first request, containing the config only.
streaming_config->set_interim_results(true);
streamer->Write(request);
// The microphone thread writes the audio content.
std::thread microphone_thread(&SpeechToText::MicrophoneThreadMain, this, streamer.get());
// Read responses.
StreamingRecognizeResponse response;
while (streamer->Read(&response)) // Returns false when no more to read.
{
// Dump the transcript of all the results.
for (int r = 0; r < response.results_size(); ++r)
{
auto result = response.results(r);
std::cout << "Result stability: " << result.stability() << std::endl;
for (int a = 0; a < result.alternatives_size(); ++a)
{
auto alternative = result.alternatives(a);
std::cout << alternative.confidence() << "\t"
<< alternative.transcript() << std::endl;
}
}
}
grpc::Status status = streamer->Finish();
microphone_thread.join();
if (!status.ok()) {
// Report the RPC failure.
qDebug() << "error RPC";
std::cerr << status.error_message() << std::endl;
}
}
speech_to_text.h
#ifndef SPEECHTOTEXT_H
#define SPEECHTOTEXT_H
#include <QObject>
#include <QDebug>
#include <QThread>
#include <thread>
#include <chrono>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <functional>
#include <QtMultimedia>
#include <QtMultimedia/QAudioInput>
#include <QAudioDeviceInfo>
#include <QAudioFormat>
#include <QIODevice>
#include <QtConcurrent>
#include <QMutex>
#include <grpc++/grpc++.h>
#include "google/cloud/speech/v1/cloud_speech.grpc.pb.h"
using google::cloud::speech::v1::StreamingRecognitionConfig;
using google::cloud::speech::v1::RecognitionConfig;
using google::cloud::speech::v1::Speech;
using google::cloud::speech::v1::StreamingRecognizeRequest;
using google::cloud::speech::v1::StreamingRecognizeResponse;
// First version of the speech-to-text engine: owns the microphone input and
// runs the gRPC streaming recognition (see speech_to_text.cpp above).
class SpeechToText : public QObject
{
Q_OBJECT
public:
explicit SpeechToText(QObject *parent = nullptr);
signals:
// Emitted once the microphone capture has been started.
void finished_initializing();
// Carries a finished transcript. NOTE(review): never emitted in the
// implementation shown above — TODO confirm intended use.
void finished_speech_to_text(QString);
public slots:
void initialize();
void start_speech_to_text();
private:
// Worker loop that streams captured audio into the gRPC writer.
void MicrophoneThreadMain(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *);
QAudioInput *audioInput;
QIODevice *ioDevice;
// Accumulates raw PCM read from ioDevice between sends.
QByteArray audioDataBuffer;
};
#endif // SPEECHTOTEXT_H
Any idea on how to solve this?
I post here the solution to my problem. Thanks @allquixotic for all the helpful information.
in mainwindow.cpp
// Moves the microphone recorder onto its own QThread, starts capture when the
// thread starts, and forwards each raw audio chunk to the STT engine.
void MainWindow::setUpMicrophoneRecorder()
{
microphone_thread = new QThread(this);
microphone_recorder_engine.moveToThread(microphone_thread);
// start_listen() runs in the worker thread once it spins up.
connect(microphone_thread, SIGNAL(started()), &microphone_recorder_engine, SLOT(start_listen()));
// Cross-thread queued delivery of captured chunks into the STT buffer.
connect(&microphone_recorder_engine, &MicrophoneRecorder::microphone_data_raw,
this, [this] (const QByteArray &data) {
this->speech_to_text_engine.listen(data);
});
microphone_thread->start();
}
// Moves the STT engine to its own QThread, initializes it on thread start,
// and routes finished transcripts back to the GUI for processing.
void MainWindow::setUpSpeechToTextEngine()
{
speech_to_text_thread = new QThread(this);
speech_to_text_engine.moveToThread(speech_to_text_thread);
connect(speech_to_text_thread, SIGNAL(started()), &speech_to_text_engine, SLOT(initialize()));
connect(&speech_to_text_engine, SIGNAL(finished_speech_to_text(QString)), this, SLOT(process_user_input(QString)));
speech_to_text_thread->start();
}
microphonerecorder.h
#ifndef MICROPHONERECORDER_H
#define MICROPHONERECORDER_H
#include <QObject>
#include <QByteArray>
#include <QDebug>
#include <QtMultimedia>
#include <QtMultimedia/QAudioInput>
#include <QAudioDeviceInfo>
#include <QAudioFormat>
#include <QIODevice>
// Captures raw PCM from the default microphone and re-emits each chunk via
// the microphone_data_raw() signal; designed to live on a worker QThread.
class MicrophoneRecorder : public QObject
{
Q_OBJECT
public:
explicit MicrophoneRecorder(QObject *parent = nullptr);
signals:
// One chunk of raw captured audio, as read from the input device.
void microphone_data_raw(const QByteArray &);
public slots:
// Opens the microphone and starts emitting data (runs on the worker thread).
void start_listen();
private slots:
void listen(const QByteArray &);
private:
QAudioInput *audioInput;
QIODevice *ioDevice;
// NOTE(review): declared but unused in the implementation shown.
QByteArray audioDataBuffer;
};
#endif // MICROPHONERECORDER_H
microphonerecorder.cpp
#include "microphonerecorder.h"
// Construction is trivial; capture starts in start_listen().
MicrophoneRecorder::MicrophoneRecorder(QObject *parent) : QObject(parent)
{
}
// Re-emits a captured chunk so external listeners can consume it.
void MicrophoneRecorder::listen(const QByteArray &audioData)
{
emit microphone_data_raw(audioData);
}
// Opens the default microphone at 16 kHz / 16-bit mono PCM and emits each
// chunk as it becomes available via readyRead (fully event-driven; no loop).
void MicrophoneRecorder::start_listen()
{
QAudioFormat qtFormat;
// Get default audio input device
QAudioDeviceInfo qtInfo = QAudioDeviceInfo::defaultInputDevice();
// Set the audio format settings
qtFormat.setCodec("audio/pcm");
qtFormat.setByteOrder(QAudioFormat::Endian::LittleEndian);
qtFormat.setChannelCount(1);
qtFormat.setSampleRate(16000);
qtFormat.setSampleSize(16);
qtFormat.setSampleType(QAudioFormat::SignedInt);
// Check whether the format is supported
if (!qtInfo.isFormatSupported(qtFormat)) {
qWarning() << "Default format is not supported";
exit(3); // NOTE(review): hard process exit; consider an error signal instead
}
// Instantiate QAudioInput with the settings
audioInput = new QAudioInput(qtFormat);
// Start receiving data from audio input
ioDevice = audioInput->start();
// Listen to the received data for wake words
// NOTE(review): [=] captures `this` implicitly; the lambda must not outlive
// this object — here the connection dies with ioDevice, so it is safe.
QObject::connect(ioDevice, &QIODevice::readyRead, [=] {
listen(ioDevice->readAll());
});
}
speechtotext.h
#ifndef SPEECHTOTEXT_H
#define SPEECHTOTEXT_H
#include <QObject>
#include <QDebug>
#include <QThread>
#include <QDateTime>
#include <thread>
#include <chrono>
#include <string>
#include <QtMultimedia>
#include <QtMultimedia/QAudioInput>
#include <QAudioDeviceInfo>
#include <QAudioFormat>
#include <QIODevice>
#include <QtConcurrent>
#include <QMutex>
#include <grpc++/grpc++.h>
#include "google/cloud/speech/v1/cloud_speech.grpc.pb.h"
using google::cloud::speech::v1::StreamingRecognitionConfig;
using google::cloud::speech::v1::RecognitionConfig;
using google::cloud::speech::v1::Speech;
using google::cloud::speech::v1::StreamingRecognizeRequest;
using google::cloud::speech::v1::StreamingRecognizeResponse;
// Final version of the STT engine: audio arrives from outside via listen()
// (fed by MicrophoneRecorder) and two worker threads run the gRPC stream —
// one writing audio, one reading transcripts.
class SpeechToText : public QObject
{
Q_OBJECT
public:
explicit SpeechToText(QObject *parent = nullptr);
signals:
void finished_initializing();
// Emitted when a recognition session starts / ends.
void in_speech_to_text();
void out_of_speech_to_text();
// Carries each final transcript as it is recognized.
void finished_speech_to_text(QString);
public slots:
void initialize();
// Appends a raw audio chunk to the pending-send buffer.
void listen(const QByteArray &);
// Runs one time-boxed recognition session (blocks its thread until done).
void start_speech_to_text();
private:
// Writer loop: ships buffered audio to the gRPC stream.
void MicrophoneThreadMain(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *);
// Reader loop: pulls responses and emits final transcripts.
void StreamerThread(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *);
QByteArray audioDataBuffer;
// Session start timestamp (seconds since epoch), set by start_speech_to_text().
int m_start_time;
};
#endif // SPEECHTOTEXT_H
speechtotext.cpp
#include "speechtotext.h"
using google::cloud::speech::v1::StreamingRecognitionConfig;
using google::cloud::speech::v1::RecognitionConfig;
using google::cloud::speech::v1::Speech;
using google::cloud::speech::v1::StreamingRecognizeRequest;
using google::cloud::speech::v1::StreamingRecognizeResponse;
// Trivial constructor; the engine holds no audio device in this version.
SpeechToText::SpeechToText(QObject *parent) : QObject(parent)
{
}
// Audio capture now lives in MicrophoneRecorder, so there is nothing to set
// up here; just report readiness.
void SpeechToText::initialize()
{
emit finished_initializing();
}
// Writer loop: every 50 ms, if at least 64 KiB of audio is buffered, sends it
// as one chunk and empties the buffer; exits when the session time box
// (TIME_RECOGNITION, defined outside this excerpt) elapses, then half-closes
// the stream with WritesDone().
// NOTE(review): audioDataBuffer is appended from the GUI thread via listen()
// while read/cleared here without synchronization — TODO confirm a mutex or
// queued access guards it. `end_date` is also not defined in this excerpt.
void SpeechToText::MicrophoneThreadMain(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *streamer)
{
StreamingRecognizeRequest request;
std::size_t size_read;
while (time(0) - m_start_time <= TIME_RECOGNITION)
{
int chunk_size = 64 * 1024; // send in ~64 KiB chunks, per Google's example
if (audioDataBuffer.size() >= chunk_size)
{
QByteArray bytes_read = QByteArray(audioDataBuffer); // snapshot before clearing
size_read = std::size_t(bytes_read.size());
// And write the chunk to the stream.
request.set_audio_content(&bytes_read.data()[0], size_read);
bool ok = streamer->Write(request);
/*if (ok)
{
std::cout << "Sending " << size_read / 1024 << "k bytes." << std::endl;
}*/
audioDataBuffer.clear();
audioDataBuffer.resize(0);
}
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
qDebug() << "Out of speech recognition: " << end_date;
emit out_of_speech_to_text();
streamer->WritesDone();
}
// Reader loop: pulls streaming responses until the session time box elapses
// and emits finished_speech_to_text() for each final transcript (interim
// results are ignored; only the first result/alternative is inspected).
void SpeechToText::StreamerThread(grpc::ClientReaderWriterInterface<StreamingRecognizeRequest,
StreamingRecognizeResponse> *streamer)
{
// Read responses.
StreamingRecognizeResponse response;
while (time(0) - m_start_time <= TIME_RECOGNITION)
{
if(streamer->Read(&response)) // Returns false when no more to read.
{
// Dump the transcript of all the results.
if (response.results_size() > 0)
{
auto result = response.results(0);
if (result.alternatives_size() > 0)
{
auto alternative = result.alternatives(0);
auto transcript = QString::fromStdString(alternative.transcript());
if (result.is_final())
{
qDebug() << "Speech recognition: " << transcript;
emit finished_speech_to_text(transcript);
}
}
}
}
}
}
// Queues one raw audio chunk for the writer thread to send.
// NOTE(review): appends from the caller's thread while MicrophoneThreadMain
// reads/clears from its own thread — TODO confirm synchronization.
void SpeechToText::listen(const QByteArray &audioData)
{
audioDataBuffer.append(audioData);
}
// Runs one complete recognition session: resets the buffer, opens the
// streaming RPC, sends the config, then runs the writer and reader loops on
// two std::threads and blocks this slot's thread until both finish.
// NOTE(review): `start_date` is not defined in this excerpt; the stream is
// never Finish()ed, so the final RPC status is not checked.
void SpeechToText::start_speech_to_text()
{
qDebug() << "in start_speech_to_text: " << start_date;
emit in_speech_to_text();
m_start_time = time(0); // start of the session time box
audioDataBuffer.clear();
audioDataBuffer.resize(0);
StreamingRecognizeRequest request;
auto *streaming_config = request.mutable_streaming_config();
RecognitionConfig *recognition_config = new RecognitionConfig();
recognition_config->set_language_code("en-US");
recognition_config->set_sample_rate_hertz(16000); // must match the capture format
recognition_config->set_encoding(RecognitionConfig::LINEAR16);
streaming_config->set_allocated_config(recognition_config); // ownership transferred
// Create a Speech Stub connected to the speech service.
auto creds = grpc::GoogleDefaultCredentials();
auto channel = grpc::CreateChannel("speech.googleapis.com", creds);
std::unique_ptr<Speech::Stub> speech(Speech::NewStub(channel));
// Begin a stream.
grpc::ClientContext context;
auto streamer = speech->StreamingRecognize(&context);
// Write the first request, containing the config only.
streaming_config->set_interim_results(true);
streamer->Write(request);
// The microphone thread writes the audio content.
std::thread microphone_thread(&SpeechToText::MicrophoneThreadMain, this, streamer.get());
std::thread streamer_thread(&SpeechToText::StreamerThread, this, streamer.get());
microphone_thread.join();
streamer_thread.join();
}
You should really follow Google's example and only do 64k at a time.
You should use WritesDone() on the streamer when you intend the request to be shipped to Google's server.
It appears that you aren't ever clearing out your QByteArray's data, so it will just pile up over time with each successive append call on your QByteArray. Since you're using a pointer to the first element of data in the underlying array, each time you run through your loop, you're sending the entire audio data that's been captured up to that point to streamer. I suggest a nested loop that calls QIODevice::read(char *data, qint64 maxSize) repeatedly until your QByteArray has exactly 64KB. You'll need to handle a return value of -1 indicating end of stream, and adjust maxSize downwards based on how much more data is needed to fill your array up to 64k. Requests to Google's API with too little data (e.g. just a couple of bytes as your current loop appears to do at first) may get you rate-limited, or create upstream congestion on the Internet connection due to the high protocol overhead to data ratio. Also it's probably easier to handle this with a plain C-style array of a fixed size (64k) rather than a QByteArray because you don't need resizing, and AFAIK QByteArray::clear() could cause memory allocation (not great for performance). To avoid re-sending old data on a short write (e.g. when the microphone stream closes before the 64k buffer is full), you should also memset(array, 0, sizeof array); after each ClientReaderWriterInterface::WritesDone() call.
If the network can't keep up with the incoming microphone data, you may end up with an overrun situation on the QAudioInput where it runs out of local buffer to store the audio. More buffering makes this less likely but also decreases responsiveness. You may want to just buffer all the data that comes off of the QAudioInput into an unbounded QByteArray and read out of that 64k at a time (you can do so by wrapping it in a QBuffer and all your code dealing with QIODevice in MicrophoneThreadMain() will be compatible.) I think, normally, for projects like yours, the user would prefer to have worse responsiveness, as opposed to having to repeat themselves, in case of a network related overrun. But there's probably a threshold - maybe 5 seconds or so - after which the buffered data might become "out of date" as the user may try speaking into the mic again, causing a weird effect of multiple STT events happening in rapid succession once the upstream bottleneck frees up.

Difference between R.3.4.4 and R.3.5.1 in R API for C

I have a C++ program that uses R API to send commands to R and display the result. Once reduced to it's minimal form, it's an R console coded in C++.
It used to work fine (most of the time) with R 3.4.3 and R 3.4.4, but everything falls apart when I try to make the transition to R 3.5.1.
For some commands (typically a call to "par", or "barplot", or anything related to graphics), I get the error message: "Error in < My command > : the base graphics system is not registered"
I never encountered this error before, and google searching it gives surprisingly few results.
My console using R3.4.3 (it's the same with 3.4.4) :
The same commands using R3.5.1:
Note that this behavior does not happen in a regular R console, so it must have something to do with the R-API for C/C++ (and the way it handles graphics devices, maybe?).
My code consists essentially in a RManager class that calls the API to exchange with R, and a simple window providing a lineEdit where user can input its command, and a text field where R results are displayed (see images above).
I'll provide the full code for reproducibility, but if you want to jump where R communication is really handled, it all happens in rmanager.cpp, the rest is just the GUI.
main.cpp:
#include "mainwindow.h"
#include <QApplication>
#include <thread>
#include <iostream>
#include <chrono>
#ifdef _WIN32
#include <windows.h>
#include <tlhelp32.h>
#include <QProcess>
#include <cwchar>
#endif
int main(int argc, char *argv[])
{
int result = 0;
QApplication a(argc, argv);
MainWindow w;
w.show();
result = a.exec();
return result;
}
MainWindow.h:
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QTextStream>
#include <QFile>
#include <QTimer>
#include <rmanager.h>
namespace Ui {
class MainWindow;
}
// Minimal R console window: a line edit for commands, a text edit for output,
// and an RManager bridging to the embedded R interpreter.
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
explicit MainWindow(QWidget *parent = 0);
~MainWindow();
private slots:
// Command received from GUI
void on_inputCmd_returnPressed();
// Read result from the R Manager
void getResult(QString);
//Read errors from the R Manager
void getError(QString);
private:
Ui::MainWindow *ui;
// NOTE(review): started in the ctor but no timeout() connection is visible
// in this excerpt — TODO confirm what this timer drives.
QTimer pollInput;
RManager *rconsole;
// Result buffer for last command
QString resultBuffer;
// Send command directly to R
void sendRCmd(QString command);
signals:
// Starts the R Manager event loop
void runConsole();
};
#endif // MAINWINDOW_H
mainWindow.cpp:
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QDebug>
// Builds the UI, creates the R bridge, and wires R output/error streams to
// the console widget before kicking off the R REPL loop.
// NOTE(review): rconsole is parented to `parent` (possibly null), not to
// `this` — with a null parent it is never deleted automatically. The direct
// emit runConsole() runs RManager::runConsole() synchronously on this thread
// (direct connection), so the constructor blocks while the REPL loop runs —
// TODO confirm intended threading.
MainWindow::MainWindow(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::MainWindow)
{
ui->setupUi(this); // Just a QLineEdit for inputs and a QTextEdit to display result.
ui->outputConsole->document()->setMaximumBlockCount(500);
// R API connection
rconsole = new RManager(parent);
// Signals connection
connect(rconsole, SIGNAL(writeConsole(QString)), this, SLOT(getResult(QString)));
connect(rconsole, SIGNAL(writeConsoleError(QString)), this, SLOT(getError(QString)));
connect(this, SIGNAL(runConsole()), rconsole, SLOT(runConsole()));
pollInput.start(10); // Check for R results every 10 ms.
// R Callbacks event loop
emit runConsole();
}
// Tears down the window. Fixes: stop the poll timer *before* deleting the UI
// it may reference, and delete rconsole explicitly — the constructor parents
// it to the window's parent (possibly null), so it leaked when parent was
// null. Explicitly deleting a parented QObject is safe: Qt unregisters it.
MainWindow::~MainWindow()
{
    pollInput.stop();
    delete rconsole;
    delete ui;
}
/**
 * @brief MainWindow::getResult Buffers partial output from R and flushes the
 * accumulated text to the console widget when a lone "\n" chunk arrives.
 * @param res one chunk of console output emitted by the R manager
 */
void MainWindow::getResult(QString res)
{
    // Every chunk — including the terminating newline — joins the buffer,
    // so the flushed text keeps its final "\n".
    resultBuffer.append(res);
    // A chunk consisting of exactly "\n" marks the end of one result.
    if (res == "\n") {
        ui->outputConsole->append(resultBuffer);
        resultBuffer.clear();
    }
}
/**
 * @brief MainWindow::getError Immediately displays any error text coming from R.
 * Unlike getResult(), errors are not buffered — each message is appended as-is.
 * @param error error message forwarded by the R manager
 */
void MainWindow::getError(QString error)
{
qDebug() << "getError called with error: " << error ;
// Get the current text values from the text fields and append the result
ui->outputConsole->append(error);
}
/**
 * @brief MainWindow::sendRCmd Low-level method to send a command to R.
 * Echoes the command into the output console, evaluates it synchronously via
 * the R manager, then clears the input field.
 * @param command R expression to parse and evaluate
 */
void MainWindow::sendRCmd(QString command)
{
ui->outputConsole->append("> "+command+"\n");
// Send the command to R
rconsole->parseEval(command);
ui->inputCmd->clear();
}
/**
* #brief MainWindow::on_inputCmd_returnPressed Send command to R from the GUI
*/
void MainWindow::on_inputCmd_returnPressed()
{
// Get the current text values from the text fields
QString command = ui->inputCmd->text();
sendRCmd(command);
}
ui_mainwindow.h (generated by Qt Creator):
/********************************************************************************
** Form generated from reading UI file 'mainwindow.ui'
**
** Created by: Qt User Interface Compiler version 5.11.0
**
** WARNING! All changes made in this file will be lost when recompiling UI file!
********************************************************************************/
#ifndef UI_MAINWINDOW_H
#define UI_MAINWINDOW_H
#include <QtCore/QVariant>
#include <QtWidgets/QApplication>
#include <QtWidgets/QLineEdit>
#include <QtWidgets/QMainWindow>
#include <QtWidgets/QTextEdit>
#include <QtWidgets/QVBoxLayout>
#include <QtWidgets/QWidget>
QT_BEGIN_NAMESPACE
// uic-generated scaffolding (see the header warning above): a central widget
// with a vertical layout holding a read-only QTextEdit console above a
// QLineEdit input. Do not hand-edit; regenerate from mainwindow.ui instead.
class Ui_MainWindow
{
public:
QWidget *centralWidget;
QVBoxLayout *verticalLayout;
QTextEdit *outputConsole;
QLineEdit *inputCmd;
void setupUi(QMainWindow *MainWindow)
{
if (MainWindow->objectName().isEmpty())
MainWindow->setObjectName(QStringLiteral("MainWindow"));
MainWindow->resize(382, 413);
centralWidget = new QWidget(MainWindow);
centralWidget->setObjectName(QStringLiteral("centralWidget"));
verticalLayout = new QVBoxLayout(centralWidget);
verticalLayout->setSpacing(6);
verticalLayout->setContentsMargins(11, 11, 11, 11);
verticalLayout->setObjectName(QStringLiteral("verticalLayout"));
outputConsole = new QTextEdit(centralWidget);
outputConsole->setObjectName(QStringLiteral("outputConsole"));
outputConsole->setFocusPolicy(Qt::NoFocus);
outputConsole->setUndoRedoEnabled(false);
outputConsole->setReadOnly(true);
verticalLayout->addWidget(outputConsole);
inputCmd = new QLineEdit(centralWidget);
inputCmd->setObjectName(QStringLiteral("inputCmd"));
inputCmd->setClearButtonEnabled(true);
verticalLayout->addWidget(inputCmd);
MainWindow->setCentralWidget(centralWidget);
retranslateUi(MainWindow);
QMetaObject::connectSlotsByName(MainWindow);
} // setupUi
void retranslateUi(QMainWindow *MainWindow)
{
MainWindow->setWindowTitle(QApplication::translate("MainWindow", "MainWindow", nullptr));
} // retranslateUi
};
namespace Ui {
class MainWindow: public Ui_MainWindow {};
} // namespace Ui
QT_END_NAMESPACE
#endif // UI_MAINWINDOW_H
rmanager.h
#ifndef RMANAGER_H
#define RMANAGER_H
#include <QObject>
// Singleton bridge between Qt and the embedded R interpreter: owns
// initialization of embedded R and receives the R console I/O callbacks.
class RManager : public QObject
{
Q_OBJECT
public:
explicit RManager(QObject *parent = 0);
// R side methods/callbacks
// Parses and evaluates one line of R code; see parseEval in rmanager.cpp.
int parseEval(const QString & line);
// R interface callbacks
void myShowMessage( const char* message );
void myWriteConsoleEx( const char* message, int len, int oType );
int myReadConsole(const char *, unsigned char *, int, int);
int winReadConsole(const char*, char*, int, int);
void myResetConsole();
void myFlushConsole();
void myCleanerrConsole();
void myBusy( int which );
// Access to the unique instance (undefined behaviour if none was created).
static RManager &r();
signals:
// Normal R console output (oType 0) — one chunk per callback.
void writeConsole(QString);
// R error/warning output.
void writeConsoleError(QString);
public slots:
// Runs the R REPL loop (blocks the calling thread while it runs).
void runConsole();
private:
bool R_is_busy;
// Singleton storage; set once in the constructor.
static RManager *r_inst;
};
// Functions to match the library : call RManager's methods
// (free-function trampolines with the exact signatures R's C API expects).
void myR_ShowMessage( const char* message );
void myR_WriteConsoleEx( const char* message, int len, int oType );
int myR_ReadConsole(const char *prompt, unsigned char *buf, int len, int addtohistory);
int ReadConsole(const char *prompt, char *buf, int len, int addtohistory);
void myR_ResetConsole();
void myR_FlushConsole();
void myR_ClearerrConsole();
void myR_Busy( int which );
void myR_CallBack();
void myR_AskOk(const char *);
int myR_AskYesNoCancel(const char *);
#endif // RMANAGER_H
And finally, rmanager.cpp
#include "rmanager.h"
#include <qmessagebox.h>
#include <QDebug>
#define R_INTERFACE_PTRS
#include <Rembedded.h>
#ifndef _WIN32
#include <Rinterface.h> // For Linux.
#endif
#include <R_ext/RStartup.h>
#include <Rinternals.h>
#include <R_ext/Parse.h>
#include <locale.h>
// Singleton storage for the unique RManager instance.
RManager* RManager::r_inst = 0 ;
// Initializes embedded R (headless, vanilla) and installs the console
// callbacks so R's output flows through this object's signals.
// NOTE(review): Rf_endEmbeddedR(0) at the END of this constructor shuts the
// embedded R engine down immediately after initializing it — that looks like
// the prime suspect for the "base graphics system is not registered" error
// appearing under R 3.5, where initialization/teardown behavior changed.
// Also, R_DefParams/R_SetParams are called AFTER Rf_initEmbeddedR, the
// reverse of the order the Writing R Extensions embedding docs show — TODO
// confirm both against the R 3.5 embedding interface.
RManager::RManager(QObject *parent) : QObject(parent)
{
if (r_inst) {
throw std::runtime_error( tr("Il ne peut y avoir qu'une instance de RppConsole").toStdString() ) ;
} else {
r_inst = this ;
}
const char *argv[] = {"RConsole", "--gui=none", "--no-save",
"--silent", "--vanilla", "--slave"};
int argc = sizeof(argv) / sizeof(argv[0]);
setlocale(LC_NUMERIC, "C"); //try to ensure R uses .
#ifndef _WIN32
R_SignalHandlers = 0; // Don't let R set up its own signal handlers
#endif
Rf_initEmbeddedR(argc, (char**)argv); // The call that is supposed to register the graphics system, amongst other things.
R_ReplDLLinit(); // this is to populate the repl console buffers
structRstart Rst;
R_DefParams(&Rst);
Rst.R_Interactive = (Rboolean) false; // sets interactive() to eval to false
#ifdef _WIN32
// Windows routes console I/O through the Rstart structure.
Rst.rhome = getenv("R_HOME");
Rst.home = getRUser();
Rst.CharacterMode = LinkDLL;
Rst.ReadConsole = ReadConsole;
Rst.WriteConsole = NULL;
Rst.WriteConsoleEx = myR_WriteConsoleEx;
Rst.CallBack = myR_CallBack;
Rst.ShowMessage = myR_AskOk;
Rst.YesNoCancel = myR_AskYesNoCancel;
Rst.Busy = myR_Busy;
#endif
R_SetParams(&Rst);
// Assign callbacks to R's
#ifndef _WIN32
// Unix routes console I/O through the ptr_R_* function pointers instead.
ptr_R_ShowMessage = myR_ShowMessage ;
ptr_R_ReadConsole = myR_ReadConsole;
ptr_R_WriteConsoleEx = myR_WriteConsoleEx ;
ptr_R_WriteConsole = NULL;
ptr_R_ResetConsole = myR_ResetConsole;
ptr_R_FlushConsole = myR_FlushConsole;
ptr_R_ClearerrConsole = myR_ClearerrConsole;
ptr_R_Busy = myR_Busy;
R_Outputfile = NULL; // force output through WriteConsoleEx, not a FILE*
R_Consolefile = NULL;
#endif
#ifdef TIME_DEBUG
_earliestSendToRBool = false;
#endif
Rf_endEmbeddedR(0); // NOTE(review): see header comment — likely should not be here
}
// Accessor for the process-wide singleton.
// Precondition: an RManager must already have been constructed, otherwise
// r_inst is null and dereferencing it is undefined behavior.
RManager &RManager::r()
{
    RManager *instance = r_inst;
    return *instance;
}
/// Pumps R's read-eval-print loop until R signals completion.
void RManager::runConsole()
{
    // Re-prime the REPL buffers, then step the loop one iteration at a time;
    // R_ReplDLLdo1() returns <= 0 when the loop should stop.
    R_ReplDLLinit();
    for (;;) {
        if (R_ReplDLLdo1() <= 0)
            break;
    }
}
/**
 * \brief RManager::parseEval is the core of this console, sending commands to R.
 * \param line One line of R source code to parse and evaluate.
 * \return 0 on success, 1 if the input is an incomplete expression (caller
 *         should supply another line), -1 if evaluation raised an R error,
 *         -2 on a parse failure (NULL / ERROR / undocumented status).
 *
 * The command string is wrapped in an R character vector, parsed with
 * R_ParseVector, and each resulting expression is evaluated in the global
 * environment. Both intermediate SEXPs are PROTECTed and released by the
 * single UNPROTECT(2) at the end.
 */
int RManager::parseEval(const QString &line) {
ParseStatus status;
SEXP cmdSexp, cmdexpr = R_NilValue;
int i, errorOccurred, retVal=0;
// Convert the command line to SEXP
PROTECT(cmdSexp = Rf_allocVector(STRSXP, 1));
SET_STRING_ELT(cmdSexp, 0, Rf_mkChar(line.toLocal8Bit().data()));
cmdexpr = PROTECT(R_ParseVector(cmdSexp, -1, &status, R_NilValue));
switch (status){
case PARSE_OK:
// Loop is needed here as EXPSEXP might be of length > 1
// Stops at the first expression whose evaluation fails (retVal != 0).
for(i = 0; ((i < Rf_length(cmdexpr)) && (retVal==0)); i++){
// R_tryEval sets errorOccurred on each call before it is read below.
R_tryEval(VECTOR_ELT(cmdexpr, i), R_GlobalEnv, &errorOccurred);
if (errorOccurred) {
retVal = -1;
}
}
break;
case PARSE_INCOMPLETE:
// need to read another line
retVal = 1;
break;
case PARSE_NULL:
Rf_warning(tr("%s: Etat d'analyse de commande : NULL (%d)\n").toStdString().data(), "RPPConsole", status);
retVal = -2;
break;
case PARSE_ERROR:
Rf_warning(tr("Erreur d'analyse de la commande : \"%s\"\n").toStdString().data(), line.toStdString().c_str());
retVal = -2;
break;
case PARSE_EOF:
// NOTE(review): unlike the other failure cases, PARSE_EOF leaves retVal
// at 0 (success) — confirm this is intentional.
Rf_warning(tr("%s: Etat d'analyse de commande : EOF (%d)\n").toStdString().data(), "RPPConsole", status);
break;
default:
Rf_warning(tr("%s: Etat d'analyse de commande non documenté %d\n").toStdString().data(), "RPPConsole", status);
retVal = -2;
break;
}
UNPROTECT(2);
return retVal;
}
// RManager callbacks implementation
void RManager::myShowMessage(const char *message)
{
// Never called till now
QMessageBox::information(qobject_cast<QWidget*>(parent()),QString(tr("Bonjour le monde")),QString(message),QMessageBox::Ok,QMessageBox::NoButton);
}
void RManager::myWriteConsoleEx(const char *message, int len, int oType)
{
QString msg;
if (len) {
msg = QString::fromLocal8Bit(message, len);
if(!oType)
emit writeConsole(msg);
else
emit writeConsoleError(msg);
}
}
// Console-input callback: this console never feeds input back to R,
// so always report "no input" (0).
int RManager::myReadConsole(const char* /*prompt*/, unsigned char* /*buf*/, int /*len*/, int /*addtohistory*/ ){
return 0;
}
// For Windows, unsigned char is replaced by char
// Same "no input" behavior as myReadConsole, with the Windows signature.
int RManager::winReadConsole(const char* /*prompt*/, char* /*buf*/, int /*len*/, int /*addtohistory*/ ){
return 0;
}
// Reset-console callback: intentionally a no-op (no local state to reset).
void RManager::myResetConsole()
{
}
// Flush-console callback: intentionally a no-op (output is pushed via signals).
void RManager::myFlushConsole()
{
}
// Clear-error-console callback: intentionally a no-op.
void RManager::myCleanerrConsole()
{
}
/// R "busy" callback: records whether R is currently evaluating
/// (which != 0 means busy).
void RManager::myBusy( int which ){
    R_is_busy = (which != 0);
}
// Connects R callbacks to RManager static methods
// R's embedding API only accepts plain function pointers, so each of these
// free functions simply forwards to the corresponding method on the singleton.
void myR_ShowMessage( const char* message ){
RManager::r().myShowMessage( message ) ;
}
void myR_WriteConsoleEx( const char* message, int len, int oType ){
RManager::r().myWriteConsoleEx(message, len, oType);
}
int myR_ReadConsole(const char *prompt, unsigned char *buf, int len, int addtohistory){
return RManager::r().myReadConsole( prompt, buf, len, addtohistory ) ;
}
// Windows console-read callback (char* buffer variant).
int ReadConsole(const char *prompt, char *buf, int len, int addtohistory) {
return RManager::r().winReadConsole( prompt, buf, len, addtohistory ) ;
}
void myR_ResetConsole(){
RManager::r().myResetConsole();
}
void myR_FlushConsole(){
RManager::r().myFlushConsole();
}
void myR_ClearerrConsole(){
RManager::r().myCleanerrConsole();
}
void myR_Busy( int which ){
RManager::r().myBusy(which);
}
void myR_CallBack() {
// Called during i/o, eval, graphics in ProcessEvents
}
// "OK" message box callback: intentionally ignores the message.
void myR_AskOk(const char* /*info*/) {
}
// Yes/No/Cancel prompt callback: always answers "yes" (1), since this
// console has no way to ask the user.
int myR_AskYesNoCancel(const char* /*question*/) {
const int yes = 1;
return yes;
}
Thank you in advance for your ideas on what the problem might be. Is it an R 3.5.1 bug, or is there something that I should have defined/connected and missed? I read the R 3.5.1 change log without finding a clue about it.
PS: I'm under windows 10, compiling with Microsoft Visual C++ Compiler 15.0 (32 bits), and using Qt 5.11.0 (for the GUI components).
PPS: Following user2554330's advice, I checked for calls to GEregisterSystem, that is supposed to set the graphics system, and thus prevent this error. I found that in both cases, this function is called, at application launch, but not with the same call stack.
For R.3.4.3:
For R.3.5.1:
I found a solution (thanks to Luke Tierney on R-devel mailing list).
I just needed to move the call to Rf_endEmbeddedR to the destructor of RManager, where it should have been.
It doesn't really explain why it worked the way it was before with R.3.4 and not with R.3.5, but it does solve the practical issue.
Maybe this was never supposed to work with Rf_endEmbeddedR called so soon, and it only used to, thanks to a bug that has been fixed.

Using Live555 to Stream Live Video from an IP camera connected to an H264 encoder

I am using a custom Texas Instruments OMAP-L138 based board that basically consists of an ARM9 based SoC and a DSP processor. It is connected to a camera lens. What I'm trying to do is to capture live video stream which is sent to the dsp processor for H264 encoding which is sent over uPP in packets of 8192 bytes. I want to use the testH264VideoStreamer supplied by Live555 to live stream the H264 encoded video over RTSP. The code I have modified is shown below:
#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <string.h>
#include <unistd.h> //to allow read() function
// Shared live555 state used by main(), play() and afterPlaying().
UsageEnvironment* env;
H264VideoStreamFramer* videoSource;
RTPSink* videoSink;
//-------------------------------------------------------------------------------
/* Open File Descriptor*/
// NOTE(review): open() runs during static initialization, before main();
// the return value is never checked, so a failed open leaves `stream` == -1
// and every later read() silently fails — confirm /dev/upp is guaranteed
// to exist at process start.
int stream = open("/dev/upp", O_RDONLY);
/* Declaring a static 8 bit unsigned integer of size 8192 bytes that keeps its value between invocations */
static uint8_t buf[8192];
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Execute play function as a forwarding mechanism
//------------------------------------------------------------------------------
void play(); // forward
//------------------------------------------------------------------------------
// MAIN FUNCTION / ENTRY POINT
//------------------------------------------------------------------------------
// Sets up a multicast (SSM) RTSP server that streams H.264 read from /dev/upp,
// then enters the live555 event loop (never returns).
int main(int argc, char** argv)
{
// Begin by setting up our live555 usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
// Create 'groupsocks' for RTP and RTCP:
struct in_addr destinationAddress;
destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
// Note: This is a multicast address. If you wish instead to stream
// using unicast, then you should use the "testOnDemandRTSPServer"
// test program - not this test program - as a model.
const unsigned short rtpPortNum = 18888;
const unsigned short rtcpPortNum = rtpPortNum+1;  // RTCP uses the next port up
const unsigned char ttl = 255;
const Port rtpPort(rtpPortNum);
const Port rtcpPort(rtcpPortNum);
Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
rtpGroupsock.multicastSendOnly(); // we're a SSM source
Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
rtcpGroupsock.multicastSendOnly(); // we're a SSM source
// Create a 'H264 Video RTP' sink from the RTP 'groupsock':
OutPacketBuffer::maxSize = 1000000;  // large enough for big H.264 frames
videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);  // 96 = dynamic payload type
// Create (and start) a 'RTCP instance' for this RTP sink:
const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen+1];
gethostname((char*)CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case
RTCPInstance* rtcp
= RTCPInstance::createNew(*env, &rtcpGroupsock,
estimatedSessionBandwidth, CNAME,
videoSink, NULL /* we're a server */,
True /* we're a SSM source */);
// Note: This starts RTCP running automatically
/*Create RTSP SERVER*/
RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
if (rtspServer == NULL)
{
*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
exit(1);
}
// Advertise the passive (multicast) session through the RTSP server.
ServerMediaSession* sms
= ServerMediaSession::createNew(*env, "IPCAM # TeReSol","UPP Buffer" ,
"Session streamed by \"testH264VideoStreamer\"",
True /*SSM*/);
sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
rtspServer->addServerMediaSession(sms);
char* url = rtspServer->rtspURL(sms);
*env << "Play this stream using the URL \"" << url << "\"\n";
delete[] url;
// Start the streaming:
*env << "Beginning streaming...\n";
play();
env->taskScheduler().doEventLoop(); // does not return
return 0; // only to prevent compiler warning
}
//----------------------------------------------------------------------------------
// afterPlaying() -> Defines what to do once a buffer is streamed
//----------------------------------------------------------------------------------
// Completion callback passed to startPlaying(): immediately kicks off the
// next read/stream cycle.
void afterPlaying(void* /*clientData*/)
{
*env << "...done reading from upp buffer\n";
//videoSink->stopPlaying();
//Medium::close(videoSource);
// Note that this also closes the input file that this source read from.
// NOTE(review): because the close above is commented out, each play() cycle
// creates a fresh framer/source without releasing the previous one — this
// looks like a per-cycle leak; confirm against live555's ownership rules.
// Start playing once again to get the next stream
play();
/* We don't need to close the dev as long as we're reading from it. But if we do, use: close( "/dev/upp", O_RDWR);*/
}
//----------------------------------------------------------------------------------------------
// play() Method -> Defines how to read and what to make of the input stream
//----------------------------------------------------------------------------------------------
// Reads one chunk of H.264 data from the UPP device and streams it through
// the global videoSink. Called once from main() and then re-armed from
// afterPlaying() after each chunk completes.
void play()
{
    /* Read up to sizeof buf bytes from the device into buf. */
    // BUG FIX: the return value of read() was previously ignored, so a short
    // read or an error (e.g. the static-init open() of /dev/upp failed) would
    // silently stream 8192 bytes of stale/uninitialized data. Check the
    // result and stream only the bytes actually read.
    ssize_t bytesRead = read(stream, buf, sizeof buf);
    if (bytesRead <= 0)
    {
        *env << "Failed to read from \"/dev/upp\"\n";
        exit(1);
    }
    printf("Reading from UPP in to Buffer");
    /*Open the input data as a 'byte-stream memory buffer source': */
    ByteStreamMemoryBufferSource* buffSource
        = ByteStreamMemoryBufferSource::createNew(*env, buf, (unsigned)bytesRead,
                                                  False/*Empty Buffer After Reading*/);
    if (buffSource == NULL)
    {
        *env << "Unable to read from\"" << "Buffer"
             << "\" as a byte-stream source\n";
        exit(1);
    }
    FramedSource* videoES = buffSource;
    // Create a framer for the Video Elementary Stream:
    videoSource = H264VideoStreamFramer::createNew(*env, videoES, False);
    // Finally, start playing; afterPlaying() re-enters play() for the next chunk:
    *env << "Beginning to read from UPP...\n";
    videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}
The problem is that although the code compiles successfully, I'm unable to get the desired output: the RTSP stream in VLC player is in play mode, but I can't see any video. I'd be grateful for any assistance in this matter. My description may be a little vague, but I'm happy to further explain any part as required.
Okay so I figured out what needed to be done and am writing for the benefit of all who might face a similar issue. What I needed to do was modify my testH264VideoStreamer.cpp and DeviceSource.cpp file such that it directly reads data from the device (in my case it was the custom am1808 board), store it in a buffer and stream it. The changes I made were:
testH264VideoStreamer.cpp
#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <string.h>
#include <unistd.h> //to allow read() function
// Shared live555 state used by main(), play() and afterPlaying().
UsageEnvironment* env;
H264VideoStreamFramer* videoSource;
RTPSink* videoSink;
void play(); // forward
//-------------------------------------------------------------------------
//Entry Point -> Main FUNCTION
//-------------------------------------------------------------------------
// Sets up a multicast (SSM) RTSP server whose media comes from a custom
// DeviceSource reading /dev/upp, then enters the event loop (never returns).
int main(int argc, char** argv) {
// Begin by setting up our usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
// Create 'groupsocks' for RTP and RTCP:
struct in_addr destinationAddress;
destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
// Note: This is a multicast address. If you wish instead to stream
// using unicast, then you should use the "testOnDemandRTSPServer"
// test program - not this test program - as a model.
const unsigned short rtpPortNum = 18888;
const unsigned short rtcpPortNum = rtpPortNum+1;  // RTCP on the next port
const unsigned char ttl = 255;
const Port rtpPort(rtpPortNum);
const Port rtcpPort(rtcpPortNum);
Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
rtpGroupsock.multicastSendOnly(); // we're a SSM source
Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
rtcpGroupsock.multicastSendOnly(); // we're a SSM source
// Create a 'H264 Video RTP' sink from the RTP 'groupsock':
OutPacketBuffer::maxSize = 600000;  // must exceed the largest encoded frame
videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);  // 96 = dynamic payload type
// Create (and start) a 'RTCP instance' for this RTP sink:
const unsigned estimatedSessionBandwidth = 1024; // in kbps; for RTCP b/w share
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen+1];
gethostname((char*)CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case
RTCPInstance* rtcp
= RTCPInstance::createNew(*env, &rtcpGroupsock,
estimatedSessionBandwidth, CNAME,
videoSink, NULL /* we're a server */,
True /* we're a SSM source */);
// Note: This starts RTCP running automatically
RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
if (rtspServer == NULL) {
*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
exit(1);
}
// Advertise the passive (multicast) session through the RTSP server.
ServerMediaSession* sms
= ServerMediaSession::createNew(*env, "ipcamera","UPP Buffer" ,
"Session streamed by \"testH264VideoStreamer\"",
True /*SSM*/);
sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
rtspServer->addServerMediaSession(sms);
char* url = rtspServer->rtspURL(sms);
*env << "Play this stream using the URL \"" << url << "\"\n";
delete[] url;
// Start the streaming:
*env << "Beginning streaming...\n";
play();
env->taskScheduler().doEventLoop(); // does not return
return 0; // only to prevent compiler warning
}
//----------------------------------------------------------------------
//AFTER PLAY FUNCTION CALLED HERE
//----------------------------------------------------------------------
// Completion callback passed to startPlaying(): chain straight into the
// next read/stream cycle.
void afterPlaying(void* /*clientData*/)
{
play();
}
//------------------------------------------------------------------------
//PLAY FUNCTION ()
//------------------------------------------------------------------------
// Creates a DeviceSource over /dev/upp, wraps it in an H.264 framer, and
// starts streaming it into the global videoSink. Re-armed from afterPlaying().
void play()
{
// Open the input file as with Device as the source:
// NOTE(review): a new DeviceSource and framer are created on every cycle and
// the previous ones are never Medium::close()d — this looks like it grows
// memory over time; confirm against live555's ownership rules.
DeviceSource* devSource
= DeviceSource::createNew(*env);
if (devSource == NULL)
{
*env << "Unable to read from\"" << "Buffer"
<< "\" as a byte-stream source\n";
exit(1);
}
FramedSource* videoES = devSource;
// Create a framer for the Video Elementary Stream:
videoSource = H264VideoStreamFramer::createNew(*env, videoES,False);
// Finally, start playing:
*env << "Beginning to read from UPP...\n";
videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}
DeviceSource.cpp
#include "DeviceSource.hh"
#include <GroupsockHelper.hh> // for "gettimeofday()"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
//static uint8_t *buf = (uint8_t*)malloc(102400);
// Shared frame buffer: doGetNextFrame() fills it, deliverFrame() copies it out.
static uint8_t buf[8192];
// File descriptor for /dev/upp, opened by the first DeviceSource instance.
int upp_stream;
//static uint8_t *bufPtr = buf;
// Factory method: construct a DeviceSource bound to the given environment.
DeviceSource* DeviceSource::createNew(UsageEnvironment& env)
{
    DeviceSource* source = new DeviceSource(env);
    return source;
}
// Class-wide state: one event trigger and one device fd are shared by all
// DeviceSource instances; referenceCount tracks how many instances exist.
EventTriggerId DeviceSource::eventTriggerId = 0;
unsigned DeviceSource::referenceCount = 0;
// Constructor: the first instance opens the device and registers the shared
// event trigger used to schedule deliverFrame0 on the event-loop thread.
DeviceSource::DeviceSource(UsageEnvironment& env):FramedSource(env)
{
if (referenceCount == 0)
{
// NOTE(review): the return value of open() is not checked — a failure
// leaves upp_stream == -1 and every subsequent read() fails silently.
upp_stream = open("/dev/upp",O_RDWR);
}
++referenceCount;
if (eventTriggerId == 0)
{
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}
// Destructor: only the LAST remaining instance reclaims the shared state.
DeviceSource::~DeviceSource(void) {
    --referenceCount;
    if (referenceCount == 0)
    {
        // BUG FIX: the event trigger was previously deleted (and eventTriggerId
        // zeroed) on EVERY destruction, invalidating the trigger still used by
        // any remaining DeviceSource instances. The stock live555 DeviceSource
        // template performs this cleanup only when the last instance goes away.
        envir().taskScheduler().deleteEventTrigger(eventTriggerId);
        eventTriggerId = 0;
        // NOTE(review): the /dev/upp fd opened by the first instance is still
        // never closed here — consider close(upp_stream) once ownership is clear.
    }
}
int loop_count;
// Called by the downstream framer when it wants the next frame: read one
// chunk from the device into the shared buffer, then deliver it.
void DeviceSource::doGetNextFrame()
{
//for (loop_count=0; loop_count < 13; loop_count++)
//{
// NOTE(review): the read() return value is ignored, so a short read or
// error still delivers the full (possibly stale) 8192-byte buffer; this
// blocking read also runs on the event-loop thread — confirm acceptable.
read(upp_stream,buf, 8192);
//bufPtr+=8192;
//}
deliverFrame();
}
void DeviceSource::deliverFrame0(void* clientData)
{
((DeviceSource*)clientData)->deliverFrame();
}
// Copies the shared buffer into the sink-provided destination (fTo),
// truncating to fMaxSize if necessary, stamps the presentation time,
// and signals completion to the downstream object.
void DeviceSource::deliverFrame()
{
if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
u_int8_t* newFrameDataStart = (u_int8_t*) buf; //(u_int8_t*) buf; //%%% TO BE WRITTEN %%%
// NOTE(review): always reports the full buffer size (8192), not the number
// of bytes actually read by doGetNextFrame — confirm the device always
// delivers full chunks.
unsigned newFrameSize = sizeof(buf); //%%% TO BE WRITTEN %%%
// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
gettimeofday(&fPresentationTime, NULL);
memmove(fTo, newFrameDataStart, fFrameSize);
// Tell the downstream framer that data is ready (must be the last call).
FramedSource::afterGetting(this);
}
After compiling the code with these modifications, I was able to receive video stream on vlc player.

Obtaining command line arguments in a Qt application

The following snippet is from a little app I wrote using the Qt framework. The idea is that the app can be run in batch mode (i.e. called by a script) or can be run interactively.
It is important therefore, that I am able to parse command line arguments in order to know which mode in which to run etc.
[Edit]
I am debugging using Qt Creator 1.3.1 on Ubuntu Karmic. The arguments are passed in the normal way (i.e. by adding them via the 'Project' settings in the Qt Creator IDE).
When I run the app, it appears that the arguments are not being passed to the application. The code below, is a snippet of my main() function.
// Entry point: constructs the QApplication and retrieves the command-line
// arguments as a QStringList via QCoreApplication::arguments().
int main(int argc, char *argv[])
{
//Q_INIT_RESOURCE(application);
try {
// QApplication must exist before QCoreApplication::arguments() is usable.
QApplication the_app(argc, argv);
//trying to get the arguments into a list
QStringList cmdline_args = QCoreApplication::arguments();
// Code continues ...
}
catch (const MyCustomException &e) { return 1; }
return 0;
}
[Update]
I have identified the problem - for some reason, although argc is correct, the elements of argv are empty strings.
I put this little code snippet to print out the argv items - and was horrified to see that they were all empty.
// Debug fragment: print each raw argv entry to stdout, one per line.
for (int i=0; i< argc; i++){
std::string s(argv[i]); //required so I can see the damn variable in the debugger
std::cout << s << std::endl;
}
Does anyone know how I can retrieve the command line args in my application?
If your argc and argv are good, I'm surprised this would be possible as QApplication::arguments() is extremely simple. Note the source code. Filtering the #ifdefs for Linux, it's just:
// Quoted from Qt's own source (Linux path): arguments() simply converts the
// argc/argv stored by the QCoreApplication constructor into a QStringList.
QStringList QCoreApplication::arguments()
{
QStringList list;
// `self` is the singleton QCoreApplication instance; without one there is
// no stored argc/argv to read.
if (!self) {
qWarning("QCoreApplication::arguments: Please instantiate the QApplication object first");
return list;
}
const int ac = self->d_func()->argc;
char ** const av = self->d_func()->argv;
for (int a = 0; a < ac; ++a) {
// Each raw C string is decoded with the local 8-bit codec.
list << QString::fromLocal8Bit(av[a]);
}
return list;
}
That's all you've got. There's a Unicode caveat which I would not think would apply to Karmic:
"On Unix, this list is built from the argc and argv parameters passed to the constructor in the main() function. The string-data in argv is interpreted using QString::fromLocal8Bit(); hence it is not possible to pass, for example, Japanese command line arguments on a system that runs in a Latin1 locale. Most modern Unix systems do not have this limitation, as they are Unicode-based."
You might try a copy of that code against your argc and argv directly and see what happens.
Only in order to keep response up-to-date, Qt now provides a dedicated class for parsing command line:
http://doc.qt.io/qt-5/qcommandlineparser.html
P.S. : can only post this as response and not a comment; I'm sorry because the question was not really how to parse but how to access.
If you are writing a Console only application then you might want to consider using QCoreApplication instead of QApplicition. QCoreApplication is part of QtCore while QApplication is defined in QtGui, so you get an extra and unnecessary dependency.
Here is a simple example that collects the arguments into a QStringList, assuming you start the app with the arguments -q -t:
// Example: concatenate all arguments into one string, then split on "-" to
// recover the option letters ("-q -t" becomes ("q", "t")).
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();
QString x;
// Join argv[1..] with no separator (argv[0], the program name, is skipped).
for (int i=1; i<argc; i++)
{
x.append(argv[i]);
}
qDebug() << x;
// NOTE(review): splitting on "-" also splits any argument VALUE that
// contains a hyphen — fine for single-letter flags, fragile otherwise.
QStringList args = x.split("-");
args.removeFirst();  // drop the empty piece before the first "-"
qDebug() << "args="<< args;
return a.exec();
}
Output is as follow
x= "-q-t"
args= ("q", "t")
Now you have the arguments as a QStringList ..
and here is a complete code i wrote and use in a small application
#include "mainwindow.h"
#include <QApplication>
#include <QDebug>
// Parsed option tokens ("b", "t", "sXX:XX:...", "fpath") extracted in main().
static QStringList arguments;
// Entry point for the player example: joins argv, splits it on "-" into
// option tokens, and interprets -b/-t/-s/-f as documented below.
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();
//analyze the arguments
//-b: use buidin player and if it does not exist use mpg123 shell to play files
//
//-t: test the player upon startup and exit
//-s: use the following speaker ID for the test
//-f: use the following file name and path
//syntax: example:
// -b : to use build in player
// -t -s xx:xx:xx:xx:xx -f azanfile.mp3: to test upon startup playing a file
bool useBuildInPlayer;
QString x;
// Join argv[1..] into one string, then split on "-" to get the tokens.
// NOTE(review): any argument value containing "-" would be split too.
for (int i=1; i<argc; i++)
{
x.append(argv[i]);
}
arguments << x.split("-"); arguments.removeFirst();
qDebug() << arguments;
// -b selects the built-in player; otherwise the mpg123 shell is used.
if (arguments.indexOf("b")>=0)
useBuildInPlayer=true;
else
useBuildInPlayer=false;
bool TestSpeaker = false;
bool spkr=false; QString speaker;
bool playfile=false; QStringList testfiles;
QString filestring;
// Walk the tokens: "sVALUE" carries the speaker ID, "fVALUE" a file path,
// bare "t" requests the startup test.
foreach (QString x, arguments)
{
if (x.left(1)=="s")
{
speaker = x.mid(1,-1); //remove the s from the beginning
spkr=true;
}
if (x.left(1)=="f")
{
filestring=x.mid(1,-1);
playfile=true;
testfiles<<filestring;
}
if (x=="t")
TestSpeaker = true;
}
// Report what the startup test is missing, if anything.
if (TestSpeaker)
{
if (spkr)
{
qDebug() << "testing speaker "<< speaker;
}
else
{
qDebug() << "test argument needs speaker -s xx:xx:xx:xx:xx";
}
if (playfile)
{
qDebug() << "testing file "<< filestring;
}
else
{
qDebug() << "test file is missing";
}
}
// Only run the test when all three of -t, -s and -f were supplied.
if (TestSpeaker && spkr && playfile)
{
if (useBuildInPlayer) //use build in player
{
qDebug() << "testing using buildin player";
}
else // use mpg123 shell
{
qDebug() << "testing using mpg123 shell";
}
}
return a.exec();
}