I have written a simple C++ program using OpenCV to test FPS on a Raspberry Pi.
#include <iostream>
#include <chrono>
#include <thread>
#include <unistd.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/core/ocl.hpp>

using namespace cv;
using namespace std;
using namespace std::chrono;
using namespace std::this_thread;
using namespace cv::ml;

int main(void)
{
    VideoCapture camera(0);
    camera.set(CAP_PROP_FRAME_WIDTH, 640);
    camera.set(CAP_PROP_FRAME_HEIGHT, 480);
    camera.set(CAP_PROP_FPS, 25);

    Mat frame;
    while (camera.read(frame)) {
        // Write the raw BGR frame to stdout, one byte at a time.
        for (size_t i = 0; i < frame.dataend - frame.datastart; i++)
            std::cout << frame.data[i];
    }
}
I then use the following scripts to test FPS.
The first uses raspivid as the input; the second uses my C++ program as input.
The raspivid version reaches 25 fps.
My C++ program never goes over 10 fps.
Any idea why this is?
raspivid -w 640 -h 480 -fps 25 -t 120000 -o - | ffmpeg -re -i pipe:0 -y -an -c:v copy -f null /dev/null
and
./PiImageAnalyzer.out | ffmpeg -re -f rawvideo -pixel_format bgr24 -video_size 640x480 -framerate 25 -i pipe:0 -y -an -c:v copy -f null /dev/null
Update
These scripts can also be used:
raspivid -w 640 -h 480 -fps 25 -t 120000 -o - | ffplay -i -
and
./PiImageAnalyzer.out | ffplay -f rawvideo -pixel_format bgr24 -video_size 640x480 -framerate 25 -i -
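Note: one thing worth checking is whether the per-byte std::cout << in the capture loop is itself the bottleneck. The following is a minimal sketch (a hypothetical variant, not the original program) that writes each frame to stdout with a single fwrite call, assuming the Mat is continuous:

#include <cstdio>
#include <opencv2/opencv.hpp>

int main(void)
{
    cv::VideoCapture camera(0);
    camera.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
    camera.set(cv::CAP_PROP_FPS, 25);

    cv::Mat frame;
    while (camera.read(frame)) {
        // Assumes frame.isContinuous(); writes the whole BGR buffer at once
        // instead of one stream insertion per byte.
        std::fwrite(frame.data, 1, frame.total() * frame.elemSize(), stdout);
    }
}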
Related
I am running this on Windows, compiled using Cygwin and MinGW.
Compile Command:
g++ sdl.cpp -I"include" -L"lib" -lSDL2main -lSDL2 -lSDL2_image -o test.exe
Code:
#include <SDL2/SDL.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        printf("error initializing SDL: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *win = SDL_CreateWindow("Test", SDL_WINDOWPOS_CENTERED,
                                       SDL_WINDOWPOS_CENTERED, 1000, 1000, 0);
    while (1); // spin forever; no event handling
    return 0;
}
On Cygwin:
g++ sdl.cpp -lSDL2main -lSDL2 -lSDL2_image -o test.exe
Run the X server with startxwin, open a terminal, and run:
./test.exe
A black window with a white bar appears.
As you put in no handling of events, you need a hard kill to close the program:
$ ps ax | grep test
18455 18448 18455 25368 pty3 197609 12:39:35 /tmp/test
$ kill -9 18455
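For reference, a minimal sketch of the same program with an event loop, so the window can be closed normally instead of needing kill -9 (the SDL_Delay value is an arbitrary choice to avoid spinning at 100% CPU):

#include <SDL2/SDL.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        printf("error initializing SDL: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *win = SDL_CreateWindow("Test", SDL_WINDOWPOS_CENTERED,
                                       SDL_WINDOWPOS_CENTERED, 1000, 1000, 0);
    int running = 1;
    while (running) {
        SDL_Event event;
        // Drain the event queue; SDL_QUIT arrives when the window is closed.
        while (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT)
                running = 0;
        }
        SDL_Delay(16);
    }
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}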
I've written an HTTP server that only sends back compressed HTTP responses:
https://github.com/ericcurtin/asio/commit/1d37a1d225d1e812a747b595c02f9770ebd75dd0
So if you use curl to request the data and decompress the response by piping it through gunzip, it works fine:
curl -x "" 127.0.0.1:5000/main.cpp --output - | gunzip
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   758  100   758    0     0   740k      0 --:--:-- --:--:-- --:--:--  740k
// g++ -O0 main.cpp server.cpp connection_manager.cpp request_handler.cpp
// connection.cpp reply.cpp mime_types.cpp request_parser.cpp -lboost_system
// -lpthread -lz
//
// run like: ./a.out 0.0.0.0 5000 .
//
// main.cpp
// ~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <iostream>
#include <string>
#include <boost/asio.hpp>
#include "server.hpp"

int main(int argc, char* argv[])
{
  try
  {
    // Check command line arguments.
    if (argc != 4)
    {
      std::cerr << "Usage: http_server <address> <port> <doc_root>\n";
      std::cerr << "  For IPv4, try:\n";
      std::cerr << "    receiver 0.0.0.0 80 .\n";
      std::cerr << "  For IPv6, try:\n";
      std::cerr << "    receiver 0::0 80 .\n";
      return 1;
    }

    // Initialise the server.
    http::server::server s(argv[1], argv[2], argv[3]);

    // Run the server until stopped.
    s.run();
  }
  catch (std::exception& e)
  {
    std::cerr << "exception: " << e.what() << "\n";
  }

  return 0;
}
But if you use curl with --compressed, which works with other HTTP servers like the one at example.com, it fails after the first 512 bytes:
curl -x "" 127.0.0.1:5000/main.cpp --compressed
// g++ -O0 main.cpp server.cpp connection_manager.cpp request_handler.cpp
// connection.cpp reply.cpp mime_types.cpp request_parser.cpp -lboost_system
// -lpthread -lz
//
// run like: ./a.out 0.0.0.0 5000 .
//
// main.cpp
// ~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <iostream>
curl: (23) Failed writing received data to disk/application
#include <string
Any idea how my compression could be fixed?
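One possible cause of this symptom (a hypothesis, not confirmed against the linked commit) is ending a separate gzip stream for each buffered read instead of producing one gzip stream for the whole body. For comparison, a minimal self-contained zlib sketch that compresses an entire buffer as a single gzip stream looks like this; windowBits = 15 + 16 selects the gzip wrapper, and Z_FINISH is passed only once, at the end of the data:

#include <zlib.h>
#include <stdexcept>
#include <string>

std::string gzip_compress(const std::string& input)
{
    z_stream zs{};
    // 15 + 16: 32 KB window with a gzip (not raw deflate/zlib) header.
    if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     15 + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK)
        throw std::runtime_error("deflateInit2 failed");

    zs.next_in = reinterpret_cast<Bytef*>(const_cast<char*>(input.data()));
    zs.avail_in = static_cast<uInt>(input.size());

    std::string output;
    char buf[16384];
    int ret;
    do {
        zs.next_out = reinterpret_cast<Bytef*>(buf);
        zs.avail_out = sizeof(buf);
        ret = deflate(&zs, Z_FINISH); // Z_FINISH once, for the whole buffer
        output.append(buf, sizeof(buf) - zs.avail_out);
    } while (ret == Z_OK);

    deflateEnd(&zs);
    if (ret != Z_STREAM_END)
        throw std::runtime_error("deflate failed");
    return output;
}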
I need a function which suspends my program for a given number of µs. It should be real-time: if I call it with 50µs, the thread should stop for exactly 50µs.
My C++ program is running on a Raspberry Pi with normal Raspbian installed.
I wrote this example program, which uses the POSIX time functions to suspend and measure the suspend time.
#include <cstdlib>
#include <cstdint>
#include <csignal>
#include <time.h>
#include <cerrno>
#include <cstdio>
#include <iostream>
#include <cstring>

#define S_IN_NS 1000000000UL
#define MS_IN_NS 1000000UL
#define US_IN_NS 1000UL

#define GET_TIME_CLOCK CLOCK_MONOTONIC

using namespace std;

int main(int argc, char** argv) {
    struct timespec newTimeStamp;
    struct timespec oldTimeStamp;
    struct timespec sleeptime;

    sleeptime.tv_sec = 0;
    sleeptime.tv_nsec = 50000; // 50us

    if (clock_gettime(GET_TIME_CLOCK, &oldTimeStamp) == -1)
        cout << "Could not get clock time! ERRNO: " << strerror(errno);

    // clock_nanosleep returns the error number directly, not -1/errno
    int ret = clock_nanosleep(CLOCK_MONOTONIC, 0, &sleeptime, NULL);
    if (ret != 0)
        cout << "Sleep failed! ERRNO: " << strerror(ret);

    if (clock_gettime(GET_TIME_CLOCK, &newTimeStamp) == -1)
        cout << "Could not get clock time! ERRNO: " << strerror(errno);

    uint64_t measuredSec = (newTimeStamp.tv_sec - oldTimeStamp.tv_sec);
    int32_t measuredNs = (newTimeStamp.tv_nsec - oldTimeStamp.tv_nsec);
    uint64_t diffus = (((measuredSec * S_IN_NS) + measuredNs + 500) / 1000UL);
    uint64_t diffns = (((measuredSec * S_IN_NS) + measuredNs));
    cout << "Diffns:" << diffns << " Diffus:" << diffus << endl;
    return 0;
}
Build commands:
arm-bcm2708hardfp-linux-gnueabi-g++ -lrt -c -g -MMD -MP -MF "build/Debug/GNU_ARM_HARDFP-Linux-x86/main.o.d" -o build/Debug/GNU_ARM_HARDFP-Linux-x86/main.o main.cpp
arm-bcm2708hardfp-linux-gnueabi-g++ -lrt -o dist/Debug/GNU_ARM_HARDFP-Linux-x86/timetest build/Debug/GNU_ARM_HARDFP-Linux-x86/main.o
Result (chrt - manipulate real-time attributes of a process):
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:130994 Diffus:131
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:135994 Diffus:136
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:138993 Diffus:139
The program should sleep for exactly 50µs, but I measured 130-139µs.
If I change the GET_TIME_CLOCK define to CLOCK_PROCESS_CPUTIME_ID, the CPU time (excluding the sleep time) is measured (as I understand it).
Result:
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:89000 Diffus:89
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:86000 Diffus:86
pi@raspberrypi ~ $ sudo chrt 99 ./timetest
Diffns:88000 Diffus:88
The clock_nanosleep() call takes around 80-90µs of overhead, even if I change the sleep time to 500µs!
So is there any way to suspend a thread for an exact amount of time (in µs) in a C++ userspace application on Raspbian?
Thanks.
If you need to sleep for such a precise amount of time, you probably need to use a spin loop with a check of the current time. This will consume rather a lot of power (and generate heat), but it's a fairly reliable and portable way to do it. Another idea is to try the ideas on this page: http://blog.regehr.org/archives/794
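A minimal sketch of that spin-wait approach, reusing CLOCK_MONOTONIC from the question (accuracy is still limited by scheduling and timer resolution):

#include <time.h>

// Busy-wait until the requested number of microseconds has elapsed.
static void spin_sleep_us(long us)
{
    struct timespec start, now;
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        clock_gettime(CLOCK_MONOTONIC, &now);
        long long elapsed_ns =
            (long long)(now.tv_sec - start.tv_sec) * 1000000000LL
            + (now.tv_nsec - start.tv_nsec);
        if (elapsed_ns >= (long long)us * 1000)
            break; // deadline reached
    }
}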
I have installed Eclipse+CDT and OpenCV with:
$ sudo apt-get install libcv1 libcv-dev libcvaux1 libcvaux-dev \
libhighgui1 libhighgui-dev \
opencv-doc \
python-opencv
After that I opened Eclipse and created a new C/C++ project.
Then I typed this code:
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cv.h>
#include <highgui.h>

int main(int argc, char *argv[])
{
    IplImage* img = 0;
    img = cvLoadImage("C:/.../Pictures/immagine.jpg"); // load the image
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);      // create the window
    cvShowImage("mainWin", img);                       // show the image
    cvWaitKey(0);                                      // wait for a key
    cvReleaseImage(&img);                              // release the image
    system("PAUSE");
    return 0;
}
The problem is that these errors are returned:
Unresolved inclusion: <cv.h>
Unresolved inclusion: <highgui.h>
But in my Eclipse workspace project I have these headers under /usr/include...
What may be wrong?
Thanks.
Open a terminal and execute:
pkg-config --cflags opencv
On my system it returns:
-I/usr/local/include/opencv -I/usr/local/include
Those are the directories you'll have to add in Eclipse to compile your application.
Or, you could try replacing your includes with:
#include <opencv/cv.h>
#include <opencv/highgui.h>
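For a quick test outside Eclipse, the same flags can be passed straight to the compiler on the command line, e.g.:

g++ main.cpp $(pkg-config --cflags --libs opencv) -o main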
I am looking for a complete i18n gettext() hello world example. I have started a script based upon "A tutorial on Native Language Support using GNU gettext" by G. Mohanty. I am using Linux and G++.
Code:
cat >hellogt.cxx <<EOF
// hellogt.cxx
#include <libintl.h>
#include <locale.h>
#include <iostream>
#include <cstdlib>
int main (){
    char* cwd = getenv("PWD");
    std::cout << "getenv(PWD): " << (cwd?cwd:"NULL") << std::endl;
    char* l = getenv("LANG");
    std::cout << "getenv(LANG): " << (l?l:"NULL") << std::endl;
    char* s = setlocale(LC_ALL, "");
    std::cout << "setlocale(): " << (s?s:"NULL") << std::endl;
    std::cout << "bindtextdomain(): " << bindtextdomain("hellogt", cwd) << std::endl;
    std::cout << "textdomain(): " << textdomain("hellogt") << std::endl;
    std::cout << gettext("hello, world!") << std::endl;
}
EOF
g++ -o hellogt hellogt.cxx
xgettext -d hellogt -o hellogt.pot hellogt.cxx
msginit --no-translator -l es_MX -o hellogt_spanish.po -i hellogt.pot
sed --in-place hellogt_spanish.po --expression='/#: /,$ s/""/"hola mundo"/'
sed --in-place hellogt_spanish.po --expression='s/PACKAGE VERSION/hellogt 1.0/'
mkdir -p ./es_MX/LC_MESSAGES
msgfmt -c -v -o ./es_MX/LC_MESSAGES/hellogt.mo hellogt_spanish.po
export LANG=es_MX
ls -l $PWD/es_MX/LC_MESSAGES/hellogt.mo
./hellogt
strace -e trace=open ./hellogt
The program compiles, the text is extracted, and the Spanish file is created, modified, and compiled to a binary catalog, but hellogt still displays English. The strace output shows no evidence of looking in the current working directory for es_MX, nor any references to an LC_MESSAGES directory.
Your problem is that hellogt.mo is in the wrong location - your program isn't actually opening it. You can tell this by using strace to trace open syscalls:
strace -e trace=open ./hellogt
...
open("/tmp/.//es_MX/LC_MESSAGES/hellogt.mo", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/tmp/.//es/LC_MESSAGES/hellogt.mo", O_RDONLY) = -1 ENOENT (No such file or directory)
You can affect where gettext looks for message catalogs with the LOCPATH environment variable, but if you move the catalog to where gettext is attempting to load it from, your example works:
mkdir -p es/LC_MESSAGES
cp hellogt.mo es/LC_MESSAGES
./hellogt
hola mundo
cat >hellogt.cxx <<EOF
// hellogt.cxx
#include <libintl.h>
#include <locale.h>
#include <iostream>
int main (){
    setlocale(LC_ALL, "");
    bindtextdomain("hellogt", ".");
    textdomain("hellogt");
    std::cout << gettext("hello, world!") << std::endl;
}
EOF
g++ -o hellogt hellogt.cxx
xgettext --package-name hellogt --package-version 1.2 --default-domain hellogt --output hellogt.pot hellogt.cxx
msginit --no-translator --locale es_MX --output-file hellogt_spanish.po --input hellogt.pot
sed --in-place hellogt_spanish.po --expression='/"hello, world!"/,/#: / s/""/"hola mundo"/'
mkdir --parents ./es_MX.utf8/LC_MESSAGES
msgfmt --check --verbose --output-file ./es_MX.utf8/LC_MESSAGES/hellogt.mo hellogt_spanish.po
LANGUAGE=es_MX.utf8 ./hellogt
Here is a description of the files created by the above:
hellogt.cxx         C++ source file
hellogt             Executable image
hellogt.pot         Extracted text from the C++ source file (portable object template)
hellogt_spanish.po  Modified text for Spanish with translations added (using sed)
es_MX.utf8/
  LC_MESSAGES/
    hellogt.mo      Binary translated text for Spanish, used at run-time
Here is a description of gettext from the Fedora Project. It is simple to follow, but it is in C.
http://fedoraproject.org/wiki/How_to_do_I18N_through_gettext