I'm a university student who needs to learn how to do ADC capture on the BeagleBone Black.
Everything went well at first: I can sample data from the ADC and even print a timestamp with each sample value. I then checked the sampling period with an oscilloscope, watching a wave toggled on GPIO P8_10 via the "BeagleBoneBlack-GPIO" library, and realized that the sampling period is not stable at all.
I assume I am supposed to use a timer interrupt on the BeagleBone Black, but my skills are not yet good enough to build that on my own.
So: how can I build a timer interrupt in C++ (driving the GPIO) so that the ADC is sampled with a steady, stable period such as 3 ms?
data below is which version I am using, the code, and the result right now also
-BeagleBone Black
-Debian GNU/LInux 8.11 (jessie)
-Linux 5.0.3-bone5
-ARMv7 Processor rev2 (v7l)
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include<iostream>
#include<fstream>
#include<string>
#include<sstream>
#include<unistd.h>
#include "GPIO/GPIOManager.h"
#include "GPIO/GPIOConst.h"
using namespace std;
#define LIN0_PATH "/sys/bus/iio/devices/iio:device0/in_voltage"
// Read one raw sample from ADC channel `number` via sysfs
// (/sys/bus/iio/devices/iio:device0/in_voltage<number>_raw).
// Returns the raw ADC count (12-bit on the BBB), or -1 if the sysfs
// file could not be opened or parsed (e.g. the ADC overlay is not
// loaded). The original silently returned the channel number itself on
// failure, which looked like a plausible — but wrong — sample value.
int readAnalog(int number){
    stringstream ss;
    ss << LIN0_PATH << number << "_raw";
    fstream fs;
    fs.open(ss.str().c_str(), fstream::in);
    if (!fs.is_open()) {
        return -1; // channel file missing/inaccessible
    }
    if (!(fs >> number)) {
        fs.close();
        return -1; // file opened but no integer could be read
    }
    fs.close();
    return number;
}
// Capture 100 ADC samples, toggling GPIO P8_10 around each read so the
// real sampling period can be observed on an oscilloscope, then print the
// samples (converted to volts) with a wall-clock timestamp of the start.
int main(int argc, char* argv[ ]){
    int i=0;
    GPIO::GPIOManager* gp = GPIO::GPIOManager::getInstance();
    int pin1 = GPIO::GPIOConst::getInstance()->getGpioByKey("P8_10");
    gp->setDirection(pin1, GPIO::OUTPUT);

    // Build a "dd/mm/yyyy hh:mm:ss:mmm" timestamp for the capture start.
    char buffer[26];
    int millisec;
    struct tm* tm_info;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    millisec = lrint(tv.tv_usec/1000.0); // Round to nearest millisec
    if (millisec>=1000) { // carry into the seconds field after rounding up
        millisec -=1000;
        tv.tv_sec++;
    }
    tm_info = localtime(&tv.tv_sec);
    strftime(buffer, 26, "%d/%m/%Y %H:%M:%S", tm_info);
    cout<<"print date and time"<<buffer<<":"<<millisec << endl;

    // BUG FIX: the original declared `float value[j]` INSIDE the loop — a
    // fresh, wrongly-sized array each iteration initialized from a scalar
    // (not valid C++) — and later printed the array name itself. Use one
    // array that outlives both loops and index it.
    float value[100];
    for (int j=0;j<100;j++){
        gp->setValue(pin1, GPIO::HIGH);      // scope marker: sample start
        value[j] = readAnalog(0)*(1.8/4096); // raw count -> volts (1.8 V ref, 12-bit)
        gp->setValue(pin1, GPIO::LOW);       // scope marker: sample end
        // NOTE(review): 300 us, not 3 ms — and a plain sleep after a variable-
        // length sysfs read cannot give a stable period; a periodic timer
        // (e.g. timerfd/clock_nanosleep on an absolute deadline) is needed.
        usleep(300);
    }
    for (int j=0;j<100;j++){
        cout << fixed;
        cout.precision(3);
        cout <<i<<";"<<value[i]<< endl; // print "index;volts"
        i++;
    }
    return 0;
}
And these are command to run the my file
g++ GPIO/GPIOConst.cpp GPIO/GPIOManager.cpp try.cpp
then
./a.out
and this is the result
print date and time10/04/2019 17:02:27:460
0;1.697
1;1.697
2;1.695
3;1.693
4;1.694
5;1.693
6;1.693
7;1.692
8;1.691
9;1.692
10;1.693
11;1.692
12;1.694
13;1.694
14;1.694
15;1.692
16;1.695
17;1.692
18;1.693
19;1.694
20;1.693
21;1.691
22;1.692
23;1.693
24;1.691
25;1.693
26;1.693
27;1.693
28;1.694
29;1.691
30;1.694
31;1.693
32;1.695
33;1.691
34;1.694
35;1.693
36;1.693
37;1.691
38;1.693
39;1.691
40;1.692
41;1.694
42;1.692
43;1.692
44;1.693
45;1.692
46;1.694
47;1.693
48;1.693
49;1.692
50;1.692
51;1.692
52;1.691
53;1.690
54;1.691
55;1.692
56;1.693
57;1.692
58;1.692
59;1.692
60;1.694
61;1.694
62;1.694
63;1.694
64;1.693
65;1.692
66;1.693
67;1.692
68;1.693
69;1.693
70;1.692
71;1.692
72;1.693
73;1.694
74;1.693
75;1.694
76;1.693
77;1.692
78;1.694
79;1.692
80;1.692
81;1.692
82;1.692
83;1.692
84;1.694
85;1.694
86;1.693
87;1.693
88;1.694
89;1.693
90;1.693
91;1.692
92;1.694
93;1.691
94;1.694
95;1.693
96;1.691
97;1.692
98;1.693
99;1.694
[and this is what i got from oscilloscope][1]
[1]: https://i.stack.imgur.com/FJSRe.jpg
It will be really great if there are anyone who would love to give me some advice. And If there are something concerning you guys. Please feel free to ask me.
Best Regard
Peeranut Noonurak
I wrote a simple C++ app using gdal and Magick++.
The program takes the tiff elaborated from Gdal and add it to a Magick gif.
The process is really slow and uses around 800 MB of RAM (with only 24 frames)... and takes 15 seconds.
I tried to optimize the input files according to this http://im.snibgo.com/spdsiz.htm but the process is still slow.
I'm working on part of a web service, so 15 seconds is really too much.
int main(int argc, char *argv[]) {
//timing for bench
high_resolution_clock::time_point t1 = high_resolution_clock::now();
char dirName[12];
char timestamp[22];
//takes the args and convert them to tm struct
tm startDate = toTime(std::stringstream(argv[1]));
tm endDate = toTime(std::stringstream(argv[2]));
//calc the time differenze
int diffHours = (int) std::difftime(timegm(&endDate), timegm(&startDate)) / 3600;
//register gdal driver and create the datasets
GDALAllRegister();
GDALDataset *originalDataset;
GDALDataset *newDataset;
//option for the apply color to the tif file and set the alpha
char *optionForDEM[] = {const_cast<char *>("-alpha"), nullptr};
GDALDEMProcessingOptions *options = GDALDEMProcessingOptionsNew(optionForDEM, nullptr);
//read the background and the "alert zones" (za)
Magick::Image background;
Magick::Image za;
background.read("/home/giovanni/CLionProjects/MappeIRPI-CNR/sfondo2.mpc");
za.read("/home/giovanni/CLionProjects/MappeIRPI-CNR/ZA.mpc");
//create a vector for create the gif
//i suspect that this method is really slow
std::vector<Magick::Image> frames;
int g;
time_t date;
for (int i = 0; i < diffHours; ++i) {
//start of gdal processing block
date = timegm(&startDate);
strftime(dirName, 12, DIR_FORMAT.c_str(), gmtime(&date));
fs::create_directory(fs::path(TEMP_PATH + dirName));
originalDataset = (GDALDataset *) GDALOpen((BASE_PATH + dirName + PREVISTE).c_str(), GA_ReadOnly);
newDataset = (GDALDataset *) GDALDEMProcessing((TEMP_PATH + dirName + PREVISTE).c_str(),
originalDataset,
"color-relief",
COLORI.c_str(), options, &g);
GDALClose(newDataset); //write the processed tif to ramdisk
//start of the Magick++ block
Magick::Image tif;
//read the block
tif.read(TEMP_PATH + dirName + PREVISTE);
tif.scale(Magick::Geometry(1083, 1166));
//add the background and the za
//i want to apply that to the final gif, not to every single photo
tif.composite(background, 0, 0, Magick::DstOverCompositeOp);
tif.composite(za, 0, 0, Magick::OverCompositeOp);
//options for annotate the frame
tif.font("/usr/share/fonts/OTF/SFMono-Bold.otf");
tif.fillColor("White");
tif.fontPointsize(37);
tif.boxColor("Black");
strftime(timestamp, 22, DATE_FORMAT.c_str(), gmtime(&date));
tif.annotate(timestamp, Magick::NorthEastGravity);
//add the frame to the vector add set the animation delay
frames.push_back(tif);
tif.animationDelay(3000);
startDate.tm_hour += 1;
}
//write the gif to disk, this takes a very long time
Magick::writeImages(frames.begin(), frames.end(), TEMP_PATH + "sss.gif");
GDALClose(originalDataset);
GDALDEMProcessingOptionsFree(options);
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<seconds>(t2 - t1).count();
std::cout << duration;
return 0;
}
This is the workflow:
get the tifs
do some work with gdal
write the files as png (any faster format?)
read the PNGs with .read
annotate the images with the timestamp
add the background and the ZA over the map *
add the frame to the image vector
write the vector
*I tried to do this operation after collecting all the images, to avoid doing it for each image, but I could not get it to work.
i tried to convert the background to a 2 colors png, and then to a miff file, and then to a mpc+cache one
the za.png uses alpha and 1 color, i did the same process
What are the fastest formats for the situation? I also need the gif to take as little as possible (it now occupies 5.4 MB !!)
As said the operation must be very fast so I can also consider other libraries simpler ... just fixed these basic problems will resort to multithread (any optimal method?) but from my tests the longest phase is the one that writes the gif on the disk (ramdisk above)
Sample of the result:
https://photos.app.goo.gl/6YVkqSMuoXwMYuE57
Google photo album:
https://photos.app.goo.gl/QDNCAK4i9PCGQW3VA
EDIT
Thanks for the quick comments
I'm on the latest Arch Linux, but the final program will run on a web server that is not that powerful:
Debian 9.5, 4 virtual cores and 8 GB of RAM (if necessary I can ask for an upgrade, but not by much).
It has to handle a minimum of 24 frames, but it could even reach 700...
the images initially take up 130KB but adding the arrival colors to 400KB
This before adding the background and dividing the zones (see the images in the google album)
i compile with G++ 8.2.1
this is the cmake file
# Build script for the MappeIrpi GIF generator (C++17, GDAL + Magick++, OpenMP).
cmake_minimum_required(VERSION 3.12)
project(MappeIrpi)
SET(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(SOURCE_FILES main.cpp vips.cpp)
# NOTE(review): putting the linker flag -lstdc++fs into CMAKE_CXX_FLAGS is
# unusual; link libraries normally go through target_link_libraries — confirm
# this links as intended.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp -lstdc++fs")
add_executable(MappeIrpi ${SOURCE_FILES})
# NOTE(review): "COMPONENTS REQUIRED" makes REQUIRED a component name here;
# plain FIND_PACKAGE(GDAL REQUIRED) is presumably what was meant — confirm.
FIND_PACKAGE(GDAL COMPONENTS REQUIRED)
# Magick++ headers must be compiled with the same quantum depth / HDRI
# settings the installed library was built with.
add_definitions(-DMAGICKCORE_QUANTUM_DEPTH=8)
add_definitions( -DMAGICKCORE_HDRI_ENABLE=0 )
add_definitions(-fopenmp -pthread)
find_package(ImageMagick COMPONENTS Magick++)
include_directories(${ImageMagick_INCLUDE_DIRS})
# NOTE(review): mixing ${ImageMagick_LIBRARIES} with a hard-coded
# libMagickCore-7.Q16HDRI path looks inconsistent with QUANTUM_DEPTH=8 /
# HDRI_ENABLE=0 above — confirm which ImageMagick build is actually targeted.
target_link_libraries(MappeIrpi ${ImageMagick_LIBRARIES} gdal stdc++fs "/usr/lib/libMagickCore-7.Q16HDRI.so.6")
Currently I have not set optimization flags, but even setting -O3 or -Ofast does not change anything
resouce limits:
$ identify -list resource
Resource limits:
Width: 107.374MP
Height: 107.374MP
List length: -1
Area: 33.4532GP
Memory: 15.5779GiB
Map: 31.1558GiB
Disk: unlimited
File: 15000
Thread: 8
Throttle: 0
Time: unlimited
I will be grateful to anyone who helps me
System information
OpenCV => 3.3.0
Operating System / Platform => Ubuntu 16.04, x86_64
Compiler => gcc version 5.4.1 20160904
Cuda => 8.0
Nvidia card => GTX 1080 Ti
ffmpeg details
libavutil 55. 74.100 / 55. 74.100
libavcodec 57.103.100 / 57.103.100
libavformat 57. 77.100 / 57. 77.100
libavdevice 57. 7.101 / 57. 7.101
libavfilter 6.100.100 / 6.100.100
libswscale 4. 7.103 / 4. 7.103
libswresample 2. 8.100 / 2. 8.100
Detailed description
i am trying to play a rtsp stream using cudacodec::VideoReader
Rtsp Stream Details ( from vlc )
this stream plays fine in vlc and cv::VideoCapture but when i try to play it in cudacodec::VideoReader i get a error saying:
OpenCV Error: Gpu API call (CUDA_ERROR_FILE_NOT_FOUND [Code = 301]) in CuvidVideoSource, file /home/deep/Development/libraries/opencv/opencv/modules/cudacodec/src/cuvid_video_source.cpp, line 66
OpenCV Error: Assertion failed (init_MediaStream_FFMPEG()) in FFmpegVideoSource, file /home/deep/Development/libraries/opencv/opencv/modules/cudacodec/src/ffmpeg_video_source.cpp, line 101
Steps to reproduce
#include <iostream>
#include "opencv2/opencv_modules.hpp"
// Only build the GPU path when OpenCV was compiled with the cudacodec module.
#if defined(HAVE_OPENCV_CUDACODEC)
#include <opencv2/core.hpp>
#include <opencv2/cudacodec.hpp>
#include <opencv2/highgui.hpp>
// Decode an RTSP stream on the GPU with cudacodec::VideoReader and display it.
int main(int argc, const char* argv[])
{
// NOTE(review): the '#' between the credentials and the host looks like it
// should be '@' (user:pass@host) — confirm the real URL.
const std::string fname = "rtsp://admin:admin#192.168.1.13/media/video2";
cv::namedWindow("GPU", cv::WINDOW_NORMAL);
cv::cuda::GpuMat d_frame;
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);
for (;;)
{
// nextFrame() returns false at end-of-stream or on a decode failure.
if (!d_reader->nextFrame(d_frame))
break;
cv::Mat frame;
d_frame.download(frame); // copy the decoded frame GPU -> CPU for display
cv::imshow("GPU", frame);
if (cv::waitKey(3) > 0) // any key closes the window
break;
}
return 0;
}
#else
// Fallback build when CUDA video decoding support is absent.
int main()
{
std::cout << "OpenCV was built without CUDA Video decoding support\n" << std::endl;
return 0;
}
#endif
I tried debugging it using GDB and saw that in ffmpeg_video_source.cpp bool init_MediaStream_FFMPEG() directly returns without checking the if condition.
GDB output
cv::cudacodec::detail::FFmpegVideoSource::FFmpegVideoSource
(this=0x402a20 <_start>, fname=...) at /home/deep/Development/libraries/opencv/opencv/modules/cudacodec/src/ffmpeg_video_source.cpp:98
98 cv::cudacodec::detail::FFmpegVideoSource::FFmpegVideoSource(const String& fname) :
(gdb) n
99 stream_(0)
(gdb) n
101 CV_Assert( init_MediaStream_FFMPEG() );
(gdb) s
(anonymous namespace)::init_MediaStream_FFMPEG () at /home/deep/Development/libraries/opencv/opencv/modules/cudacodec/src/ffmpeg_video_source.cpp:94
94 return initialized;
(gdb) display initialized
4: initialized = false
(gdb) s
95 }
UPDATE:
I have solved the problem. solution link
In the solution provided here the problem was related to the pixel format detected by ffmpeg.
In order to check your rtsp pixel format you can use ffprobe.
Then inside your cap_ffmpeg_impl.hpp you should add the case related to your pixel format like
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVJ420P:
*chroma_format = ::VideoChromaFormat_YUV420;
break;
And then rebuild opencv.
When I create a video with OpenCV's VideoWriter class, it outputs something like this in the terminal :
Output #0, avi, to 'video.avi':
Stream #0.0: Video: mpeg4, yuv420p, 512x384, q=2-31, 12582 kb/s, 90k tbn, 24 tbc
I'd like to disable this but I have no idea how to do this.
"Mute" the console for a while. Ref.
#include <iostream>
#include <fstream>
// Demonstrates silencing std::cout temporarily: swap its stream buffer for a
// file's buffer, then swap the original back. Only "A" and "C" reach the
// console; "B" ends up in the file "temp".
int main ( int argc, char** argv )
{
    // Keep a handle to cout's original buffer so it can be restored later.
    std::streambuf* const saved_buffer = std::cout.rdbuf();
    std::ofstream sink("temp");
    std::cout<<"A\n";
    // From here on, everything written to cout lands in the file instead.
    std::cout.rdbuf(sink.rdbuf());
    std::cout<<"B\n";              // goes to "temp", not the console
    std::cout.rdbuf(saved_buffer); // console output resumes
    std::cout<<"C\n";
    return 0;
}
Console output:
A
C
A bit of context; this program was built originally to work with USB cameras - but because of the setup between where the cameras needs to be and where the computer is it makes more sense to switch to cameras run over a network. Now I'm trying to convert the program to accomplish this, but my efforts thus far have met with poor results. I've also asked this same question over on the OpenCV forums. Help me spy on my neighbors! (This is with their permission, of course!) :D
I'm using:
OpenCV v2.4.6.0
C++
D-Link Cloud Camera 7100 (Installer is DCS-7010L, according to the instructions.)
I am trying to access the DLink camera's video feed through OpenCV.
I can access the camera through it's IP address with a browser without any issues. Unfourtunately; my program is less cooperative. When attempting to access the camera the program gives the OpenCV-generated error:
warning: Error opening file (../../modules/highgui/src/cap_ffmpeg_impl.hpp:529)
This error occurs with just about everything I try that doesn't somehow generate more problems.
For reference - the code in OpenCV's cap_ffmpeg_impl.hpp around line 529 is as follows:
522 bool CvCapture_FFMPEG::open( const char* _filename )
523 {
524 unsigned i;
525 bool valid = false;
526
527 close();
528
529 #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
530 int err = avformat_open_input(&ic, _filename, NULL, NULL);
531 #else
532 int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
533 #endif
...
616 }
...for which I have no idea what I'm looking at. It seems to be looking for the ffmpeg version - but I've already installed the latest ffmpeg on that computer, so that shouldn't be the issue.
This is the edited down version I tried to use as per Sebastian Schmitz's recommendation:
// NOTE(review): listing quoted with its original line numbers; the key
// problems are flagged on the relevant lines below.
1 #include <fstream> // File input/output
2 #include <iostream> // cout / cin / etc
3 #include <windows.h> // Windows API stuff
4 #include <stdio.h> // More input/output stuff
5 #include <string> // "Strings" of characters strung together to form words and stuff
6 #include <cstring> // "Strings" of characters strung together to form words and stuff
7 #include <streambuf> // For buffering load files
8 #include <array> // Functions for working with arrays
9 #include <opencv2/imgproc/imgproc.hpp> // Image Processor
10 #include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
11 #include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
12 #include "opencv2/calib3d/calib3d.hpp"
13 #include "opencv2/features2d/features2d.hpp"
14 #include "opencv2/opencv.hpp"
15 #include "resource.h" // Included for linking the .rc file
16 #include <conio.h> // For sleep()
17 #include <chrono> // To get start-time of program.
18 #include <algorithm> // For looking at whole sets.
19
20 #ifdef __BORLANDC__
21 #pragma argsused
22 #endif
23
24 using namespace std; // Standard operations. Needed for most basic functions.
25 using namespace std::chrono; // Chrono operations. Needed getting starting time of program.
26 using namespace cv; // OpenCV operations. Needed for most OpenCV functions.
27
28 string videoFeedAddress = "";
29 VideoCapture videoFeedIP = NULL; // NOTE(review): unused — displayCameraViewTest() opens its own local capture
30 Mat clickPointStorage; //Artifact from original program.
31
32 void displayCameraViewTest()
33 {
34 VideoCapture cv_cap_IP;
35 Mat color_img_IP;
36 int capture;
37 IplImage* color_img; // NOTE(review): never allocated, yet used as cvAdd's destination at line 57
38 cv_cap_IP.open(videoFeedAddress);
39 Sleep(100);
40 if(!cv_cap_IP.isOpened())
41 {
42 cout << "Video Error: Video input will not work.\n";
43 cvDestroyWindow("Camera View");
44 return;
45 }
46 clickPointStorage.create(color_img_IP.rows, color_img_IP.cols, CV_8UC3); // NOTE(review): color_img_IP is still empty (0x0) here — no frame has been read yet
47 clickPointStorage.setTo(Scalar(0, 0, 0));
48 cvNamedWindow("Camera View", 0); // create window
49 IplImage* IplClickPointStorage = new IplImage(clickPointStorage);
50 IplImage* Ipl_IP_Img;
51
52 for(;;)
53 {
54 cv_cap_IP.read(color_img_IP);
55 IplClickPointStorage = new IplImage(clickPointStorage); // NOTE(review): leaks — new header every frame, deleted only once after the loop
56 Ipl_IP_Img = new IplImage(color_img_IP); // NOTE(review): same per-frame leak as above
57 cvAdd(Ipl_IP_Img, IplClickPointStorage, color_img); // NOTE(review): color_img was never allocated — likely crash/UB here
58 cvShowImage("Camera View", color_img); // show frame
59 capture = cvWaitKey(10); // wait 10 ms or for key stroke
60 if(capture == 27 || capture == 13 || capture == 32){break;} // if ESC, Return, or space; close window.
61 }
62 cv_cap_IP.release();
63 delete Ipl_IP_Img;
64 delete IplClickPointStorage;
65 cvDestroyWindow("Camera View");
66 return;
67 }
68
69 int main()
70 {
71 while(1)
72 {
73 cout << "Please Enter Video-Feed Address: ";
74 cin >> videoFeedAddress;
75 if(videoFeedAddress == "exit"){return 0;}
76 cout << "\nvideoFeedAddress: " << videoFeedAddress << endl;
77 displayCameraViewTest();
78 if(cvWaitKey(10) == 27){return 0;}
79 }
80 return 0;
81 }
Using added 'cout's I was able to narrow it down to line 38: "cv_cap_IP.open(videoFeedAddress);"
No value I enter for the videoFeedAddress variable seems to get a different result. I found THIS site that lists a number of possible addresses to connect to it. Since there exists no 7100 anywhere in the list & considering that the install is labeled "DCS-7010L" I used the addresses found next to the DCS-7010L listings. When trying to access the camera most of them can be reached through the browser, confirming that they reach the camera - but they don't seem to affect the outcome when I use them in the videoFeedAddress variable.
I've tried many of them both with and without username:password, the port number (554), and variations on ?.mjpg (the format) at the end.
I searched around and came across a number of different "possible" answers - but none of them seem to work for me. Some of them did give me the idea for including the above username:password, etc stuff, but it doesn't seem to be making a difference. Of course, the number of possible combinations is certainly rather large- so I certainly have not tried all of them (more direction here would be appreciated). Here are some of the links I found:
This is one of the first configurations my code was in. No dice.
This one is talking about files - not cameras. It also mentions codecs - but I wouldn't be able to watch it in a web browser if that were the problem, right? (Correct me if I'm wrong here...)
This one has the wrong error code/points to the wrong line of code!
This one mentions compiling OpenCV with ffmpeg support - but I believe 2.4.6.0 already comes with that all set and ready! Otherwise it's not that different from what I've already tried.
Now THIS one appears to be very similar to what I have, but the only proposed solution doesn't really help as I had already located a list of connections. I do not believe this is a duplicate, because as per THIS meta discussion I had a lot more information and so didn't feel comfortable taking over someone else's question - especially if I end up needing to add even more information later.
Thank you for reading this far. I realize that I am asking a somewhat specific question - although I would appreciate any advice you can think of regarding OpenCV & network cameras or even related topics.
TLDR: Network Camera and OpenCV are not cooperating. I'm unsure if
it's the address I'm using to direct the program to the camera or the
command I'm using - but no adjustment I make seems to improve the
result beyond what I've already done! Now my neighbors will go unwatched!
There's a number of ways to get the video. ffmpeg is not the only way although it's most convenient. To diagnose if ffmpeg is capable of reading the stream, you should use the standalone ffmpeg/ffplay to try to open the url. If it can open directly, it may be some minor things like url formatting such as double slashes(rtsp://IPADDRESS:554/live1.sdp instead of rtsp://IPADDRESS:554//live1.sdp). If it cannot open it directly, it may need some extra commandline switches to make it work. Then you would need to modify opencv's ffmpeg implementation # line 529 to pass options to avformat_open_input. This may require quite bit of tweaking before you can get a working program.
You can also check if the camera provide a http mjpeg stream by consulting it's manual. I do not have the camera you are using. So I cannot be of much help on this.
Alternatively, I have two suggestions below, which might help you up and running relatively quickly since you mentioned that vlc is working.
method 1
i assume that you can at least open mjpeg url with your existing opencv/ffmpeg combination. since vlc is working, just use vlc to transcode the video into mjpeg like
vlc.exe --ignore-config -I dummy rtsp://admin:admin#10.10.204.111 --sout=#transcode"{vcodec=MJPG,vb=5000,scale=1,acodec=none}:std{access=http,mux=raw,dst=127.0.0.1:9080/frame.mjpg}"
after that use http://127.0.0.1:9080/frame.mjpg to grab the frame using opencv VideoCapture. this just requires that you have a transcoder program that can convert the incoming stream into mjpeg.
method 2
you can also directly use vlc api programmatically. the following piece of code use vlc to grab the frames. relevant info for compilation
C:\Program Files (x86)\VideoLAN\VLC\sdk\include
C:\Program Files (x86)\VideoLAN\VLC\sdk\lib
libvlc.lib,libvlccore.lib
code
#include "opencv2/highgui/highgui.hpp"
#include <windows.h>
#include <vlc/vlc.h>
using namespace cv;
// Shared state handed to the libvlc video callbacks (lock/unlock/display).
struct ctx
{
Mat* image;   // frame buffer libvlc renders into (RV24 / 3-channel BGR)
HANDLE mutex; // serializes access between the vlc decoder thread and display
uchar* pixels; // raw pointer into image->data, given to libvlc in lock()
};
// Global run flag; cleared by the unlock() callback when the user presses ESC.
bool isRunning=true;
// Briefly plays `path` with a throw-away libvlc player purely to discover the
// stream's native video size (libvlc only knows it once rendering starts).
// Falls back to 640x480 when no size could be obtained.
Size getsize(const char* path)
{
libvlc_instance_t *vlcInstance;
libvlc_media_player_t *mp;
libvlc_media_t *media;
const char * const vlc_args[] = {
"-R",
"-I", "dummy",
"--ignore-config",
"--quiet",
};
vlcInstance = libvlc_new(sizeof(vlc_args) / sizeof(vlc_args[0]), vlc_args);
media = libvlc_media_new_location(vlcInstance, path);
mp = libvlc_media_player_new_from_media(media);
libvlc_media_release(media); // the player keeps its own reference
libvlc_video_set_callbacks(mp, NULL, NULL, NULL, NULL); // probing only — no rendering callbacks needed
libvlc_video_set_format(mp, "RV24",100,100, 100 * 24 / 8); // pitch = width * BitsPerPixel / 8
libvlc_media_player_play(mp);
Sleep(2000);//wait a while so that something get rendered so that size info is available
unsigned int width=640,height=480;
libvlc_video_get_size(mp,0,&width,&height);
if(width==0 || height ==0)
{
// stream produced no size info within the wait — use a safe default
width=640;
height=480;
}
libvlc_media_player_stop(mp);
// NOTE(review): the instance is released before the player that belongs to
// it; libvlc examples release the player first — confirm this order is safe.
libvlc_release(vlcInstance);
libvlc_media_player_release(mp);
return Size(width,height);
}
// libvlc "lock" callback: vlc is about to decode a frame into our buffer.
// Acquire the mutex and hand vlc the pixel buffer to write into.
void *lock(void *data, void**p_pixels)
{
    struct ctx *context = (struct ctx *) data;
    WaitForSingleObject(context->mutex, INFINITE);
    *p_pixels = context->pixels;
    return NULL; // no per-picture identifier needed
}
// libvlc "display" callback: nothing to do — the frame is shown in unlock().
void display(void *data, void *id){
(void) data;
assert(id == NULL); // lock() never hands out picture identifiers
}
// libvlc "unlock" callback: a decoded frame now sits in ctx->pixels.
// Displays it and watches for ESC to stop the main loop.
void unlock(void *data, void *id, void *const *p_pixels)
{
struct ctx *ctx = (struct ctx*)data;
Mat frame = *ctx->image; // shallow copy: shares pixel data with ctx->image
if(frame.data)
{
// NOTE(review): imshow/waitKey run on the vlc decoder thread while the
// mutex taken in lock() is still held — HighGUI off the main thread and a
// long-held lock may both cause problems; confirm this is acceptable.
imshow("frame",frame);
if(waitKey(1)==27)
{
isRunning=false; // signal main() to shut down
//exit(0);
}
}
ReleaseMutex(ctx->mutex);
}
int main( )
{
string url="rtsp://admin:admin#10.10.204.111";
//vlc sdk does not know the video size until it is rendered, so need to play it a bit so that size is known
Size sz = getsize(url.c_str());
// VLC pointers
libvlc_instance_t *vlcInstance;
libvlc_media_player_t *mp;
libvlc_media_t *media;
const char * const vlc_args[] = {
"-R",
"-I", "dummy",
"--ignore-config",
"--quiet",
};
vlcInstance = libvlc_new(sizeof(vlc_args) / sizeof(vlc_args[0]), vlc_args);
media = libvlc_media_new_location(vlcInstance, url.c_str());
mp = libvlc_media_player_new_from_media(media);
libvlc_media_release(media);
struct ctx* context = ( struct ctx* )malloc( sizeof( *context ) );
context->mutex = CreateMutex(NULL, FALSE, NULL);
context->image = new Mat(sz.height, sz.width, CV_8UC3);
context->pixels = (unsigned char *)context->image->data;
libvlc_video_set_callbacks(mp, lock, unlock, display, context);
libvlc_video_set_format(mp, "RV24", sz.width, sz.height, sz.width * 24 / 8); // pitch = width * BitsPerPixel / 8
libvlc_media_player_play(mp);
while(isRunning)
{
Sleep(1);
}
libvlc_media_player_stop(mp);
libvlc_release(vlcInstance);
libvlc_media_player_release(mp);
free(context);
return 0;
}