I have a very annoying OpenCV error that I can't understand or handle.
I am writing an application which reads an MJPEG stream from an IP camera and processes it, but when I try to load an image from the stream, I sometimes get
[mjpeg # 0000000000428480] overread 8
error, and I don't know why.
Even if I try to skip the issue and load the next frame from the stream, the application gets stuck on
frameStatus = cameraHandler->read(mat);
This is the code that establishes the connection:
void ImageProcessor::connectWithCamera(VideoCapture * cameraHandler) {
    if (cameraHandler != nullptr) {
        Logger::log("Closing existing camera stream.");
        cameraHandler->release();
        delete cameraHandler;
    }

    Logger::log("Camera configuration and connection establishing.");
    cameraHandler = new VideoCapture();
    cameraHandler->set(CV_CAP_PROP_FRAME_WIDTH, config.RESOLUTION_WIDTH);
    cameraHandler->set(CV_CAP_PROP_FRAME_HEIGHT, config.RESOLUTION_HEIGHT);
    cameraHandler->set(CV_CAP_PROP_FPS, config.CAMERA_FPS);
    cameraHandler->set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));

    while (!cameraHandler->open(config.LINK)) {
        Logger::log("Cannot connect to camera! Trying again.");
    }
}
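(Side note: as written, connectWithCamera takes the pointer by value, so the new VideoCapture never reaches the caller, which is left holding a pointer to the deleted object. A minimal sketch of a signature that avoids this:)

void ImageProcessor::connectWithCamera(VideoCapture *& cameraHandler) {
    // Taking the pointer by reference: after the delete + new below,
    // the caller's pointer is updated instead of left dangling.
    if (cameraHandler != nullptr) {
        cameraHandler->release();
        delete cameraHandler;
        cameraHandler = nullptr;
    }
    cameraHandler = new VideoCapture();
    // ... same property setup and open() retry loop as above ...
}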
And this is the code for capturing images:
void ImageProcessor::start() {
    VideoCapture * cameraHandler = new VideoCapture();
    this->connectWithCamera(cameraHandler);
    this->connectWithServer(this->serverConnection);

    Logger::log("Id sending.");
    serverConnection->send(config.TOKEN + "\n");

    Logger::log("Computations starting.");
    Mat mat;
    Result * result = nullptr;
    int delta = 1000 / cameraHandler->get(CV_CAP_PROP_FPS);
    char frameErrorCounter = 0;
    bool frameStatus;

    while (true) {
        frameStatus = false;
        cv::waitKey(delta);

        try {
            frameStatus = cameraHandler->read(mat);
        } catch (std::exception& e) {
            std::string message = e.what();
            Logger::log("Critical camera error! : " + message);
        }

        if (!frameStatus) {
            Logger::log("Cannot read a frame from source. ");
            ++frameErrorCounter;
            if (!cameraHandler->isOpened() || frameErrorCounter >= this->GET_FRAME_ERROR_COUNTER) {
                Logger::log("Probably camera is disconnected. Trying to establish connection again.");
                frameErrorCounter = 0;
                this->connectWithCamera(cameraHandler);
                Logger::log("Computations starting.");
            }
            continue;
        }

        result = processImage(mat);
        std::string stringResult;
        if (result == nullptr) {
            stringResult = this->NO_RESULT;
            delete result;
            result = nullptr;
        } else {
            stringResult = result->toJson();
        }

        if (!serverConnection->send(stringResult)) {
            Logger::log("Server connection lost, trying to establish it again.");
            serverConnection->close();
            while (!serverConnection->isOpen()) {
                this->connectWithServer(serverConnection);
            }
        }

        mat.release();
    }
}
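One direction I am considering, in case it helps others: OpenCV 4.2+ with the FFmpeg backend exposes open/read timeouts, so a stalled stream makes read() return false instead of blocking forever. A minimal untested sketch (the 5-second values are arbitrary choices of mine):

#include <opencv2/videoio.hpp>

// Sketch: let the FFmpeg backend time out instead of hanging in read().
cv::VideoCapture cap;
cap.open(config.LINK, cv::CAP_FFMPEG, {
    cv::CAP_PROP_OPEN_TIMEOUT_MSEC, 5000,  // give up connecting after 5 s
    cv::CAP_PROP_READ_TIMEOUT_MSEC, 5000   // give up on a single frame after 5 s
});
// On timeout, read() now returns false, so the existing
// frameErrorCounter / reconnect logic above can take over.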
Thanks in advance!
I need to decode some H.264 frames into a raw format (YUV420).
I receive packets which contain the frames over a custom protocol.
How can I pass the received H.264 frames into the GStreamermm API to decode them?
At the moment I am reading the tutorials (unfortunately they cover GST, the C version of the API), but I can't find up-to-date GStreamermm API documentation.
Please point me to any documents or examples of how to do this.
I was able to implement the data transfer via a pipeline and retrieve the decoded raw video frames using the C++ version. Here is a rough example:
struct WebPipeline {
    Glib::RefPtr<Gst::AppSrc> appsrc;
    Glib::RefPtr<Gst::AppSink> appdst;
    Glib::RefPtr<Gst::Element> h264parser;
    Glib::RefPtr<Gst::Element> avdec_h264;
    Glib::RefPtr<Gst::Element> jpegenc;
    Glib::RefPtr<Gst::Pipeline> pipe;
    bool accepts_data {false};
};
WebPipePtr ExampleClass::CreatePipeline() {
    auto web_pipe = std::make_shared<WebPipeline>();

    web_pipe->appsrc = Gst::AppSrc::create("web_appsrc");
    if (!web_pipe->appsrc) {
        throw std::runtime_error("Can't create AppSrc");
    }

    web_pipe->appdst = Gst::AppSink::create("web_appdst");
    if (!web_pipe->appdst) {
        throw std::runtime_error("Can't create AppSink");
    }

    web_pipe->h264parser = Gst::ElementFactory::create_element("h264parse", "h264_parser");
    if (!web_pipe->h264parser) {
        throw std::runtime_error("Can't create h264parse");
    }

    web_pipe->avdec_h264 = Gst::ElementFactory::create_element("avdec_h264", "avdec264");
    if (!web_pipe->avdec_h264) {
        throw std::runtime_error("Can't create avdec_h264");
    }

    web_pipe->jpegenc = Gst::ElementFactory::create_element("jpegenc");
    if (!web_pipe->jpegenc) {
        throw std::runtime_error("Can't create jpegenc");
    }

    web_pipe->pipe = Gst::Pipeline::create("websocket_pipe");
    if (!web_pipe->pipe) {
        throw std::runtime_error("Can't create pipeline");
    }

    web_pipe->appdst->property_emit_signals() = true;
    web_pipe->appdst->set_sync(false);
    web_pipe->appdst->signal_new_sample().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::PullFromPipe), web_pipe->appdst));

    web_pipe->appsrc->property_emit_signals() = true;
    web_pipe->appsrc->signal_need_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::EnableAcceptance), web_pipe));
    web_pipe->appsrc->signal_enough_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::DisableAcceptance), web_pipe));

    web_pipe->pipe->add(web_pipe->appsrc)->add(web_pipe->h264parser)->add(web_pipe->avdec_h264)->add(web_pipe->jpegenc)->add(web_pipe->appdst);
    web_pipe->appsrc->link(web_pipe->h264parser)->link(web_pipe->avdec_h264)->link(web_pipe->jpegenc)->link(web_pipe->appdst);

    web_pipe->pipe->set_state(Gst::STATE_PLAYING);
    return web_pipe;
}
void ExampleClass::EnableAcceptance(guint, WebPipePtr pipe) {
    if (!pipe->accepts_data) {
        BOOST_LOG_SEV(GetLogger(), log::info) << "Begin push frames";
        pipe->accepts_data = true;
    }
}

void ExampleClass::DisableAcceptance(WebPipePtr pipe) {
    if (pipe->accepts_data) {
        BOOST_LOG_SEV(GetLogger(), log::info) << "Begin drop frames";
        pipe->accepts_data = false;
    }
}
Gst::FlowReturn ExampleClass::PushToPipe(WebPipePtr pipe, std::vector<uint8_t>&& frames) {
    if (!pipe->accepts_data) {
        return Gst::FLOW_CUSTOM_ERROR;
    }

    // Wrap the frame bytes without copying; the destructor callback
    // releases the underlying wrapper once GStreamer is done with it.
    GstBuffer* c_buffer = gst_buffer_new_wrapped_full(static_cast<GstMemoryFlags>(GST_MEMORY_FLAG_READONLY | GST_MEMORY_FLAG_PHYSICALLY_CONTIGUOUS),
                                                      const_cast<uint8_t*>(frames.data()),
                                                      frames.size(),
                                                      0,
                                                      frames.size(),
                                                      reinterpret_cast<gpointer>(frame_ref), // inner implementation of some sort of wrapper
                                                      destroy);                              // lambda destructor

    auto buffer = Glib::wrap(c_buffer); // wrap the C buffer so gstreamermm can manage it
    buffer->set_pts(time);              // timestamp taken from the custom protocol
    return pipe->appsrc->push_buffer(buffer);
}
Gst::FlowReturn ExampleClass::PullFromPipe(const Glib::RefPtr<Gst::AppSink>& appsink) {
    auto sample = appsink->pull_sample();
    if (!sample) {
        return Gst::FLOW_ERROR;
    }
    if (appsink->property_eos()) {
        return Gst::FLOW_EOS;
    }

    Gst::ClockTime timestamp = 0;
    {
        auto buffer = sample->get_buffer();
        if (!buffer) {
            throw std::runtime_error("Can't get buffer from sample");
        }
        timestamp = buffer->get_pts();
    }

    // process sample...
    return Gst::FLOW_OK;
}
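To actually read the decoded bytes at the "// process sample..." point, the buffer can be mapped. A minimal sketch (method names as in gstreamermm 1.0; double-check them against your version):

// Sketch: map the sample's buffer read-only and access the raw bytes
// (after jpegenc these are JPEG-encoded; without it, raw video frames).
auto buffer = sample->get_buffer();
Gst::MapInfo map_info;
if (buffer->map(map_info, Gst::MAP_READ)) {
    const guint8* data = map_info.get_data();
    gsize size = map_info.get_size();
    // ... copy or parse data[0 .. size) here ...
    buffer->unmap(map_info);
}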
I connected an Azure Kinect camera to my system, and from code I can grab an IR image and a depth image, but the color image is not working. I have compiled the SDK myself, and when running k4aviewer.exe I get the same thing: the IR and depth cameras work, the color camera is just empty. The errors I am getting:
Failed to start microphone: unable to open device!
Failed to start microphone listener: unable to open device!
I then installed the official SDK; in that k4aviewer I get both the IR and the color camera. But when compiling against that lib and DLL, I still get nothing. What might cause this issue in the first place? I can't be totally off, as I do get the depth data.
main.cpp:
#include "azure_camera.h"
#include <opencv2/opencv.hpp>
int main() {
int count = global::getCameraCount();
AzureKinect cam (0);
cam.connectCamera();
k4a_device_configuration_t config;// = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
config.camera_fps = K4A_FRAMES_PER_SECOND_30;
config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
config.depth_delay_off_color_usec = 0;
config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
config.disable_streaming_indicator = false;
config.subordinate_delay_off_master_usec = 0;
config.synchronized_images_only = true;
config.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE;
cam.startCamera(config);
AzureKinect::Images m_images;
cam.grab_images(false, m_images);
{
cv::imshow("Test Color", m_images.color);
cv::imshow("Test Depth", m_images.depth);
cv::waitKey(0);
}
cam.stopCamera();
return 0;
}
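(Side note: with the K4A_DEVICE_CONFIG_INIT_DISABLE_ALL initializer commented out, config starts with indeterminate values; every field happens to be assigned here, but starting from the SDK's template is the safer pattern:)

// Safer: start from the all-disabled template so any field that is
// not explicitly assigned still has a defined value.
k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
config.camera_fps = K4A_FRAMES_PER_SECOND_30;
config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
config.synchronized_images_only = true;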
AzureCamera.cpp:
#include "azure_camera.h"
AzureKinect::~AzureKinect() {
device.close();
}
bool AzureKinect::connectCamera()
{
try {
device = k4a::device::open(index);
}
catch (...) {
return false;
}
return true;
}
bool AzureKinect::startCamera(k4a_device_configuration_t _config)
{
config = _config;
try {
device.start_cameras(&config);
}
catch(const k4a::error& e) {
printf("Error occurred: %s", e.what());
return false;
}
return true;
}
bool AzureKinect::stopCamera()
{
device.stop_cameras();
return true;
}
bool AzureKinect::grab_images(bool rectified, AzureKinect::Images& images)
{
if (device.get_capture(&capture, std::chrono::milliseconds(1000)))
{
colorImage = capture.get_color_image();
depthImage = capture.get_depth_image();
}
else
{
return false;
}
if (images.color.cols != colorImage.get_width_pixels() || images.color.cols != colorImage.get_height_pixels())
{
images.color = cv::Mat(colorImage.get_height_pixels(), colorImage.get_width_pixels(), CV_8UC4);
}
if (images.depth.cols != depthImage.get_width_pixels() || images.depth.cols != depthImage.get_height_pixels())
{
images.depth = cv::Mat(depthImage.get_height_pixels(), depthImage.get_width_pixels(), CV_16UC1);
}
std::memcpy(images.color.data, colorImage.get_buffer(), colorImage.get_size());
std::memcpy(images.depth.data, depthImage.get_buffer(), depthImage.get_size());
colorImage.reset();
depthImage.reset();
capture.reset();
return true;
}
cv::Mat AzureKinect::get_calibration()
{
return cv::Mat();
}
uint32_t global::getCameraCount()
{
return k4a_device_get_installed_count();
}
AzureCamera.h:
#include <k4a/k4a.hpp>
#include <opencv2/core.hpp>
#include <stdint.h>

class AzureKinect {
public:
    struct Images {
        cv::Mat color;
        cv::Mat depth;
    };

    AzureKinect(int id) : index(id), colorImage(nullptr), depthImage(nullptr) { }
    ~AzureKinect();

    bool connectCamera();
    bool startCamera(k4a_device_configuration_t _config);
    bool stopCamera();
    bool grab_images(bool rectified, AzureKinect::Images& images);
    cv::Mat get_calibration();

private:
    uint32_t index;
    k4a::device device;
    k4a::capture capture;
    k4a_device_configuration_t config;
    k4a::image colorImage;
    k4a::image depthImage;
};

namespace global {
    uint32_t getCameraCount();
}
Note: I found something very similar at https://github.com/microsoft/Azure-Kinect-Sensor-SDK/issues/1237 , but I need it to work on this system. How can I debug this?
The timeout might be too short for the first capture. You are also not checking the return value of AzureKinect::grab_images(). What is reported in the error log?
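For instance, a minimal sketch of checking that return value and retrying the first capture (the retry count is arbitrary; note that with synchronized_images_only = true, a capture is only delivered once a color image is present, so a dead color stream shows up here as get_capture timeouts):

// Sketch: surface grab_images() failures instead of showing stale Mats.
AzureKinect::Images m_images;
bool ok = false;
for (int attempt = 0; attempt < 5 && !ok; ++attempt) {
    ok = cam.grab_images(false, m_images); // retries give the color stream time to start
}
if (!ok) {
    printf("No synchronized capture arrived; the color stream may be down.\n");
    return 1;
}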
I want to extract album art from MP3 files, so I use the TagLib library.
Extracting the title and artist succeeds, but when I try to extract the album art, an error occurs.
void MusicContainer::getAlbumArt(const char* path) {
    static const char *IdPicture = "APIC";
    TagLib::MPEG::File mpegFile(path);
    TagLib::ID3v2::Tag *id3v2tag = mpegFile.ID3v2Tag();
    TagLib::ID3v2::FrameList Frame;
    TagLib::ID3v2::AttachedPictureFrame *PicFrame;
    void *RetImage = NULL, *SrcImage;
    unsigned long Size;

    FILE *jpegFile;
    errno_t err = fopen_s(&jpegFile, "d:\\FromId3.jpg", "wb");

    if (id3v2tag)
    {
        // picture frame
        Frame = id3v2tag->frameListMap()[IdPicture]; // error occurs on this line.
        if (!Frame.isEmpty())
        {
            for (TagLib::ID3v2::FrameList::ConstIterator it = Frame.begin(); it != Frame.end(); ++it)
            {
                PicFrame = (TagLib::ID3v2::AttachedPictureFrame *)(*it);
                // if (PicFrame->type() == TagLib::ID3v2::AttachedPictureFrame::FrontCover)
                {
                    // extract image (in its compressed form)
                    Size = PicFrame->picture().size();
                    SrcImage = malloc(Size);
                    if (SrcImage)
                    {
                        memcpy(SrcImage, PicFrame->picture().data(), Size);
                        fwrite(SrcImage, Size, 1, jpegFile);
                        fclose(jpegFile);
                        free(SrcImage);
                    }
                }
            }
        }
    }
    else
    {
        cout << "id3v2 not present";
    }
}
Error message:
Exception thrown: write access violation.
_Parent_proxy was 0x10011EE0.
If there is a handler for this exception, the program may be safely continued.
How can I fix this?
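For comparison, a sketch of the same extraction with the obvious checks added (frameList("APIC") instead of the map lookup, and the file handle verified before writing). If the crash persists at the frame lookup, check that the application and the TagLib binary are built with the same configuration (both Debug or both Release); _Parent_proxy access violations often come from mixing MSVC debug-iterator settings across binaries.

// Sketch: defensive version of the album-art extraction.
void MusicContainer::getAlbumArt(const char* path) {
    TagLib::MPEG::File mpegFile(path);
    if (!mpegFile.isValid())
        return;                              // unreadable / not an MPEG file

    TagLib::ID3v2::Tag* tag = mpegFile.ID3v2Tag();
    if (!tag)
        return;                              // no ID3v2 tag at all

    const TagLib::ID3v2::FrameList& frames = tag->frameList("APIC");
    if (frames.isEmpty())
        return;                              // no attached pictures

    auto* picFrame = static_cast<TagLib::ID3v2::AttachedPictureFrame*>(frames.front());
    TagLib::ByteVector picture = picFrame->picture();

    FILE* jpegFile = nullptr;
    if (fopen_s(&jpegFile, "d:\\FromId3.jpg", "wb") == 0 && jpegFile) {
        fwrite(picture.data(), picture.size(), 1, jpegFile);
        fclose(jpegFile);
    }
}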
I made a console application which detects plug-in and plug-out events for all types of USB devices, but I wanted to filter the events so that only webcams are detected. This was done using the device class GUID. The class for webcams is the 'Image' class with GUID {6bdd1fc5-810f-11d0-bec7-08002be2092f}. The problem is that this 'Image' class is also used for scanners, and I don't want to detect scanners. The code is given below:
static void Main(string[] args)
{
    WqlEventQuery weqQuery = new WqlEventQuery();
    weqQuery.EventClassName = "__InstanceOperationEvent";
    weqQuery.WithinInterval = new TimeSpan(0, 0, 3);
    weqQuery.Condition = @"TargetInstance ISA 'Win32_PnPEntity'";

    ManagementEventWatcher m_mewWatcher = new ManagementEventWatcher(weqQuery);
    m_mewWatcher.EventArrived += new EventArrivedEventHandler(m_mewWatcher_EventArrived);
    m_mewWatcher.Start();

    Console.ReadLine();
}

static void m_mewWatcher_EventArrived(object sender, EventArrivedEventArgs e)
{
    bool bUSBEvent = false;
    string deviceCaption = "";
    string deviceType = "";

    foreach (PropertyData pdData in e.NewEvent.Properties)
    {
        try
        {
            ManagementBaseObject mbo = (ManagementBaseObject)pdData.Value;
            if (mbo != null)
            {
                foreach (PropertyData pdDataSub in mbo.Properties)
                {
                    Console.WriteLine(pdDataSub.Name + " = " + pdDataSub.Value);
                    if (pdDataSub.Name == "Caption")
                    {
                        deviceCaption = pdDataSub.Value.ToString();
                    }
                    if (pdDataSub.Name == "ClassGuid" && pdDataSub.Value.ToString() == "{6bdd1fc5-810f-11d0-bec7-08002be2092f}")
                    {
                        bUSBEvent = true;
                        deviceType = "Image";
                    }
                }

                if (bUSBEvent)
                {
                    if (e.NewEvent.ClassPath.ClassName == "__InstanceCreationEvent")
                    {
                        Console.WriteLine("A " + deviceType + " device " + deviceCaption + " was plugged in at " + DateTime.Now.ToString());
                    }
                    else if (e.NewEvent.ClassPath.ClassName == "__InstanceDeletionEvent")
                    {
                        Console.WriteLine("A " + deviceType + " device " + deviceCaption + " was plugged out at " + DateTime.Now.ToString());
                    }
                }
            }
        }
        catch (Exception ex)
        {
        }
    }
}
For reference, check this link.
I waited, but nobody answered this question, so after looking at all the properties of ManagementBaseObject I found that there is a property named Service which is different for scanners: for scanners the value of the Service property is usbscan, while for cameras it is usbvideo.
E.g., you can do something like this:
if (mbo.Properties["Service"].Value.ToString() == "usbscan")
{
    // then it means it is a scanner
}
else
{
    // then it means it is a camera
}
Note: the main question was how to differentiate between a scanner and a webcam, because they both use the same GUID.
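Integrated into the event handler above, the check could look like this (a sketch; I have not verified that every device populates the Service property, so the surrounding try/catch still matters):

// Sketch: combine the ClassGuid filter with the Service property so
// scanners (usbscan) are ignored and only webcams (usbvideo) match.
if (pdDataSub.Name == "ClassGuid" &&
    pdDataSub.Value.ToString() == "{6bdd1fc5-810f-11d0-bec7-08002be2092f}")
{
    string service = mbo.Properties["Service"]?.Value?.ToString();
    if (service == "usbvideo") // "usbscan" would be a scanner
    {
        bUSBEvent = true;
        deviceType = "Image";
    }
}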
I have developed a TTS engine in .NET. Now I want to expose it over the web.
I have used base64 string encoding to transfer the WAV data, but it is slow when I pass longer text.
Now I'm considering building MP3 streaming (maybe with NAudio), where I will convert the WAV-formatted MemoryStream into an MP3 stream and pass it to the client. Does anyone have experience with this?
Does anyone have experience converting a WAV MemoryStream to an MP3 MemoryStream with NAudio?
public class MP3StreamingPanel2 : UserControl
{
    enum StreamingPlaybackState
    {
        Stopped,
        Playing,
        Buffering,
        Paused
    }

    private BufferedWaveProvider bufferedWaveProvider;
    private IWavePlayer waveOut;
    private volatile StreamingPlaybackState playbackState;
    private volatile bool fullyDownloaded;
    private HttpWebRequest webRequest;

    public void StreamMP32(string url)
    {
        Configuration config = ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);
        SettingsSection section = (SettingsSection)config.GetSection("system.net/settings");
        section.HttpWebRequest.UseUnsafeHeaderParsing = true;
        config.Save();

        this.fullyDownloaded = false;
        webRequest = (HttpWebRequest)WebRequest.Create(url);

        int metaInt = 0; // blocksize of mp3 data
        webRequest.Headers.Clear();
        webRequest.Headers.Add("GET", "/ HTTP/1.0");
        // needed to receive metadata information
        webRequest.Headers.Add("Icy-MetaData", "1");
        webRequest.UserAgent = "WinampMPEG/5.09";

        HttpWebResponse resp = null;
        try
        {
            resp = (HttpWebResponse)webRequest.GetResponse();
        }
        catch (WebException e)
        {
            if (e.Status != WebExceptionStatus.RequestCanceled)
            {
                //ShowError(e.Message);
            }
            return;
        }

        byte[] buffer = new byte[16384 * 4]; // needs to be big enough to hold a decompressed frame

        try
        {
            // read blocksize to find metadata block
            metaInt = Convert.ToInt32(resp.GetResponseHeader("icy-metaint"));
        }
        catch
        {
        }

        IMp3FrameDecompressor decompressor = null;
        try
        {
            using (var responseStream = resp.GetResponseStream())
            {
                var readFullyStream = new ReadFullyStream(responseStream);
                readFullyStream.metaInt = metaInt;
                do
                {
                    if (bufferedWaveProvider != null && bufferedWaveProvider.BufferLength - bufferedWaveProvider.BufferedBytes < bufferedWaveProvider.WaveFormat.AverageBytesPerSecond / 4)
                    {
                        Debug.WriteLine("Buffer getting full, taking a break");
                        Thread.Sleep(500);
                    }
                    else
                    {
                        Mp3Frame frame = null;
                        try
                        {
                            frame = Mp3Frame.LoadFromStream(readFullyStream, true);
                        }
                        catch (EndOfStreamException)
                        {
                            this.fullyDownloaded = true;
                            // reached the end of the MP3 file / stream
                            break;
                        }
                        catch (WebException)
                        {
                            // probably we have aborted download from the GUI thread
                            break;
                        }

                        if (decompressor == null)
                        {
                            // don't think these details matter too much - just help ACM select the right codec
                            // however, the buffered provider doesn't know what sample rate it is working at
                            // until we have a frame
                            WaveFormat waveFormat = new Mp3WaveFormat(frame.SampleRate, frame.ChannelMode == ChannelMode.Mono ? 1 : 2, frame.FrameLength, frame.BitRate);
                            decompressor = new AcmMp3FrameDecompressor(waveFormat);
                            this.bufferedWaveProvider = new BufferedWaveProvider(decompressor.OutputFormat);
                            this.bufferedWaveProvider.BufferDuration = TimeSpan.FromSeconds(20); // allow us to get well ahead of ourselves
                            //this.bufferedWaveProvider.BufferedDuration = 250;
                        }

                        int decompressed = decompressor.DecompressFrame(frame, buffer, 0);
                        //Debug.WriteLine(String.Format("Decompressed a frame {0}", decompressed));
                        bufferedWaveProvider.AddSamples(buffer, 0, decompressed);
                    }
                } while (playbackState != StreamingPlaybackState.Stopped);

                Debug.WriteLine("Exiting");
                // was doing this in a finally block, but for some reason
                // we are hanging on response stream .Dispose so never get there
                decompressor.Dispose();
            }
        }
        finally
        {
            if (decompressor != null)
            {
                decompressor.Dispose();
            }
        }
    }
}
NAudio does not include an MP3 encoder. When I need to encode MP3 I use lame.exe. If you don't want to go via a file, lame.exe allows you to read from stdin and write to stdout, so if you redirect standard in and out on the process you can convert on the fly.
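A minimal sketch of that on-the-fly approach (untested; assumes lame.exe is on the PATH, and "-" tells lame to use stdin/stdout; check the flags against your lame version):

using System.Diagnostics;
using System.IO;

public static class Mp3Encoder
{
    // Sketch: pipe WAV bytes through lame.exe and collect the MP3 output.
    public static byte[] WavToMp3(byte[] wavBytes)
    {
        var psi = new ProcessStartInfo("lame.exe", "--quiet - -")
        {
            RedirectStandardInput = true,
            RedirectStandardOutput = true,
            UseShellExecute = false,
            CreateNoWindow = true
        };

        using (var lame = Process.Start(psi))
        using (var mp3 = new MemoryStream())
        {
            // Drain stdout concurrently so the pipes can't deadlock on long input.
            var pump = lame.StandardOutput.BaseStream.CopyToAsync(mp3);
            lame.StandardInput.BaseStream.Write(wavBytes, 0, wavBytes.Length);
            lame.StandardInput.Close(); // signal end of input
            pump.Wait();
            lame.WaitForExit();
            return mp3.ToArray();
        }
    }
}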