I have developed a TTS engine in .NET. Now I want to expose it over the web.
I have used base64 string encoding to transfer the WAV data, but it is slow when I pass longer text.
Now I'm considering building some MP3 streaming (maybe with NAudio), where I would convert the WAV-formatted MemoryStream into an MP3 stream and pass it to the client. Does anyone have experience with this?
Specifically, does anyone know how to convert a WAV MemoryStream to an MP3 MemoryStream with NAudio?
public class MP3StreamingPanel2 : UserControl
{
    enum StreamingPlaybackState
    {
        Stopped,
        Playing,
        Buffering,
        Paused
    }

    private BufferedWaveProvider bufferedWaveProvider;
    private IWavePlayer waveOut;
    private volatile StreamingPlaybackState playbackState;
    private volatile bool fullyDownloaded;
    private HttpWebRequest webRequest;

    public void StreamMP32(string url)
    {
        Configuration config = ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);
        SettingsSection section = (SettingsSection)config.GetSection("system.net/settings");
        section.HttpWebRequest.UseUnsafeHeaderParsing = true;
        config.Save();

        this.fullyDownloaded = false;
        webRequest = (HttpWebRequest)WebRequest.Create(url);
        int metaInt = 0; // blocksize of mp3 data
        webRequest.Headers.Clear();
        webRequest.Headers.Add("GET", "/ HTTP/1.0");
        // needed to receive metadata information
        webRequest.Headers.Add("Icy-MetaData", "1");
        webRequest.UserAgent = "WinampMPEG/5.09";

        HttpWebResponse resp = null;
        try
        {
            resp = (HttpWebResponse)webRequest.GetResponse();
        }
        catch (WebException e)
        {
            if (e.Status != WebExceptionStatus.RequestCanceled)
            {
                //ShowError(e.Message);
            }
            return;
        }

        byte[] buffer = new byte[16384 * 4]; // needs to be big enough to hold a decompressed frame
        try
        {
            // read blocksize to find metadata block
            metaInt = Convert.ToInt32(resp.GetResponseHeader("icy-metaint"));
        }
        catch
        {
        }

        IMp3FrameDecompressor decompressor = null;
        try
        {
            using (var responseStream = resp.GetResponseStream())
            {
                var readFullyStream = new ReadFullyStream(responseStream);
                readFullyStream.metaInt = metaInt;
                do
                {
                    if (bufferedWaveProvider != null && bufferedWaveProvider.BufferLength - bufferedWaveProvider.BufferedBytes < bufferedWaveProvider.WaveFormat.AverageBytesPerSecond / 4)
                    {
                        Debug.WriteLine("Buffer getting full, taking a break");
                        Thread.Sleep(500);
                    }
                    else
                    {
                        Mp3Frame frame = null;
                        try
                        {
                            frame = Mp3Frame.LoadFromStream(readFullyStream, true);
                        }
                        catch (EndOfStreamException)
                        {
                            this.fullyDownloaded = true;
                            // reached the end of the MP3 file / stream
                            break;
                        }
                        catch (WebException)
                        {
                            // probably we have aborted download from the GUI thread
                            break;
                        }
                        if (decompressor == null)
                        {
                            // don't think these details matter too much - just help ACM select the right codec
                            // however, the buffered provider doesn't know what sample rate it is working at
                            // until we have a frame
                            WaveFormat waveFormat = new Mp3WaveFormat(frame.SampleRate, frame.ChannelMode == ChannelMode.Mono ? 1 : 2, frame.FrameLength, frame.BitRate);
                            decompressor = new AcmMp3FrameDecompressor(waveFormat);
                            this.bufferedWaveProvider = new BufferedWaveProvider(decompressor.OutputFormat);
                            this.bufferedWaveProvider.BufferDuration = TimeSpan.FromSeconds(20); // allow us to get well ahead of ourselves
                            //this.bufferedWaveProvider.BufferedDuration = 250;
                        }
                        int decompressed = decompressor.DecompressFrame(frame, buffer, 0);
                        //Debug.WriteLine(String.Format("Decompressed a frame {0}", decompressed));
                        bufferedWaveProvider.AddSamples(buffer, 0, decompressed);
                    }
                } while (playbackState != StreamingPlaybackState.Stopped);

                Debug.WriteLine("Exiting");
                // was doing this in a finally block, but for some reason
                // we are hanging on response stream .Dispose so never get there
                decompressor.Dispose();
            }
        }
        finally
        {
            if (decompressor != null)
            {
                decompressor.Dispose();
            }
        }
    }
}
NAudio does not include an MP3 encoder. When I need to encode MP3 I use lame.exe. If you don't want to go via a file, lame.exe allows you to read from stdin and write to stdout, so if you redirect standard in and out on the process you can convert on the fly.
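For example, here is a minimal sketch of that approach in C#. It assumes lame.exe is on the PATH and uses default encoding settings; the argument list and error handling are placeholders you would adapt:

using System.Diagnostics;
using System.IO;
using System.Threading.Tasks;

public static class Mp3Converter
{
    // Pipes a WAV stream through lame.exe and returns the MP3 as a MemoryStream.
    // "--silent - -" tells lame to read WAV from stdin and write MP3 to stdout.
    public static MemoryStream WavToMp3(Stream wavStream)
    {
        var psi = new ProcessStartInfo
        {
            FileName = "lame.exe",          // assumed to be on the PATH
            Arguments = "--silent - -",
            UseShellExecute = false,
            RedirectStandardInput = true,
            RedirectStandardOutput = true
        };

        using (var lame = Process.Start(psi))
        {
            // Feed the WAV on a separate task so the two pipes can't deadlock
            // when lame's output buffer fills while we are still writing input.
            var writer = Task.Run(() =>
            {
                wavStream.CopyTo(lame.StandardInput.BaseStream);
                lame.StandardInput.Close();
            });

            var mp3Stream = new MemoryStream();
            lame.StandardOutput.BaseStream.CopyTo(mp3Stream);
            writer.Wait();
            lame.WaitForExit();
            mp3Stream.Position = 0;
            return mp3Stream;
        }
    }
}

The returned stream can then be written straight to the HTTP response with a Content-Type of audio/mpeg, which avoids the base64 overhead entirely.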
I need to decode some H.264 frames into a raw format (YUV420).
I receive packets which contain the frames over some custom protocol.
How can I pass the received H.264 frames into the GStreamermm API to decode them?
At the moment I am reading the tutorials (unfortunately they cover the C version of the GST API), but I can't find up-to-date GStreamermm API documentation.
Please point me to any documents or examples of how to do it.
I was able to implement the data transfer via a pipeline and retrieve decoded raw video frames using the C++ version. Here is a rough example:
struct WebPipeline {
    Glib::RefPtr<Gst::AppSrc> appsrc;
    Glib::RefPtr<Gst::AppSink> appdst;
    Glib::RefPtr<Gst::Element> h264parser;
    Glib::RefPtr<Gst::Element> avdec_h264;
    Glib::RefPtr<Gst::Element> jpegenc;
    Glib::RefPtr<Gst::Pipeline> pipe;
    bool accepts_data {false};
};

WebPipePtr ExampleClass::CreatePipeline() {
    auto web_pipe = std::make_shared<WebPipeline>();

    web_pipe->appsrc = Gst::AppSrc::create("web_appsrc");
    if (!web_pipe->appsrc) {
        throw std::runtime_error("Can't create AppSrc");
    }

    web_pipe->appdst = Gst::AppSink::create("web_appdst");
    if (!web_pipe->appdst) {
        throw std::runtime_error("Can't create AppSink");
    }

    web_pipe->h264parser = Gst::ElementFactory::create_element("h264parse", "h264_parser");
    if (!web_pipe->h264parser) {
        throw std::runtime_error("Can't create h264parse");
    }

    web_pipe->avdec_h264 = Gst::ElementFactory::create_element("avdec_h264", "avdec264");
    if (!web_pipe->avdec_h264) {
        throw std::runtime_error("Can't create avdec_h264");
    }

    web_pipe->jpegenc = Gst::ElementFactory::create_element("jpegenc");
    if (!web_pipe->jpegenc) {
        throw std::runtime_error("Can't create jpegenc");
    }

    web_pipe->pipe = Gst::Pipeline::create("websocket_pipe");
    if (!web_pipe->pipe) {
        throw std::runtime_error("Can't create pipeline");
    }

    web_pipe->appdst->property_emit_signals() = true;
    web_pipe->appdst->set_sync(false);
    web_pipe->appdst->signal_new_sample().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::PullFromPipe), web_pipe->appdst));

    web_pipe->appsrc->property_emit_signals() = true;
    web_pipe->appsrc->signal_need_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::EnableAcceptance), web_pipe));
    web_pipe->appsrc->signal_enough_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::DisableAcceptance), web_pipe));

    web_pipe->pipe->add(web_pipe->appsrc)->add(web_pipe->h264parser)->add(web_pipe->avdec_h264)->add(web_pipe->jpegenc)->add(web_pipe->appdst);
    web_pipe->appsrc->link(web_pipe->h264parser)->link(web_pipe->avdec_h264)->link(web_pipe->jpegenc)->link(web_pipe->appdst);

    web_pipe->pipe->set_state(Gst::STATE_PLAYING);
    return web_pipe;
}

void ExampleClass::EnableAcceptance(guint, WebPipePtr pipe) {
    if (!pipe->accepts_data) {
        BOOST_LOG_SEV(GetLogger(), log::info) << "Begin push frames";
        pipe->accepts_data = true;
    }
}

void ExampleClass::DisableAcceptance(WebPipePtr pipe) {
    if (pipe->accepts_data) {
        BOOST_LOG_SEV(GetLogger(), log::info) << "Begin drop frames";
        pipe->accepts_data = false;
    }
}
Gst::FlowReturn ExampleClass::PushToPipe(WebPipePtr pipe, std::vector<uint8_t>&& frame) {
    if (!pipe->accepts_data) {
        return Gst::FLOW_CUSTOM_ERROR;
    }

    GstBuffer* buffer = gst_buffer_new_wrapped_full(static_cast<GstMemoryFlags>(GST_MEMORY_FLAG_READONLY | GST_MEMORY_FLAG_PHYSICALLY_CONTIGUOUS),
                                                    const_cast<uint8_t*>(frame.data()),
                                                    frame.size(),
                                                    0,
                                                    frame.size(),
                                                    reinterpret_cast<gpointer>(frame_ref), // inner implementation of some sort of wrapper
                                                    destroy);                              // lambda destructor
    GST_BUFFER_PTS(buffer) = time; // presentation timestamp supplied by the caller
    return pipe->appsrc->push_buffer(Glib::wrap(buffer));
}
Gst::FlowReturn ExampleClass::PullFromPipe(const Glib::RefPtr<Gst::AppSink>& appsink) {
    auto sample = appsink->pull_sample();
    if (!sample) {
        return Gst::FLOW_ERROR;
    }
    if (appsink->property_eos()) {
        return Gst::FLOW_EOS;
    }

    Gst::ClockTime timestamp = 0;
    {
        auto buffer = sample->get_buffer();
        if (!buffer) {
            throw std::runtime_error("Can't get buffer from sample");
        }
        timestamp = buffer->get_pts();
    }

    // process sample...
    return Gst::FLOW_OK;
}
I am trying to use Intel oneAPI/oneVPL to decode a stream I receive from an RTSP camera in C#. But when I run the code I get an enormous memory leak: around 100-200 MB per run, and a run happens roughly once every second.
When I've collected a GoP from the camera, where I know the first data is a keyframe, I pass it as a byte array to my C++/CLI and C++ code.
There I expect it to decode all the frames and return the decoded images. It receives 30 frames and returns 16 decoded images, but it leaks memory.
I've tried the Visual Studio memory profiler, and all I can tell from it is that unmanaged memory is my problem. I've tried overriding the "new" and "delete" operators inside videoHandler.cpp to track and compare all allocations and deallocations, and as far as I can tell everything in there is handled correctly. I cannot see any classes that get instantiated and never cleaned up. I think my issue is in the C++/CLI class videoHandlerWrapper.cpp. Am I missing something obvious?
videoHandlerWrapper.cpp
array<imgFrameWrapper^>^ videoHandlerWrapper::decode(array<System::Byte>^ byteArray)
{
    array<imgFrameWrapper^>^ returnFrames = gcnew array<imgFrameWrapper^>(30);
    {
        std::vector<imgFrame> frames(30); // Output of the decoding process. imgFrame implements a destructor that frees the data when it goes out of scope
        std::vector<unsigned char> bytes(byteArray->Length); // Input for the decoding process
        Marshal::Copy(byteArray, 0, IntPtr((unsigned char*)(&((bytes)[0]))), byteArray->Length); // Copy from managed (C#) to unmanaged (C++)
        int status = _pVideoHandler->decode(bytes, frames); // Decode
        for (size_t i = 0; i < frames.size(); i++)
        {
            if (frames[i].size > 0)
                returnFrames[i] = gcnew imgFrameWrapper(frames[i].size, frames[i].bytes);
        }
    }
    //PrintMemoryUsage();
    return returnFrames;
}
videoHandler.cpp
#define BITSTREAM_BUFFER_SIZE 2000000 // TODO Maybe higher or lower bitstream buffer. Thorough testing has been done at 2000000

int videoHandler::decode(std::vector<unsigned char> bytes, std::vector<imgFrame> &frameData)
{
    int result = -1;
    bool isStillGoing = true;
    mfxBitstream bitstream = { 0 };
    mfxSession session = NULL;
    mfxStatus sts = MFX_ERR_NONE;
    mfxSurfaceArray* outSurfaces = nullptr;
    mfxU32 framenum = 0;
    mfxU32 numVPPCh = 0;
    mfxVideoChannelParam* mfxVPPChParams = nullptr;
    void* accelHandle = NULL;
    mfxVideoParam mfxDecParams = {};
    mfxVersion version = { 0, 1 };
    // variables used only in the 2.x version
    mfxConfig cfg = NULL;
    mfxLoader loader = NULL;
    mfxVariant inCodec = {};
    std::vector<mfxU8> input_buffer;

    // Initialize a VPL session for any implementation of HEVC/H265 decode
    loader = MFXLoad();
    VERIFY(NULL != loader, "MFXLoad failed -- is implementation in path?");

    cfg = MFXCreateConfig(loader);
    VERIFY(NULL != cfg, "MFXCreateConfig failed");

    inCodec.Type = MFX_VARIANT_TYPE_U32;
    inCodec.Data.U32 = MFX_CODEC_AVC;
    sts = MFXSetConfigFilterProperty(
        cfg,
        (mfxU8*)"mfxImplDescription.mfxDecoderDescription.decoder.CodecID",
        inCodec);
    VERIFY(MFX_ERR_NONE == sts, "MFXSetConfigFilterProperty failed for decoder CodecID");

    sts = MFXCreateSession(loader, 0, &session);
    VERIFY(MFX_ERR_NONE == sts, "Not able to create VPL session");

    // Print info about the implementation loaded
    version = ShowImplInfo(session);
    //VERIFY(version.Major > 1, "Sample requires 2.x API implementation, exiting");
    if (version.Major == 1) {
        mfxVariant ImplValueSW;
        ImplValueSW.Type = MFX_VARIANT_TYPE_U32;
        ImplValueSW.Data.U32 = MFX_IMPL_TYPE_SOFTWARE;
        MFXSetConfigFilterProperty(cfg, (mfxU8*)"mfxImplDescription.Impl", ImplValueSW);
        sts = MFXCreateSession(loader, 0, &session);
        VERIFY(MFX_ERR_NONE == sts, "Not able to create VPL session");
    }

    // Convenience function to initialize available accelerator(s)
    accelHandle = InitAcceleratorHandle(session);

    bitstream.MaxLength = BITSTREAM_BUFFER_SIZE;
    bitstream.Data = (mfxU8*)calloc(bytes.size(), sizeof(mfxU8));
    VERIFY(bitstream.Data, "Not able to allocate input buffer");
    bitstream.CodecId = MFX_CODEC_AVC;
    std::copy(bytes.begin(), bytes.end(), bitstream.Data);
    bitstream.DataLength = static_cast<mfxU32>(bytes.size());

    memset(&mfxDecParams, 0, sizeof(mfxDecParams));
    mfxDecParams.mfx.CodecId = MFX_CODEC_AVC;
    mfxDecParams.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    sts = MFXVideoDECODE_DecodeHeader(session, &bitstream, &mfxDecParams);
    VERIFY(MFX_ERR_NONE == sts, "Error decoding header\n");

    numVPPCh = 1;
    mfxVPPChParams = new mfxVideoChannelParam[numVPPCh];
    for (mfxU32 i = 0; i < numVPPCh; i++) {
        mfxVPPChParams[i] = {};
    }

    //mfxVPPChParams[0].VPP.FourCC = mfxDecParams.mfx.FrameInfo.FourCC;
    mfxVPPChParams[0].VPP.FourCC = MFX_FOURCC_BGRA;
    mfxVPPChParams[0].VPP.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
    mfxVPPChParams[0].VPP.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
    mfxVPPChParams[0].VPP.FrameRateExtN = 30;
    mfxVPPChParams[0].VPP.FrameRateExtD = 1;
    mfxVPPChParams[0].VPP.CropW = 1920;
    mfxVPPChParams[0].VPP.CropH = 1080;
    // Set the value directly if input and output are the same.
    mfxVPPChParams[0].VPP.Width = 1920;
    mfxVPPChParams[0].VPP.Height = 1080;
    //// USED TO RESIZE. IF INPUT IS THE SAME AS OUTPUT THIS WILL MAKE IT SHIFT A BIT. 1920x1080 becomes 1920x1088.
    //mfxVPPChParams[0].VPP.Width = ALIGN16(mfxVPPChParams[0].VPP.CropW);
    //mfxVPPChParams[0].VPP.Height = ALIGN16(mfxVPPChParams[0].VPP.CropH);
    mfxVPPChParams[0].VPP.ChannelId = 1;
    mfxVPPChParams[0].Protected = 0;
    mfxVPPChParams[0].IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY | MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    mfxVPPChParams[0].ExtParam = NULL;
    mfxVPPChParams[0].NumExtParam = 0;

    sts = MFXVideoDECODE_VPP_Init(session, &mfxDecParams, &mfxVPPChParams, numVPPCh); // This causes a MINOR memory leak!

    outSurfaces = new mfxSurfaceArray;

    while (isStillGoing == true) {
        sts = MFXVideoDECODE_VPP_DecodeFrameAsync(session,
                                                  &bitstream,
                                                  NULL,
                                                  0,
                                                  &outSurfaces); // Big memory leak. 100 MB per run of the while loop.
        switch (sts) {
        case MFX_ERR_NONE:
            // decode output
            if (framenum >= 30)
            {
                isStillGoing = false;
                break;
            }
            sts = WriteRawFrameToByte(outSurfaces->Surfaces[1], &frameData[framenum]);
            VERIFY(MFX_ERR_NONE == sts, "Could not write 1st vpp output");
            framenum++;
            break;
        case MFX_ERR_MORE_DATA:
            // The function requires more bitstream at input before decoding can proceed
            isStillGoing = false;
            break;
        case MFX_ERR_MORE_SURFACE:
            // The function requires more frame surfaces at output before decoding can proceed.
            // This applies to external memory allocations and should not be expected for
            // a simple internal allocation case like this
            break;
        case MFX_ERR_DEVICE_LOST:
            // For non-CPU implementations:
            // clean up if the device is lost
            break;
        case MFX_WRN_DEVICE_BUSY:
            // For non-CPU implementations:
            // wait a few milliseconds then try again
            break;
        case MFX_WRN_VIDEO_PARAM_CHANGED:
            // The decoder detected a new sequence header in the bitstream.
            // Video parameters may have changed.
            // In the external memory allocation case, we might need to reallocate the output surface
            break;
        case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM:
            // The function detected that the video parameters provided by the application
            // are incompatible with the initialization parameters.
            // The application should close the component and then reinitialize it
            break;
        case MFX_ERR_REALLOC_SURFACE:
            // A bigger surface_work is required. May be returned only if
            // mfxInfoMFX::EnableReallocRequest was set to ON during initialization.
            // This applies to external memory allocations and should not be expected for
            // a simple internal allocation case like this
            break;
        default:
            printf("unknown status %d\n", sts);
            isStillGoing = false;
            break;
        }
    }

    sts = MFXVideoDECODE_VPP_Close(session); // Helps massively! Halves the memory leak speed. Closes internal structures and tables.
    VERIFY(MFX_ERR_NONE == sts, "Error closing VPP session\n");

    result = 0;

end:
    printf("Decode and VPP processed %d frames\n", framenum);

    // Clean up resources - it is recommended to close components first, before
    // releasing allocated surfaces, since some surfaces may still be locked by
    // internal resources.
    if (mfxVPPChParams)
        delete[] mfxVPPChParams;
    if (outSurfaces)
        delete outSurfaces;
    if (bitstream.Data)
        free(bitstream.Data);
    if (accelHandle)
        FreeAcceleratorHandle(accelHandle);
    if (loader)
        MFXUnload(loader);
    return result;
}
imgFrameWrapper.h
public ref class imgFrameWrapper
{
private:
    size_t size;
    array<System::Byte>^ bytes;

public:
    imgFrameWrapper(size_t u_size, unsigned char* u_bytes);
    ~imgFrameWrapper();
    !imgFrameWrapper();
    size_t get_size();
    array<System::Byte>^ get_bytes();
};
imgFrameWrapper.cpp
imgFrameWrapper::imgFrameWrapper(size_t u_size, unsigned char* u_bytes)
{
    size = u_size;
    bytes = gcnew array<System::Byte>(size);
    Marshal::Copy((IntPtr)u_bytes, bytes, 0, size);
}

imgFrameWrapper::~imgFrameWrapper()
{
}

imgFrameWrapper::!imgFrameWrapper()
{
}

size_t imgFrameWrapper::get_size()
{
    return size;
}

array<System::Byte>^ imgFrameWrapper::get_bytes()
{
    return bytes;
}
imgFrame.h
struct imgFrame
{
    int size;
    unsigned char* bytes;

    ~imgFrame()
    {
        if (bytes)
            delete[] bytes;
    }
};
The MFXVideoDECODE_VPP_DecodeFrameAsync() function creates internal memory surfaces for the processing, and you are responsible for releasing those surfaces.
Please check this link; it mentions this:
https://spec.oneapi.com/onevpl/latest/API_ref/VPL_structs_decode_vpp.html#_CPPv415mfxSurfaceArray
mfxStatus (*Release)(struct mfxSurfaceArray *surface_array)
Decrements the internal reference counter of the surface. (*Release) should be called after using the (*AddRef) function to add a surface or when allocation logic requires it.
And please check this sample:
https://github.com/oneapi-src/oneVPL/blob/master/examples/hello-decvpp/src/hello-decvpp.cpp
Especially the WriteRawFrame_InternalMem() function in https://github.com/oneapi-src/oneVPL/blob/17968d8d2299352f5a9e09388d24e81064c81c87/examples/util/util/util.h
It shows how to release surfaces.
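Applied to the loop in the question, the fix would look roughly like this. This is only a sketch following the hello-decvpp sample: it assumes WriteRawFrameToByte does not already release the surface it writes, and the exact ownership pattern is shown in the linked util.h:

case MFX_ERR_NONE:
    // decode output
    if (framenum >= 30)
    {
        isStillGoing = false;
        break;
    }
    sts = WriteRawFrameToByte(outSurfaces->Surfaces[1], &frameData[framenum]);
    VERIFY(MFX_ERR_NONE == sts, "Could not write 1st vpp output");

    // Release every surface the runtime handed out, then the surface
    // array itself, through their reference-counting interfaces.
    for (mfxU32 i = 0; i < outSurfaces->NumSurfaces; i++)
    {
        mfxFrameSurface1* surf = outSurfaces->Surfaces[i];
        surf->FrameInterface->Release(surf);
    }
    outSurfaces->Release(outSurfaces);

    framenum++;
    break;

Note also that in the sample, outSurfaces is a plain mfxSurfaceArray* left as null and filled in by MFXVideoDECODE_VPP_DecodeFrameAsync, so the new mfxSurfaceArray and delete outSurfaces in the question's code should not be needed at all.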
I want to know how to figure out whether a file path is on internal or external storage.
I want to delete a file. Before deleting it, I want to check whether it is on internal memory or external.
If the file is on internal storage, I can simply delete it like this:
file.delete();
But if the file is on external storage (the sdcard), I first check the permission and then delete it through the Storage Access Framework.
I'm currently doing it like this:
File selectedFile = Constant.allMemoryVideoList.get(fPosition).getFile().getAbsoluteFile();
if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
    if (???????????? check if file is not from internal storage ???????????) {
        List<UriPermission> permissions = getContentResolver().getPersistedUriPermissions();
        if (permissions != null && permissions.size() > 0) {
            sdCardUri = permissions.get(0).getUri();
            deleteFileWithSAF();
        } else {
            AlertDialog.Builder builder = new AlertDialog.Builder(this);
            builder.setTitle("Please select external storage directory (e.g SDCard)")
                    .setMessage("Due to change in android security policy it is not possible to delete or rename file in external storage without granting permission")
                    .setPositiveButton("OK", new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            // call document tree dialog
                            Intent intent = new Intent(Intent.ACTION_OPEN_DOCUMENT_TREE);
                            startActivityForResult(intent, REQUEST_CODE_OPEN_DOCUMENT_TREE);
                        }
                    })
                    .setNegativeButton("CANCEL", new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                        }
                    })
                    .show();
        }
    } else {
        deleteFile();
    }
} else {
    deleteFile();
}
deleteFileWithSAF()
private void deleteFileWithSAF() {
    // First we get a `DocumentFile` from the `TreeUri`, which in our case is `sdCardUri`.
    DocumentFile documentFile = DocumentFile.fromTreeUri(this, sdCardUri);

    // Then we split the file path into an array of strings.
    // ex: parts: {"", "storage", "extSdCard", "MyFolder", "MyFolder", "myImage.jpg"}
    // There is a reason for having two similar names "MyFolder" in
    // my example file path: to show that similar names in a path will not
    // distract the hierarchy search provided below.
    String[] parts = (selectedFile.getPath()).split("\\/");

    // The findFile method will search documentFile for the first file
    // with the expected `DisplayName`.
    // We skip the first three items because we are already on them. (sdCardUri = /storage/extSdCard)
    for (int i = 3; i < parts.length; i++) {
        if (documentFile != null) {
            documentFile = documentFile.findFile(parts[i]);
        }
    }

    if (documentFile == null) {
        // File not found in the tree search:
        // the user selected a wrong directory as the sd-card.
        // Here we must inform the user about how to pick the correct sd-card
        // and invoke the file chooser dialog again.
        AlertDialog.Builder builder = new AlertDialog.Builder(this);
        builder.setTitle("Please select root of external storage directory (click SELECT button at bottom)")
                .setMessage("Due to change in android security policy it is not possible to delete or rename file in external storage without granting permission")
                .setPositiveButton("OK", new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        // call for document tree dialog
                        Intent intent = new Intent(Intent.ACTION_OPEN_DOCUMENT_TREE);
                        startActivityForResult(intent, REQUEST_CODE_OPEN_DOCUMENT_TREE);
                    }
                })
                .setNegativeButton("CANCEL", new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                    }
                })
                .show();
    } else {
        // File found on the sd-card, and it is a correct sd-card directory.
        // Save this path as a root for the sd-card in your database (SQLite, XML, txt, ...).
        // Now do whatever you like with documentFile.
        // Here I do a deletion to provide an example.
        if (documentFile.delete()) { // if deleting the file succeeded
            // Remove information related to your media from the ContentResolver,
            // which documentFile.delete() didn't do for me.
            // You must do it, otherwise you will end up showing an empty
            // ImageView if you are getting your URLs from the MediaStore.
            getApplicationContext().sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, Uri.fromFile(selectedFile)));
            // Methods.removeMedia(this, selectedFile.getPath());
            if (deleteSingleFileCall) {
                Constant.allMemoryVideoList.remove(videoPosition);
                adapter.notifyItemRemoved(videoPosition);
                deleteSingleFileCall = false;
            }
            /* update the playback record:
             * getFileName() contains file.getName() */
            for (int i = 0; i < Constant.filesPlaybackHistory.size(); i++) {
                if ((selectedFile.getName()).equals(Constant.filesPlaybackHistory.get(i).getFileName())) {
                    Constant.filesPlaybackHistory.remove(i);
                    break;
                }
            }
            // save the playback history
            Paper.book().write("playbackHistory", Constant.filesPlaybackHistory);
        }
    }
}
This is how I load files from both internal and external storage.
StorageUtil is this library: https://github.com/hendrawd/StorageUtil
String[] allPath = StorageUtil.getStorageDirectories(this);

private File directory;

for (String path : allPath) {
    directory = new File(path);
    Methods.load_Directory_Files(directory);
}
All loaded files end up in the following lists:
// all the directories that contain files
public static ArrayList<File> directoryList = null;
// list of all files (internal and external)
public static ArrayList<FilesInfo> allMemoryVideoList = new ArrayList<>();
FilesInfo contains all the info about a file: thumbnail, duration, directory, whether it is new or was played before, and if played, the last playback position, etc.
load_Directory_Files()
public static void load_Directory_Files(File directory) {
    // Get all files in storage
    File[] fileList = directory.listFiles();
    // check whether storage is empty or not
    if (fileList != null && fileList.length > 0) {
        for (int i = 0; i < fileList.length; i++) {
            boolean restricted_directory = false;
            // check whether the entry is a directory or an ordinary file
            if (fileList[i].isDirectory()) {
                for (String path : Constant.removePath) {
                    if (path.equals(fileList[i].getPath())) {
                        restricted_directory = true;
                        break;
                    }
                }
                if (!restricted_directory)
                    load_Directory_Files(fileList[i]);
            } else {
                String name = fileList[i].getName().toLowerCase();
                for (String ext : Constant.videoExtensions) {
                    // Check the type of file
                    if (name.endsWith(ext)) {
                        // first get the video duration
                        String videoDuration = Methods.getVideoDuration(fileList[i]);
                        long playbackPosition;
                        long percentage = C.TIME_UNSET;
                        FilesInfo.fileState state;
                        /* First check whether the video has already been played or not. If not, the state is NEW;
                         * otherwise load the playback position and calculate its percentage. */
                        // check whether it already exists; if yes, start from there, else start from the beginning
                        int existIndex = -1;
                        for (int j = 0; j < Constant.filesPlaybackHistory.size(); j++) {
                            String fListName = fileList[i].getName();
                            String fPlaybackHisName = Constant.filesPlaybackHistory.get(j).getFileName();
                            if (fListName.equals(fPlaybackHisName)) {
                                existIndex = j;
                                break;
                            }
                        }
                        try {
                            if (existIndex != -1) {
                                // if true, that means the file is not new
                                state = FilesInfo.fileState.NOT_NEW;
                                // set playbackPercentage, not playbackPosition
                                MediaMetadataRetriever retriever = new MediaMetadataRetriever();
                                retriever.setDataSource(fileList[i].getPath());
                                String time = retriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
                                retriever.release();
                                int duration = Integer.parseInt(time);
                                playbackPosition = Constant.filesPlaybackHistory.get(existIndex).getPlaybackPosition();
                                if (duration > 0)
                                    percentage = 1000L * playbackPosition / duration;
                                else
                                    percentage = C.TIME_UNSET;
                            } else
                                state = FilesInfo.fileState.NEW;
                            // playbackPosition holds the value as a percentage
                            Constant.allMemoryVideoList.add(new FilesInfo(fileList[i],
                                    directory, videoDuration, state, percentage, storageType));
                            // directory portion
                            currentDirectory = directory.getPath();
                            unique_directory = true;
                            for (int j = 0; j < directoryList.size(); j++) {
                                if ((directoryList.get(j).toString()).equals(currentDirectory)) {
                                    unique_directory = false;
                                }
                            }
                            if (unique_directory) {
                                directoryList.add(directory);
                            }
                            // When we find an extension from the videoExtensions array, we break.
                            break;
                        } catch (Exception e) {
                            e.printStackTrace();
                            Constant.allMemoryVideoList.add(new FilesInfo(fileList[i],
                                    directory, videoDuration, FilesInfo.fileState.NOT_NEW, C.TIME_UNSET, storageType));
                        }
                    }
                }
            }
        }
    }
    Constant.directoryList = directoryList;
}
[Image: screenshot illustrating the loaded file list, so the reader can easily understand what is going on.]
This is not C++/CLI; this is UWP C++/CX.
I am trying to send an HttpRequestMessage from outside of a managed class in C++. I looked at the UWP samples, but their requests occur inside a managed class.
All I want to do is send the request and then have a callback function. I don't need fancy async/await patterns. This is looking to be a lot more difficult than it should be.
EDIT: I have gotten it to work, but the error handling is atrocious. The extra error-handling code from the UWP HttpClient example would not compile.
client = ref new Windows::Web::Http::HttpClient();
client->DefaultRequestHeaders->UserAgent->Append(ref new Windows::Web::Http::Headers::HttpProductInfoHeaderValue("Windows", "10"));
cancellation_token_source cancellationTokenSource = cancellation_token_source();

create_task(client->SendRequestAsync(message)).then([=](Windows::Web::Http::HttpResponseMessage^ response)
{
    auto operation = response->Content->ReadAsBufferAsync();
    auto task = create_task(operation);
    if (task.wait() == task_status::completed)
    {
        webResponse->statusCode = (int)response->StatusCode;
        auto buffer = task.get();
        size_t length = buffer->Length;
        if (length > 0)
        {
            Array<byte>^ array = nullptr;
            CryptographicBuffer::CopyToByteArray(buffer, &array);
            webResponse->contentLength = array->Length;
            webResponse->data = (byte*)malloc(webResponse->contentLength);
            memcpy(webResponse->data, array->Data, webResponse->contentLength);
            delete array;
        }
        for each (IKeyValuePair<String^, String^>^ pair in response->Headers)
        {
            std::string key = PlatformStringToString(pair->Key);
            std::string value = PlatformStringToString(pair->Value);
            if (key == "Content-Type" && false)
            {
                // Should have this for completeness, but do we really care?
            }
            else
            {
                Web::WebHeader *header = new Web::WebHeader(key.c_str(), value.c_str());
                webResponse->AddHeader(header);
            }
        }
        if (request->receiveDoneCallback)
            request->receiveDoneCallback(webResponse, request->userPtr);
    }
    else
        abort();

    delete request;
    delete response;
});
I have a very annoying OpenCV error that I can't understand or handle.
I am writing an application which reads an MJPEG stream from an IP camera and processes it, but when I try to load an image from the stream I sometimes get
[mjpeg @ 0000000000428480] overread 8
and I don't know why.
Even if I try to skip the issue and load the next frame from the stream, the application gets stuck on
frameStatus = cameraHandler->read(mat);
This is the code for establishing the connection:
void ImageProcessor::connectWithCamera(VideoCapture * cameraHandler) {
    if (cameraHandler != nullptr) {
        Logger::log("Closing existing camera stream.");
        cameraHandler->release();
        delete cameraHandler;
    }

    Logger::log("Camera configuration and connection establishing.");
    cameraHandler = new VideoCapture();
    cameraHandler->set(CV_CAP_PROP_FRAME_WIDTH, config.RESOLUTION_WIDTH);
    cameraHandler->set(CV_CAP_PROP_FRAME_HEIGHT, config.RESOLUTION_HEIGHT);
    cameraHandler->set(CV_CAP_PROP_FPS, config.CAMERA_FPS);
    cameraHandler->set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
    while (!cameraHandler->open(config.LINK)) {
        Logger::log("Cannot connect to camera! Trying again.");
    }
}
And this is the code for capturing images:
void ImageProcessor::start() {
    VideoCapture * cameraHandler = new VideoCapture();
    this->connectWithCamera(cameraHandler);
    this->connectWithServer(this->serverConnection);
    Logger::log("Id sending.");
    serverConnection->send(config.TOKEN + "\n");
    Logger::log("Computations starting.");

    Mat mat;
    Result * result = nullptr;
    int delta = 1000 / cameraHandler->get(CV_CAP_PROP_FPS);
    char frameErrorCounter = 0;
    bool frameStatus;

    while (true) {
        frameStatus = false;
        cv::waitKey(delta);
        try {
            frameStatus = cameraHandler->read(mat);
        } catch (std::exception& e) {
            std::string message = e.what();
            Logger::log("Critical camera error! : " + message);
        }

        if (!frameStatus) {
            Logger::log("Cannot read a frame from source. ");
            ++frameErrorCounter;
            if (!cameraHandler->isOpened() || frameErrorCounter >= this->GET_FRAME_ERROR_COUNTER) {
                Logger::log("Probably camera is disconnected. Trying to establish connection again.");
                frameErrorCounter = 0;
                this->connectWithCamera(cameraHandler);
                Logger::log("Computations starting.");
            }
            continue;
        }

        result = processImage(mat);
        std::string stringResult;
        if (result == nullptr) {
            stringResult = this->NO_RESULT;
            delete result;
            result = nullptr;
        } else {
            stringResult = result->toJson();
        }

        if (!serverConnection->send(stringResult)) {
            Logger::log("Server connection lost, trying to establish it again.");
            serverConnection->close();
            while (!serverConnection->isOpen()) {
                this->connectWithServer(serverConnection);
            }
        }
        mat.release();
    }
}
Thanks in advance!