Rapid TS fragment ffmpeg decoding - memory leak - C++

Environment:
Ubuntu 16.04 (x64)
C++
ffmpeg
Use-case
Multiple MPEG-TS fragments are rapidly decoded (several per second)
The format of the TS fragments is dynamic and can't be known ahead of time
The first A/V frames of each fragment need to be extracted
Problem statement
The code below successfully decodes A/V, BUT it has a huge memory leak (MBytes/sec)
According to the docs it seems all memory is freed as it should be (is it...?)
Why do I get this huge memory leak? What am I missing in the following code snippet?
struct MEDIA_TYPE {
ffmpeg::AVMediaType eType;
union {
struct {
ffmpeg::AVPixelFormat colorspace;
int width, height;
float fFPS;
} video;
struct : WAVEFORMATEX {
short sSampleFormat;
} audio;
} format;
};
struct FRAME {
enum { MAX_PLANES = 3 + 1 };
int iStrmId;
int64_t pts; // Duration in 90 kHz clock resolution
uint8_t** ppData; // Null terminated
int32_t* pStride;// Zero terminated
};
HRESULT ProcessTS(IN Operation op, IN uint8_t* pTS, IN uint32_t uiBytes, bool(*cb)(IN const MEDIA_TYPE& mt, IN FRAME& frame, IN PVOID pCtx), IN PVOID pCbCtx)
{
uiBytes -= uiBytes % 188;// align to 188 packet size
struct CONTEXT {
uint8_t* pTS;
uint32_t uiBytes;
int32_t iPos;
} ctx = { pTS, uiBytes, 0 };
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this);
ffmpeg::AVFormatContext* pFmtCtx = 0;
if (0 == (pFmtCtx = ffmpeg::avformat_alloc_context()))
return E_OUTOFMEMORY;
ffmpeg::AVIOContext* pIoCtx = ffmpeg::avio_alloc_context(pTS, uiBytes, 0, &ctx
, [](void *opaque, uint8_t *buf, int buf_size)->int {
auto pCtx = (CONTEXT*)opaque;
int size = buf_size;// never copy more than the caller's buffer
if (pCtx->uiBytes - pCtx->iPos < buf_size)
size = pCtx->uiBytes - pCtx->iPos;
if (size > 0) {
memcpy(buf, pCtx->pTS + pCtx->iPos, size);
pCtx->iPos += size;
}
return size;
}
, 0
, [](void* opaque, int64_t offset, int whence)->int64_t {
auto pCtx = (CONTEXT*)opaque;
switch (whence)
{
case SEEK_SET:
pCtx->iPos = offset;
break;
case SEEK_CUR:
pCtx->iPos += offset;
break;
case SEEK_END:
pCtx->iPos = pCtx->uiBytes - offset;
break;
case AVSEEK_SIZE:
return pCtx->uiBytes;
}
return pCtx->iPos;
});
pFmtCtx->pb = pIoCtx;
int iRet = ffmpeg::avformat_open_input(&pFmtCtx, "fakevideo.ts", m_pInputFmt, 0);
if (ERROR_SUCCESS != iRet) {
assert(false);
pFmtCtx = 0;// a user-supplied AVFormatContext will be freed on failure.
return E_FAIL;
}
struct DecodeContext {
ffmpeg::AVStream* pStream;
ffmpeg::AVCodec* pDecoder;
int iFramesProcessed;
};
HRESULT hr = S_OK;
int iStreamsProcessed = 0;
bool bVideoFound = false;
int64_t ptsLast = 0;
int64_t dtsLast = 0;
auto pContext = (DecodeContext*)alloca(sizeof(DecodeContext) * pFmtCtx->nb_streams);
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++) {
assert(pFmtCtx->streams[i]->index == i);
pContext[i].pStream = pFmtCtx->streams[i];
pContext[i].pDecoder = ffmpeg::avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
pContext[i].iFramesProcessed= 0;
if (0 == pContext[i].pDecoder)
continue;
if ((iRet = ffmpeg::avcodec_open2(pFmtCtx->streams[i]->codec, pContext[i].pDecoder, NULL)) < 0) {
_ASSERT(FALSE);
hr = E_FAIL;
goto ErrExit;
}
}
while (S_OK == hr) {
ffmpeg::AVFrame* pFrame = 0;
ffmpeg::AVPacket pkt;
ffmpeg::av_init_packet(&pkt);
if (ERROR_SUCCESS != (iRet = ffmpeg::av_read_frame(pFmtCtx, &pkt))) {
hr = E_FAIL;
break;
}
if ((0 == dtsLast) && (0 != pkt.dts))
dtsLast = pkt.dts;
if ((0 == ptsLast) && (0 != pkt.pts))
ptsLast = pkt.pts;
DecodeContext& ctx = pContext[pkt.stream_index];
if (Operation::DECODE_FIRST_FRAME_OF_EACH_STREAM == op) {
if (iStreamsProcessed == pFmtCtx->nb_streams) {
hr = S_FALSE;
goto Next;
}
if (ctx.iFramesProcessed > 0)
goto Next;
iStreamsProcessed++;
}
if (0 == ctx.pDecoder)
goto Next;
if (0 == (pFrame = ffmpeg::av_frame_alloc())) {
hr = E_OUTOFMEMORY;
goto Next;
}
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x, decode, S:%d, T:%d\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this, pkt.stream_index, ctx.pStream->codec->codec_type);
int bGotFrame = false;
int iBytesUsed = 0;
MEDIA_TYPE mt;
memset(&mt, 0, sizeof(mt));
mt.eType = ctx.pStream->codec->codec_type;
switch (mt.eType) {
case ffmpeg::AVMediaType::AVMEDIA_TYPE_AUDIO:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
// FFMPEG AAC decoder oddity: the first call to 'avcodec_decode_audio4' results in muted audio, whereas the second yields the expected audio
bGotFrame = false;
if ((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (false == bGotFrame)
goto Next;
iBytesUsed = ctx.pStream->codec->frame_size;
mt.format.audio.nChannels = ctx.pStream->codec->channels;
mt.format.audio.nSamplesPerSec = ctx.pStream->codec->sample_rate;
mt.format.audio.wBitsPerSample = ffmpeg::av_get_bytes_per_sample(ctx.pStream->codec->sample_fmt) * 8;
mt.format.audio.nBlockAlign = mt.format.audio.nChannels * mt.format.audio.wBitsPerSample / 8;
mt.format.audio.sSampleFormat = (short)pFrame->format;
break;
case ffmpeg::AVMediaType::AVMEDIA_TYPE_VIDEO:
if ((iRet = ffmpeg::avcodec_decode_video2(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
break;
}
if (false == bGotFrame)
goto Next;
assert(ffmpeg::AVPixelFormat::AV_PIX_FMT_YUV420P == ctx.pStream->codec->pix_fmt);// That is the only color space currently supported
iBytesUsed = (ctx.pStream->codec->width * ctx.pStream->codec->height * 3) / 2;
mt.format.video.width = ctx.pStream->codec->width;
mt.format.video.height = ctx.pStream->codec->height;
mt.format.video.colorspace = ctx.pStream->codec->pix_fmt;
mt.format.video.fFPS = (float)ctx.pStream->codec->framerate.num / ctx.pStream->codec->framerate.den;
bVideoFound = true;
break;
default:
goto Next;
}
ctx.iFramesProcessed++;
{
FRAME f = { ctx.pStream->index, ((0 == ptsLast) ? dtsLast : ptsLast), (uint8_t**)pFrame->data, (int32_t*)pFrame->linesize };
if ((iRet > 0) && (false == cb(mt, f, pCbCtx)))
hr = S_FALSE;// Breaks the loop
}
Next:
ffmpeg::av_free_packet(&pkt);
if (0 != pFrame) {
//ffmpeg::av_frame_unref(pFrame);
ffmpeg::av_frame_free(&pFrame);
pFrame = 0;
}
}
ErrExit:
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++)
ffmpeg::avcodec_close(pFmtCtx->streams[i]->codec);
pIoCtx->buffer = 0;// We allocated the buffer ourselves; no need for ffmpeg to free it for us
pFmtCtx->pb = 0;
ffmpeg::av_free(pIoCtx);
ffmpeg::avformat_close_input(&pFmtCtx);
ffmpeg::avformat_free_context(pFmtCtx);
return hr;
}

You need to unref the packets before reusing them. And there's no need to allocate and deallocate them all the time.
Here's how I do it, which might help you:
// Initialise a packet queue
std::list<AVPacket *> packets;
...
for (int c = 0; c < MAX_PACKETS; c++) {
    packets.push_back(av_packet_alloc());
}
while (!quit) {
    ... // get a packet from the queue
    int err = av_read_frame(context, packet);
    ... // process the packet (audio, video, etc.)
    av_packet_unref(packet); // add back to queue for reuse
}
// Release packets
while (packets.size()) { // free packets
    AVPacket *packet = packets.front();
    av_packet_free(&packet);
    packets.pop_front();
}
In your code you've freed a packet which wasn't allocated in the first place.
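For reference, here is a minimal sketch of a read loop with per-iteration packet cleanup, written against the same old FFmpeg API and names used in the question (the ffmpeg namespace wrapper and pFmtCtx come from the question's code); it is an illustration, not a drop-in fix:
ffmpeg::AVPacket pkt;
for (;;) {
    ffmpeg::av_init_packet(&pkt);
    pkt.data = 0;
    pkt.size = 0;
    if (ffmpeg::av_read_frame(pFmtCtx, &pkt) < 0)
        break;                        // EOF or read error
    // ... decode / inspect pkt here ...
    ffmpeg::av_free_packet(&pkt);     // release the buffer this packet owns
}
On newer FFmpeg versions av_free_packet() is deprecated in favour of av_packet_unref().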

Related

How do I change the resolution of one monitor only?

For example, my computer has two monitors: the primary monitor's resolution is 800*600, and the other monitor's resolution is 1600*900.
I would like to set the resolution of one monitor only.
The function 'SetDisplayConfig' changes the screen resolution on both monitors...
https://learn.microsoft.com/fr-fr/windows-hardware/drivers/display/ccd-apis
https://learn.microsoft.com/fr-fr/windows/win32/api/winuser/nf-winuser-setdisplayconfig
[DllImport("User32.dll")]
public static extern int SetDisplayConfig(
uint numPathArrayElements,
[In] DisplayConfigPathInfo[] pathArray,
uint numModeInfoArrayElements,
[In] DisplayConfigModeInfo[] modeInfoArray,
SdcFlags flags
);
private static Boolean SetDisplaySettings(int Id_Monitor, uint Width, uint Height, uint Scaling)
{
try
{
CCDWrapper.DisplayConfigPathInfo[] pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[0] { };
CCDWrapper.DisplayConfigModeInfo[] modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[0] { };
CCDWrapper.MonitorAdditionalInfo[] additionalInfo = new CCDWrapper.MonitorAdditionalInfo[0] { };
bool Status = GetDisplaySettings(ref pathInfoArray, ref modeInfoArray, ref additionalInfo, true);
CCDWrapper.DisplayConfigPathInfo[] pathInfoArrayCurrent = new CCDWrapper.DisplayConfigPathInfo[0] { };
CCDWrapper.DisplayConfigModeInfo[] modeInfoArrayCurrent = new CCDWrapper.DisplayConfigModeInfo[0] { };
CCDWrapper.MonitorAdditionalInfo[] additionalInfoCurrent = new CCDWrapper.MonitorAdditionalInfo[0] { };
bool StatusCurrent = GetDisplaySettings(ref pathInfoArrayCurrent, ref modeInfoArrayCurrent, ref additionalInfoCurrent, false);
if (StatusCurrent)
{
for (int iPathInfo = 0; iPathInfo <= pathInfoArray.Length-1; iPathInfo++)
{
for (int iPathInfoCurrent = 0; iPathInfoCurrent <= pathInfoArrayCurrent.Length - 1; iPathInfoCurrent++)
{
if ((pathInfoArray[iPathInfo].sourceInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.id) && (pathInfoArray[iPathInfo].targetInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.id))
{
pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.adapterId.LowPart;
pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.adapterId.LowPart;
pathInfoArray[iPathInfo].targetInfo.scaling = (CCDWrapper.DisplayConfigScaling)Scaling;
break;
}
}
}
for (int iModeInfo = 0; iModeInfo <= modeInfoArray.Length - 1; iModeInfo++)
{
for (int iPathInfo = 0; iPathInfo <= pathInfoArray.Length - 1; iPathInfo++)
{
if ((modeInfoArray[iModeInfo].id == pathInfoArray[iPathInfo].targetInfo.id) && (modeInfoArray[iModeInfo].infoType == CCDWrapper.DisplayConfigModeInfoType.Target))
{
for (int iModeInfoSource = 0; iModeInfoSource <= modeInfoArray.Length - 1; iModeInfoSource++)
{
if ((modeInfoArray[iModeInfoSource].id == pathInfoArray[iPathInfo].sourceInfo.id) && (modeInfoArray[iModeInfoSource].adapterId.LowPart == modeInfoArray[iModeInfo].adapterId.LowPart) && (modeInfoArray[iModeInfoSource].infoType == CCDWrapper.DisplayConfigModeInfoType.Source))
{
modeInfoArray[iModeInfoSource].adapterId.LowPart = pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart;
modeInfoArray[iModeInfoSource].sourceMode.height = Height;
modeInfoArray[iModeInfoSource].sourceMode.width = Width;
break;
}
}
modeInfoArray[iModeInfo].adapterId.LowPart = pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart;
break;
}
}
}
uint numPathArrayElements = System.Convert.ToUInt32(pathInfoArray.Length);
uint numModeInfoArrayElements = System.Convert.ToUInt32(modeInfoArray.Length);
long Result = CCDWrapper.SetDisplayConfig(numPathArrayElements, pathInfoArray, numModeInfoArrayElements, modeInfoArray, CCDWrapper.SdcFlags.Apply | CCDWrapper.SdcFlags.UseSuppliedDisplayConfig | CCDWrapper.SdcFlags.SaveToDatabase | CCDWrapper.SdcFlags.NoOptimization | CCDWrapper.SdcFlags.AllowChanges);
if (Result == 0)
return true;
else
return false;
}
else
return false;
}
catch (Exception ex)
{
EventLog.WriteEntry("ResolutionEcran", "Erreur SetDisplaySettings : " + ex.Message, EventLogEntryType.Error);
return false;
}
}
private static Boolean GetDisplaySettings(ref CCDWrapper.DisplayConfigPathInfo[] pathInfoArray, ref CCDWrapper.DisplayConfigModeInfo[] modeInfoArray, ref CCDWrapper.MonitorAdditionalInfo[] additionalInfo, Boolean ActiveOnly, [System.Runtime.InteropServices.Optional] int ID_Monitor)
{
uint numPathArrayElements;
uint numModeInfoArrayElements;
CCDWrapper.QueryDisplayFlags queryFlags = CCDWrapper.QueryDisplayFlags.AllPaths;
if (ActiveOnly)
{
queryFlags = CCDWrapper.QueryDisplayFlags.OnlyActivePaths;
}
var status = CCDWrapper.GetDisplayConfigBufferSizes(queryFlags, out numPathArrayElements, out numModeInfoArrayElements);
if (status == 0)
{
pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[numPathArrayElements];
modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[numModeInfoArrayElements];
additionalInfo = new CCDWrapper.MonitorAdditionalInfo[numModeInfoArrayElements];
status = CCDWrapper.QueryDisplayConfig(queryFlags, ref numPathArrayElements, pathInfoArray, ref numModeInfoArrayElements, modeInfoArray, IntPtr.Zero);
if (status == 0)
{
for (var iMode = 0; iMode < numModeInfoArrayElements; iMode++)
{
if (modeInfoArray[iMode].infoType == CCDWrapper.DisplayConfigModeInfoType.Target)
{
try
{
additionalInfo[iMode] = CCDWrapper.GetMonitorAdditionalInfo(modeInfoArray[iMode].adapterId, modeInfoArray[iMode].id);
}
catch (Exception)
{
additionalInfo[iMode].valid = false;
}
}
}
return true;
}
else
{
//Erreur : Querying display;
}
}
else
{
//Erreur : Taille Buffer;
}
return false;
}
Use the ChangeDisplaySettingsEx function to change the settings of the specified display device to the specified graphics mode. The following is an example you can refer to:
#include <windows.h>
int main()
{
for (DWORD devNum = 0; ; devNum++)
{
DISPLAY_DEVICE dev = {0};
dev.cb = sizeof(DISPLAY_DEVICE);
if (!EnumDisplayDevices(NULL, devNum, &dev, EDD_GET_DEVICE_INTERFACE_NAME))
break;
wprintf(L"Display name: %s \n", dev.DeviceName);
DEVMODE dMode = { 0 };
dMode.dmSize = sizeof(dMode);
if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dMode))
{
wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
continue;
}
wprintf(L"Display old settings: \n");
wprintf(L"dmBitsPerPel: %d \n", dMode.dmBitsPerPel);
wprintf(L"dmPelsWidth: %d \n", dMode.dmPelsWidth);
wprintf(L"dmPelsHeight: %d \n", dMode.dmPelsHeight);
wprintf(L"dmDisplayFlags: %x \n", dMode.dmDisplayFlags);
wprintf(L"dmDisplayFrequency: %d \n", dMode.dmDisplayFrequency);
dMode.dmPelsWidth = 800;
dMode.dmPelsHeight = 600;
ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, 0, NULL);
DEVMODE dModeNew = { 0 };
dModeNew.dmSize = sizeof(DEVMODE);
if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dModeNew))
{
wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
continue;
}
wprintf(L"Display new settings: \n");
wprintf(L"dmBitsPerPel: %d \n", dModeNew.dmBitsPerPel);
wprintf(L"dmPelsWidth: %d \n", dModeNew.dmPelsWidth);
wprintf(L"dmPelsHeight: %d \n", dModeNew.dmPelsHeight);
wprintf(L"dmDisplayFlags: %x \n", dModeNew.dmDisplayFlags);
wprintf(L"dmDisplayFrequency: %d \n", dModeNew.dmDisplayFrequency);
}
getchar();
}
I set dwflags to 0 to let the graphics mode for the current screen change dynamically. Refer to the dwflags parameter section of the ChangeDisplaySettingsEx documentation to decide how you would like the graphics mode to be changed.
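For illustration, a short sketch of common dwflags choices; the flag names are from the Win32 API, and dev/dMode are assumed to come from the enumeration loop in the example above:
// Apply the mode temporarily; it is not persisted across reboots.
ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, 0, NULL);
// Persist the mode to the registry so it survives a reboot.
ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, CDS_UPDATEREGISTRY, NULL);
// Validate the mode without applying it.
if (ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, CDS_TEST, NULL) != DISP_CHANGE_SUCCESSFUL)
    wprintf(L"Mode not supported\n");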
Sorry for my late response... I was absent.
I also need to change the scaling of the monitor (black bars removed at certain resolutions).
[Flags]
public enum DisplayConfigScaling : uint
{
Zero = 0x0,
Identity = 1,
Centered = 2,
Stretched = 3,
Aspectratiocenteredmax = 4,
Custom = 5,
Preferred = 128,
ForceUint32 = 0xFFFFFFFF
}
Can the function 'ChangeDisplaySettingsEx' also change the scaling of the monitor?
Thank you very much for your help.

CoreAudio Stuff From C++ to Swift

How do I convert the code below to Swift? Can someone help?
I want to convert this C++ code to Swift, from the FreeStreamer project.
But some of the C++ structs and C callbacks are driving me crazy.
Here is the code from audio_stream.h and audio_stream.cpp
// var
queued_packet_t *m_queuedHead;
queued_packet_t *m_queuedTail;
queued_packet_t *m_playPacket;
std::list <queued_packet_t*> m_processedPackets;
//struct
typedef struct queued_packet {
UInt64 identifier;
AudioStreamPacketDescription desc;
struct queued_packet *next;
char data[];
} queued_packet_t;
//function one
OSStatus Audio_Stream::encoderDataCallback(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData) {
Audio_Stream *THIS = (Audio_Stream *)inUserData;
pthread_mutex_trylock(&THIS->m_packetQueueMutex);
// Dequeue one packet per time for the decoder
queued_packet_t *front = THIS->m_playPacket;
if (!front) {
/* Don't deadlock */
AS_LOCK_TRACE("encoderDataCallback 2: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
pthread_mutex_trylock(&THIS->m_streamStateMutex);
THIS->m_converterRunOutOfData = true;
pthread_mutex_unlock(&THIS->m_streamStateMutex);
*ioNumberDataPackets = 0;
ioData->mBuffers[0].mDataByteSize = 0;
return noErr;
}
*ioNumberDataPackets = 1;
ioData->mBuffers[0].mData = front->data;
ioData->mBuffers[0].mDataByteSize = front->desc.mDataByteSize;
ioData->mBuffers[0].mNumberChannels = THIS->m_srcFormat.mChannelsPerFrame;
if (outDataPacketDescription) {
*outDataPacketDescription = &front->desc;
}
THIS->m_playPacket = front->next;
THIS->m_processedPackets.push_front(front);
AS_LOCK_TRACE("encoderDataCallback 5: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
return noErr;
}
//function two
void Audio_Stream::streamDataCallback(void *inClientData, UInt32 inNumberBytes, UInt32 inNumberPackets, const void *inInputData, AudioStreamPacketDescription *inPacketDescriptions) {
AS_TRACE("%s: inNumberBytes %u, inNumberPackets %u\n", __FUNCTION__, (unsigned int)inNumberBytes, (unsigned int)inNumberPackets);
Audio_Stream *THIS = static_cast<Audio_Stream*>(inClientData);
if (!THIS->m_audioStreamParserRunning) {
AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
return;
}
for (int i = 0; i < inNumberPackets; i++) {
/* Allocate the packet */
UInt32 size = inPacketDescriptions[i].mDataByteSize;
queued_packet_t *packet = (queued_packet_t *)malloc(sizeof(queued_packet_t) + size);
packet->identifier = THIS->m_packetIdentifier;
// If the stream didn't provide bitRate (m_bitRate == 0), then let's calculate it
if (THIS->m_bitRate == 0 && THIS->m_bitrateBufferIndex < kAudioStreamBitrateBufferSize) {
// Only keep sampling for one buffer cycle; this is to keep the counters (for instance, duration) stable.
THIS->m_bitrateBuffer[THIS->m_bitrateBufferIndex++] = 8 * inPacketDescriptions[i].mDataByteSize / THIS->m_packetDuration;
if (THIS->m_bitrateBufferIndex == kAudioStreamBitrateBufferSize) {
if (THIS->m_delegate) {
THIS->m_delegate->bitrateAvailable();
}
}
}
AS_LOCK_TRACE("streamDataCallback: lock\n");
pthread_mutex_trylock(&THIS->m_packetQueueMutex);
/* Prepare the packet */
packet->next = NULL;
packet->desc = inPacketDescriptions[i];
packet->desc.mStartOffset = 0;
memcpy(packet->data, (const char *)inInputData + inPacketDescriptions[i].mStartOffset,
size);
if (THIS->m_queuedHead == NULL) {
THIS->m_queuedHead = THIS->m_queuedTail = THIS->m_playPacket = packet;
} else {
THIS->m_queuedTail->next = packet;
THIS->m_queuedTail = packet;
}
THIS->m_cachedDataSize += size;
THIS->m_packetIdentifier++;
AS_LOCK_TRACE("streamDataCallback: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
}
THIS->determineBufferingLimits();
}
The whole FreeStreamer project has been rewritten in Swift 3.0 here: FreePlayer
The answer can be found here: AudioStream

What is the fastest way to extract a thumbnail from an image?

I want to extract a thumbnail from an image, so I tried to use GDI+. What I did was create a new small Bitmap to hold the thumbnail, and use Graphics::DrawImage() to draw the image into it (hence getting a thumbnail).
But when using this approach on a large number of images, it gets really slow. So is there a way to speed it up (using only the Windows API and not an external library)?
Note: I know that some images store a thumbnail inside of them, but I am looking to extract a thumbnail from the ones that don't.
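For reference, a minimal sketch of the approach described above (assuming GDI+ has already been initialised with GdiplusStartup; the function name is illustrative):
#include <windows.h>
#include <gdiplus.h>
// Draws 'source' scaled down into a new thumbWidth x thumbHeight bitmap.
// The caller owns the returned bitmap. The full decode plus rescale is what makes this slow.
Gdiplus::Bitmap* MakeThumbnail(Gdiplus::Image* source, int thumbWidth, int thumbHeight)
{
    Gdiplus::Bitmap* thumb = new Gdiplus::Bitmap(thumbWidth, thumbHeight);
    Gdiplus::Graphics gr(thumb);
    gr.DrawImage(source, 0, 0, thumbWidth, thumbHeight);
    return thumb;
}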
You can increase speed by loading the thumbnail from EXIF data (for JPEG and some other formats), but GetThumbnailImage's result isn't good. Here is another implementation:
Usage:
Gdiplus::Bitmap* thumb = GetThumbnail(filename, thumbWidth, thumbHeight);
Code:
PropertyItem* GetPropertyItemFromImage(Gdiplus::Image* bm, PROPID propId) {
UINT itemSize = bm->GetPropertyItemSize(propId);
if (!itemSize) {
return 0;
}
PropertyItem* item = reinterpret_cast<PropertyItem*>(malloc(itemSize));
if (bm->GetPropertyItem(propId, itemSize, item) != Ok) {
free(item);
return 0;
}
return item;
}
UINT VoidToInt(void* data, unsigned int size) {
switch (size) {
case 8:
return *reinterpret_cast<UINT*>(data);
case 4:
return *reinterpret_cast<DWORD*>(data);
case 2:
return *reinterpret_cast<WORD*>(data);
default:
return *reinterpret_cast<BYTE*>(data);
}
}
typedef IStream * (STDAPICALLTYPE *SHCreateMemStreamFuncType)(const BYTE *pInit, UINT cbInit);
SHCreateMemStreamFuncType SHCreateMemStreamFunc = 0;
bool IsVista() {
static int isVista = -1;
if (isVista == -1)
{
OSVERSIONINFO osver;
osver.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
isVista = (::GetVersionEx(&osver) &&
osver.dwPlatformId == VER_PLATFORM_WIN32_NT &&
(osver.dwMajorVersion >= 6));
}
return isVista != FALSE;
}
Gdiplus::Bitmap* BitmapFromMemory(BYTE* data, unsigned int imageSize) {
if (IsVista()) {
if (!SHCreateMemStreamFunc) {
HMODULE lib = LoadLibrary(_T("Shlwapi.dll"));
SHCreateMemStreamFunc = reinterpret_cast<SHCreateMemStreamFuncType>(GetProcAddress(lib, "SHCreateMemStream"));
if (!SHCreateMemStreamFunc) {
return 0;
}
}
Gdiplus::Bitmap * bitmap;
IStream* pStream = SHCreateMemStreamFunc(data, imageSize);
if (pStream) {
bitmap = Gdiplus::Bitmap::FromStream(pStream);
pStream->Release();
if (bitmap) {
if (bitmap->GetLastStatus() == Gdiplus::Ok) {
return bitmap;
}
delete bitmap;
}
}
} else {
HGLOBAL buffer = ::GlobalAlloc(GMEM_MOVEABLE, imageSize);
if (buffer) {
void* pBuffer = ::GlobalLock(buffer);
if (pBuffer) {
Gdiplus::Bitmap * bitmap;
CopyMemory(pBuffer, data, imageSize);
IStream* pStream = NULL;
if (::CreateStreamOnHGlobal(buffer, FALSE, &pStream) == S_OK) {
bitmap = Gdiplus::Bitmap::FromStream(pStream);
pStream->Release();
if (bitmap) {
if (bitmap->GetLastStatus() == Gdiplus::Ok) {
return bitmap;
}
delete bitmap;
}
}
::GlobalUnlock(buffer);
}
::GlobalFree(buffer);
}
}
return 0;
}
// Based on original method from http://danbystrom.se/2009/01/05/imagegetthumbnailimage-and-beyond/
Gdiplus::Size AdaptProportionalSize(const Gdiplus::Size& szMax, const Gdiplus::Size& szReal); // forward declaration; defined below
Gdiplus::Bitmap* GetThumbnail(Gdiplus::Image* bm, int width, int height, Gdiplus::Size* realSize = 0) {
using namespace Gdiplus;
if (realSize) {
realSize->Width = bm->GetWidth();
realSize->Height = bm->GetHeight();
}
Size sz = AdaptProportionalSize(Size(width, height), Size(bm->GetWidth(), bm->GetHeight()));
Bitmap* res = new Bitmap(sz.Width, sz.Height);
Graphics gr(res);
gr.SetInterpolationMode(Gdiplus::InterpolationModeHighQualityBicubic);
UINT size = bm->GetPropertyItemSize(PropertyTagThumbnailData);
if (size) {
// Loading thumbnail from EXIF data (fast)
enum ThumbCompression { ThumbCompressionJPEG, ThumbCompressionRGB, ThumbCompressionYCbCr, ThumbCompressionUnknown }
compression = ThumbCompressionJPEG;
PropertyItem* thumbnailFormatItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailFormat);
if (thumbnailFormatItem) {
UINT format = VoidToInt(thumbnailFormatItem->value, thumbnailFormatItem->length);
if (format == 0) {
compression = ThumbCompressionRGB;
} else if (format == 1) {
compression = ThumbCompressionJPEG;
} else {
compression = ThumbCompressionUnknown;
}
free(thumbnailFormatItem);
} else {
PropertyItem* compressionItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailCompression);
if (compressionItem) {
WORD compressionTag = *reinterpret_cast<WORD*>(compressionItem->value);
if (compressionTag == 1) {
compression = ThumbCompressionRGB;
PropertyItem* photometricInterpretationItem = GetPropertyItemFromImage(bm, PropertyTagPhotometricInterp);
if (photometricInterpretationItem) {
WORD photoMetricInterpretationTag = VoidToInt(photometricInterpretationItem->value, photometricInterpretationItem->length);
free(photometricInterpretationItem);
if (photoMetricInterpretationTag == 6) {
compression = ThumbCompressionYCbCr;
}
}
} else if (compressionTag == 6) {
compression = ThumbCompressionJPEG;
}
free(compressionItem);
}
}
int originalThumbWidth = 0, originalThumbHeight = 0;
if (compression == ThumbCompressionJPEG || compression == ThumbCompressionRGB) {
PropertyItem* thumbDataItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailData);
if (thumbDataItem) {
if (compression == ThumbCompressionJPEG) {
Bitmap* src = BitmapFromMemory(reinterpret_cast<BYTE*>(thumbDataItem->value), thumbDataItem->length);
if (src) {
gr.DrawImage(src, 0, 0, sz.Width, sz.Height);
delete src;
free(thumbDataItem);
return res;
}
} else if (compression == ThumbCompressionRGB) {
PropertyItem* widthItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailImageWidth);
if (widthItem) {
originalThumbWidth = VoidToInt(widthItem->value, widthItem->length);
free(widthItem);
}
PropertyItem* heightItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailImageHeight);
if (heightItem) {
originalThumbHeight = VoidToInt(heightItem->value, heightItem->length);
free(heightItem);
}
if (originalThumbWidth && originalThumbHeight) {
BITMAPINFOHEADER bih;
memset(&bih, 0, sizeof(bih));
bih.biSize = sizeof(bih);
bih.biWidth = originalThumbWidth;
bih.biHeight = -originalThumbHeight;
bih.biPlanes = 1;
bih.biBitCount = 24;
BITMAPINFO bi;
memset(&bi, 0, sizeof(bi));
bi.bmiHeader = bih;
BYTE* data = reinterpret_cast<BYTE*>(thumbDataItem->value);
BYTE temp;
// Convert RGB to BGR
for (unsigned int offset = 0; offset < thumbDataItem->length; offset += 3) {
temp = data[offset];
data[offset] = data[offset + 2];
data[offset + 2] = temp;
}
Bitmap src(&bi, thumbDataItem->value);
if (src.GetLastStatus() == Ok) {
gr.DrawImage(&src, 0, 0, sz.Width, sz.Height);
free(thumbDataItem);
return res;
}
}
} else {
// other type of compression not implemented
}
free(thumbDataItem);
}
}
}
// Fallback - Load full image and draw it (slow)
gr.DrawImage(bm, 0, 0, sz.Width, sz.Height);
return res;
}
Gdiplus::Bitmap* GetThumbnail(const CString& filename, int width, int height, Gdiplus::Size* realSize = 0) {
using namespace Gdiplus;
Image bm(filename);
if (bm.GetLastStatus() != Ok) {
return 0;
}
return GetThumbnail(&bm, width, height, realSize);
}
Gdiplus::Size AdaptProportionalSize(const Gdiplus::Size& szMax, const Gdiplus::Size& szReal)
{
int nWidth;
int nHeight;
double sMaxRatio;
double sRealRatio;
if (szMax.Width < 1 || szMax.Height < 1 || szReal.Width < 1 || szReal.Height < 1)
return Size();
sMaxRatio = szMax.Width / static_cast<double>(szMax.Height);
sRealRatio = szReal.Width / static_cast<double>(szReal.Height);
if (sMaxRatio < sRealRatio) {
nWidth = min(szMax.Width, szReal.Width);
nHeight = static_cast<int>(round(nWidth / sRealRatio));
} else {
nHeight = min(szMax.Height, szReal.Height);
nWidth = static_cast<int>(round(nHeight * sRealRatio));
}
return Size(nWidth, nHeight);
}

How to get Serial Port profile SDP record for remote device

I am trying to write an application that pairs a remote Bluetooth device with a Windows Mobile/CE handheld and gets the device's service.
I noticed that when you manually pair a device and set the service through the system (e.g. go to Settings -> Bluetooth -> Add Device), it generates an sdprecord value under the following registry key:
HKLM/Software/Microsoft/Bluetooth/Device/{deviceAddress}/{*Service_UUID*}.
I am basically trying to implement this in my program.
I followed this documentation:
http://msdn.microsoft.com/en-us/library/ms883458.aspx
http://msdn.microsoft.com/en-us/library/ms883398.aspx
and then converted the SDP record to binary and wrote it to the registry:
RegSetValueEx(hSerialPortKey,_T("sdprecord"),0,REG_BINARY,(BYTE*)&pRecord,sizeof(pRecord)*2+1);
The result is not the same as what I would get when I manually pair the device.
What am I doing wrong? How can I retrieve the SDP record? Thank you in advance.
Below is my code
BTHNS_RESTRICTIONBLOB RBlob;
memset(&RBlob,0,sizeof(BTHNS_RESTRICTIONBLOB));
RBlob.type = SDP_SERVICE_SEARCH_ATTRIBUTE_REQUEST;
RBlob.numRange = 1;
RBlob.uuids[0].uuidType= SDP_ST_UUID16;//SDP_ST_UUID128;
RBlob.uuids[0].u.uuid16= SerialPortServiceClassID_UUID16;
RBlob.pRange[0].minAttribute =SDP_ATTRIB_PROTOCOL_DESCRIPTOR_LIST;
RBlob.pRange[0].maxAttribute = SDP_ATTRIB_PROTOCOL_DESCRIPTOR_LIST;
SOCKADDR_BTH sa;
memset (&sa, 0, sizeof(SOCKADDR_BTH));
BTH_ADDR *pDeviceAddr = &devAddress;
*(BTH_ADDR *)(&sa.btAddr) = *pDeviceAddr;
sa.addressFamily = AF_BTH;
CSADDR_INFO csai;
memset(&csai,0,sizeof(CSADDR_INFO));
csai.RemoteAddr.lpSockaddr = (sockaddr*)&sa;
csai.RemoteAddr.iSockaddrLength = sizeof(sa);
BLOB blob;
blob.cbSize = sizeof(BTHNS_RESTRICTIONBLOB);
blob.pBlobData = (BYTE *)&RBlob;
WSAQUERYSET wsaq;
memset(&wsaq,0,sizeof(WSAQUERYSET));
wsaq.dwSize = sizeof(WSAQUERYSET);
wsaq.dwNumberOfCsAddrs = 1;
wsaq.dwNameSpace = NS_BTH;
wsaq.lpBlob = &blob;
wsaq.lpcsaBuffer = &csai;
wsaq.lpServiceClassId = &serialPortUUID;
//Initialising winsock
WSADATA data;
int result = WSAStartup(MAKEWORD(2, 2), &data);//initializing winsock
if (hLib == NULL)
{
return S_FALSE;
}
result = WSALookupServiceBegin (&wsaq,0, &hLookup);
while (result ==ERROR_SUCCESS )
{
BYTE sdpBuffer[4096];
memset(sdpBuffer,0,sizeof(sdpBuffer));
LPWSAQUERYSET pResult = (LPWSAQUERYSET)&sdpBuffer;
dwSize = sizeof(sdpBuffer);
pResult->dwSize = sizeof(WSAQUERYSET);
pResult->dwNameSpace = NS_BTH;
pResult->lpBlob = NULL;
pResult->lpServiceClassId = &serialPortUUID;
result = WSALookupServiceNext(hLookup,0,&dwSize,pResult);
if(result == -1 )
{
int error;
error = WSAGetLastError();
if (error == WSA_E_NO_MORE)
break;
}
else
{
//Get SDP record
if (pResult != NULL)
{
if (ERROR_SUCCESS != ServiceAndAttributeSearchParse(pResult->lpBlob->pBlobData, pResult->lpBlob->cbSize, &pRecordArg, &ulRecords)) { /* error handling */ }
ULONG recordIndex;
for (recordIndex = 0; recordIndex < ulRecords; recordIndex++) {
pRecord = pRecordArg[recordIndex];}
I think the code should be OK. Here's my code:
int FindRFCOMMChannel (unsigned char *pStream, int cStream, unsigned char *pChann)
{
ISdpRecord **pRecordArg;
int cRecordArg = 0;
*pChann = 0;
int hr = ServiceAndAttributeSearch(pStream, cStream, &pRecordArg, (ULONG *)&cRecordArg);
if (hr != ERROR_SUCCESS)
{
BT_ERROR( CBTRFCOMMMgr, PerformServiceSearch, "ServiceAndAttributeSearch ERROR %d\n");
return hr;
}
for (int i = 0; (! *pChann) && (i < cRecordArg); i++)
{
ISdpRecord *pRecord = pRecordArg[i]; // particular record to examine in this loop
CNodeDataFreeString protocolList; // contains SDP_ATTRIB_PROTOCOL_DESCRIPTOR_LIST data, if available
if (ERROR_SUCCESS != pRecord->GetAttribute(SDP_ATTRIB_PROTOCOL_DESCRIPTOR_LIST,&protocolList) ||
(protocolList.type != SDP_TYPE_CONTAINER))
continue;
ISdpNodeContainer *pRecordContainer = protocolList.u.container;
int cProtocols = 0;
NodeData protocolDescriptor; // information about a specific protocol (i.e. L2CAP, RFCOMM, ...)
pRecordContainer->GetNodeCount((DWORD *)&cProtocols);
for (int j = 0; (! *pChann) && (j < cProtocols); j++) {
pRecordContainer->GetNode(j,&protocolDescriptor);
if (protocolDescriptor.type != SDP_TYPE_CONTAINER)
continue;
ISdpNodeContainer *pProtocolContainer = protocolDescriptor.u.container;
int cProtocolAtoms = 0;
pProtocolContainer->GetNodeCount((DWORD *)&cProtocolAtoms);
for (int k = 0; (! *pChann) && (k < cProtocolAtoms); k++) {
NodeData nodeAtom; // individual data element, such as what protocol this is or RFCOMM channel id.
pProtocolContainer->GetNode(k,&nodeAtom);
if (IsRfcommUuid(&nodeAtom)) {
if (k+1 == cProtocolAtoms) {
// misformatted response. Channel ID should follow RFCOMM uuid
break;
}
NodeData channelID;
pProtocolContainer->GetNode(k+1,&channelID);
*pChann = (unsigned char)GetChannel(&channelID);
break; // formatting error
}
}
}
}
for (int i = 0; i < cRecordArg; i++)
pRecordArg[i]->Release();
CoTaskMemFree(pRecordArg);
return (*pChann != 0) ? 1:0;
}
int IsRfcommUuid(NodeData *pNode) {
if (pNode->type != SDP_TYPE_UUID)
return FALSE;
if (pNode->specificType == SDP_ST_UUID16)
return (pNode->u.uuid16 == RFCOMM_PROTOCOL_UUID16);
else if (pNode->specificType == SDP_ST_UUID32)
return (pNode->u.uuid32 == RFCOMM_PROTOCOL_UUID16);
else if (pNode->specificType == SDP_ST_UUID128)
return (0 == memcmp(&RFCOMM_PROTOCOL_UUID,&pNode->u.uuid128,sizeof(GUID)));
return FALSE;
}
int GetChannel (NodeData *pChannelNode) {
if (pChannelNode->specificType == SDP_ST_UINT8)
return pChannelNode->u.uint8;
else if (pChannelNode->specificType == SDP_ST_INT8)
return pChannelNode->u.int8;
else if (pChannelNode->specificType == SDP_ST_UINT16)
return pChannelNode->u.uint16;
else if (pChannelNode->specificType == SDP_ST_INT16)
return pChannelNode->u.int16;
else if (pChannelNode->specificType == SDP_ST_UINT32)
return pChannelNode->u.uint32;
else if (pChannelNode->specificType == SDP_ST_INT32)
return pChannelNode->u.int32;
return 0;
}
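For illustration, a hypothetical call site for FindRFCOMMChannel, assuming pResult is the WSAQUERYSET obtained from WSALookupServiceNext as in the question's loop:
unsigned char channel = 0;
if (pResult->lpBlob &&
    FindRFCOMMChannel(pResult->lpBlob->pBlobData, (int)pResult->lpBlob->cbSize, &channel))
{
    // 'channel' now holds the advertised RFCOMM server channel; use it as
    // SOCKADDR_BTH::port when connecting the RFCOMM socket.
}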

Why do I get a crash only sometimes when closing an input file with ffmpeg

I have a problem where, only sometimes, calling avformat_close_input(&pFormatCtx) results in a 'malloc check failed' error and my application crashes.
I really need to use ffmpeg because I need to grab a thumbnail of a video to show in a list, and I cannot find an alternative library.
Can anybody see something in my code where I am using this library incorrectly that may cause this 'malloc check failed' problem?
bool MuteCamera::PullFrame( )
{
pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
bool bRet = false;
int videoStream = -1;
AVFrame *pFrame=NULL;
AVFrame *pFrameRGB=NULL;
AVPacket packet;
int frameFinished=0;
//AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName))) {
printf("can't find input format %s\n", formatName);
return -1;
}
AVFormatContext *pFormatCtx = NULL;
pFormatCtx=avformat_alloc_context();
if(pFormatCtx == NULL)
{
printf("\n NULL CONTEXT \n ");
return -1;
}
if(avformat_open_input (&pFormatCtx, capturedUrl.data(), pFormat, NULL) == 0 )
{
for(int i=0; i<(int)pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream >= 0 )
{
AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec != NULL)
{
if( avcodec_open2(pCodecCtx, pCodec, NULL) >= 0 )
{
pFrame=avcodec_alloc_frame();
if(pFrame != NULL)
{
frameFinished = 0;
while(av_read_frame(pFormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if(frameFinished)
{
printf("\n FRAMEFINISHED \n ");
QString *uu = new QString(capturedUrl.data());
uu->replace(".mp4", "thumbnail.jpg");
WriteJPEG(pCodecCtx, pFrame, uu->toLatin1().data(), PIX_FMT_YUVJ420P);
if(viewingVideos && viewingFromDifferent)
{
QVariantMap map = QVariantMap();
map["title"] = actualFilename;
map["path"] = actualFilename.replace(".mp4", "thumbnail.jpg");// QString("asset:///white_photo.png");
m_listDataModel << map;
}
delete uu;
av_free_packet(&packet);
break;
}
else
{
printf("\n FRAMENOTFINISHED \n ");
}
}
av_free_packet(&packet);
}
av_free(pFrameRGB);
av_free(pFrame);
avcodec_close(pCodecCtx);
//av_free(pCodecCtx);
cout << "\n before free formatctx \n";
cout.flush();
if(pFormatCtx)
avformat_close_input(&pFormatCtx);
cout << "\n after free formatctx \n";
cout.flush();
}
else
bRet = false;
}
else
bRet = false;
}
else
bRet = false;
}
else
bRet = false;
}
return bRet;
}
bool WriteJPEG (AVCodecContext *pCodecCtx, AVFrame *pFrame, char cFileName[], PixelFormat pix)
{
int complete = 0;
bool bRet = false;
int out_buf_size;
uint8_t *out_buf;
AVCodecContext *pMJPEGCtx = avcodec_alloc_context3(pMJPEGCodec);
if( pMJPEGCtx )
{
pMJPEGCtx->bit_rate = pCodecCtx->bit_rate;
pMJPEGCtx->width = pCodecCtx->width;
pMJPEGCtx->height = pCodecCtx->height;
pMJPEGCtx->pix_fmt = pix;
pMJPEGCtx->codec_id = CODEC_ID_MJPEG;
pMJPEGCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pMJPEGCtx->time_base.num = pCodecCtx->time_base.num;
pMJPEGCtx->time_base.den = pCodecCtx->time_base.den;
pMJPEGCtx->time_base= (AVRational){1,29.7};
if( pMJPEGCodec && (avcodec_open2( pMJPEGCtx, pMJPEGCodec, NULL) >= 0) )
{
AVFrame *oframe;
oframe = avcodec_alloc_frame();
if(oframe == NULL)
{
printf("\n (oframe == NULL");
fflush(stdout);
}
/* calculate the bytes needed for the output image and create buffer for the output image */
out_buf_size = avpicture_get_size(pMJPEGCtx->pix_fmt,
pMJPEGCtx->width,
pMJPEGCtx->height);
out_buf = (uint8_t *)av_malloc(out_buf_size * sizeof(uint8_t));
if (out_buf == NULL) {
fprintf(stderr, "cannot allocate output data buffer!\n");
//ret = -ENOMEM;
}
avpicture_alloc((AVPicture *)oframe, pMJPEGCtx->pix_fmt, pMJPEGCtx->width, pMJPEGCtx->height);
struct SwsContext *sws;
sws = sws_getContext(pMJPEGCtx->width, pMJPEGCtx->height, pCodecCtx->pix_fmt,
pMJPEGCtx->width, pMJPEGCtx->height, pMJPEGCtx->pix_fmt, SWS_BILINEAR,
NULL, NULL, NULL);
sws_scale(sws, (const uint8_t **)pFrame->data, pFrame->linesize,
0, pMJPEGCtx->height, &oframe->data[0], &oframe->linesize[0]);
sws_freeContext(sws);
AVPacket pp2;
av_init_packet(&pp2);
pp2.data = NULL;
pp2.size = 0;
avcodec_encode_video2(pMJPEGCtx, &pp2, oframe, &complete);
if(complete)
{
printf("\n packet recieved");
fflush(stdout);
}
else
{
printf("\n packet NOT recieved");
fflush(stdout);
}
if( SaveFrameJpeg(pp2.size, pp2.data, cFileName ) )
bRet = true;
av_free(oframe);
avcodec_close(pMJPEGCtx);
av_free_packet(&pp2);
av_free(out_buf);
av_free(pMJPEGCtx);
}
else
{
printf("\n problem!!");
fflush(stdout);
}
}
return bRet;
}
bool SaveFrameJpeg(int nszBuffer, uint8_t *buffer, char cOutFileName[])
{
bool bRet = false;
FILE *pFile;
if( nszBuffer > 0 )
{
if(0 == 0 )
{
printf("\n start SaveFrameJpeg=%d",nszBuffer );
fflush(stdout);
pFile= fopen(cOutFileName, "wb");
fwrite(buffer, sizeof(uint8_t), nszBuffer, pFile);
bRet = true;
fclose(pFile);
printf("\n end SaveFrameJpeg=%d",nszBuffer );
fflush(stdout);
}
}
return bRet;
}
bool newPullFrame(const std::string& capturedUrl)
{
AVCodec* pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
int videoStream = -1;
AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName)))
{
std::cout << "can't find input format " << formatName << "\n";
return false;
}
AVFormatContextHandle FormatCtx(avformat_alloc_context());
if(!FormatCtx.is_valid())
{
std::cout << "\n NULL CONTEXT \n ";
return false;
}
if(avformat_open_input (&FormatCtx, capturedUrl.c_str(), pFormat, NULL))
return false;
for(int i=0; i<(int)FormatCtx->nb_streams; i++)
{
if(FormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream < 0 )
return false;
CodecContextHandle CodecCtx(FormatCtx->streams[videoStream]->codec, avcodec_close);
AVCodec *pCodec = avcodec_find_decoder(CodecCtx->codec_id);
if(pCodec == NULL)
return false;
if( avcodec_open2(CodecCtx, pCodec, &optionsDict) < 0 )
return false;
FrameHandle Frame(avcodec_alloc_frame(), av_free);
if(!Frame.is_valid())
return false;
int frameFinished=0;
AVPacket packet;
while(av_read_frame(FormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
if(frameFinished)
{
std::string uu (capturedUrl);
size_t pos = capturedUrl.rfind(".mp4");
uu.replace(pos, 4, "thumbnail.jpg");
// save the frame to file
int Bytes = avpicture_get_size(PIX_FMT_YUVJ420P, CodecCtx->width, CodecCtx->height);
BufferHandle buffer((uint8_t*)av_malloc(Bytes*sizeof(uint8_t)), av_free);
CodecContextHandle OutContext(avcodec_alloc_context3(NULL), free_context);
OutContext->bit_rate = CodecCtx->bit_rate;
OutContext->width = CodecCtx->width;
OutContext->height = CodecCtx->height;
OutContext->pix_fmt = PIX_FMT_YUVJ420P;
OutContext->codec_id = CODEC_ID_MJPEG;
OutContext->codec_type = AVMEDIA_TYPE_VIDEO;
OutContext->time_base.num = CodecCtx->time_base.num;
OutContext->time_base.den = CodecCtx->time_base.den;
OutContext->time_base= (AVRational){1,29.7};
AVCodec *OutCodec = avcodec_find_encoder(OutContext->codec_id);
avcodec_open2(OutContext, OutCodec, NULL);
OutContext->mb_lmin = OutContext->lmin = OutContext->qmin * 118;
OutContext->mb_lmax = OutContext->lmax = OutContext->qmax * 118;
OutContext->flags = 2;
OutContext->global_quality = OutContext->qmin * 118;
Frame->pts = 1;
Frame->quality = OutContext->global_quality;
int ActualSize = avcodec_encode_video(OutContext, buffer, Bytes, Frame);
std::ofstream file(uu.data(), std::ios_base::binary | std::ios_base::out);
file.write((const char*)(uint8_t*)buffer, ActualSize);
file.close();
av_free_packet(&packet);
av_free(Frame);
break;
}
else
{
std::cout << " new pullframe frameNOTfinished\n";
cout.flush();
}
//if(CodecCtx->refcounted_frames == 1)
av_free(Frame);
}
av_free_packet(&packet);
}
return true;
}
It looks to me like you need to move your call to av_free_packet(&packet) inside your while loop. So you currently have:
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// A bunch of operations here
}
// this is not the right place for this
av_free_packet(&packet);
Instead you should have
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// A bunch of operations here
// this needs to be called for every call to av_read_frame()
// so it must be inside the while loop
av_free_packet(&packet);
}
See here for further details.
Considering that you're writing this in C++, you could consider creating some simple RAII wrappers around these resources to make your resource management much easier.
EDIT
Based on your feedback it seems that the change I recommended didn't do it, so I revisited the code. I implemented my own save-to-JPEG as I could not see yours - though for test purposes it simply keeps over-writing the same file.
In order to simplify the code and get a handle on the resource management, I implemented some "smart pointers" for the ffmpeg resources. These automatically clean up at scope exit. I cannot see a resource leak when running through this code, and it correctly generates each frame as a JPEG file.
See if you get any value out of this:
extern "C" {
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
#include <iostream>
#include <fstream>
#include <ios>
#include <type_traits>
template<typename T, typename D>
class AVHandle
{
T *val;
typedef D* deleter_t;
deleter_t deleter;
// not default constructible
AVHandle();
// non copiable
AVHandle(const AVHandle&);
AVHandle& operator=(const AVHandle&);
public:
AVHandle(T *in, deleter_t del) : val(in), deleter(del)
{}
operator T *()
{
return val;
}
T* operator->()
{
return val;
}
bool is_valid()
{
return val != 0;
}
~AVHandle()
{
deleter(val);
}
};
typedef AVHandle<AVFrame, void (void*)> FrameHandle;
typedef AVHandle<AVCodecContext, int (AVCodecContext*)> CodecContextHandle;
typedef AVHandle<uint8_t, void(void*)> BufferHandle;
class AVFormatContextHandle
{
AVFormatContext *val;
// not default constructible
AVFormatContextHandle();
// non copiable
AVFormatContextHandle(const AVFormatContextHandle&);
AVFormatContextHandle& operator=(const AVFormatContextHandle&);
public:
AVFormatContextHandle(AVFormatContext *ctx) : val(ctx)
{}
operator AVFormatContext *()
{
return val;
}
AVFormatContext* operator ->()
{
return val;
}
AVFormatContext** operator&()
{
return &val;
}
bool is_valid()
{
return val != 0;
}
~AVFormatContextHandle()
{
if(val)
avformat_close_input(&val);
}
};
int free_context(AVCodecContext* c)
{
int ret = avcodec_close(c);
av_free(c);
return ret;
}
bool PullFrame(const std::string& capturedUrl)
{
AVCodec* pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
int videoStream = -1;
AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName)))
{
std::cout << "can't find input format " << formatName << "\n";
return false;
}
AVFormatContextHandle FormatCtx(avformat_alloc_context());
if(!FormatCtx.is_valid())
{
std::cout << "\n NULL CONTEXT \n ";
return false;
}
if(avformat_open_input (&FormatCtx, capturedUrl.c_str(), pFormat, NULL))
return false;
for(int i=0; i<(int)FormatCtx->nb_streams; i++)
{
if(FormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream < 0 )
return false;
CodecContextHandle CodecCtx(FormatCtx->streams[videoStream]->codec, avcodec_close);
AVCodec *pCodec = avcodec_find_decoder(CodecCtx->codec_id);
if(pCodec == NULL)
return false;
if( avcodec_open2(CodecCtx, pCodec, &optionsDict) < 0 )
return false;
FrameHandle Frame(avcodec_alloc_frame(), av_free);
if(!Frame.is_valid())
return false;
int frameFinished=0;
AVPacket packet;
while(av_read_frame(FormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
if(frameFinished)
{
std::string uu (capturedUrl);
size_t pos = capturedUrl.rfind(".mp4");
uu.replace(pos, 4, "thumbnail.jpg");
// save the frame to file
int Bytes = avpicture_get_size(PIX_FMT_YUVJ420P, CodecCtx->width, CodecCtx->height);
BufferHandle buffer((uint8_t*)av_malloc(Bytes*sizeof(uint8_t)), av_free);
CodecContextHandle OutContext(avcodec_alloc_context3(NULL), free_context);
OutContext->bit_rate = CodecCtx->bit_rate;
OutContext->width = CodecCtx->width;
OutContext->height = CodecCtx->height;
OutContext->pix_fmt = PIX_FMT_YUVJ420P;
OutContext->codec_id = CODEC_ID_MJPEG;
OutContext->codec_type = AVMEDIA_TYPE_VIDEO;
OutContext->time_base.num = CodecCtx->time_base.num;
OutContext->time_base.den = CodecCtx->time_base.den;
AVCodec *OutCodec = avcodec_find_encoder(OutContext->codec_id);
avcodec_open2(OutContext, OutCodec, NULL);
OutContext->mb_lmin = OutContext->lmin = OutContext->qmin * 118;
OutContext->mb_lmax = OutContext->lmax = OutContext->qmax * 118;
OutContext->flags = 2;
OutContext->global_quality = OutContext->qmin * 118;
Frame->pts = 1;
Frame->quality = OutContext->global_quality;
int ActualSize = avcodec_encode_video(OutContext, buffer, Bytes, Frame);
std::ofstream file("c:\\temp\\output.jpg", std::ios_base::binary | std::ios_base::out);
file.write((const char*)(uint8_t*)buffer, ActualSize);
}
if(CodecCtx->refcounted_frames == 1)
av_frame_unref(Frame);
}
av_free_packet(&packet);
}
return true;
}
int main()
{
av_register_all();
while(true)
PullFrame("c:\\temp\\sample_mpeg4.mp4");
return 0;
}