Basler Pylon camera parameter set command causing image grab timeout - c++

I have code to control the settings of, and grab images from, a Basler acA720-290gm camera. After a period of running, if the exposure time is changed, the Pylon::CBaslerUniversalInstantCamera::RetrieveResult call starts timing out and the camera becomes unresponsive to further requests to change the exposure time. The requested exposure time is nowhere near large enough to cause a timeout on its own.
This is the heart of the code in question:
void process_pylon(Pylon::String_t serial_number, int64_t width, int64_t height, int64_t packet_size, int64_t inter_packet_delay) {
try {
Pylon::CBaslerUniversalInstantCamera camera;
Pylon::CTlFactory& tlFactory = Pylon::CTlFactory::GetInstance();
Pylon::CDeviceInfo info;
Pylon::DeviceInfoList_t filter;
info.SetSerialNumber(serial_number);
filter.push_back(info);
Pylon::DeviceInfoList_t devices;
while (1) {
try {
while (1) {
if (tlFactory.EnumerateDevices(devices, filter) > 0) {
g_print("The camera is connected.\n");
break;
}
else {
g_printerr("The camera is not connected.\n");
Pylon::WaitObject::Sleep(5000);
}
}
g_print("Attaching camera.\n");
camera.Attach(tlFactory.CreateFirstDevice(info));
g_print("Opening camera.\n");
camera.Open();
g_print("Setting width: %li\n", width);
camera.Width.TrySetValue(width);
g_print("Setting height: %li\n", height);
camera.Height.TrySetValue(height);
g_print("Setting GevSCPSPacketSize (packet size): %li\n", packet_size);
camera.GevSCPSPacketSize.TrySetValue(packet_size);
g_print("Setting GevSCPD (inter-packet delay): %li\n", inter_packet_delay);
camera.GevSCPD.TrySetValue(inter_packet_delay);
g_print("Starting grabbing.\n");
camera.StartGrabbing(Pylon::GrabStrategy_LatestImageOnly);
Pylon::CGrabResultPtr ptrGrabResult;
exposure_auto_set = 1;
exposure_time_raw_set = 1;
gain_auto_set = 1;
gain_raw_set = 1;
while (1) {
// Exposure Time
int32_t epics_exposure_auto_req = exposure_auto_req;
int32_t epics_exposure_auto_set = exposure_auto_set;
if (epics_exposure_auto_set > 0) {
g_print("Setting auto exposure: %i\n", epics_exposure_auto_req);
camera.ExposureAuto.SetIntValue((int64_t) epics_exposure_auto_req);
exposure_auto_set = 0;
}
exposure_auto = camera.ExposureAuto.GetIntValue();
int32_t epics_exposure_time_raw_req = exposure_time_raw_req;
int32_t epics_exposure_time_raw_set = exposure_time_raw_set;
if (epics_exposure_time_raw_set > 0) {
g_print("Setting exposure time: %i\n", epics_exposure_time_raw_req);
camera.ExposureTimeRaw.TrySetValue((int64_t) epics_exposure_time_raw_req, Pylon::IntegerValueCorrection_Nearest);
exposure_time_raw_set = 0;
}
exposure_time_raw = camera.ExposureTimeRaw.GetValue();
// Gain
int32_t epics_gain_auto_req = gain_auto_req;
int32_t epics_gain_auto_set = gain_auto_set;
if (epics_gain_auto_set > 0) {
g_print("Setting auto gain: %i\n", epics_gain_auto_req);
camera.GainAuto.SetIntValue((int64_t) epics_gain_auto_req);
gain_auto_set = 0;
}
gain_auto = camera.GainAuto.GetIntValue();
int32_t epics_gain_raw_req = gain_raw_req;
int32_t epics_gain_raw_set = gain_raw_set;
if (epics_gain_raw_set > 0) {
g_print("Setting gain: %i\n", epics_gain_raw_req);
camera.GainRaw.TrySetValue((int64_t) epics_gain_raw_req, Pylon::IntegerValueCorrection_Nearest);
gain_raw_set = 0;
}
gain_raw = camera.GainRaw.GetValue();
try {
camera.RetrieveResult((unsigned int) timeout_ms, ptrGrabResult, Pylon::TimeoutHandling_ThrowException);
if (ptrGrabResult->GrabSucceeded()) {
if (image_mutex.try_lock()) {
image.AttachGrabResultBuffer(ptrGrabResult);
image_mutex.unlock();
}
}
else {
g_printerr("The grab failed.\n");
g_printerr("%s\n", ptrGrabResult->GetErrorDescription().c_str());
}
}
catch (const Pylon::TimeoutException& e) {
g_printerr("%s\n", e.GetDescription());
}
}
}
catch (const Pylon::GenericException& e) {
Pylon::WaitObject::Sleep(1000);
if (camera.IsCameraDeviceRemoved()) {
g_printerr("The connection to the camera has been lost.\n");
camera.DestroyDevice();
}
else {
g_printerr("%s\n", e.GetDescription());
}
}
}
}
catch (const Pylon::GenericException& e) {
g_printerr("An exception occurred.\n");
g_printerr("%s\n", e.GetDescription());
exit(-1);
}
}

Related

How do I change the resolution of one monitor only?

For example, my computer has two monitors: the primary monitor's resolution is 800*600, the other monitor's resolution is 1600*900.
I would like to set the resolution on one monitor only.
The 'SetDisplayConfig' function changes the screen resolution on both monitors...
https://learn.microsoft.com/fr-fr/windows-hardware/drivers/display/ccd-apis
https://learn.microsoft.com/fr-fr/windows/win32/api/winuser/nf-winuser-setdisplayconfig
[DllImport("User32.dll")]
public static extern int SetDisplayConfig(
uint numPathArrayElements,
[In] DisplayConfigPathInfo[] pathArray,
uint numModeInfoArrayElements,
[In] DisplayConfigModeInfo[] modeInfoArray,
SdcFlags flags
);
private static Boolean SetDisplaySettings(int Id_Monitor, uint Width, uint Height, uint Scaling)
{
try
{
CCDWrapper.DisplayConfigPathInfo[] pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[0] { };
CCDWrapper.DisplayConfigModeInfo[] modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[0] { };
CCDWrapper.MonitorAdditionalInfo[] additionalInfo = new CCDWrapper.MonitorAdditionalInfo[0] { };
bool Status = GetDisplaySettings(ref pathInfoArray, ref modeInfoArray, ref additionalInfo, true);
CCDWrapper.DisplayConfigPathInfo[] pathInfoArrayCurrent = new CCDWrapper.DisplayConfigPathInfo[0] { };
CCDWrapper.DisplayConfigModeInfo[] modeInfoArrayCurrent = new CCDWrapper.DisplayConfigModeInfo[0] { };
CCDWrapper.MonitorAdditionalInfo[] additionalInfoCurrent = new CCDWrapper.MonitorAdditionalInfo[0] { };
bool StatusCurrent = GetDisplaySettings(ref pathInfoArrayCurrent, ref modeInfoArrayCurrent, ref additionalInfoCurrent, false);
if (StatusCurrent)
{
for (int iPathInfo = 0; iPathInfo <= pathInfoArray.Length-1; iPathInfo++)
//for (int iPathInfo = 0; iPathInfo <= pathInfoArray.Length - 1; iPathInfo++)
{
for (int iPathInfoCurrent = 0; iPathInfoCurrent <= pathInfoArrayCurrent.Length - 1; iPathInfoCurrent++)
{
if ((pathInfoArray[iPathInfo].sourceInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.id) && (pathInfoArray[iPathInfo].targetInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.id))
{
pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.adapterId.LowPart;
pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.adapterId.LowPart;
pathInfoArray[iPathInfo].targetInfo.scaling = (CCDWrapper.DisplayConfigScaling)Scaling;
break;
}
}
}
for (int iModeInfo = 0; iModeInfo <= modeInfoArray.Length - 1; iModeInfo++)
{
for (int iPathInfo = 0; iPathInfo <= pathInfoArray.Length - 1; iPathInfo++)
{
if ((modeInfoArray[iModeInfo].id == pathInfoArray[iPathInfo].targetInfo.id) && (modeInfoArray[iModeInfo].infoType == CCDWrapper.DisplayConfigModeInfoType.Target))
{
for (int iModeInfoSource = 0; iModeInfoSource <= modeInfoArray.Length - 1; iModeInfoSource++)
{
if ((modeInfoArray[iModeInfoSource].id == pathInfoArray[iPathInfo].sourceInfo.id) && (modeInfoArray[iModeInfoSource].adapterId.LowPart == modeInfoArray[iModeInfo].adapterId.LowPart) && (modeInfoArray[iModeInfoSource].infoType == CCDWrapper.DisplayConfigModeInfoType.Source))
{
modeInfoArray[iModeInfoSource].adapterId.LowPart = pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart;
modeInfoArray[iModeInfoSource].sourceMode.height = Height;
modeInfoArray[iModeInfoSource].sourceMode.width = Width;
break;
}
}
modeInfoArray[iModeInfo].adapterId.LowPart = pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart;
break;
}
}
}
uint numPathArrayElements = System.Convert.ToUInt32(pathInfoArray.Length);
uint numModeInfoArrayElements = System.Convert.ToUInt32(modeInfoArray.Length);
long Result = CCDWrapper.SetDisplayConfig(numPathArrayElements, pathInfoArray, numModeInfoArrayElements, modeInfoArray, CCDWrapper.SdcFlags.Apply | CCDWrapper.SdcFlags.UseSuppliedDisplayConfig | CCDWrapper.SdcFlags.SaveToDatabase | CCDWrapper.SdcFlags.NoOptimization | CCDWrapper.SdcFlags.AllowChanges);
if (Result == 0)
return true;
else
return false;
}
else
return false;
}
catch (Exception ex)
{
EventLog.WriteEntry("ResolutionEcran", "Erreur SetDisplaySettings : " + ex.Message, EventLogEntryType.Error);
return false;
}
}
private static Boolean GetDisplaySettings(ref CCDWrapper.DisplayConfigPathInfo[] pathInfoArray, ref CCDWrapper.DisplayConfigModeInfo[] modeInfoArray, ref CCDWrapper.MonitorAdditionalInfo[] additionalInfo, Boolean ActiveOnly, [System.Runtime.InteropServices.Optional] int ID_Monitor)
{
uint numPathArrayElements;
uint numModeInfoArrayElements;
CCDWrapper.QueryDisplayFlags queryFlags = CCDWrapper.QueryDisplayFlags.AllPaths;
if (ActiveOnly)
{
//queryFlags = CCDWrapper.QueryDisplayFlags.OnlyActivePaths;
queryFlags = CCDWrapper.QueryDisplayFlags.OnlyActivePaths;
}
var status = CCDWrapper.GetDisplayConfigBufferSizes(queryFlags, out numPathArrayElements, out numModeInfoArrayElements);
if (status == 0)
{
pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[numPathArrayElements];
modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[numModeInfoArrayElements];
additionalInfo = new CCDWrapper.MonitorAdditionalInfo[numModeInfoArrayElements];
status = CCDWrapper.QueryDisplayConfig(queryFlags, ref numPathArrayElements, pathInfoArray, ref numModeInfoArrayElements, modeInfoArray, IntPtr.Zero);
if (status == 0)
{
for (var iMode = 0; iMode < numModeInfoArrayElements; iMode++)
{
if (modeInfoArray[iMode].infoType == CCDWrapper.DisplayConfigModeInfoType.Target)
{
try
{
additionalInfo[iMode] = CCDWrapper.GetMonitorAdditionalInfo(modeInfoArray[iMode].adapterId, modeInfoArray[iMode].id);
}
catch (Exception)
{
additionalInfo[iMode].valid = false;
}
}
}
return true;
}
else
{
//Erreur : Querying display;
}
}
else
{
//Erreur : Taille Buffer;
}
return false;
}
Use the ChangeDisplaySettingsEx function to change the settings of the specified display device to the specified graphics mode. The following is an example you can refer to.
#include <windows.h>
int main()
{
for (DWORD devNum = 0; ; devNum++)
{
DISPLAY_DEVICE dev = {0};
dev.cb = sizeof(DISPLAY_DEVICE);
if (!EnumDisplayDevices(NULL, devNum, &dev, EDD_GET_DEVICE_INTERFACE_NAME))
break;
wprintf(L"Display name: %s \n", dev.DeviceName);
DEVMODE dMode = { 0 };
dMode.dmSize = sizeof(dMode);
if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dMode))
{
wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
continue;
}
wprintf(L"Display old settings: \n");
wprintf(L"dmBitsPerPel: %d \n", dMode.dmBitsPerPel);
wprintf(L"dmPelsWidth: %d \n", dMode.dmPelsWidth);
wprintf(L"dmPelsHeight: %d \n", dMode.dmPelsHeight);
wprintf(L"dmDisplayFlags: %x \n", dMode.dmDisplayFlags);
wprintf(L"dmDisplayFrequency: %d \n", dMode.dmDisplayFrequency);
dMode.dmPelsWidth = 800;
dMode.dmPelsHeight = 600;
ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, 0, NULL);
DEVMODE dModeNew = { 0 };
dModeNew.dmSize = sizeof(DEVMODE);
if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dModeNew))
{
wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
continue;
}
wprintf(L"Display new settings: \n");
wprintf(L"dmBitsPerPel: %d \n", dModeNew.dmBitsPerPel);
wprintf(L"dmPelsWidth: %d \n", dModeNew.dmPelsWidth);
wprintf(L"dmPelsHeight: %d \n", dModeNew.dmPelsHeight);
wprintf(L"dmDisplayFlags: %x \n", dModeNew.dmDisplayFlags);
wprintf(L"dmDisplayFrequency: %d \n", dModeNew.dmDisplayFrequency);
}
getchar();
}
I set dwflags to 0 to let the graphics mode for the current screen change dynamically. Refer to the dwflags parameter section of the ChangeDisplaySettingsEx documentation to decide how you would like the graphics mode to be changed.
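If you want the change to persist and to affect only one specific monitor, a minimal sketch might look like the following (the device name "\\.\DISPLAY2" is only an assumption here; take the real name from EnumDisplayDevices as in the example above):
DEVMODE dm = { 0 };
dm.dmSize = sizeof(dm);
if (EnumDisplaySettings(L"\\\\.\\DISPLAY2", ENUM_CURRENT_SETTINGS, &dm))
{
    dm.dmPelsWidth = 800;
    dm.dmPelsHeight = 600;
    dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT; // only these members are changed
    // Passing the device name limits the change to that monitor;
    // CDS_UPDATEREGISTRY also stores the mode so it survives a reboot.
    LONG res = ChangeDisplaySettingsEx(L"\\\\.\\DISPLAY2", &dm, NULL, CDS_UPDATEREGISTRY, NULL);
    if (res != DISP_CHANGE_SUCCESSFUL)
        wprintf(L"ChangeDisplaySettingsEx failed: %ld \n", res);
}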
Sorry for my late response... I was away.
I also need to change the scaling of the monitor (so that black bars are removed at certain resolutions).
[Flags]
public enum DisplayConfigScaling : uint
{
Zero = 0x0,
Identity = 1,
Centered = 2,
Stretched = 3,
Aspectratiocenteredmax = 4,
Custom = 5,
Preferred = 128,
ForceUint32 = 0xFFFFFFFF
}
Can the 'ChangeDisplaySettingsEx' function also change the scaling of a monitor?
Thank you very much for your help.

Display timer in hundredth of a second when under a minute

So in my current RTOS code for mbed I have a timer that counts down from 3 minutes, displayed in the format minutes:seconds. I need to implement a way so that when the time gets under a minute, the time is displayed in seconds and hundredths of a second, such as 59:59. How would I do that?
Here is my current code (the relevant code for displaying time is under void lcd_func (void const *args)):
#include "mbed.h"
#include "cmsis_os.h"
#include "scoreboard.h"
#define deb 180
C12832 lcd(p5, p7, p6, p8, p11);
LM75B sensor(p28,p27);
InterruptIn By1(p15); // Up on the Joystick
InterruptIn By2(p16); // Right on the Joystick
InterruptIn By3(p12); // Down on the Joystick
InterruptIn Team(p14); // Push on the Joystick
InterruptIn Play(p13); // Left to activate clock
// declaration of IDs handle for various threads
osThreadId score_ID, LCD_ID, time_ID, temp_ID;
// definition of the thread
osThreadDef(score_func, osPriorityNormal, DEFAULT_STACK_SIZE);
osThreadDef(lcd_func, osPriorityNormal, DEFAULT_STACK_SIZE);
osThreadDef(time_func, osPriorityNormal, DEFAULT_STACK_SIZE);
osThreadDef(temp_func, osPriorityNormal, DEFAULT_STACK_SIZE);
// message from ISrs
osMessageQDef(queue, 1, uint32_t);
osMessageQId(queue_ID);
// service routines for Joystick Up, Right and Down
void By1_isr() {osMessagePut(queue_ID, 1, 0);}
void By2_isr() {osMessagePut(queue_ID, 2, 0);}
void By3_isr() {osMessagePut(queue_ID, 3, 0);}
void Team_isr();
void Time_isr();
Timer t;
int minutes,seconds,zero,faults;
int main()
{
t.start();//start the timer
By1.rise(&By1_isr);
By2.rise(&By2_isr);
By3.rise(&By3_isr);
Team.rise(&Team_isr);
Play.rise(&Time_isr);
queue_ID = osMessageCreate(osMessageQ(queue), NULL);
score_ID = osThreadCreate(osThread(score_func), NULL);
LCD_ID = osThreadCreate(osThread(lcd_func), NULL);
time_ID = osThreadCreate(osThread(time_func), NULL);
temp_ID = osThreadCreate(osThread(temp_func), NULL);
}
void Team_isr()
{
if(t.read_ms() > deb) {
score.h0v1 = !score.h0v1;
osSignalSet(LCD_ID, 0x2);
t.reset();
}
}
void Time_isr()
{
if (score.running == 0)
{
score.running = 1;
}
else
{
faults++;
score.running = 0;
}
osSignalSet(time_ID, 0x3);
}
void Timer1_Update (void const *args)
{
score.time_count -= 1;
osSignalSet(LCD_ID, 0x2);
}
void Destroy(float val)
{
osThreadTerminate(time_ID);
osThreadTerminate(score_ID);
osThreadTerminate(LCD_ID);
lcd.cls();
lcd.locate(0,3);
lcd.printf("Gamed Terminated!\n");
lcd.printf("(temperature reached %2.1f)\n", val);
osThreadTerminate(temp_ID);
}
void score_func (void const *args)
{
score.h0v1 = 0; // home by default
score.time_count = 180;
score.home_count = 0;
score.visitors_count = 0;
uint32_t val;
while (1) {
osEvent score_sig = osMessageGet(queue_ID, osWaitForever);
if (score_sig.status == osEventMessage)
val = score_sig.value.v;
if (score.h0v1 == 0)
score.home_count += val;
else
score.visitors_count += val;
osSignalSet(LCD_ID, 0x2);
}
}
void lcd_func (void const *args)
{
while(1) {
minutes=score.time_count/60;
seconds=score.time_count%60;
if (seconds<10)
{
lcd.cls();
lcd.locate(0,3);
lcd.printf("Time remaining: %2d:%d%d\n",minutes,zero,seconds);
}
else{
lcd.cls();
lcd.locate(0,3);
lcd.printf("Time remaining: %2d:%2d\n",minutes,seconds);}
if (score.h0v1 == 0)
lcd.printf("*Home: %2d Visitors: %2d\n",
score.home_count, score.visitors_count);
else
lcd.printf(" Home: %2d *Visitors: %2d\n",
score.home_count, score.visitors_count);
osSignalWait(0x2, osWaitForever);
}
}
void time_func (void const *args)
{
osTimerDef (Timer1, Timer1_Update);
osTimerId Timer1_ID;
// Activate time
Timer1_ID = osTimerCreate (osTimer(Timer1), osTimerPeriodic, NULL);
while(1) {
osSignalWait(0x3, osWaitForever);
if (score.running == 0)
osTimerStop (Timer1_ID);
else
osTimerStart (Timer1_ID, 1000UL);
}
}
void temp_func (void const *args)
{
float temp;
if (sensor.open()) {
sensor.alertTemp(40.0);
while (1) {
temp = (float)sensor.temp();
if (temp > 30.0)
Destroy(temp);
osDelay(5000);
}
}
else
osThreadTerminate(temp_ID);
}
Change your timer to decrement every hundredth of a second and start the count at 18000. Then:
minutes = score.time_count / 6000;
seconds = (score.time_count % 6000) / 100 ;
hundredths = score.time_count % 100 ;
...
if( minutes == 0 )
{
lcd.printf( "Time remaining: 00:%2d:%2d\n", seconds, hundredths ) ;
}
else
{
lcd.printf( "Time remaining: %2d:%2d:00\n", minutes, seconds ) ;
}
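For this to work, the countdown also has to be held in hundredths and the periodic timer has to tick every 10 ms. A minimal sketch of the supporting changes, assuming the rest of the posted code stays as it is:
score.time_count = 18000;        // in score_func(): 3 minutes expressed in hundredths of a second
osTimerStart (Timer1_ID, 10UL);  // in time_func(): fire Timer1_Update every 10 ms instead of every second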

rapid TS fragment ffmpeg decoding - memory leak

Environment:
Ubuntu 16.04 (x64)
C++
ffmpeg
Use-case
Multiple MPEG-TS fragments are rapidly decoded ( numerous every sec )
The format of the TS fragments is dynamic and can't be known ahead of time
The first A/V frames of each fragment are needed to be extracted
Problem statement
The code below successfully decodes A/V, BUT it has a huge memory leak (MBytes/sec)
According to the docs, it seems all memory is freed as it should be (does it... ?)
Why do I get this huge memory leak? What am I missing in the following code snippet?
struct MEDIA_TYPE {
ffmpeg::AVMediaType eType;
union {
struct {
ffmpeg::AVPixelFormat colorspace;
int width, height;
float fFPS;
} video;
struct : WAVEFORMATEX {
short sSampleFormat;
} audio;
} format;
};
struct FRAME {
enum { MAX_PALNES = 3 + 1 };
int iStrmId;
int64_t pts; // Duration in 90Khz clock resolution
uint8_t** ppData; // Null terminated
int32_t* pStride;// Zero terminated
};
HRESULT ProcessTS(IN Operation op, IN uint8_t* pTS, IN uint32_t uiBytes, bool(*cb)(IN const MEDIA_TYPE& mt, IN FRAME& frame, IN PVOID pCtx), IN PVOID pCbCtx)
{
uiBytes -= uiBytes % 188;// align to 188 packet size
struct CONTEXT {
uint8_t* pTS;
uint32_t uiBytes;
int32_t iPos;
} ctx = { pTS, uiBytes, 0 };
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this);
ffmpeg::AVFormatContext* pFmtCtx = 0;
if (0 == (pFmtCtx = ffmpeg::avformat_alloc_context()))
return E_OUTOFMEMORY;
ffmpeg::AVIOContext* pIoCtx = ffmpeg::avio_alloc_context(pTS, uiBytes, 0, &ctx
, [](void *opaque, uint8_t *buf, int buf_size)->int {
auto pCtx = (CONTEXT*)opaque;
int size = pCtx->uiBytes;
if (pCtx->uiBytes - pCtx->iPos < buf_size)
size = pCtx->uiBytes - pCtx->iPos;
if (size > 0) {
memcpy(buf, pCtx->pTS + pCtx->iPos, size);
pCtx->iPos += size;
}
return size;
}
, 0
, [](void* opaque, int64_t offset, int whence)->int64_t {
auto pCtx = (CONTEXT*)opaque;
switch (whence)
{
case SEEK_SET:
pCtx->iPos = offset;
break;
case SEEK_CUR:
pCtx->iPos += offset;
break;
case SEEK_END:
pCtx->iPos = pCtx->uiBytes - offset;
break;
case AVSEEK_SIZE:
return pCtx->uiBytes;
}
return pCtx->iPos;
});
pFmtCtx->pb = pIoCtx;
int iRet = ffmpeg::avformat_open_input(&pFmtCtx, "fakevideo.ts", m_pInputFmt, 0);
if (ERROR_SUCCESS != iRet) {
assert(false);
pFmtCtx = 0;// a user-supplied AVFormatContext will be freed on failure.
return E_FAIL;
}
struct DecodeContext {
ffmpeg::AVStream* pStream;
ffmpeg::AVCodec* pDecoder;
int iFramesProcessed;
};
HRESULT hr = S_OK;
int iStreamsProcessed = 0;
bool bVideoFound = false;
int64_t ptsLast = 0;
int64_t dtsLast = 0;
auto pContext = (DecodeContext*)alloca(sizeof(DecodeContext) * pFmtCtx->nb_streams);
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++) {
assert(pFmtCtx->streams[i]->index == i);
pContext[i].pStream = pFmtCtx->streams[i];
pContext[i].pDecoder = ffmpeg::avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
pContext[i].iFramesProcessed= 0;
if (0 == pContext[i].pDecoder)
continue;
if ((iRet = ffmpeg::avcodec_open2(pFmtCtx->streams[i]->codec, pContext[i].pDecoder, NULL)) < 0) {
_ASSERT(FALSE);
hr = E_FAIL;
goto ErrExit;
}
}
while (S_OK == hr) {
ffmpeg::AVFrame* pFrame = 0;
ffmpeg::AVPacket pkt;
ffmpeg::av_init_packet(&pkt);
if (ERROR_SUCCESS != (iRet = ffmpeg::av_read_frame(pFmtCtx, &pkt))) {
hr = E_FAIL;
break;
}
if ((0 == dtsLast) && (0 != pkt.dts))
dtsLast = pkt.dts;
if ((0 == ptsLast) && (0 != pkt.pts))
ptsLast = pkt.pts;
DecodeContext& ctx = pContext[pkt.stream_index];
if (Operation::DECODE_FIRST_FRAME_OF_EACH_STREAM == op) {
if (iStreamsProcessed == pFmtCtx->nb_streams) {
hr = S_FALSE;
goto Next;
}
if (ctx.iFramesProcessed > 0)
goto Next;
iStreamsProcessed++;
}
if (0 == ctx.pDecoder)
goto Next;
if (0 == (pFrame = ffmpeg::av_frame_alloc())) {
hr = E_OUTOFMEMORY;
goto Next;
}
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x, decode, S:%d, T:%d\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this, pkt.stream_index, ctx.pStream->codec->codec_type);
int bGotFrame = false;
int iBytesUsed = 0;
MEDIA_TYPE mt;
memset(&mt, 0, sizeof(mt));
mt.eType = ctx.pStream->codec->codec_type;
switch (mt.eType) {
case ffmpeg::AVMediaType::AVMEDIA_TYPE_AUDIO:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
// FFMPEG AAC decoder oddity: the first call to 'avcodec_decode_audio4' results in mute audio, whereas the second yields the expected audio
bGotFrame = false;
if ((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (false == bGotFrame)
goto Next;
iBytesUsed = ctx.pStream->codec->frame_size;
mt.format.audio.nChannels = ctx.pStream->codec->channels;
mt.format.audio.nSamplesPerSec = ctx.pStream->codec->sample_rate;
mt.format.audio.wBitsPerSample = ffmpeg::av_get_bytes_per_sample(ctx.pStream->codec->sample_fmt) * 8;
mt.format.audio.nBlockAlign = mt.format.audio.nChannels * mt.format.audio.wBitsPerSample / 8;
mt.format.audio.sSampleFormat = (short)pFrame->format;
break;
case ffmpeg::AVMediaType::AVMEDIA_TYPE_VIDEO:
if ((iRet = ffmpeg::avcodec_decode_video2(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
break;
}
if (false == bGotFrame)
goto Next;
assert(ffmpeg::AVPixelFormat::AV_PIX_FMT_YUV420P == ctx.pStream->codec->pix_fmt);// That is the only color space currently supported
iBytesUsed = (ctx.pStream->codec->width * ctx.pStream->codec->height * 3) / 2;
mt.format.video.width = ctx.pStream->codec->width;
mt.format.video.height = ctx.pStream->codec->height;
mt.format.video.colorspace = ctx.pStream->codec->pix_fmt;
mt.format.video.fFPS = (float)ctx.pStream->codec->framerate.num / ctx.pStream->codec->framerate.den;
bVideoFound = true;
break;
default:
goto Next;
}
ctx.iFramesProcessed++;
{
FRAME f = { ctx.pStream->index, ((0 == ptsLast) ? dtsLast : ptsLast), (uint8_t**)pFrame->data, (int32_t*)pFrame->linesize };
if ((iRet > 0) && (false == cb(mt, f, pCbCtx)))
hr = S_FALSE;// Breaks the loop
}
Next:
ffmpeg::av_free_packet(&pkt);
if (0 != pFrame) {
//ffmpeg::av_frame_unref(pFrame);
ffmpeg::av_frame_free(&pFrame);
pFrame = 0;
}
}
ErrExit:
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++)
ffmpeg::avcodec_close(pFmtCtx->streams[i]->codec);
pIoCtx->buffer = 0;// We allocated the buffer ourselves, no need for ffmpeg to free it for us
pFmtCtx->pb = 0;
ffmpeg::av_free(pIoCtx);
ffmpeg::avformat_close_input(&pFmtCtx);
ffmpeg::avformat_free_context(pFmtCtx);
return hr;
}
You need to unref the packets before reusing them. And there's no need to allocate and deallocate them all the time.
Here's how I do it which might help you:
// Initialise a packet queue
std::list<AVPacket *> packets;
...
for (int c = 0; c < MAX_PACKETS; c++) {
ff->packets.push_back(av_packet_alloc());
}
while (!quit) {
... get packet from queue
int err = av_read_frame(ff->context, packet);
... process packet (audio, video, etc)
av_packet_unref(packet); // add back to queue for reuse
}
// Release packets
while (ff->packets.size()) { // free packets
AVPacket *packet = ff->packets.front();
av_packet_free(&packet);
ff->packets.pop_front();
}
In your code you've freed a packet which wasn't allocated in the first place.
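Put differently, with the allocate-once / unref-per-iteration pattern, the read loop in the question would look roughly like this (a sketch using the same av_packet_* calls as the snippet above, not the original code):
AVPacket *pkt = av_packet_alloc();          // allocate the packet structure once, outside the loop
while (av_read_frame(pFmtCtx, pkt) >= 0) {
    // ... decode / inspect pkt here ...
    av_packet_unref(pkt);                   // release the payload after every av_read_frame()
}
av_packet_free(&pkt);                       // free the packet structure itself once, at the end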

Decoding by libjpeg -> Encoding by x264, strange artefacts on frames

I have a collection of JPEGs which must be decoded by libjpeg and then encoded by x264 (after that, the encoded packets are streamed via RTMP).
The code I use for decoding:
struct my_error_mgr
{
struct jpeg_error_mgr pub;
jmp_buf setjmp_buffer;
};
typedef my_error_mgr *my_error_ptr;
METHODDEF(void) my_error_exit (j_common_ptr cinfo)
{
my_error_ptr myerr = (my_error_ptr) cinfo->err;
(*cinfo->err->output_message) (cinfo);
longjmp(myerr->setjmp_buffer, 1);
}
void init_source(j_decompress_ptr ptr)
{
Q_UNUSED(ptr)
}
boolean fill_input_buffer(j_decompress_ptr ptr)
{
Q_UNUSED(ptr)
return TRUE;
}
void term_source(j_decompress_ptr ptr)
{
Q_UNUSED(ptr)
}
void skip_input_data(j_decompress_ptr ptr, long num_bytes)
{
if(num_bytes>0)
{
ptr->src->next_input_byte+=(size_t)num_bytes;
ptr->src->bytes_in_buffer-=(size_t)num_bytes;
}
}
EtherDecoder::EtherDecoder(QObject *parent):
QObject(parent)
{
}
void EtherDecoder::dataBlockReady(QByteArray data)
{
jpeg_decompress_struct decompressInfo;
jpeg_create_decompress(&decompressInfo);
my_error_mgr err;
decompressInfo.do_fancy_upsampling = FALSE;
decompressInfo.src = (jpeg_source_mgr *) (*decompressInfo.mem->alloc_small) ((j_common_ptr) &decompressInfo, JPOOL_PERMANENT, sizeof(jpeg_source_mgr));
decompressInfo.err = jpeg_std_error(&err.pub);
err.pub.error_exit = my_error_exit;
if (setjmp(err.setjmp_buffer))
{
jpeg_destroy_decompress(&decompressInfo);
return;
}
decompressInfo.src->init_source = init_source;
decompressInfo.src->resync_to_restart = jpeg_resync_to_restart;
decompressInfo.src->fill_input_buffer = fill_input_buffer;
decompressInfo.src->skip_input_data = skip_input_data;
decompressInfo.src->term_source = term_source;
decompressInfo.src->next_input_byte = reinterpret_cast<const JOCTET*>(data.data());
decompressInfo.src->bytes_in_buffer = data.size();
jpeg_read_header(&decompressInfo, TRUE);
jpeg_start_decompress(&decompressInfo);
int size = 0;
int n_samples = 0;
char *samples = new char[5242880];
char *reserv = samples;
while (decompressInfo.output_scanline < decompressInfo.output_height)
{
n_samples = jpeg_read_scanlines(&decompressInfo, (JSAMPARRAY) &samples, 1);
samples += n_samples * decompressInfo.image_width * decompressInfo.num_components;
size += n_samples * decompressInfo.image_width * decompressInfo.num_components;
}
jpeg_finish_decompress(&decompressInfo);
QByteArray output(reserv, size);
emit frameReady(output, decompressInfo.output_width, decompressInfo.output_height);
jpeg_destroy_decompress(&decompressInfo);
delete[] reserv;
}
When I emit the frameReady signal, I send the data to the encoder. The method where I initialize the encoder looks like this:
bool EtherEncoder::initEncoder(unsigned int width, unsigned int height)
{
x264_param_t param;
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_width=width;
param.i_height=height;
param.i_frame_total=0;
param.i_csp=X264_CSP_I420;
param.i_timebase_num=1;
param.i_timebase_den=96000;
param.b_annexb=true;
param.b_repeat_headers=false;
x264_param_apply_fastfirstpass(&param);
x264_param_apply_profile(&param, "baseline");
_context=x264_encoder_open(&param);
if(!_context)
return false;
int nal_count;
x264_nal_t *nals;
if(x264_encoder_headers(_context, &nals, &nal_count)<0)
{
x264_encoder_close(_context);
_context=0;
return false;
}
_extradata=QByteArray();
_width=width;
_height=height;
if(nal_count>0)
{
_extradata=QByteArray(
(const char *)nals[0].p_payload,
nals[nal_count-1].p_payload+nals[nal_count-1].i_payload-nals[0].p_payload);
}
return true;
}
And encoding method:
void EtherEncoder::onFrameReady(QByteArray data, int width, int height)
{
while(data.size()>0)
{
if(!_context && initEncoder(width, height))
{
_timestampDelta=realTimestamp();
}
if(_context)
{
x264_picture_t pic;
x264_picture_init(&pic);
pic.i_type=X264_TYPE_AUTO;
pic.i_pts=_timestampDelta*96000;
pic.img.i_csp=X264_CSP_I420;
pic.img.i_plane=3;
int planeSize = width*height;
uint8_t *p = (uint8_t*)data.data();
pic.img.plane[0]=p;
p+=planeSize;
pic.img.plane[1]=p;
p+=planeSize/4;
pic.img.plane[2]=p;
pic.img.i_stride[0]=width;
pic.img.i_stride[1]=width/2;
pic.img.i_stride[2]=width/2;
if(_forceKeyFrame)
{
pic.i_type=X264_TYPE_I;
_forceKeyFrame=false;
}
int nal_count;
x264_nal_t *nals;
int rc=x264_encoder_encode(_context, &nals, &nal_count, &pic, &pic);
if(rc>0)
{
_mutex.lock();
_packets.push_back(
Packet(
QByteArray(
(const char *)nals[0].p_payload, nals[nal_count- 1].p_payload+nals[nal_count-1].i_payload-nals[0].p_payload),
_timestampDelta/96.0,
_timestampDelta/96.0,
pic.b_keyframe));
_timestampDelta+=40;
data.clear();
_mutex.unlock();
emit onPacketReady();
}
}
}
}
Decoding and encoding proceed without errors and at the end I get a valid video stream, but it seems that in one of these steps I pass invalid data to the decoder/encoder. I get only a quarter of the image (top-left, as I understand it) and it has invalid colors and some color stripes. Maybe I set invalid strides and planes when encoding the frame, or maybe my setup of the libjpeg decoder is incorrect. Please ask questions about my code and I'll try to explain. It's melting my brain. Thank you.
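One likely mismatch (an assumption, not something visible in the posted code): jpeg_read_scanlines with the default out_color_space produces packed RGB24, while X264_CSP_I420 expects planar YUV 4:2:0, which would explain both the wrong colours and only part of the picture being used. A minimal conversion sketch with libswscale, assuming the decoded buffer really is RGB24:
extern "C" {
#include <libswscale/swscale.h>
}
#include <QByteArray>
// Hypothetical helper: convert one packed RGB24 frame into a contiguous I420
// buffer matching the plane offsets and strides the encoder above already assumes.
static QByteArray rgb24ToI420(const uint8_t *rgb, int width, int height)
{
    QByteArray yuv(width * height * 3 / 2, 0);
    uint8_t *base = reinterpret_cast<uint8_t *>(yuv.data());
    uint8_t *dst[3]       = { base, base + width * height, base + width * height * 5 / 4 };
    int dstStride[3]      = { width, width / 2, width / 2 };
    const uint8_t *src[1] = { rgb };
    int srcStride[1]      = { 3 * width };
    SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_RGB24,
                                     width, height, AV_PIX_FMT_YUV420P,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    sws_scale(sws, src, srcStride, 0, height, dst, dstStride);
    sws_freeContext(sws);
    return yuv; // feed this buffer to onFrameReady() instead of the raw RGB data
}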

Why do I get a crash only sometimes when closing input file with ffmpeg

I have a problem where, only sometimes, calling avformat_close_input(&pFormatCtx) results in a "malloc check failed" error and my application crashes.
I really need to use ffmpeg because I need to grab a thumbnail of a video to show in a list, and I cannot find an alternative library.
Can anybody see something in my code where I am using this library incorrectly that might cause this "malloc check failed" problem?
bool MuteCamera::PullFrame( )
{
pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
bool bRet = false;
int videoStream = -1;
AVFrame *pFrame=NULL;
AVFrame *pFrameRGB=NULL;
AVPacket packet;
int frameFinished=0;
//AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName))) {
printf("can't find input format %s\n", formatName);
return -1;
}
AVFormatContext *pFormatCtx = NULL;
pFormatCtx=avformat_alloc_context();
if(pFormatCtx == NULL)
{
printf("\n NULL CONTEXT \n ");
return -1;
}
if(avformat_open_input (&pFormatCtx, capturedUrl.data(), pFormat, NULL) == 0 )
{
for(int i=0; i<(int)pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream >= 0 )
{
AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec != NULL)
{
if( avcodec_open2(pCodecCtx, pCodec, NULL) >= 0 )
{
pFrame=avcodec_alloc_frame();
if(pFrame != NULL)
{
frameFinished = 0;
while(av_read_frame(pFormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if(frameFinished)
{
printf("\n FRAMEFINISHED \n ");
QString *uu = new QString(capturedUrl.data());//
uu->replace(".mp4", "thumbnail.jpg");
WriteJPEG(pCodecCtx, pFrame, uu->toLatin1().data(), PIX_FMT_YUVJ420P);
if(viewingVideos && viewingFromDifferent)
{
QVariantMap map = QVariantMap();
map["title"] = actualFilename;
map["path"] = actualFilename.replace(".mp4", "thumbnail.jpg");// QString("asset:///white_photo.png");
m_listDataModel << map;
}
delete uu;
av_free_packet(&packet);
break;
}
else
{
printf("\n FRAMENOTFINISHED \n ");
}
}
av_free_packet(&packet);
}
av_free(pFrameRGB);
av_free(pFrame);
avcodec_close(pCodecCtx);
//av_free(pCodecCtx);
cout << "\n before free formatctx \n";
cout.flush();
if(pFormatCtx)
avformat_close_input(&pFormatCtx);
cout << "\n after free formatctx \n";
cout.flush();
}
else
bRet = false;
}
else
bRet = false;
}
else
bRet = false;
}
else
bRet = false;
}
return bRet;
}
bool WriteJPEG (AVCodecContext *pCodecCtx, AVFrame *pFrame, char cFileName[], PixelFormat pix)
{
int complete = 0;
bool bRet = false;
int out_buf_size;
uint8_t *out_buf;
AVCodecContext *pMJPEGCtx = avcodec_alloc_context3(pMJPEGCodec);
if( pMJPEGCtx )
{
pMJPEGCtx->bit_rate = pCodecCtx->bit_rate;
pMJPEGCtx->width = pCodecCtx->width;
pMJPEGCtx->height = pCodecCtx->height;
pMJPEGCtx->pix_fmt = pix;
pMJPEGCtx->codec_id = CODEC_ID_MJPEG;
pMJPEGCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pMJPEGCtx->time_base.num = pCodecCtx->time_base.num;
pMJPEGCtx->time_base.den = pCodecCtx->time_base.den;
pMJPEGCtx->time_base= (AVRational){1,29.7};
if( pMJPEGCodec && (avcodec_open2( pMJPEGCtx, pMJPEGCodec, NULL) >= 0) )
{
AVFrame *oframe;
oframe = avcodec_alloc_frame();
if(oframe == NULL)
{
printf("\n (oframe == NULL");
fflush(stdout);
}
/* calculate the bytes needed for the output image and create buffer for the output image */
out_buf_size = avpicture_get_size(pMJPEGCtx->pix_fmt,
pMJPEGCtx->width,
pMJPEGCtx->height);
out_buf = (uint8_t *)av_malloc(out_buf_size * sizeof(uint8_t));
if (out_buf == NULL) {
fprintf(stderr, "cannot allocate output data buffer!\n");
//ret = -ENOMEM;
}
avpicture_alloc((AVPicture *)oframe, pMJPEGCtx->pix_fmt, pMJPEGCtx->width, pMJPEGCtx->height);
struct SwsContext *sws;
sws = sws_getContext(pMJPEGCtx->width, pMJPEGCtx->height, pCodecCtx->pix_fmt,
pMJPEGCtx->width, pMJPEGCtx->height, pMJPEGCtx->pix_fmt, SWS_BILINEAR,
NULL, NULL, NULL);
sws_scale(sws, (const uint8_t **)pFrame->data, pFrame->linesize,
0, pMJPEGCtx->height, &oframe->data[0], &oframe->linesize[0]);
sws_freeContext(sws);
AVPacket pp2;
av_init_packet(&pp2);
pp2.data = NULL;
pp2.size = 0;
avcodec_encode_video2(pMJPEGCtx, &pp2, oframe, &complete);
if(complete)
{
printf("\n packet recieved");
fflush(stdout);
}
else
{
printf("\n packet NOT recieved");
fflush(stdout);
}
if( SaveFrameJpeg(pp2.size, pp2.data, cFileName ) )
bRet = true;
av_free(oframe);
avcodec_close(pMJPEGCtx);
av_free_packet(&pp2);
av_free(out_buf);
av_free(pMJPEGCtx);
}
else
{
printf("\n problem!!");
fflush(stdout);
}
return bRet;
}
}
bool SaveFrameJpeg(int nszBuffer, uint8_t *buffer, char cOutFileName[])
{
bool bRet = false;
FILE *pFile;
if( nszBuffer > 0 )
{
if(0 == 0 )
{
printf("\n start SaveFrameJpeg=%d",nszBuffer );
fflush(stdout);
pFile= fopen(cOutFileName, "wb");
fwrite(buffer, sizeof(uint8_t), nszBuffer, pFile);
bRet = true;
fclose(pFile);
printf("\n end SaveFrameJpeg=%d",nszBuffer );
fflush(stdout);
}
}
return bRet;
}
bool newPullFrame(const std::string& capturedUrl)
{
AVCodec* pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
int videoStream = -1;
AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName)))
{
std::cout << "can't find input format " << formatName << "\n";
return false;
}
AVFormatContextHandle FormatCtx(avformat_alloc_context());
if(!FormatCtx.is_valid())
{
std::cout << "\n NULL CONTEXT \n ";
return false;
}
if(avformat_open_input (&FormatCtx, capturedUrl.c_str(), pFormat, NULL))
return false;
for(int i=0; i<(int)FormatCtx->nb_streams; i++)
{
if(FormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream < 0 )
return false;
CodecContextHandle CodecCtx(FormatCtx->streams[videoStream]->codec, avcodec_close);
AVCodec *pCodec = avcodec_find_decoder(CodecCtx->codec_id);
if(pCodec == NULL)
return false;
if( avcodec_open2(CodecCtx, pCodec, &optionsDict) < 0 )
return false;
FrameHandle Frame(avcodec_alloc_frame(), av_free);
if(!Frame.is_valid())
return false;
int frameFinished=0;
AVPacket packet;
while(av_read_frame(FormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
if(frameFinished)
{
std::string uu (capturedUrl);
size_t pos = capturedUrl.rfind(".mp4");
uu.replace(pos, 4, "thumbnail.jpg");
// save the frame to file
int Bytes = avpicture_get_size(PIX_FMT_YUVJ420P, CodecCtx->width, CodecCtx->height);
BufferHandle buffer((uint8_t*)av_malloc(Bytes*sizeof(uint8_t)), av_free);
CodecContextHandle OutContext(avcodec_alloc_context3(NULL), free_context);
OutContext->bit_rate = CodecCtx->bit_rate;
OutContext->width = CodecCtx->width;
OutContext->height = CodecCtx->height;
OutContext->pix_fmt = PIX_FMT_YUVJ420P;
OutContext->codec_id = CODEC_ID_MJPEG;
OutContext->codec_type = AVMEDIA_TYPE_VIDEO;
OutContext->time_base.num = CodecCtx->time_base.num;
OutContext->time_base.den = CodecCtx->time_base.den;
OutContext->time_base= (AVRational){1,29.7};
AVCodec *OutCodec = avcodec_find_encoder(OutContext->codec_id);
avcodec_open2(OutContext, OutCodec, NULL);
OutContext->mb_lmin = OutContext->lmin = OutContext->qmin * 118;
OutContext->mb_lmax = OutContext->lmax = OutContext->qmax * 118;
OutContext->flags = 2;
OutContext->global_quality = OutContext->qmin * 118;
Frame->pts = 1;
Frame->quality = OutContext->global_quality;
int ActualSize = avcodec_encode_video(OutContext, buffer, Bytes, Frame);
std::ofstream file(uu.data(), std::ios_base::binary | std::ios_base::out);
file.write((const char*)(uint8_t*)buffer, ActualSize);
file.close();
av_free_packet(&packet);
av_free(Frame);
break;
}
else
{
std::cout << " new pullframe frameNOTfinished\n";
cout.flush();
}
//if(CodecCtx->refcounted_frames == 1)
av_free(Frame);
}
av_free_packet(&packet);
}
return true;
}
It looks to me like you need to move your call to av_free_packet(&packet) inside your while loop. So you currently have:
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// A bunch of operations here
}
// this is not the right place for this
av_free_packet(&packet);
Instead you should have
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// A bunch of operations here
// this needs to be called for every call to av_read_frame()
// so it must be inside the while loop
av_free_packet(&packet);
}
See here for further details.
Considering that you are writing this in C++, you could create some simple RAII wrappers around these resources to make your resource management much easier.
EDIT
Based on your feedback, it seems the change I recommended didn't do it, so I revisited the code. I implemented my own save-to-JPEG since I could not see yours - though for test purposes it simply keeps overwriting the same file.
In order to simplify the code and get a handle on the resource management I implemented some "smart pointers" for the ffmpeg resources. These automatically clean up at scope exit. I cannot see a resource leak when running through this code and it correctly generates each frame as a jpeg file.
See if you get any value out of this:
extern "C" {
#include <libavdevice\avdevice.h>
#include <libavformat\avformat.h>
#include <libavfilter\avfilter.h>
#include <libavcodec\avcodec.h>
#include <libswscale\swscale.h>
}
#include <iostream>
#include <fstream>
#include <ios>
#include <type_traits>
template<typename T, typename D>
class AVHandle
{
T *val;
typedef D* deleter_t;
deleter_t deleter;
// not default constructible
AVHandle();
// non copiable
AVHandle(const AVHandle&);
AVHandle& operator=(const AVHandle&);
public:
AVHandle(T *in, deleter_t del) : val(in), deleter(del)
{}
operator T *()
{
return val;
}
T* operator->()
{
return val;
}
bool is_valid()
{
return val != 0;
}
~AVHandle()
{
deleter(val);
}
};
typedef AVHandle<AVFrame, void (void*)> FrameHandle;
typedef AVHandle<AVCodecContext, int (AVCodecContext*)> CodecContextHandle;
typedef AVHandle<uint8_t, void(void*)> BufferHandle;
class AVFormatContextHandle
{
AVFormatContext *val;
// not default constructible
AVFormatContextHandle();
// non copiable
AVFormatContextHandle(const AVFormatContextHandle&);
AVFormatContextHandle& operator=(const AVFormatContextHandle&);
public:
AVFormatContextHandle(AVFormatContext *ctx) : val(ctx)
{}
operator AVFormatContext *()
{
return val;
}
AVFormatContext* operator ->()
{
return val;
}
AVFormatContext** operator&()
{
return &val;
}
bool is_valid()
{
return val != 0;
}
~AVFormatContextHandle()
{
if(val)
avformat_close_input(&val);
}
};
int free_context(AVCodecContext* c)
{
int ret = avcodec_close(c);
av_free(c);
return ret;
}
bool PullFrame(const std::string& capturedUrl)
{
AVCodec* pMJPEGCodec = avcodec_find_encoder(CODEC_ID_MJPEG );
int videoStream = -1;
AVDictionary *optionsDict = NULL;
AVInputFormat *pFormat = NULL;
const char formatName[] = "mp4";
if (!(pFormat = av_find_input_format(formatName)))
{
std::cout << "can't find input format " << formatName << "\n";
return false;
}
AVFormatContextHandle FormatCtx(avformat_alloc_context());
if(!FormatCtx.is_valid())
{
std::cout << "\n NULL CONTEXT \n ";
return false;
}
if(avformat_open_input (&FormatCtx, capturedUrl.c_str(), pFormat, NULL))
return false;
for(int i=0; i<(int)FormatCtx->nb_streams; i++)
{
if(FormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream < 0 )
return false;
CodecContextHandle CodecCtx(FormatCtx->streams[videoStream]->codec, avcodec_close);
AVCodec *pCodec = avcodec_find_decoder(CodecCtx->codec_id);
if(pCodec == NULL)
return false;
if( avcodec_open2(CodecCtx, pCodec, &optionsDict) < 0 )
return false;
FrameHandle Frame(avcodec_alloc_frame(), av_free);
if(!Frame.is_valid())
return false;
int frameFinished=0;
AVPacket packet;
while(av_read_frame(FormatCtx, &packet)>=0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
if(frameFinished)
{
std::string uu (capturedUrl);
size_t pos = capturedUrl.rfind(".mp4");
uu.replace(pos, 4, "thumbnail.jpg");
// save the frame to file
int Bytes = avpicture_get_size(PIX_FMT_YUVJ420P, CodecCtx->width, CodecCtx->height);
BufferHandle buffer((uint8_t*)av_malloc(Bytes*sizeof(uint8_t)), av_free);
CodecContextHandle OutContext(avcodec_alloc_context3(NULL), free_context);
OutContext->bit_rate = CodecCtx->bit_rate;
OutContext->width = CodecCtx->width;
OutContext->height = CodecCtx->height;
OutContext->pix_fmt = PIX_FMT_YUVJ420P;
OutContext->codec_id = CODEC_ID_MJPEG;
OutContext->codec_type = AVMEDIA_TYPE_VIDEO;
OutContext->time_base.num = CodecCtx->time_base.num;
OutContext->time_base.den = CodecCtx->time_base.den;
AVCodec *OutCodec = avcodec_find_encoder(OutContext->codec_id);
avcodec_open2(OutContext, OutCodec, NULL);
OutContext->mb_lmin = OutContext->lmin = OutContext->qmin * 118;
OutContext->mb_lmax = OutContext->lmax = OutContext->qmax * 118;
OutContext->flags = 2;
OutContext->global_quality = OutContext->qmin * 118;
Frame->pts = 1;
Frame->quality = OutContext->global_quality;
int ActualSize = avcodec_encode_video(OutContext, buffer, Bytes, Frame);
std::ofstream file("c:\\temp\\output.jpg", std::ios_base::binary | std::ios_base::out);
file.write((const char*)(uint8_t*)buffer, ActualSize);
}
if(CodecCtx->refcounted_frames == 1)
av_frame_unref(Frame);
}
av_free_packet(&packet);
}
return true;
}
int main()
{
av_register_all();
while(true)
PullFrame("c:\\temp\\sample_mpeg4.mp4");
return 0;
}