ffplay cannot play more than one song - c++

i have taken ffplay.c file from http://ffmpeg.org/doxygen/trunk/ffplay_8c-source.html and re edited it to a cpp file to embed in my win32 gui application . i have made the following changes to it.
made the int main function into a local function as follows, i can pass the HWND to embedd the player
void Ffplay::play_song(string file, HWND parent, bool* successfull)
{
int flags;
VideoState* is;
input_filename = file;
/* register all codecs, demux and protocols */
#if CONFIG_AVDEVICE
avdevice_register_all();
#endif
//avformat_network_init();
//check whether the filename is valid
if (input_filename.empty())
{
logger.log(logger.LEVEL_ERROR, "filename %s is not valid\n", file);
return;
}
if (display_disable)
{
video_disable = 1;
}
flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
if (audio_disable)
flags &= ~SDL_INIT_AUDIO;
else
{
/* Try to work around an occasional ALSA buffer underflow issue when the
* period size is NPOT due to ALSA resampling by forcing the buffer size. */
if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE", "1", 1);
}
if (display_disable)
flags &= ~SDL_INIT_VIDEO;
SDL_SetMainReady();
if (SDL_Init(flags))
{
logger.log(logger.LEVEL_ERROR, "Could not initialize SDL - %s\n", SDL_GetError());
logger.log(logger.LEVEL_ERROR, "(Did you set the DISPLAY variable?)\n");
return;
}
//Initialize optional fields of a packet with default values.
//Note, this does not touch the data and size members, which have to be initialized separately.
av_init_packet(&flush_pkt);
flush_pkt.data = (uint8_t*)&flush_pkt;
if (!display_disable)
{
int flags = SDL_WINDOW_HIDDEN;
if (alwaysontop)
#if SDL_VERSION_ATLEAST(2,0,5)
flags |= SDL_WINDOW_ALWAYS_ON_TOP;
#else
logger.log(logger.LEVEL_INFO, "SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
#endif
if (borderless)
flags |= SDL_WINDOW_BORDERLESS;
else
flags |= SDL_WINDOW_RESIZABLE;
SDL_InitSubSystem(flags);
ShowWindow(parent, true);
//window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
window = SDL_CreateWindowFrom(parent);
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
if (window) {
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
if (!renderer)
{
logger.log(logger.LEVEL_ERROR, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
renderer = SDL_CreateRenderer(window, -1, 0);
}
if (renderer)
{
if (!SDL_GetRendererInfo(renderer, &renderer_info))
{
logger.log(logger.LEVEL_INFO, "Initialized %s renderer.\n", renderer_info.name);
}
}
}
if (!window || !renderer || !renderer_info.num_texture_formats)
{
logger.log(logger.LEVEL_ERROR, "Failed to create window or renderer: %s\n", SDL_GetError());
return;
}
}
is = stream_open(input_filename.c_str(), file_iformat);
if (!is)
{
logger.log(logger.LEVEL_ERROR, "Failed to initialize VideoState!\n");
return;
}
//the song is playing now
*successfull = true;
event_loop(is);
//the song has quit;
*successfull = false;
}
changed the callback functions as the static ones couldn't be used by c++ eg,
void Ffplay::static_sdl_audio_callback(void* opaque, Uint8* stream, int len)
{
static_cast<Ffplay*>(opaque)->sdl_audio_callback(opaque, stream, len);
}
closing doesn't change from the main file to close the audio and sdl framework
void Ffplay::do_exit(VideoState* is)
{
abort = true;
if(is)
{
stream_close(is);
}
if (renderer)
SDL_DestroyRenderer(renderer);
if (window)
SDL_DestroyWindow(window);
#if CONFIG_AVFILTER
av_freep(&vfilters_list);
#endif
avformat_network_deinit();
SDL_Quit();
}
i call the functions as follows from main gui
ft=std::async(launch::async, &Menu::play_song, this, songs_to_play.at(0));
the menu::play_song function is:
void Menu::play_song(wstring song_path)
{
ready_to_play_song = false;
OutputDebugString(L"\nbefore song\n");
using std::future;
using std::async;
using std::launch;
string input{ song_path.begin(),song_path.end() };
Ffplay ffplay;
ffplay.play_song(input, h_sdl_window, &song_opened);
OutputDebugString(L"\nafter song\n");
ready_to_play_song = true;
}
THE PROBLEM is i can only play one song . if i call the menu::play_song function again the sound is missing and the video/art cover is occasionally missing also. it seems some resources are not been released or something like that.
i have localised the proble to this function
int Ffplay::packet_queue_get(PacketQueue* q, AVPacket* pkt, int block, int* serial)
{
MyAVPacketList* pkt1;
int ret;
int count=0;
SDL_LockMutex(q->mutex);
for (;;)
{
if (q->abort_request)
{
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size + sizeof(*pkt1);
q->duration -= pkt1->pkt.duration;
*pkt = pkt1->pkt;
if (serial)
*serial = pkt1->serial;
av_free(pkt1);
ret = 1;
break;
}
else if (!block) {
ret = 0;
break;
}
else
{
logger.log(logger.LEVEL_INFO, "packet_queue before");
SDL_CondWait(q->cond, q->mutex);
logger.log(logger.LEVEL_INFO, "packet_queue after");
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
the call to SDL_CondWait(q->cond, q->mutex); never returns

First, sorry the question was a bit vague since i couldn't upload much of the code as it was long that's why i posted the link to original one which is similar to mine, the only difference is I've changed functions from C static ones to C++ public ones.
The problem was the variable SDL_AudioDeviceID audio_dev
Assigning the variable using
audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE) was not assigning the variable as planned and when it came to closing the audio device using SDL_CloseAudioDevice(audio_dev); the audio device variable was 0 therefore the device wasn't been closed and as a result the second song was missing sound or video could hang.
This is a result of using various threads that calls the local callback functions that are been cast to a static nature as expected from the SDL api.
The answer was changing the device varible to static one, static SDL_AudioDeviceID audio_dev; therefore the variable could be accessed from any place in the program

Related

Web cam image capture

I am trying to write an application to capture an image from a webcam. I have written a C++ console application and whilst I can get an image, I have difficulty setting the resolution, I want to get higher resolution images and would like to be able to get 4k from a logitech brio webcam for example.
On Windows 10 with a lower resolution usb camera I was able to get 1280x720. On Windows 11 with a 4k USB camera I am only getting 640x480
Here is my code:
#include "vfw.h"
#include <cstdio>
#include <cstdint>
#pragma comment(lib, "Vfw32.lib")
int main(int argc, char** argv)
{
char device_name[100];
char filename[100];
size_t sz = 0;
char* userpath = nullptr;
strcpy_s(device_name, "");
strcpy_s(filename, userpath);
strncat_s(filename, "\\Documents\\image.bmp", 20);
// create the preview window
HWND hCam = capCreateCaptureWindow (
"FMCamCapture",
WS_CHILD | WS_VISIBLE,
0, 0, 1024, 768,
::GetDesktopWindow(), 0);
CAPDRIVERCAPS CapDrvCaps;
capDriverGetCaps(hCam, &CapDrvCaps, sizeof(CAPDRIVERCAPS));
int driver_number = 0;
int n = 1;
//Driver argument
while (n < argc)
{
if (strcmp(argv[n], "/drivernum") == 0)
{
// Set device number to specified value
if (++n < argc) {
driver_number = atoi(argv[n]);
}
else printf("Error: invalid device number");
if (driver_number <= 0)
printf("Error: invalid device number");
}
if (strcmp(argv[n], "/showdrivers") == 0)
{
// Show the list of available drivers and pause and exit
TCHAR szDeviceName[80];
TCHAR szDeviceVersion[80];
for (int wIndex = 0; wIndex < 10; wIndex++)
{
if (capGetDriverDescription(
wIndex,
szDeviceName,
sizeof(szDeviceName),
szDeviceVersion,
sizeof(szDeviceVersion)
))
{
printf("\nEach Driver may represent a different camera. The default driver is 0. Set a differnt driver to change cameras.\nAvailable Drivers:\n\%i - %s\n", wIndex, szDeviceName);
// Append name to list of installed capture drivers
// and then let the user select a driver to use.
}
}
system("pause");
return 0;
}
n++;
}
fprintf(stderr, "Saved file location: %s\n", filename);
CAPTUREPARMS captureParms;
BITMAPINFO bMInfo;
// connect to the first Driver
// for other cameras try index
// 1, 2, in place of the 0 below
if(capDriverConnect(hCam, driver_number))
{
capGetVideoFormat(hCam, &bMInfo, sizeof(BITMAPINFO));
//printf("height %ld", bMInfo.bmiHeader.biHeight);
//printf("width %ld", bMInfo.bmiHeader.biWidth);
//printf("bisize %lu", bMInfo.bmiHeader.biSizeImage);
//printf("compression %lu", bMInfo.bmiHeader.biCompression);
bMInfo.bmiHeader.biWidth = 1280;
bMInfo.bmiHeader.biHeight = 720;
bMInfo.bmiHeader.biSizeImage = 0;
//bMInfo.bmiHeader.biSizeImage = (bMInfo.bmiHeader.biHeight) * (((bMInfo.bmiHeader.biWidth * 2) * bMInfo.bmiHeader.biBitCount + 31) / 32) * 4;
bMInfo.bmiHeader.biCompression = BI_RGB;
if (capSetVideoFormat(hCam, &bMInfo, sizeof(BITMAPINFO))) {
//printf("set res success setvidformat");
}
capFileSaveDIB(hCam, filename);
//printf("Saved!");
capDriverDisconnect(hCam);
}
else
{
printf("Check camera?");
}
DestroyWindow(hCam);
return 0;
}```

Encoded video made using the Nvidia encoded SDK is not correct

I am trying to integrate the Nvidia Encoder SDK with the Nvidia Frame Buffer Capture SDK. I am capturing frames in a cuda buffer and then sending then to the Nvidia Encoder to be encoded in H.264 format.
The encoded video however, is not what I want it to be, it is a completely pink screen.
Are there any reasons as to why it might be happening?
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <dlfcn.h>
#include <string.h>
#include <getopt.h>
#include <unistd.h>
#include<dlfcn.h>
#include"nvEncodeAPI.h"
#include <NvFBC.h>
// If you have the CUDA Toolkit Installed, you can include the CUDA headers
// #include "cuda.h"
//#include "cuda_drvapi_dynlink_cuda.h"
#include"cuda.h"
#include "NvFBCUtils.h"
#define APP_VERSION 4
#define LIB_NVFBC_NAME "libnvidia-fbc.so.1"
#define LIB_CUDA_NAME "libcuda.so.1"
#define N_FRAMES 10
/*
* CUDA entry points
*/
typedef CUresult (* CUINITPROC) (unsigned int Flags);
typedef CUresult (* CUDEVICEGETPROC) (CUdevice *device, int ordinal);
typedef CUresult (* CUCTXCREATEV2PROC) (CUcontext *pctx, unsigned int flags, CUdevice dev);
typedef CUresult (* CUMEMCPYDTOHV2PROC) (void *dstHost, CUdeviceptr srcDevice, size_t ByteCount);
typedef CUresult (* CUGETDEVICEPTR) (CUdeviceptr *dptr, size_t bytesize);
static CUINITPROC cuInit_ptr = NULL;
static CUDEVICEGETPROC cuDeviceGet_ptr = NULL;
static CUCTXCREATEV2PROC cuCtxCreate_v2_ptr = NULL;
static CUMEMCPYDTOHV2PROC cuMemcpyDtoH_v2_ptr = NULL;
static CUGETDEVICEPTR cuGetDevicePtr=NULL;
typedef NVENCSTATUS (*PNVENCCREATEINSTANCE) (NV_ENCODE_API_FUNCTION_LIST* pFuncList);
/**
* Dynamically opens the CUDA library and resolves the symbols that are
* needed for this application.
*
* \param [out] libCUDA
* A pointer to the opened CUDA library.
*
* \return
* NVFBC_TRUE in case of success, NVFBC_FALSE otherwise.
*/
static NVFBC_BOOL cuda_load_library(void *libCUDA)
{
libCUDA = dlopen(LIB_CUDA_NAME, RTLD_NOW);
if (libCUDA == NULL) {
fprintf(stderr, "Unable to open '%s'\n", LIB_CUDA_NAME);
return NVFBC_FALSE;
}
cuInit_ptr = (CUINITPROC) dlsym(libCUDA, "cuInit");
if (cuInit_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuInit'\n");
return NVFBC_FALSE;
}
cuDeviceGet_ptr = (CUDEVICEGETPROC) dlsym(libCUDA, "cuDeviceGet");
if (cuDeviceGet_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuDeviceGet'\n");
return NVFBC_FALSE;
}
cuCtxCreate_v2_ptr = (CUCTXCREATEV2PROC) dlsym(libCUDA, "cuCtxCreate_v2");
if (cuCtxCreate_v2_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuCtxCreate_v2'\n");
return NVFBC_FALSE;
}
cuMemcpyDtoH_v2_ptr = (CUMEMCPYDTOHV2PROC) dlsym(libCUDA, "cuMemcpyDtoH_v2");
if (cuMemcpyDtoH_v2_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuMemcpyDtoH_v2'\n");
return NVFBC_FALSE;
}
cuGetDevicePtr = (CUGETDEVICEPTR) dlsym(libCUDA,"cuMemAlloc");
if(cuGetDevicePtr==NULL){
fprintf(stderr, "Unable to resolve symbol 'cuMemAlloc'\n");
return NVFBC_FALSE;
}
return NVFBC_TRUE;
}
/**
* Initializes CUDA and creates a CUDA context.
*
* \param [in] cuCtx
* A pointer to the created CUDA context.
*
* \return
* NVFBC_TRUE in case of success, NVFBC_FALSE otherwise.
*/
static NVFBC_BOOL cuda_init(CUcontext *cuCtx)
{
CUresult cuRes;
CUdevice cuDev;
cuRes = cuInit_ptr(0);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to initialize CUDA (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
cuRes = cuDeviceGet_ptr(&cuDev, 0);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to get CUDA device (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
cuRes = cuCtxCreate_v2_ptr(cuCtx, CU_CTX_SCHED_AUTO, cuDev);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to create CUDA context (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
return NVFBC_TRUE;
}
/**
* Initializes the NvFBC and CUDA libraries and creates an NvFBC instance.
*
* Creates and sets up a capture session to video memory using the CUDA interop.
*
* Captures a bunch of frames every second, converts them to BMP and saves them
* to the disk.
*/
int main(int argc, char *argv[])
{
//handing the opening of the nvidia encoder library
void* handle = dlopen("libnvidia-encode.so",RTLD_NOW);
if(handle==NULL)
{
printf("Unable to load the nvidia encoder library");
return EXIT_FAILURE;
}
PNVENCCREATEINSTANCE NvEncCreateInstance_ptr;
NvEncCreateInstance_ptr = (PNVENCCREATEINSTANCE) dlsym(handle,"NvEncodeAPICreateInstance");
if(NvEncCreateInstance_ptr==NULL)
{
printf("Failure to load the nv encode lib");
}
static struct option longopts[] = {
{ "get-status", no_argument, NULL, 'g' },
{ "track", required_argument, NULL, 't' },
{ "frames", required_argument, NULL, 'f' },
{ "size", required_argument, NULL, 's' },
{ "format", required_argument, NULL, 'o' },
{ NULL, 0, NULL, 0 }
};
int opt, ret;
unsigned int i, nFrames = N_FRAMES;
NVFBC_SIZE frameSize = { 0, 0 };
NVFBC_BOOL printStatusOnly = NVFBC_FALSE;
NVFBC_TRACKING_TYPE trackingType = NVFBC_TRACKING_DEFAULT;
char outputName[NVFBC_OUTPUT_NAME_LEN];
uint32_t outputId = 0;
void *libNVFBC = NULL, *libCUDA = NULL;
PNVFBCCREATEINSTANCE NvFBCCreateInstance_ptr = NULL;
NVFBC_API_FUNCTION_LIST pFn;
CUcontext cuCtx;
NVFBCSTATUS fbcStatus;
NVFBC_BOOL fbcBool;
NVFBC_SESSION_HANDLE fbcHandle;
NVFBC_CREATE_HANDLE_PARAMS createHandleParams;
NVFBC_GET_STATUS_PARAMS statusParams;
NVFBC_CREATE_CAPTURE_SESSION_PARAMS createCaptureParams;
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroyCaptureParams;
NVFBC_DESTROY_HANDLE_PARAMS destroyHandleParams;
NVFBC_TOCUDA_SETUP_PARAMS setupParams;
NVFBC_BUFFER_FORMAT bufferFormat = NVFBC_BUFFER_FORMAT_NV12;
char videoFile[64]="video.h264";
FILE* fd;
/*
* Parse the command line.
*/
while ((opt = getopt_long(argc, argv, "hgt:f:s:o:", longopts, NULL)) != -1) {
switch (opt) {
case 'g':
printStatusOnly = NVFBC_TRUE;
break;
case 't':
NvFBCUtilsParseTrackingType(optarg, &trackingType, outputName);
break;
case 'f':
nFrames = (unsigned int) atoi(optarg);
break;
case 's':
ret = sscanf(optarg, "%ux%u", &frameSize.w, &frameSize.h);
if (ret != 2) {
fprintf(stderr, "Invalid size format: '%s'\n", optarg);
return EXIT_FAILURE;
}
break;
case 'o':
if (!strcasecmp(optarg, "rgb")) {
bufferFormat = NVFBC_BUFFER_FORMAT_RGB;
} else if (!strcasecmp(optarg, "argb")) {
bufferFormat = NVFBC_BUFFER_FORMAT_ARGB;
} else if (!strcasecmp(optarg, "nv12")) {
bufferFormat = NVFBC_BUFFER_FORMAT_NV12;
} else if (!strcasecmp(optarg, "yuv444p")) {
bufferFormat = NVFBC_BUFFER_FORMAT_YUV444P;
} else {
fprintf(stderr, "Unknown buffer format: '%s'\n", optarg);
return EXIT_FAILURE;
}
break;
case 'h':
default:
usage(argv[0]);
return EXIT_SUCCESS;
}
}
NvFBCUtilsPrintVersions(APP_VERSION);
/*
* Dynamically load the NvFBC library.
*/
libNVFBC = dlopen(LIB_NVFBC_NAME, RTLD_NOW);
if (libNVFBC == NULL) {
fprintf(stderr, "Unable to open '%s'\n", LIB_NVFBC_NAME);
return EXIT_FAILURE;
}
fbcBool = cuda_load_library(libCUDA);
if (fbcBool != NVFBC_TRUE) {
return EXIT_FAILURE;
}
fbcBool = cuda_init(&cuCtx);
if (fbcBool != NVFBC_TRUE) {
return EXIT_FAILURE;
}
//input output buffer the contains all the pointers to the functions for the api
NV_ENCODE_API_FUNCTION_LIST nvencFunctionList;
nvencFunctionList.version=NV_ENCODE_API_FUNCTION_LIST_VER;
//this function call populates the passed pointer to the nvidia api function list with
//the function pointers to the routines implemented by the encoder api
NVENCSTATUS status = NvEncCreateInstance_ptr(&nvencFunctionList);//NvEncodeAPICreateInstance(&nvencFunctionList);
//exit code if failed to create instance of the api
if(status!=NV_ENC_SUCCESS)
{
printf("The nvidia encoder could not be initialized");
EXIT_FAILURE;
}
//creating the encode session params
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS openSessionExParams;
//initializing them and then adding the values
// memset(&openSessionExParams,0,sizeof(openSessionExParams));
//setting the version
openSessionExParams.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
openSessionExParams.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
//Note - Maybe change the next line
//ading the device pointer which in this case will be a cuda context
openSessionExParams.device = (void*)cuCtx;
openSessionExParams.reserved = NULL;
openSessionExParams.apiVersion = NVENCAPI_VERSION;
//setting all values of the reserved 1 array to 0
//Note - check the next two lines
memset(openSessionExParams.reserved1,0,253*sizeof(uint32_t));//sizeof(openSessionExParams.reserved1)/sizeof(openSessionExParams.reserved1[0]));
memset(openSessionExParams.reserved2,NULL,64*sizeof(void*));//sizeof(openSessionExParams.reserved2)/sizeof(openSessionExParams.reserved2[0]));
//handle for the encoder
void* encoder;
//Note- Seg fault
status = nvencFunctionList.nvEncOpenEncodeSessionEx(&openSessionExParams,&encoder);
//NvEncOpenEncodeSessionEx(&openSessionExParams,encoder);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure opening encoding session: %u\n",status);
return EXIT_FAILURE;
}
//getting the count of the available encoder settings
uint32_t guidCount;
status =nvencFunctionList.nvEncGetEncodeGUIDCount(encoder,&guidCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure getting GUID count");
return EXIT_FAILURE;
}
//allocating a buffer that holds that count
GUID* GUIDs = malloc(guidCount*sizeof(GUID));
//number of supported guid
uint32_t supportedGUID;
//adding all the guids to the previously allocated buffer
status =nvencFunctionList.nvEncGetEncodeGUIDs(encoder,GUIDs,guidCount,&supportedGUID);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure filling the guid buffer");
return EXIT_FAILURE;
}
//setting the encode guid
GUID encodeGuid = NV_ENC_CODEC_H264_GUID;
//settin up the presets
uint32_t encodePresetCount;
status =nvencFunctionList.nvEncGetEncodePresetCount(encoder,encodeGuid,&encodePresetCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure getting the encoder preset count");
return EXIT_FAILURE;
}
//allocating a buffer to hold the suppoting preset guid
GUID *presetGUIDs = malloc(encodePresetCount*sizeof(GUID));
uint32_t supportedPresetGUIDs;
status =nvencFunctionList.nvEncGetEncodePresetGUIDs(encoder,encodeGuid,presetGUIDs,encodePresetCount,&supportedPresetGUIDs);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure filling up the preset buffer");
return EXIT_FAILURE;
}
//getting a preset guid Note - check this line later
GUID presetGuid = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;
//setting up a preset congif Note - check this
NV_ENC_PRESET_CONFIG encPresetConfig;
encPresetConfig.version = NV_ENC_PRESET_CONFIG_VER;
encPresetConfig.presetCfg.version=NV_ENC_CONFIG_VER;
status = nvencFunctionList.nvEncGetEncodePresetConfig(encoder,encodeGuid,presetGuid,&encPresetConfig);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the preset configuration");
return EXIT_FAILURE;
}
//getting the input format which is YUV for our solution
//retrieving the input formats
uint32_t inputFormatCount;
status = nvencFunctionList.nvEncGetInputFormatCount(encoder,encodeGuid,&inputFormatCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the input buffer format count");
return EXIT_FAILURE;
}
//allocating a buffer for the input formats
NV_ENC_BUFFER_FORMAT* encodeBufferFormats = malloc(inputFormatCount*sizeof(encodeBufferFormats));
//holding the size of the supported formats
uint32_t supportedInputFormats;
//filling up the above buffer
status = nvencFunctionList.nvEncGetInputFormats(encoder,encodeGuid, encodeBufferFormats,inputFormatCount,&supportedInputFormats);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the supported input formats");
return EXIT_FAILURE;
}
//selecting a buffer format
//NV_ENC_BUFFER_FORMAT bufferFormat = NV_ENC_BUFFER_FORMAT_IYUV;
//querying the underlying hardware encoder capabilities can be done using the API
//initializing the hardware encoder
NV_ENC_INITIALIZE_PARAMS initializationParams;
/* NV_ENC_PRESET_CONFIG presetConfig;
presetConfig.version = NV_ENC_PRESET_CONFIG_VER;
NV_ENC_CONFIG encoderConfig;
encoderConfig.version = NV_ENC_CONFIG_VER;
encoderConfig.profileGUID=NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID;
presetConfig.presetCfg=encoderConfig;
status = nvencFunctionList.nvEncGetEncodePresetConfig(encoder,encodeGuid,presetGuid,&presetConfig);*/
if(status!=NV_ENC_SUCCESS)
{
printf("\nUnable to get preset config %u",status);
return EXIT_FAILURE;
}
//initializing the encoder params
initializationParams.version = NV_ENC_INITIALIZE_PARAMS_VER;
initializationParams.encodeConfig = &encPresetConfig.presetCfg;
initializationParams.encodeGUID = encodeGuid;
initializationParams.presetGUID=presetGuid;
initializationParams.encodeWidth = 600;
initializationParams.encodeHeight = 600;
initializationParams.frameRateNum = 60;
initializationParams.frameRateDen = 1;
initializationParams.enablePTD=1;
status = nvencFunctionList.nvEncInitializeEncoder(encoder,&initializationParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Error initializing encoder, %u",status);
return EXIT_FAILURE;
}
//allocating the nvidia input output buffers
int num_macroblocks = ((600 + 15) / 16) * ((600 + 15) / 16);
int max_surfaces = (num_macroblocks >= 8160) ? 16 : 32;
NV_ENC_INPUT_PTR* inputBuffers = (NV_ENC_INPUT_PTR*) malloc(max_surfaces * sizeof(NV_ENC_INPUT_PTR));
NV_ENC_OUTPUT_PTR* outputBuffers = (NV_ENC_OUTPUT_PTR*) malloc(max_surfaces * sizeof(NV_ENC_OUTPUT_PTR));
//CUdeviceptr* cudaDevicePtrs = (CUdeviceptr*) malloc(max_surfaces*sizeof(CUdeviceptr));
for(int i=0;i<max_surfaces;i++)
{
//creating the input buffer
NV_ENC_CREATE_INPUT_BUFFER inputBufferParams;
inputBufferParams.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
inputBufferParams.width =(600 + 31) & ~31;
inputBufferParams.height=(600 + 31) & ~31;
inputBufferParams.bufferFmt = NV_ENC_BUFFER_FORMAT_IYUV;
inputBufferParams.reserved=0;
memset(inputBufferParams.reserved1,0,57*sizeof(uint32_t));
memset(inputBufferParams.reserved2,NULL,63*sizeof(void*));
status = nvencFunctionList.nvEncCreateInputBuffer(encoder,&inputBufferParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Input buffer could not be created");
return EXIT_FAILURE;
}
//creating the output buffer
NV_ENC_CREATE_BITSTREAM_BUFFER bitstreamBufferParams;
bitstreamBufferParams.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
bitstreamBufferParams.reserved=0;
memset(bitstreamBufferParams.reserved1,0,58*sizeof(uint32_t));
memset(bitstreamBufferParams.reserved2,NULL,64*sizeof(void*));
//This line is giving me the error, NV_ENC_ERR_UNIMPLEMENTED
status = nvencFunctionList.nvEncCreateBitstreamBuffer(encoder,&bitstreamBufferParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Bitstream could not be created %i",status);
return EXIT_FAILURE;
}
/* CUresult cuResult = cuGetDevicePtr(&cudaDevicePtrs[i],600);
if(cuResult!=CUDA_SUCCESS)
{
printf("Could not allocate cuda device pointer, %u\n",cuResult);
return EXIT_FAILURE;
}*/
inputBuffers[i]=inputBufferParams.inputBuffer;
outputBuffers[i]=bitstreamBufferParams.bitstreamBuffer;
}
/*
* Resolve the 'NvFBCCreateInstance' symbol that will allow us to get
* the API function pointers.
*/
NvFBCCreateInstance_ptr =
(PNVFBCCREATEINSTANCE) dlsym(libNVFBC, "NvFBCCreateInstance");
if (NvFBCCreateInstance_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'NvFBCCreateInstance'\n");
return EXIT_FAILURE;
}
/*
* Create an NvFBC instance.
*
* API function pointers are accessible through pFn.
*/
memset(&pFn, 0, sizeof(pFn));
pFn.dwVersion = NVFBC_VERSION;
fbcStatus = NvFBCCreateInstance_ptr(&pFn);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "Unable to create NvFBC instance (status: %d)\n",
fbcStatus);
return EXIT_FAILURE;
}
/*
* Create a session handle that is used to identify the client.
*/
memset(&createHandleParams, 0, sizeof(createHandleParams));
createHandleParams.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
fbcStatus = pFn.nvFBCCreateHandle(&fbcHandle, &createHandleParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* Get information about the state of the display driver.
*
* This call is optional but helps the application decide what it should
* do.
*/
memset(&statusParams, 0, sizeof(statusParams));
statusParams.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
fbcStatus = pFn.nvFBCGetStatus(fbcHandle, &statusParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
if (printStatusOnly) {
NvFBCUtilsPrintStatus(&statusParams);
return EXIT_SUCCESS;
}
if (statusParams.bCanCreateNow == NVFBC_FALSE) {
fprintf(stderr, "It is not possible to create a capture session "
"on this system.\n");
return EXIT_FAILURE;
}
if (trackingType == NVFBC_TRACKING_OUTPUT) {
if (!statusParams.bXRandRAvailable) {
fprintf(stderr, "The XRandR extension is not available.\n");
fprintf(stderr, "It is therefore not possible to track an RandR output.\n");
return EXIT_FAILURE;
}
outputId = NvFBCUtilsGetOutputId(statusParams.outputs,
statusParams.dwOutputNum,
outputName);
if (outputId == 0) {
fprintf(stderr, "RandR output '%s' not found.\n", outputName);
return EXIT_FAILURE;
}
}
/*
* Create a capture session.
*/
printf("Creating an asynchronous capture session of %u frames with 1 "
"second internal between captures.\n", nFrames);
memset(&createCaptureParams, 0, sizeof(createCaptureParams));
createCaptureParams.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
createCaptureParams.eCaptureType = NVFBC_CAPTURE_SHARED_CUDA;
createCaptureParams.bWithCursor = NVFBC_TRUE;
createCaptureParams.frameSize = frameSize;
createCaptureParams.eTrackingType = trackingType;
if (trackingType == NVFBC_TRACKING_OUTPUT) {
createCaptureParams.dwOutputId = outputId;
}
fbcStatus = pFn.nvFBCCreateCaptureSession(fbcHandle, &createCaptureParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* Set up the capture session.
*/
memset(&setupParams, 0, sizeof(setupParams));
setupParams.dwVersion = NVFBC_TOCUDA_SETUP_PARAMS_VER;
setupParams.eBufferFormat = bufferFormat;
fbcStatus = pFn.nvFBCToCudaSetUp(fbcHandle, &setupParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* We are now ready to start grabbing frames.
*/
fd = fopen(videoFile, "wb");
if (fd == NULL)
{
fprintf(stderr,"Unable to create '%s'\n", videoFile);
return EXIT_FAILURE;
}
//configuring the per frame encode parameters
NV_ENC_PIC_PARAMS encoderPicParams;
//Codec specific params
/* NV_ENC_CODEC_PIC_PARAMS h264Params;
h264Params.h264PicParams.reserved3=0;
h264Params.h264PicParams.reservedBitFields=0;
h264Params.h264PicParams.ltrUsageMode=0;
memset(h264Params.h264PicParams.reserved,0,242*sizeof(uint32_t));
memset(h264Params.h264PicParams.reserved2,NULL,61*sizeof(void*));*/
encoderPicParams.version = NV_ENC_PIC_PARAMS_VER;
//encoderPicParams.codecPicParams=h264Params;
encoderPicParams.inputWidth=(600+ 31) & ~31;
encoderPicParams.inputHeight=(600+ 31) & ~31;
encoderPicParams.inputPitch = (600+ 31) & ~31;
//encoderPicParams.encodePicFlags=NV_ENC_PIC_FLAG_FORCEIDR;
//encoderPicParams.bufferFmt=NV_ENC_BUFFER_FORMAT_IYUV;
encoderPicParams.pictureStruct=NV_ENC_PIC_STRUCT_FRAME;
for (i = 0; i < 60; i++) {
static CUdeviceptr cuDevicePtr;
static unsigned char *frame = NULL;
static uint32_t lastByteSize = 0;
//char filename[64];
int index=i;
if(i>=max_surfaces)
{
index=i%max_surfaces;
}
int res;
uint64_t t1, t2, t1_total, t2_total, t_delta, wait_time_ms;
CUresult cuRes;
NVFBC_TOCUDA_GRAB_FRAME_PARAMS grabParams;
NVFBC_FRAME_GRAB_INFO frameInfo;
t1 = NvFBCUtilsGetTimeInMillis();
t1_total = t1;
memset(&grabParams, 0, sizeof(grabParams));
memset(&frameInfo, 0, sizeof(frameInfo));
grabParams.dwVersion = NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER;
/*
* Use asynchronous calls.
*
* The application will not wait for a new frame to be ready. It will
* capture a frame that is already available. This might result in
* capturing several times the same frame. This can be detected by
* checking the frameInfo.bIsNewFrame structure member.
*/
grabParams.dwFlags = NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT;
/*
* This structure will contain information about the captured frame.
*/
grabParams.pFrameGrabInfo = &frameInfo;
/*
* The frame will be mapped in video memory through this CUDA
* device pointer.
*/
grabParams.pCUDADeviceBuffer = &cuDevicePtr;
/*
* Capture a frame.
*/
fbcStatus = pFn.nvFBCToCudaGrabFrame(fbcHandle, &grabParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
t2 = NvFBCUtilsGetTimeInMillis();
/*
* Allocate or re-allocate the destination buffer in system memory
* when necessary.
*
* This is to handle change of resolution.
*/
if (lastByteSize < frameInfo.dwByteSize) {
frame = (unsigned char *)realloc(frame, frameInfo.dwByteSize);
if (frame == NULL) {
fprintf(stderr, "Unable to allocate system memory\n");
return EXIT_FAILURE;
}
printf("Reallocated %u KB of system memory\n",
frameInfo.dwByteSize / 1024);
lastByteSize = frameInfo.dwByteSize;
}
printf("%s id %u grabbed in %llu ms",
(frameInfo.bIsNewFrame ? "New frame" : "Frame"),
frameInfo.dwCurrentFrame,
(unsigned long long) (t2 - t1));
/*
* Download frame from video memory to system memory.
*/
t1 = NvFBCUtilsGetTimeInMillis();
cuRes = cuMemcpyDtoH_v2_ptr((void *) frame, cuDevicePtr,
frameInfo.dwByteSize);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "CUDA memcpy failure (result: %d)\n", cuRes);
return EXIT_FAILURE;
}
//creating the params for the nv enc lock input buffer
//inputBufferParams.pSysMemBuffer=(void*) frame;
//status = nvencFunctionList.nvEncCreateInputBuffer(encoder,&inputBufferParams);
//setting up the input params
NV_ENC_REGISTER_RESOURCE registerResource;
registerResource.version=NV_ENC_REGISTER_RESOURCE_VER;
registerResource.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
registerResource.width = (600+31)&~31;
registerResource.height=(600+31)&~31;
registerResource.pitch=(600+31)&~31;
registerResource.resourceToRegister = (void*)cuDevicePtr;
registerResource.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
status = nvencFunctionList.nvEncRegisterResource(encoder,&registerResource);
if(status!=NV_ENC_SUCCESS)
{
printf("Failed to register resource, %u",status);
return EXIT_FAILURE;
}
//mapping these resources
NV_ENC_MAP_INPUT_RESOURCE mappedResource;
mappedResource.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
mappedResource.registeredResource=registerResource.registeredResource;
status = nvencFunctionList.nvEncMapInputResource(encoder,&mappedResource);
if(status!=NV_ENC_SUCCESS)
{
printf("Could not map resource: %u,",status);
return EXIT_FAILURE;
}
encoderPicParams.outputBitstream=outputBuffers[index];
encoderPicParams.inputBuffer=mappedResource.mappedResource;
encoderPicParams.bufferFmt=mappedResource.mappedBufferFmt;
/* memset(encoderPicParams.reserved1,0,6*sizeof(uint32_t));
memset(encoderPicParams.reserved2,NULL,2*sizeof(void*));
memset(encoderPicParams.reserved3,0,286*sizeof(uint32_t));
memset(encoderPicParams.reserved4,0,60*sizeof(void*));*/
//sending the picture to be encoded
status=nvencFunctionList.nvEncEncodePicture(encoder,&encoderPicParams);
if(status!=NV_ENC_SUCCESS)
{
printf("\n Unable to encode video, Error code: %u",status);
return EXIT_FAILURE;
}
//setting up the lock bitstream params
//sending the data to the output stream
NV_ENC_LOCK_BITSTREAM lockBitstreamParams;
lockBitstreamParams.version=NV_ENC_LOCK_BITSTREAM_VER;
lockBitstreamParams.doNotWait=0;
lockBitstreamParams.outputBitstream=outputBuffers[index];
lockBitstreamParams.reservedBitFields=0;
memset(lockBitstreamParams.reserved,0,219*sizeof(uint32_t));
memset(lockBitstreamParams.reserved1,0,13*sizeof(uint32_t));
memset(lockBitstreamParams.reserved2,NULL,64*sizeof(uint32_t));
status = nvencFunctionList.nvEncLockBitstream(encoder,&lockBitstreamParams);
if(status!=NV_ENC_SUCCESS)
{
printf("error locking bitstream");
return EXIT_FAILURE;
}
//writing the video data to the file
int bytes = fwrite(lockBitstreamParams.bitstreamBufferPtr,1,lockBitstreamParams.bitstreamSizeInBytes ,fd);
printf("\nNumber of bytes: %u\n",bytes);
if(bytes==0)
{
fprintf(stderr, "Unable to write to '%s'\n", videoFile);
return EXIT_FAILURE;
}
status = nvencFunctionList.nvEncUnlockBitstream(encoder,outputBuffers[index]);
status = nvencFunctionList.nvEncUnmapInputResource(encoder,mappedResource.mappedResource);
if(status!=NV_ENC_SUCCESS)
{
printf("failure to map resource, %u",status);
return EXIT_FAILURE;
}

Getting left and right frames with BlackMagic SDK and convert them into opencv mat

I am currently trying to use BlackMagic SDK with a stero camera using a DeckLink 4K pro card on Linux.
I already used it with a monocular one and successfully get the image and convert it in an opencv::mat but no matter what I try with this one I have nothing but empty matrix corresponding to left and right frames. I suspect there is an issue with the mode and/or flags I use but I cannot figure it out... When I try to see the frames with mediaExpress, I have satisfying results so I guess it is not a hardware problem.
I tried to do something that look like to the different sample given by the SDK and I convert the final frames in opencv mat to see the result. When !isStereo, eveything goes well and I get a fusion of both right and left frames but when it's true, I have empty matrix for both left and right frames.
Here is the corresponding part of my code (the things commented are things I tried: definitively, I am almost sure it's something linked with the flags I am using in g_deckLinkInput->EnableVideoInput(...) ):
...
void VideoInputFromBlackMagic::runInput(){
this->running=true;
int m_deckLinkIndex;
int idx;
//Check result
HRESULT result;
IDeckLink* deckLink = NULL;
IDeckLinkInput* g_deckLinkInput = NULL;
IDeckLinkAttributes* deckLinkAttributes = NULL;
IDeckLinkIterator* deckLinkIterator = CreateDeckLinkIteratorInstance();
IDeckLinkInputCallback* callBack;
IDeckLinkDisplayModeIterator* displayModeIterator = NULL;
IDeckLinkDisplayMode* displayMode = NULL;
char* displayModeName = NULL;
BMDDisplayModeSupport displayModeSupported;
bool formatDetectionSupported;
if (!deckLinkIterator)
{
fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
return;
}
//Get the DeckLink Inputs
result = deckLinkIterator->Next(&deckLink);
result = deckLink->QueryInterface(IID_IDeckLinkInput, (void**)&g_deckLinkInput);
if(result != S_OK){
fprintf(stdout, "Cannot get the Input : DeckLink Error\n");
return;
}
//Get the DeckLink attributes (that may not correctly work: format detection does not properly work)
result = deckLink->QueryInterface(IID_IDeckLinkAttributes, (void**)&deckLinkAttributes);
if (!(result == S_OK)){
fprintf(stdout, "Cannot get the DeckLink attributes : DeckLink Error\n");
return;
}
//Format detection
result = deckLinkAttributes->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &formatDetectionSupported);
if (result != S_OK || !formatDetectionSupported){
fprintf(stdout,"Cannot get the format input: DeckLink Error\n");
return;
}
//Index for the different inputs
idx = 0;
//Get all the displayModes
result = g_deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
if (result != S_OK){
fprintf(stdout,"Cannot set an iterator on the different display modes: DeckLink Error\n");
}
//Set idx
while ((result = displayModeIterator->Next(&displayMode)) == S_OK)
{
if (idx == 0)
break;
--idx;
displayMode->Release();
}
if (result != S_OK || displayMode == NULL){
fprintf(stdout,"Cannot get the main display mode: DeckLink Error\n");
return;
}
//Get Mode name: useless
result = displayMode->GetName((const char**)&displayModeName);
// Check display mode is supported with given options
if(this->isStereo){
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeSupports3D | bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 & bmdDisplayModeSupports3D, &displayModeSupported, NULL);
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdVideoInputDualStream3D, &displayModeSupported, NULL);
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 | bmdVideoInputDualStream3D, &displayModeSupported, NULL);
} else {
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
}
if (result != S_OK){
fprintf(stdout,"Video Mode not supported : aborted\n");
return;
}
if (displayModeSupported == bmdDisplayModeNotSupported)
{
fprintf(stdout, "The display mode %s is not supported with the selected pixel format\n", displayModeName);
return;
}
//Set the callback on this ( will defined callback on VideoInputFrameArrived and others functions when images arrives or when other events happens
g_deckLinkInput->SetCallback(this);
//Enable the video input with the selected format
if(this->isStereo){
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeSupports3D | bmdDisplayModeColorspaceRec709);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 & bmdDisplayModeSupports3D);
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdVideoInputDualStream3D);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 | bmdVideoInputDualStream3D);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmd3DPreviewFormatRightOnly);
} else {
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709);
}
if (result != S_OK)
{
fprintf(stderr, "Failed to enable video input. Maybe another application is using the card.\n");
return;
}
//Disable the audio
result = g_deckLinkInput->DisableAudioInput();
//Start the stream
result = g_deckLinkInput->StartStreams();
if (result != S_OK){
fprintf(stdout,"Error while starting the streaming : aborted\n");
}
while(this->running){
}
}
...
HRESULT VideoInputFromBlackMagic::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame){
if (!videoFrame){
fprintf(stdout,"Update: No video frame\n");
return S_FALSE;
}
void* data;
void* dataRight;
IDeckLinkVideoFrame3DExtensions* threeDExtensions = NULL;
IDeckLinkVideoFrame* rightEyeFrame = NULL;
if (FAILED(videoFrame->GetBytes(&data))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
if( this->isStereo ){
if ((videoFrame->QueryInterface(IID_IDeckLinkVideoFrame3DExtensions, (void **) &threeDExtensions) != S_OK) || (threeDExtensions->GetFrameForRightEye(&rightEyeFrame) != S_OK)){
fprintf(stdout,"Fail obtaining right eye frame\n");
return S_FALSE;
}
if (FAILED(rightEyeFrame->GetBytes(&dataRight))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
}
cv::Mat loadedImage;
cv::Mat mat = cv::Mat(videoFrame->GetHeight(), videoFrame->GetWidth(), CV_8UC2, data, videoFrame->GetRowBytes());
cv::cvtColor(mat, loadedImage, CV_YUV2BGR_UYVY);
//Right eye
cv::Mat loadedImageRight;
cv::Mat matRight;
if(this->isStereo){
matRight = cv::Mat(rightEyeFrame->GetHeight(), rightEyeFrame->GetWidth(), CV_8UC2, dataRight, rightEyeFrame->GetRowBytes());
if (rightEyeFrame){
rightEyeFrame->Release();
}
cv::cvtColor(matRight, loadedImageRight, CV_YUV2BGR_UYVY);
}
if (!loadedImage.data){
fprintf(stdout,"No frame loaded from the video : mainImage will not be updated\n");
} else {
this->currentImage = loadedImage;
//this->currentImage = loadedImageRight;
if(this->isStereo){
//this->currentImageRight = loadedImageRight;
this->currentImageRight = loadedImage;
}
this->initialized = true;
if(this->debugMode){
if(this->isStereo){
cv::imshow("DEBUG : right eye", this->currentImageRight);
cv::imshow("DEBUG : left eye", this->currentImage);
} else {
cv::imshow("DEBUG", this->currentImage);
}
}
if(this->debugMode){
int kp = cv::waitKey(1);
if(kp == 1048603){
//Remove debugMode and remove the window
this->setDebugMode(false);
}
}
}
return S_OK;
}
...
Any idea?

Qt - Mac OS X - Error: cannot initialize a variable of type 'CGError'

We are trying to implement global shortcuts into our application, developed with Qt 5.5 under Mac OS X.
Unfortunately when adding another library the following error pops up:
/Users/user/app/qxtglobalshortcut5/gui/qxtwindowsystem_mac.cpp:17: error: cannot initialize a variable of type 'CGError' with an rvalue of type '(anonymous enum at /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/usr/include/MacTypes.h:382:1)'
CGError err(noErr);
^ ~~~~~
This is the file in question:
#ifndef QXTWINDOWSYSTEM_MAC_CPP
#define QXTWINDOWSYSTEM_MAC_CPP
#include "qxtwindowsystem.h"
#include "qxtwindowsystem_mac.h"
// WId to return when error
#define WINDOW_NOT_FOUND (WId)(0)
WindowList qxt_getWindowsForPSN(ProcessSerialNumber *psn)
{
static CGSConnection connection = _CGSDefaultConnection();
WindowList wlist;
if (!psn) return wlist;
CGError err(noErr);
// get onnection for given process psn
CGSConnection procConnection;
err = CGSGetConnectionIDForPSN(connection, psn, &procConnection);
if (err != noErr) return wlist;
/* get number of windows open by given process
in Mac OS X an application may have multiple windows, which generally
represent documents. It is also possible that there is no window even
though there is an application, it can simply not have any documents open. */
int windowCount(0);
err = CGSGetOnScreenWindowCount(connection, procConnection, &windowCount);
// if there are no windows open by this application, skip
if (err != noErr || windowCount == 0) return wlist;
// get list of windows
int windowList[windowCount];
int outCount(0);
err = CGSGetOnScreenWindowList(connection, procConnection, windowCount, windowList, &outCount);
if (err != noErr || outCount == 0) return wlist;
for (int i=0; i<outCount; ++i)
{
wlist += windowList[i];
}
return wlist;
}
WindowList QxtWindowSystem::windows()
{
WindowList wlist;
ProcessSerialNumber psn = {0, kNoProcess};
// iterate over list of processes
OSErr err;
while ((err = ::GetNextProcess(&psn)) == noErr)
{
wlist += qxt_getWindowsForPSN(&psn);
}
return wlist;
}
WId QxtWindowSystem::activeWindow()
{
ProcessSerialNumber psn;
OSErr err(noErr);
err = ::GetFrontProcess(&psn);
if (err != noErr) return WINDOW_NOT_FOUND;
// in Mac OS X, first window for given PSN is always the active one
WindowList wlist = qxt_getWindowsForPSN(&psn);
if (wlist.count() > 0)
return wlist.at(0);
return WINDOW_NOT_FOUND;
}
QString QxtWindowSystem::windowTitle(WId window)
{
CGSValue windowTitle;
CGError err(noErr);
static CGSConnection connection = _CGSDefaultConnection();
// This code is so dirty I had to wash my hands after writing it.
// most of CoreGraphics private definitions ask for CGSValue as key but since
// converting strings to/from CGSValue was dropped in 10.5, I use CFString, which
// apparently also works.
// FIXME: Not public API function. Can't compile with OS X 10.8
// err = CGSGetWindowProperty(connection, window, (CGSValue)CFSTR("kCGSWindowTitle"), &windowTitle);
if (err != noErr) return QString();
// this is UTF8 encoded
return QCFString::toQString((CFStringRef)windowTitle);
}
QRect QxtWindowSystem::windowGeometry(WId window)
{
CGRect rect;
static CGSConnection connection = _CGSDefaultConnection();
CGError err = CGSGetWindowBounds(connection, window, &rect);
if (err != noErr) return QRect();
return QRect(rect.origin.x, rect.origin.y, rect.size.width, rect.size.height);
}
/* This method is the only one that is not a complete hack
from Quartz Event Services
http://developer.apple.com/library/mac/#documentation/Carbon/Reference/QuartzEventServicesRef/Reference/reference.html
*/
uint QxtWindowSystem::idleTime()
{
// CGEventSourceSecondsSinceLastEventType returns time in seconds as a double
// also has extremely long name
double idle = 1000 * ::CGEventSourceSecondsSinceLastEventType(kCGEventSourceStateCombinedSessionState, kCGAnyInputEventType);
return (uint)idle;
}
// these are copied from X11 implementation
WId QxtWindowSystem::findWindow(const QString& title)
{
WId result = 0;
WindowList list = windows();
foreach(const WId &wid, list)
{
if (windowTitle(wid) == title)
{
result = wid;
break;
}
}
return result;
}
WId QxtWindowSystem::windowAt(const QPoint& pos)
{
WId result = 0;
WindowList list = windows();
for (int i = list.size() - 1; i >= 0; --i)
{
WId wid = list.at(i);
if (windowGeometry(wid).contains(pos))
{
result = wid;
break;
}
}
return result;
}
#endif // QXTWINDOWSYSTEM_MAC_CPP
The last time we used this library there was no such error and we don't quite understand what the problem is.
The library we added is: https://github.com/ddqd/qxtglobalshortcut5
Thank you all in advance!

XAudio2 - playing only a part of the audio

I am trying to play mp3/wma using xaudio2. I managed to use the Media Foundation Source Reader object to do the decoding. My problem is, it is not playing the full audio; I could get only a part of the audio played.
What I am trying to do is, get the next sample from IMFSourceReader and submit this as the next buffer of sourcevoice. This is repeated util all the data is read from IMFSourceReader.
while (true)
{
DWORD dwFlags = 0;
// Read the next sample.
hr = pReader->ReadSample(
(DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM,
0, NULL, &dwFlags, NULL, &pSample );
if (dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED)
{
printf("Type change - not supported by WAVE file format.\n");
break;
}
if (dwFlags & MF_SOURCE_READERF_ENDOFSTREAM)
{
printf("End of input file.\n");
break;
}
if (pSample == NULL)
{
printf("No sample\n");
continue;
}
// Get a pointer to the audio data in the sample.
hr = pSample->ConvertToContiguousBuffer(&pBuffer);
if (FAILED(hr)) { break; }
hr = pBuffer->Lock(&pAudioData, NULL, &cbBuffer);
if (FAILED(hr)) { break; }
// Make sure not to exceed the specified maximum size.
if (cbMaxAudioData - cbAudioData < cbBuffer)
{
cbBuffer = cbMaxAudioData - cbAudioData;
}
// Write this data to the output file.
hr = WriteToFile(hFile, pAudioData, cbBuffer);
int audioBufferLength = cbBuffer;
if (FAILED(hr)) { break; }
SubmitBuffer(pAudioData, audioBufferLength);
// Unlock the buffer.
hr = pBuffer->Unlock();
pAudioData = NULL;
if (FAILED(hr)) { break; }
// Update running total of audio data.
cbAudioData += cbBuffer;
if (cbAudioData >= cbMaxAudioData)
{
break;
}
SafeRelease(&pSample);
SafeRelease(&pBuffer);
}
void AudioDecoder::SubmitBuffer(byte *pAudioData, int audioBufferLength)
{
byte * pAudioBuffer = new byte[audioBufferLength];
CopyMemory(pAudioBuffer, pAudioData, audioBufferLength);
if (pAudioBuffer != nullptr)
{
// Create an XAUDIO2_BUFFER for submitting audio data
XAUDIO2_BUFFER buffer = {0};
buffer.AudioBytes = audioBufferLength;
buffer.pAudioData = pAudioBuffer;
buffer.pContext = pAudioBuffer;
HRESULT hresult = m_pSourceVoice->SubmitSourceBuffer(&buffer);
}
}
After this I am calling m_pSourceVoice->Start(). This will start the audio, but not playing the full audio. Do I need to add anything else?
This loop doesn't look like it accounts for if any buffers have been completed before submitting more, so could be running into the limit of XAUDIO2_MAX_QUEUED_BUFFERS. Can you create a counter on your while loop to see how many buffers are submitted to the source voice?
If you've hit a limit you could start playback before fully decoding the file and submit additional buffers via source voice callbacks.
http://msdn.microsoft.com/en-us/library/windows/desktop/ee415769(v=vs.85).aspx