I am writing a program to access the WHO_AM_I registers of an accelerometer and a gyroscope via I2C on a BeagleBone Black. Here is the basic code:
int main(void)
{
char rxBuffer[40];
char txBuffer[32];
int gyroAddress = 0x20;
int acceleroAddress = 0x1e;
int tenBitAddress = 0;
int opResult = 0;
int i2cFD = open("/dev/i2c-1",O_RDWR);
opResult = ioctl(i2cFD, I2C_TENBIT, tenBitAddress);
opResult = ioctl(i2cFD,I2C_SLAVE, acceleroAddress);
memset(rxBuffer, 0, sizeof(rxBuffer));
memset(txBuffer, 0, sizeof(txBuffer));
txBuffer[0] = 0x0D;
opResult = write(i2cFD, txBuffer, 1);
if (opResult !=1) printf("No ACK bit!\n");
opResult = read(i2cFD, rxBuffer, 1);
printf("Part ID: %d\n", (int)rxBuffer[0]);
//for gyro
opResult = ioctl(i2cFD, I2C_SLAVE, gyroAddress);
txBuffer[0] = 0x0C;
opResult = write(i2cFD, txBuffer, 1);
if (opResult != 1) printf("No ACK bit!\n");
opResult = read(i2cFD, rxBuffer, 1);
printf("Part ID: %d\n", (int)rxBuffer[0]);
}
I get the Part ID as zero for both devices, but when I probe the sensor using i2cget it returns the expected value 0xC7. Any idea what I am doing wrong?
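For comparison, this is the kind of combined write/read transaction that I believe i2cget performs internally (a single I2C_RDWR ioctl with a repeated start between the register write and the data read, instead of my separate write() and read() calls). The device address and register below are the accelerometer values from my code; this is just a sketch, not my actual program:
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
    int fd = open("/dev/i2c-1", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    unsigned char reg = 0x0D;  /* accelerometer WHO_AM_I register */
    unsigned char id = 0;

    /* one transaction: write the register address, repeated start, read one byte */
    struct i2c_msg msgs[2];
    msgs[0].addr = 0x1e;       /* accelerometer address from the code above */
    msgs[0].flags = 0;
    msgs[0].len = 1;
    msgs[0].buf = &reg;
    msgs[1].addr = 0x1e;
    msgs[1].flags = I2C_M_RD;
    msgs[1].len = 1;
    msgs[1].buf = &id;

    struct i2c_rdwr_ioctl_data xfer;
    xfer.msgs = msgs;
    xfer.nmsgs = 2;

    if (ioctl(fd, I2C_RDWR, &xfer) < 0) { perror("I2C_RDWR"); close(fd); return 1; }
    printf("Part ID: 0x%02X\n", id);
    close(fd);
    return 0;
}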
I am trying to write an application to capture an image from a webcam. I have written a C++ console application, and while I can get an image, I have difficulty setting the resolution. I want higher-resolution images and would like to get 4K from a Logitech Brio webcam, for example.
On Windows 10 with a lower-resolution USB camera I was able to get 1280x720. On Windows 11 with a 4K USB camera I am only getting 640x480.
Here is my code:
#include "vfw.h"
#include <cstdio>
#include <cstdint>
#pragma comment(lib, "Vfw32.lib")
int main(int argc, char** argv)
{
char device_name[100];
char filename[100];
size_t sz = 0;
char* userpath = nullptr;
strcpy_s(device_name, "");
// (assumed) the save path comes from %USERPROFILE%; fetch it so filename is valid
if (_dupenv_s(&userpath, &sz, "USERPROFILE") != 0 || userpath == nullptr) return 1;
strcpy_s(filename, userpath);
strncat_s(filename, "\\Documents\\image.bmp", 20);
// create the preview window
HWND hCam = capCreateCaptureWindow (
"FMCamCapture",
WS_CHILD | WS_VISIBLE,
0, 0, 1024, 768,
::GetDesktopWindow(), 0);
CAPDRIVERCAPS CapDrvCaps;
capDriverGetCaps(hCam, &CapDrvCaps, sizeof(CAPDRIVERCAPS));
int driver_number = 0;
int n = 1;
//Driver argument
while (n < argc)
{
if (strcmp(argv[n], "/drivernum") == 0)
{
// Set device number to specified value
if (++n < argc) {
driver_number = atoi(argv[n]);
}
else printf("Error: invalid device number");
if (driver_number <= 0)
printf("Error: invalid device number");
}
if (strcmp(argv[n], "/showdrivers") == 0)
{
// Show the list of available drivers and pause and exit
TCHAR szDeviceName[80];
TCHAR szDeviceVersion[80];
for (int wIndex = 0; wIndex < 10; wIndex++)
{
if (capGetDriverDescription(
wIndex,
szDeviceName,
sizeof(szDeviceName),
szDeviceVersion,
sizeof(szDeviceVersion)
))
{
printf("\nEach Driver may represent a different camera. The default driver is 0. Set a differnt driver to change cameras.\nAvailable Drivers:\n\%i - %s\n", wIndex, szDeviceName);
// Append name to list of installed capture drivers
// and then let the user select a driver to use.
}
}
system("pause");
return 0;
}
n++;
}
fprintf(stderr, "Saved file location: %s\n", filename);
CAPTUREPARMS captureParms;
BITMAPINFO bMInfo;
// connect to the first Driver
// for other cameras try index
// 1, 2, in place of the 0 below
if(capDriverConnect(hCam, driver_number))
{
capGetVideoFormat(hCam, &bMInfo, sizeof(BITMAPINFO));
//printf("height %ld", bMInfo.bmiHeader.biHeight);
//printf("width %ld", bMInfo.bmiHeader.biWidth);
//printf("bisize %lu", bMInfo.bmiHeader.biSizeImage);
//printf("compression %lu", bMInfo.bmiHeader.biCompression);
bMInfo.bmiHeader.biWidth = 1280;
bMInfo.bmiHeader.biHeight = 720;
bMInfo.bmiHeader.biSizeImage = 0;
//bMInfo.bmiHeader.biSizeImage = (bMInfo.bmiHeader.biHeight) * (((bMInfo.bmiHeader.biWidth * 2) * bMInfo.bmiHeader.biBitCount + 31) / 32) * 4;
bMInfo.bmiHeader.biCompression = BI_RGB;
if (capSetVideoFormat(hCam, &bMInfo, sizeof(BITMAPINFO))) {
//printf("set res success setvidformat");
}
capFileSaveDIB(hCam, filename);
//printf("Saved!");
capDriverDisconnect(hCam);
}
else
{
printf("Check camera?");
}
DestroyWindow(hCam);
return 0;
}
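For debugging I also read the format back after trying to set it, to see what the driver actually accepted; a rough sketch of that check (the helper name is mine, hCam is the capture window from the code above):
#include <windows.h>
#include <vfw.h>
#include <cstdio>

// Sketch: after capSetVideoFormat, ask the driver what format it is really using.
static void ReportActualFormat(HWND hCam)
{
    BITMAPINFO actual = {};
    if (capGetVideoFormat(hCam, &actual, sizeof(BITMAPINFO)))
    {
        printf("driver reports %ldx%ld at %u bpp\n",
            actual.bmiHeader.biWidth,
            actual.bmiHeader.biHeight,
            actual.bmiHeader.biBitCount);
    }
}
Calling this right after the capSetVideoFormat call would show whether the 1280x720 request was silently ignored.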
I'm trying to resample audio from 44 kHz to 48 kHz and I'm getting a small, light noise after resampling, as if someone were gently tapping the mic. This happens both ways, from 48 kHz to 44 kHz and vice versa.
I've read that this can happen because the SwrContext still has some data left and that I should flush the context before resampling the next frame. And although this helps a little (the noise is less noticeable), it's still present.
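To be concrete, by flushing I mean draining the converter with a NULL input between frames, roughly like this (helper and variable names are placeholders, not my real code):
#include <stdint.h>
#include <libswresample/swresample.h>

/* Drain any samples libswresample is still holding for this context.
 * Returns the total number of drained samples per channel. */
static int drain_swr(SwrContext *swr_ctx, uint8_t **out_data, int out_capacity)
{
    int total = 0, n;
    do {
        n = swr_convert(swr_ctx, out_data, out_capacity, NULL, 0);
        if (n > 0)
            total += n;
    } while (n > 0);
    return total;
}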
I've tried using the FFmpeg resample filter instead, but the output is just loud, incoherent noise. I'm pretty sure libswresample should not produce noise on resampling, which means I just don't know how to use it correctly and am missing some options.
This is the code for resampler.
int ResampleFrame(VideoState * videoState, AVFrame *decoded_audio_frame, enum AVSampleFormat out_sample_fmt, uint8_t * out_buf)
{
int in_sample_rate = videoState->audio->ptrAudioCodecCtx_->sample_rate;
int out_sample_rate = SAMPLE_RATE;
// get an instance of the AudioResamplingState struct, create if NULL
AudioResamplingState* arState = getAudioResampling(videoState->audio->ptrAudioCodecCtx_->channel_layout);
if (!arState->swr_ctx)
{
printf("swr_alloc error.\n");
return -1;
}
// get input audio channels
arState->in_channel_layout = (videoState->audio->ptrAudioCodecCtx_->channels ==
av_get_channel_layout_nb_channels(videoState->audio->ptrAudioCodecCtx_->channel_layout)) ?
videoState->audio->ptrAudioCodecCtx_->channel_layout :
av_get_default_channel_layout(videoState->audio->ptrAudioCodecCtx_->channels);
// check input audio channels correctly retrieved
if (arState->in_channel_layout <= 0)
{
printf("in_channel_layout error.\n");
return -1;
}
arState->out_channel_layout = AV_CH_LAYOUT_STEREO;
// retrieve number of audio samples (per channel)
arState->in_nb_samples = decoded_audio_frame->nb_samples;
if (arState->in_nb_samples <= 0)
{
printf("in_nb_samples error.\n");
return -1;
}
// Set SwrContext parameters for resampling
av_opt_set_int(arState->swr_ctx, "in_channel_layout", arState->in_channel_layout, 0);
av_opt_set_int(arState->swr_ctx, "in_sample_rate", in_sample_rate, 0);
av_opt_set_sample_fmt(arState->swr_ctx, "in_sample_fmt", videoState->audio->ptrAudioCodecCtx_->sample_fmt, 0);
// Set SwrContext parameters for resampling
av_opt_set_int(arState->swr_ctx, "out_channel_layout", arState->out_channel_layout, 0);
av_opt_set_int(arState->swr_ctx, "out_sample_rate", out_sample_rate, 0);
av_opt_set_sample_fmt(arState->swr_ctx, "out_sample_fmt", out_sample_fmt, 0);
// initialize SWR context after user parameters have been set
int ret = swr_init(arState->swr_ctx);
if (ret < 0)
{
printf("Failed to initialize the resampling context.\n");
return -1;
}
// retrieve output samples number taking into account the progressive delay
int64_t delay = swr_get_delay(arState->swr_ctx, videoState->audio->ptrAudioCodecCtx_->sample_rate) + arState->in_nb_samples;
arState->out_nb_samples = av_rescale_rnd(delay, out_sample_rate, in_sample_rate, AV_ROUND_UP );
// check output samples number was correctly rescaled
if (arState->out_nb_samples <= 0)
{
printf("av_rescale_rnd error\n");
return -1;
}
// get number of output audio channels
arState->out_nb_channels = av_get_channel_layout_nb_channels(arState->out_channel_layout);
// allocate data pointers array for arState->resampled_data and fill data
// pointers and linesize accordingly
// check memory allocation for the resampled data was successful
ret = av_samples_alloc_array_and_samples(&arState->resampled_data, &arState->out_linesize, arState->out_nb_channels, arState->out_nb_samples, out_sample_fmt, 0);
if (ret < 0)
{
printf("av_samples_alloc_array_and_samples() error: Could not allocate destination samples.\n");
return -1;
}
if (arState->swr_ctx)
{
// do the actual audio data resampling
// check audio conversion was successful
int ret_num_samples = swr_convert(arState->swr_ctx,arState->resampled_data,arState->out_nb_samples,(const uint8_t**)decoded_audio_frame->data, decoded_audio_frame->nb_samples);
//int ret_num_samples = swr_convert_frame(arState->swr_ctx,arState->resampled_data,arState->out_nb_samples,(const uint8_t**)decoded_audio_frame->data, decoded_audio_frame->nb_samples);
if (ret_num_samples < 0)
{
printf("swr_convert_error.\n");
return -1;
}
// get the required buffer size for the given audio parameters
// check audio buffer size
arState->resampled_data_size = av_samples_get_buffer_size(&arState->out_linesize, arState->out_nb_channels,ret_num_samples,out_sample_fmt,1);
if (arState->resampled_data_size < 0)
{
printf("av_samples_get_buffer_size error.\n");
return -1;
}
} else {
printf("swr_ctx null error.\n");
return -1;
}
// copy the resampled data to the output buffer
memcpy(out_buf, arState->resampled_data[0], arState->resampled_data_size);
// flush the swr context
int delayed = swr_convert(arState->swr_ctx,arState->resampled_data,arState->out_nb_samples,NULL,0);
if (arState->resampled_data)
{
av_freep(&arState->resampled_data[0]);
}
av_freep(&arState->resampled_data);
arState->resampled_data = NULL;
int ret_data_size = arState->resampled_data_size;
return ret_data_size;
}
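For comparison, my understanding of the intended usage is to configure and swr_init() the context once at startup and then only call swr_convert() per frame, with no re-initialisation or flushing in between; a rough per-frame sketch, assuming S16 stereo output at 48000 Hz (all names here are illustrative, not my real code):
#include <string.h>
#include <libavutil/frame.h>
#include <libavutil/mathematics.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

static int convert_frame(SwrContext *swr, const AVFrame *frame, int in_sample_rate, uint8_t *out_buf)
{
    /* size the output for this frame plus whatever swr is still buffering */
    int max_out = (int)av_rescale_rnd(swr_get_delay(swr, in_sample_rate) + frame->nb_samples,
                                      48000, in_sample_rate, AV_ROUND_UP);
    uint8_t **out = NULL;
    int out_linesize = 0;
    if (av_samples_alloc_array_and_samples(&out, &out_linesize, 2, max_out, AV_SAMPLE_FMT_S16, 0) < 0)
        return -1;
    int got = swr_convert(swr, out, max_out, (const uint8_t **)frame->data, frame->nb_samples);
    if (got < 0) { av_freep(&out[0]); av_freep(&out); return -1; }
    int out_bytes = av_samples_get_buffer_size(NULL, 2, got, AV_SAMPLE_FMT_S16, 1);
    memcpy(out_buf, out[0], out_bytes);
    av_freep(&out[0]);
    av_freep(&out);
    return out_bytes;   /* no per-frame flush, no per-frame swr_init */
}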
I also tried using the filter as shown here, but my output is just noise.
This is my filter code
int ResampleFrame(AVFrame *frame, uint8_t *out_buf)
{
/* Push the decoded frame into the filtergraph */
qint32 ret;
ret = av_buffersrc_add_frame_flags(buffersrc_ctx1, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
if (ret < 0)
{
printf("ResampleFrame: Error adding frame to buffer\n");
// Delete input frame and return null
av_frame_unref(frame);
return 0;
}
//printf("resampling\n");
AVFrame *resampled_frame = av_frame_alloc();
/* Pull filtered frames from the filtergraph */
ret = av_buffersink_get_frame(buffersink_ctx1, resampled_frame);
/* Set the timestamp on the resampled frame */
resampled_frame->best_effort_timestamp = resampled_frame->pts;
if (ret < 0)
{
av_frame_unref(frame);
av_frame_unref(resampled_frame);
return 0;
}
int buffer_size = av_samples_get_buffer_size(NULL, 2,resampled_frame->nb_samples,AV_SAMPLE_FMT_S16,1);
memcpy(out_buf,resampled_frame->data,buffer_size);
//av_frame_unref(frame);
av_frame_unref(resampled_frame);
return buffer_size;
}
QString filter_description1 = "aresample=48000,aformat=sample_fmts=s16:channel_layouts=stereo,asetnsamples=n=1024:p=0";
int InitAudioFilter(AVStream *inputStream)
{
char args[512];
int ret;
const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
filter_graph = avfilter_graph_alloc();
const enum AVSampleFormat out_sample_fmts[] = {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE};
const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_STEREO, -1};
const int out_sample_rates[] = {48000, -1};
snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
inputStream->codec->time_base.num, inputStream->codec->time_base.den,
inputStream->codec->sample_rate,
av_get_sample_fmt_name(inputStream->codec->sample_fmt),
inputStream->codec->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx1, buffersrc, "in", args, NULL, filter_graph);
if (ret < 0)
{
printf("InitAudioFilter: Unable to create buffersrc\n");
return -1;
}
ret = avfilter_graph_create_filter(&buffersink_ctx1, buffersink, "out", NULL, NULL, filter_graph);
if (ret < 0)
{
printf("InitAudioFilter: Unable to create buffersink\n");
return ret;
}
// set opt SAMPLE FORMATS
ret = av_opt_set_int_list(buffersink_ctx1, "sample_fmts", out_sample_fmts, -1, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
printf("InitAudioFilter: Cannot set output sample format\n");
return ret;
}
// set opt CHANNEL LAYOUTS
ret = av_opt_set_int_list(buffersink_ctx1, "channel_layouts", out_channel_layouts, -1, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
printf("InitAudioFilter: Cannot set output channel layout\n");
return ret;
}
// set opt OUT SAMPLE RATES
ret = av_opt_set_int_list(buffersink_ctx1, "sample_rates", out_sample_rates, -1, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
printf("InitAudioFilter: Cannot set output sample rate\n");
return ret;
}
/* Endpoints for the filter graph. */
outputs -> name = av_strdup("in");
outputs -> filter_ctx = buffersrc_ctx1;
outputs -> pad_idx = 0;
outputs -> next = NULL;
/* Endpoints for the filter graph. */
inputs -> name = av_strdup("out");
inputs -> filter_ctx = buffersink_ctx1;
inputs -> pad_idx = 0;
inputs -> next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description1.toStdString().c_str(), &inputs, &outputs, NULL)) < 0)
{
printf("InitAudioFilter: Could not add the filter to graph\n");
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
{
printf("InitAudioFilter: Could not configure the graph\n");
}
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
AVFilterLink *outlink = buffersink_ctx1->inputs[0];
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
QString str = args;
printf("Output: srate:%dHz fmt:%s chlayout: %s\n", (int) outlink->sample_rate,
av_get_sample_fmt_name((AVSampleFormat) outlink->format),
str.toStdString().c_str());
filterGraphInitialized_ = true;
}
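One thing I'm unsure about is whether pulling a single frame per push is enough; the examples I've seen drain the sink in a loop until it returns AVERROR(EAGAIN), roughly like this (a sketch around my buffersink_ctx1; the helper name is mine):
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Pull every frame the sink currently has buffered; returns how many were pulled,
 * or a negative error code. */
static int drain_sink(AVFilterContext *sink, AVFrame *out_frame)
{
    int pulled = 0;
    for (;;) {
        int ret = av_buffersink_get_frame(sink, out_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;              /* nothing more buffered right now */
        if (ret < 0)
            return ret;         /* real error */
        /* ...copy out_frame->data[] to the output buffer here... */
        av_frame_unref(out_frame);
        pulled++;
    }
    return pulled;
}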
And since I don't have much experience with filters, or audio for that matter, I'm probably missing something here too, but I can't figure out what.
Thanks
I'm trying to learn C++ by doing everything in it. However, I'm stuck establishing serial communication in C++. I send an integer array from an MCU (microcontroller) to Visual Studio C++. There is no software problem on the MCU side (I have tested it), and I can read char values correctly. However, when I read the values in C++ I get a 0 after each integer. I haven't been able to solve this; I think the 0 corresponds to the newline.
For example, if I send (5, 10, 15), I read (5, 0, 10, 0, 15, 0).
Can you help me solve this problem? I am using the Windows API for serial communication.
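For context, this is how I would reassemble the received bytes if the MCU were actually sending each value as a 16-bit little-endian integer (just a guess on my part; the buffer below is hard-coded to my example):
#include <cstdio>
#include <cstdint>

int main()
{
    // bytes as they arrive in my example: sending (5, 10, 15) yields these six bytes
    unsigned char raw[] = { 5, 0, 10, 0, 15, 0 };
    int rawLen = sizeof(raw);
    // interpret every pair of bytes as one little-endian 16-bit value
    for (int i = 0; i + 1 < rawLen; i += 2)
    {
        uint16_t value = (uint16_t)(raw[i] | (raw[i + 1] << 8));
        printf("%u ", value);   // prints: 5 10 15
    }
    printf("\n");
    return 0;
}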
The relevant code:
char ReadData; //temperory Character
int SerialBuffer[64] = { 0 }; //Buffer to send and receive data
.
.
.
do
{
Status = ReadFile(hComm, &ReadData, sizeof(ReadData), &NoBytesRead, NULL);
SerialBuffer[loop] = ReadData;
++loop;
} while (NoBytesRead > 0);
The whole code:
int main(void)
{
HANDLE hComm; // Handle to the Serial port
BOOL Status; // Status
DCB dcbSerialParams = { 0 }; // Initializing DCB structure
COMMTIMEOUTS timeouts = { 0 }; //Initializing timeouts structure
int SerialBuffer[64] = { 0 }; //Buffer to send and receive data
DWORD BytesWritten = 0; // No of bytes written to the port
DWORD dwEventMask; // Event mask to trigger
char ReadData; //temperory Character
DWORD NoBytesRead; // Bytes read by ReadFile()
unsigned char loop = 0;
wchar_t pszPortName[10] = { 0 }; //com port id
wchar_t PortNo[20] = { 0 }; //contain friendly name
hComm = CreateFile("\\\\.\\COM6", // device path of the COM port
GENERIC_READ | GENERIC_WRITE, // Read/Write Access
0, // No Sharing, ports cant be shared
NULL, // No Security
OPEN_EXISTING, // Open existing port only
0, // Non Overlapped I/O
NULL); // Null for Comm Devices
if (hComm == INVALID_HANDLE_VALUE)
{
printf_s("\n Port can't be opened\n\n");
}
//Setting the Parameters for the SerialPort
dcbSerialParams.DCBlength = sizeof(dcbSerialParams);
Status = GetCommState(hComm, &dcbSerialParams); //retreives the current settings
if (Status == FALSE)
{
printf_s("\nError to Get the Com state\n\n");
}
dcbSerialParams.BaudRate = CBR_115200; //BaudRate = 115200
dcbSerialParams.ByteSize = 8; //ByteSize = 8
dcbSerialParams.StopBits = ONESTOPBIT; //StopBits = 1
dcbSerialParams.Parity = NOPARITY; //Parity = None
Status = SetCommState(hComm, &dcbSerialParams);
if (Status == FALSE)
{
printf_s("\nError to Setting DCB Structure\n\n");
}
//Setting Timeouts
timeouts.ReadIntervalTimeout = 50;
timeouts.ReadTotalTimeoutConstant = 50;
timeouts.ReadTotalTimeoutMultiplier = 10;
timeouts.WriteTotalTimeoutConstant = 50;
timeouts.WriteTotalTimeoutMultiplier = 10;
if (SetCommTimeouts(hComm, &timeouts) == FALSE)
{
printf_s("\nError to Setting Time outs");
}
Status = SetCommMask(hComm, EV_RXCHAR);
if (Status == FALSE)
{
printf_s("\nError to in Setting CommMask\n\n");
}
//Setting WaitComm() Event
Status = WaitCommEvent(hComm, &dwEventMask, NULL); //Wait for the character to be received
if (Status == FALSE)
{
printf_s("\nError! in Setting WaitCommEvent()\n\n");
}
//Read data and store in a buffer
do
{
Status = ReadFile(hComm, &ReadData, sizeof(ReadData), &NoBytesRead, NULL);
SerialBuffer[loop] = ReadData;
++loop;
} while (NoBytesRead > 0);
--loop; //Get Actual length of received data
printf_s("\nNumber of bytes received = %d\n\n", loop);
//print receive data on console
printf_s("\n\n");
int index = 0;
for (index = 0; index < loop; ++index)
{
printf_s("%d ", SerialBuffer[index]);
}
printf_s("\n\n");
CloseHandle(hComm);//Closing the Serial Port
return 0;
}
I am collecting data from a LiDAR in real time. I created a for loop that initiates a frame, collects the data, and saves it into an array (distArray). However, I am stuck on the memory-mapping part. My array holds 500 integers, i.e. 2000 bytes of memory. When I try to copy the array into the mapped memory using CopyMemory(), I get the following error: "Exception thrown at 0x5843335E (vcruntime140d.dll) in file.exe: 0xC0000005: Access violation writing location 0x007E0000." Any ideas on how to solve this problem?
In the code below there are a lot of function and header calls that are irrelevant to the question, so please don't mind them.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bta.h>
#include <windows.h>
#include <stdio.h>
#include <conio.h>
#include <tchar.h>
#define BUF_SIZE 2000
TCHAR szName[] = TEXT("mapObjFile");
void setTOFParameters(BTA_Handle btaHandle, uint32_t newTint, uint32_t newFPS, uint32_t newFMOD);
static void errorHandling(BTA_Status status);
const int arraySize = 500;
const int numFrames = 500;
int distArray[arraySize];
int main() {
BTA_Status status;
BTA_Config config;
printf("BTAinitConfig()\n\n");
status = BTAinitConfig(&config);
errorHandling(status);
uint8_t udpDataIpAddr[] = { 224, 0, 0, 1 };
config.udpDataIpAddr = udpDataIpAddr;
config.udpDataIpAddrLen = 4;
config.udpDataPort = 10002;
uint8_t tcpDeviceIpAddr[] = { 192, 168, 0, 10 };
config.tcpDeviceIpAddr = tcpDeviceIpAddr;
config.tcpDeviceIpAddrLen = 4;
config.tcpControlPort = 10001;
config.frameQueueMode = BTA_QueueModeDropOldest;
config.frameQueueLength = 500;
config.frameMode = BTA_FrameModeDistAmp;
// OPEN Connection
BTA_Handle btaHandle;
printf("BTAopen()\n\n");
status = BTAopen(&config, &btaHandle);
errorHandling(status);
printf("Service running: %d\n", BTAisRunning(btaHandle));
printf("Connection up: %d\n\n", BTAisConnected(btaHandle));
BTA_DeviceInfo *deviceInfo;
printf("BTAgetDeviceInfo()\n");
status = BTAgetDeviceInfo(btaHandle, &deviceInfo);
errorHandling(status);
printf("Device type: 0x%x\n", deviceInfo->deviceType);
printf("BTAfreeDeviceInfo()\n\n");
BTAfreeDeviceInfo(deviceInfo);
// READ Register
uint32_t regAddr = 0x1004; //MLX75123, Register 0x1000 = I2C_ADDRESS
uint32_t regValue;
status = BTAreadRegister(btaHandle, regAddr, &regValue, 0);
errorHandling(status);
printf("BTAreadRegister : Register 0x%04X has value 0x%04X\n\n", regAddr, regValue);
for (int i = 1; i < numFrames; i++) {
// GET The Frame
printf("Getting distance and amplitude data :\n");
BTA_Frame *frame;
printf("BTAgetFrame()\n");
status = BTAgetFrame(btaHandle, &frame, 300);
errorHandling(status);
BTA_DataFormat dataFormat;
BTA_Unit unit;
uint16_t xRes, yRes;
// Getting the distance data into a buffer and calculating the average amplitude over the entire frame :
uint16_t *distances;
printf("BTAgetDistances()\n");
status = BTAgetDistances(frame, (void**)&distances, &dataFormat, &unit, &xRes, &yRes);
errorHandling(status);
if (dataFormat == BTA_DataFormatUInt16) {
uint32_t distAvg = 0;
for (int y = 0; y < yRes; y++) {
for (int x = 0; x < xRes; x++) {
distAvg += distances[x + y * xRes];
}
}
if (xRes != 0 && yRes != 0) {
distArray[i] = distAvg / xRes / yRes;
printf("The average distance is %d.\n", distArray[i]);
}
}
// FREE The Frame
printf("BTAfreeFrame()\n\n");
status = BTAfreeFrame(&frame);
errorHandling(status);
// ----------------------- Memory Mapping -----------------------
HANDLE hMapFile;
LPCTSTR pBuf;
hMapFile = CreateFileMapping(
INVALID_HANDLE_VALUE, // use paging file
NULL, // default security
PAGE_READWRITE, // read/write access
0, // maximum object size (high-order DWORD)
BUF_SIZE, // maximum object size (low-order DWORD)
szName); // name of mapping object
if (hMapFile == NULL)
{
_tprintf(TEXT("Could not create file mapping object (%d).\n"),
GetLastError());
return 1;
}
pBuf = (LPTSTR)MapViewOfFile(hMapFile, // handle to map object
FILE_MAP_READ, // read/write permission
0,
0,
BUF_SIZE);
if (pBuf == NULL)
{
_tprintf(TEXT("Could not map view of file (%d).\n"),
GetLastError());
CloseHandle(hMapFile);
return 1;
}
CopyMemory((PVOID)pBuf, &distArray, BUF_SIZE);
_getch();
/*UnmapViewOfFile(pBuf);
CloseHandle(hMapFile);*/
}
// CLOSE Connection
printf("BTAclose()\n\n");
status = BTAclose(&btaHandle);
errorHandling(status);
// END Program
printf("Hit <ENTER> to close the window .. ");
fgetc(stdin);
}
After mapping the data to the memory, I will be using the mmap library in Python (https://docs.python.org/3.0/library/mmap.html) to access the data based on the tagname parameter ...
See here: https://learn.microsoft.com/en-us/previous-versions/windows/desktop/legacy/aa366535(v%3Dvs.85)
void CopyMemory(
_In_ PVOID Destination,
_In_ const VOID *Source,
_In_ SIZE_T Length
);
The first parameter is the destination and the second is the source. In your code you have:
CopyMemory((PVOID)pBuf, &distArray, BUF_SIZE);
so the destination is pBuf, which is a view of the mapping created with FILE_MAP_READ, i.e. read-only. The exception occurs because you copy data from distArray into that read-only memory.
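A minimal sketch of the write path with a writable view, keeping the same mapping name and BUF_SIZE as in the question (FILE_MAP_ALL_ACCESS instead of FILE_MAP_READ; the function name and the stand-in array are mine):
#include <windows.h>
#include <tchar.h>

#define BUF_SIZE 2000              /* 500 ints, as in the question */

static int distArray[500];         /* stand-in for the LiDAR averages */

int PublishDistances(void)
{
    /* create (or open) the named section backed by the paging file */
    HANDLE hMapFile = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, BUF_SIZE, TEXT("mapObjFile"));
    if (hMapFile == NULL)
    {
        _tprintf(TEXT("Could not create file mapping object (%lu).\n"), GetLastError());
        return 1;
    }
    /* FILE_MAP_ALL_ACCESS (or FILE_MAP_WRITE) gives a writable view;
       FILE_MAP_READ alone is what made CopyMemory fault */
    void *pView = MapViewOfFile(hMapFile, FILE_MAP_ALL_ACCESS, 0, 0, BUF_SIZE);
    if (pView == NULL)
    {
        _tprintf(TEXT("Could not map view of file (%lu).\n"), GetLastError());
        CloseHandle(hMapFile);
        return 1;
    }
    CopyMemory(pView, distArray, BUF_SIZE);   /* destination first, then source */
    /* keep the handle and the view open for as long as the Python reader needs them */
    return 0;
}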
I am currently working on a project that involves using DXVA API and the FFmpeg framework to implement hardware-accelerated decoding of H264 video stream files.
I have done some research on GPU decoding and constructed my code based on the hardware-acceleration implementation in VLC. From my understanding, using DXVA in FFmpeg involves initializing the DirectXVideoDecoder and implementing several callback functions in AVCodecContext. The decoding itself is done with the FFmpeg function avcodec_decode_video2(), and each compressed packet is read with av_read_frame(). The decoded frame is stored in graphics memory and displayed using Direct3D.
I tried to time each step with ::GetTickCount() and noticed that the execution time of the program for a 1550-frame video is 35,000 ms, with the display function taking 90% of the time and the decoding function taking 6%.
However, when I commented out the display step and executed only the decoding of each frame, the total decoding time surprisingly increased to 25,000 ms for the same video, taking 94% of the total time.
Here is the code for the decoding function:
//record start time
DWORD start_time = ::GetTickCount();
//media file to be loaded
const char *filename = "123.mkv";
//time recording parameters
unsigned frame_read_time_total = 0;
unsigned decode_frame_time_total = 0;
unsigned display_time_total = 0;
unsigned setup_time_total = 0;
/*********************Setup and Initialization Code*******************************/
unsigned setup_time_start = ::GetTickCount();
av_register_all();
av_log_set_level(AV_LOG_DEBUG);
int res;
AVFormatContext *file = NULL;
res = avformat_open_input(&file, filename, NULL, NULL); // open the input file
if (res < 0) {
printf("error %x in avformat_open_input\n", res);
return 1;
}
res = avformat_find_stream_info(file, NULL); // retrieve the stream information
if (res < 0)
{
printf("error %x in avformat_find_stream_info\n", res);
return 1;
}
av_dump_format(file, 0, filename, 0); // list the input file's stream information
int i;
int videoindex = -1;
int audioindex = -1;
for (i = 0; i < file->nb_streams; i++){
if (file->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
videoindex = i;
}
if (file->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
audioindex = i;
}
}
if (videoindex == -1){
av_log(NULL, AV_LOG_DEBUG, "can't find video stream\n");
return 0;
}
AVCodec *codec = avcodec_find_decoder(file->streams[videoindex]->codec->codec_id); // find the decoder based on the stream information
if (!codec){
printf("decoder not found\n");
return 1;
}
AVCodecContext *codecctx = file->streams[videoindex]->codec;
screen_width = codecctx->width;
screen_height = codecctx->height;
//Initialize Win API Window
WNDCLASSEX window;
ZeroMemory(&window, sizeof(window));
window.cbSize = sizeof(window);
window.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
window.lpfnWndProc = (WNDPROC)WindowProcess;
window.lpszClassName = L"D3D";
window.style = CS_HREDRAW | CS_VREDRAW;
RegisterClassEx(&window);
HWND hwnd_temp = CreateWindow(L"D3D", L"Player", WS_OVERLAPPEDWINDOW,
0, 0, screen_width, screen_height, NULL, NULL, NULL, NULL);
if (hwnd_temp == NULL){
av_log(NULL, AV_LOG_ERROR, "Error: Cannot create window\n");
system("pause");
}
hwnd.push_back(hwnd_temp);
vlc_va_dxva2_t *dxva = vlc_va_NewDxva2(codecctx->codec_id);
if (NULL == dxva){
return 0;
}
res = Setup(dxva, &codecctx->hwaccel_context, &codecctx->pix_fmt, screen_width, screen_height);
if (res < 0) {
printf("error DXVA setup\n", res);
return 1;
}
//Assign callback function
codecctx->opaque = dxva;
codecctx->get_format = ffmpeg_GetFormat;
codecctx->get_buffer = ffmpeg_GetFrameBuf;
codecctx->reget_buffer = ffmpeg_ReGetFrameBuf;
codecctx->release_buffer = ffmpeg_ReleaseFrameBuf;
codecctx->thread_count = 1;
res = avcodec_open2(codecctx, codec, NULL);
if (res < 0) {
printf("error %x in avcodec_open2\n", res);
return 1;
}
//Initialize Packet
AVPacket pkt = { 0 };
AVFrame *picture = avcodec_alloc_frame();
DWORD wait_for_keyframe = 60;
//initialize frame count
int count = 0;
ShowWindow(hwnd.at(0), SW_SHOWNORMAL);
UpdateWindow(hwnd.at(0));
RECT screen_size;
screen_size.top = 0;
screen_size.bottom = screen_height;
screen_size.left = 0;
screen_size.right = screen_width;
unsigned setup_time_end = ::GetTickCount();
setup_time_total = setup_time_end - setup_time_start;
MSG msg;
ZeroMemory(&msg, sizeof(msg));
while(msg.message!=WM_QUIT)
{
if (PeekMessage(&msg, NULL, 0,0, PM_REMOVE)){
TranslateMessage(&msg);
DispatchMessage(&msg);
continue;
}
int read_status;
unsigned read_frame_start = ::GetTickCount();
read_status = av_read_frame(file, &pkt);
if (read_status < 0)
{
av_free_packet(&pkt);
goto done;
}
unsigned read_frame_end = ::GetTickCount();
frame_read_time_total += (read_frame_end - read_frame_start);
int got_picture = 0;
unsigned decode_start = ::GetTickCount();
int bytes_used = avcodec_decode_video2(codecctx, picture, &got_picture, &pkt);
unsigned decode_end = ::GetTickCount();
decode_frame_time_total += (decode_end - decode_start);
if (got_picture)
{
count++;
unsigned display_start = ::GetTickCount();
//display_frame((vlc_va_dxva2_t *)codecctx->opaque, picture, screen_size,0);
unsigned display_end = ::GetTickCount();
display_time_total += (display_end - display_start);
}
av_free_packet(&pkt);
}
done:
UnregisterClass(L"D3D",0);
printf("Frames = %d\n",count);
unsigned stop_time = ::GetTickCount();
unsigned total_time = stop_time - start_time;
printf("total frame = %d\n", count);
printf("time cost = %d\n", total_time);
printf("total setup time = %d, %f %% total execution time\n", setup_time_total,(float) setup_time_total / total_time * 100);
printf("total frame read time = %d, %f %% total execution time\n", frame_read_time_total, (float)frame_read_time_total / total_time*100);
printf("total frame decode time = %d, %f %% total execution time\n", decode_frame_time_total, (float)decode_frame_time_total / total_time*100);
printf("total display time = %d, %f %% of total execution time\n", display_time_total, (float)display_time_total / total_time*100);
av_free(picture);
av_close_input_file(file);
system("pause");
return 0;
What could be the cause of this strange behavior? My guess is that it may be incorrect use of ::GetTickCount(), or maybe it has to do with the DXVA hardware-accelerated decoding process. Sorry for the long post. Any input or suggestion is appreciated. Thanks in advance.
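In case the timer itself is part of the problem, this is the higher-resolution helper I would swap in for ::GetTickCount(), which as far as I know only ticks every 10-16 ms, so timing many short calls with it can be misleading (just a sketch):
#include <windows.h>

// Elapsed milliseconds between two QueryPerformanceCounter readings,
// with sub-millisecond resolution.
static double ElapsedMs(LARGE_INTEGER start, LARGE_INTEGER end)
{
    LARGE_INTEGER freq;
    ::QueryPerformanceFrequency(&freq);
    return (double)(end.QuadPart - start.QuadPart) * 1000.0 / (double)freq.QuadPart;
}

// usage around one decode call:
//   LARGE_INTEGER t0, t1;
//   ::QueryPerformanceCounter(&t0);
//   avcodec_decode_video2(codecctx, picture, &got_picture, &pkt);
//   ::QueryPerformanceCounter(&t1);
//   decode_ms_total += ElapsedMs(t0, t1);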
I think this is correct behaviour if the decoding process is asynchronous. I know FFmpeg uses threads, but it depends on compilation flags and the decoding setup.
If the display process is very long, the decoder keeps decoding frames while the display process executes. So when you ask for rendering, some frames are already decoded, and it is fast.
If you remove the display process, the decoding process takes all the processor time. Normally, the display process uses some sort of timestamp that leaves enough time for the decoding process.
PS: from what I know about FFmpeg and DXVA2, you also need to provide the DirectX texture.