How to read an h264 stream as a file from a USB webcam directly in C/C++ without using OpenCV? - c++

I am able to read a video file in h264 format and do some machine learning inference on top of it. The code works absolutely fine for input from a file. The code below is a sample from the DeepStream SDK:
FileDataProvider(const char *szFilePath, simplelogger::Logger *logger)
    : logger_(logger)
{
    fp_ = fopen(szFilePath, "rb");
    //fp_ = fopen("/dev/video0", "rb");
    if (nullptr == fp_) {
        LOG_ERROR(logger, "Failed to open file " << szFilePath);
        exit(1);
    }
    pLoadBuf_ = new uint8_t[nLoadBuf_];
    pPktBuf_ = new uint8_t[nPktBuf_];
    assert(nullptr != pLoadBuf_);
}

~FileDataProvider() {
    if (fp_) {
        fclose(fp_);
    }
    if (pLoadBuf_) {
        delete [] pLoadBuf_;
    }
    if (pPktBuf_) {
        delete [] pPktBuf_;
    }
}
What is the requirement?
Read from the Logitech C920 webcam instead of a video file.
I know how to read from the webcam using OpenCV, but I don't want to use OpenCV here.
My Research
Using v4l2 we can get the stream and display it in VLC.
The camera supports the formats below.
#ubox:~$ v4l2-ctl --device=/dev/video1 --list-formats
ioctl: VIDIOC_ENUM_FMT
    Index       : 0
    Type        : Video Capture
    Pixel Format: 'YUYV'
    Name        : YUYV 4:2:2

    Index       : 1
    Type        : Video Capture
    Pixel Format: 'H264' (compressed)
    Name        : H.264

    Index       : 2
    Type        : Video Capture
    Pixel Format: 'MJPG' (compressed)
    Name        : Motion-JPEG
Reading output of a USB webcam in Linux
vlc v4l2:///dev/video1 --v4l2-chroma=h264 - this displays the video from the webcam.
How to do this?
- Now, how do I feed this live stream into the sample code above so that it reads from the webcam rather than from a file?
[update-1]
- In other words, does v4l2 have an option to write the video stream to disk in h264 format? Then I could read that file as before (with the above code) while v4l2 is writing it to disk.
[update-2]
- We can use ffmpeg instead of v4l2. Are there any solutions for using ffmpeg to save the video stream to disk continuously, so that another program can read that file?

Before using ioctl to capture frames from the camera, you need to set the format first, like below.
fp_ = open("/dev/video0", O_RDWR);
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
ioctl(fp_, VIDIOC_S_FMT, &fmt);
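A slightly fuller version of the same call (just a sketch; the 1920x1080 values are example assumptions, not from the original) also requests a resolution and verifies that the driver actually accepted H.264, since VIDIOC_S_FMT may silently adjust the fields it is given:

// Sketch: request a resolution too, and check what the driver negotiated.
fmt.fmt.pix.width = 1920;   // example values, adjust to what the camera offers
fmt.fmt.pix.height = 1080;
if (ioctl(fp_, VIDIOC_S_FMT, &fmt) < 0)
{
    perror("VIDIOC_S_FMT");
}
else if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_H264)
{
    fprintf(stderr, "Driver did not accept the H264 pixel format\n");
}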
Then, initialize and map the buffers:
struct Buffer
{
    void *start;
    unsigned int length;
    unsigned int flags;
};

int buffer_count_ = 4;
Buffer *buffers_;

bool AllocateBuffer()
{
    // Ask the driver for buffer_count_ mmap-able capture buffers.
    struct v4l2_requestbuffers req = {0};
    req.count = buffer_count_;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fp_, VIDIOC_REQBUFS, &req) < 0)
    {
        perror("ioctl Requesting Buffer");
        return false;
    }
    buffers_ = new Buffer[buffer_count_];
    for (int i = 0; i < buffer_count_; i++)
    {
        // Query each buffer's size/offset and map it into our address space.
        struct v4l2_buffer buf = {0};
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (ioctl(fp_, VIDIOC_QUERYBUF, &buf) < 0)
        {
            perror("ioctl Querying Buffer");
            return false;
        }
        buffers_[i].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fp_, buf.m.offset);
        buffers_[i].length = buf.length;
        if (MAP_FAILED == buffers_[i].start)
        {
            printf("MAP FAILED: %d\n", i);
            for (int j = 0; j < i; j++)
                munmap(buffers_[j].start, buffers_[j].length);
            return false;
        }
        // Hand the buffer to the driver so it can be filled.
        if (ioctl(fp_, VIDIOC_QBUF, &buf) < 0)
        {
            perror("ioctl Queue Buffer");
            return false;
        }
    }
    return true;
}
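As a design note, this is V4L2's memory-mapped streaming I/O: the buffers live in driver/kernel memory and mmap() only maps them into the application's address space, so no copy is made when a frame arrives. VIDIOC_DQBUF later tells you which of these buffers has been filled, and VIDIOC_QBUF hands it back to the driver for reuse.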
Use VIDIOC_STREAMON to start capturing:
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ioctl(fp_, VIDIOC_STREAMON, &type);
Finally, read a frame from the mapped buffers. Generally, CaptureImage() will be called in a while loop.
Buffer CaptureImage()
{
    // Wait (up to 1 second) for the driver to signal that a buffer is ready.
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fp_, &fds);
    struct timeval tv = {0};
    tv.tv_sec = 1;
    tv.tv_usec = 0;
    int r = select(fp_ + 1, &fds, NULL, NULL, &tv);
    if (r == 0)
    {
        // timeout
    }
    // Dequeue a filled buffer from the driver.
    struct v4l2_buffer buf = {0};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    while (ioctl(fp_, VIDIOC_DQBUF, &buf) < 0)
    {
        perror("Retrieving Frame");
    }
    struct Buffer buffer = {.start = buffers_[buf.index].start,
                            .length = buf.bytesused,
                            .flags = buf.flags};
    // Re-queue the buffer so the driver can reuse it.
    if (ioctl(fp_, VIDIOC_QBUF, &buf) < 0)
    {
        perror("Queue buffer");
    }
    return buffer;
}
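To feed this into the question's reader, one option (a minimal sketch, not the DeepStream API; ProcessPacket() is a hypothetical stand-in for whatever consumes the encoded bytes) is to call CaptureImage() in a loop instead of fread() on the file, and to tear the device down when finished:

// Minimal usage sketch, assuming fp_ is the V4L2 fd opened above.
bool quit = false;
while (!quit)
{
    Buffer frame = CaptureImage();                        // one dequeued H.264 chunk
    ProcessPacket((uint8_t *)frame.start, frame.length);  // hypothetical consumer
}

// Teardown: stop streaming, unmap the buffers, close the device.
v4l2_buf_type off = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ioctl(fp_, VIDIOC_STREAMOFF, &off);
for (int i = 0; i < buffer_count_; i++)
    munmap(buffers_[i].start, buffers_[i].length);
delete [] buffers_;
close(fp_);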

Related

Capturing a picture in c++ using v4l2, explaining the process

I have been struggling with writing C++ code for capturing a picture from a web camera. I successfully did it, but I would like some clarification about the process I took.
My code can be described in 6 steps; I will write them here along with my questions:
I. step: Initialize the device and set image format.
const char* dev_name = "/dev/video0";
int width=320;
int height=240;
int fd = v4l2_open(dev_name, O_RDWR | O_NONBLOCK, 0);
struct v4l2_format format = {0};
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
format.fmt.pix.width = width;
format.fmt.pix.height = height;
format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;//V4L2_PIX_FMT_YUYV //V4L2_PIX_FMT_RGB24
format.fmt.pix.field = V4L2_FIELD_NONE; //V4L2_FIELD_NONE
xioctl(fd, VIDIOC_S_FMT, &format);
II. step: Request for a buffer.
struct v4l2_requestbuffers req = {0};
req.count = 2;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
xioctl(fd, VIDIOC_REQBUFS, &req);
question: What exactly does this "request a buffer" do?
question: Is this buffer physically located inside the camera or on my PC?
III. step: Query the buffers and map them.
struct v4l2_buffer buf;
buffer* buffers;
unsigned int i;
buffers = (buffer*) calloc(req.count, sizeof(*buffers));
for (i = 0; i < req.count; i++) {
    clear_memmory(&(buf));
    (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    (buf).memory = V4L2_MEMORY_MMAP;
    (buf).index = i;
    xioctl(fd, VIDIOC_QUERYBUF, &buf);
    buffers[i].length = (buf).length;
    printf("A buff has a len of: %i\n", buffers[i].length);
    buffers[i].start = v4l2_mmap(NULL, (buf).length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (buf).m.offset);
    if (MAP_FAILED == buffers[i].start) {
        perror("Can not map the buffers.");
        exit(EXIT_FAILURE);
    }
}
question: I understand that I do some mapping here, but can somebody explain why this is needed and what exactly is mapped where? To me it sounds like the actual buffer is somewhere else and then I map it into my own buffer so I can read things from my buffer. Is this correct, and where is the actual buffer?
question: Could I also avoid the mapping?
IV. step: Start streaming. Queue the buffer.
for (i = 0; i < 1; i++) {
    clear_memmory(&(buf));
    (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    (buf).memory = V4L2_MEMORY_MMAP;
    (buf).index = i;
    xioctl(fd, VIDIOC_QBUF, &(buf));
}
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
xioctl(fd, VIDIOC_STREAMON, &type);
question: What does VIDIOC_QBUF do? Please be very precise here and explain it in simple terms, since this is very confusing to me.
question: Should VIDIOC_QBUF happen after VIDIOC_STREAMON? Does it matter?
V. step: DeQueue the buffer and save a frame.
do {
    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    // Timeout.
    tv.tv_sec = 2;
    tv.tv_usec = 0;
    r = select(fd + 1, &fds, NULL, NULL, &tv);
} while ((r == -1 && (errno == EINTR)));
if (r == -1) {
    perror("select");
    exit(EXIT_FAILURE);
}
clear_memmory(&(buf));
(buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
(buf).memory = V4L2_MEMORY_MMAP;
xioctl(fd, VIDIOC_DQBUF, &(buf));
printf("Buff index: %i\n", (buf).index);
sprintf(out_name, "image%03d.ppm", pic_count);
fout = fopen(out_name, "w");
if (!fout) {
    perror("Cannot open image");
    exit(EXIT_FAILURE);
}
fwrite(buffers[(buf).index].start, (buf).bytesused, 1, fout);
fclose(fout);
pic_count++;
question: Why is VIDIOC_DQBUF needed and what does it do?
question: If I want to capture 3 frames and I have only 1 buffer that holds 1 picture, do I need to queue and dequeue 3 times?
question: If I want 1000000 pictures, can I make that buffer big enough to hold them all? What limits the size of that buffer? Is that buffer on the camera, so that all these pictures would be sitting in it?
question: If I want 10 pictures taken at moments I choose, and I make the buffer large enough for 10 pictures, what should I call at the moment I want to take a picture? Only VIDIOC_QBUF? VIDIOC_QBUF and VIDIOC_DQBUF? Only VIDIOC_DQBUF?
Please don't point me to https://01.org/linuxgraphics/gfx-docs/drm/media/uapi/v4l/vidioc-qbuf.html or other sites, as I have read everything I was able to find and I am still left with the above unclarity. I really want detailed explanations of these questions. I thank you in advance for all helpful answers.

Capturing YUYV in c++ using v4l2

I have a webcam connected to a BeagleBone via USB. I am coding in C++ and my goal is to capture a raw UNCOMPRESSED picture from the webcam.
First I checked which formats are supported via the command v4l2-ctl --list-formats, and the result was:
Index : 0
Type : Video Capture
Pixel Format: 'MJPG' (compressed)
Name : Motion-JPEG
Index : 1
Type : Video Capture
Pixel Format: 'YUYV'
Name : YUYV 4:2:2
So from this I assume it has to be possible to get an uncompressed picture if I use the YUYV format.
Knowing this, I started writing a program in C++. I successfully wrote a program to capture a compressed picture, but when trying to capture using the YUYV format it doesn't work, and I really need some help to get this done.
Here is my code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include <libv4l2.h>
template <typename typeXX>
void clear_memmory(typeXX* x) {
    memset(x, 0, sizeof(*x));
}

void xioctl(int cd, int request, void *arg){
    int response;
    do{
        //ensures we get the correct response.
        response = v4l2_ioctl(cd, request, arg);
    }
    while (response == -1 && ((errno == EINTR) || (errno == EAGAIN)));
    if (response == -1) {
        fprintf(stderr, "error %d, %s\n", errno, strerror(errno));
        exit(EXIT_FAILURE);
    }
}

struct LMSBBB_buffer{
    void* start;
    size_t length;
};
int main(){
    const char* dev_name = "/dev/video0";
    int width=1920;
    int height=1080;

    int fd = v4l2_open(dev_name, O_RDWR | O_NONBLOCK, 0);

    struct v4l2_format format = {0};
    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    format.fmt.pix.width = width;
    format.fmt.pix.height = height;
    format.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;//V4L2_PIX_FMT_YUYV //V4L2_PIX_FMT_RGB24
    format.fmt.pix.field = V4L2_FIELD_NONE; //V4L2_FIELD_NONE
    xioctl(fd, VIDIOC_S_FMT, &format);
    printf("Device initialized.\n");

    ///request buffers
    struct v4l2_requestbuffers req = {0};
    req.count = 2;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    xioctl(fd, VIDIOC_REQBUFS, &req);
    printf("Buffers requested.\n");

    ///mapping buffers
    struct v4l2_buffer buf;
    LMSBBB_buffer* buffers;
    unsigned int i;
    buffers = (LMSBBB_buffer*) calloc(req.count, sizeof(*buffers));
    for (i = 0; i < req.count; i++) {
        clear_memmory(&(buf));
        (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        (buf).memory = V4L2_MEMORY_MMAP;
        (buf).index = i;
        xioctl(fd, VIDIOC_QUERYBUF, &buf);
        buffers[i].length = (buf).length;
        printf("A buff has a len of: %i\n", buffers[i].length);
        buffers[i].start = v4l2_mmap(NULL, (buf).length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (buf).m.offset);
        if (MAP_FAILED == buffers[i].start) {
            perror("Can not map the buffers.");
            exit(EXIT_FAILURE);
        }
    }
    printf("Buffers mapped.\n");

    for (i = 0; i < req.count; i++) {
        clear_memmory(&(buf));
        (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        (buf).memory = V4L2_MEMORY_MMAP;
        (buf).index = i;
        ioctl(fd, VIDIOC_QBUF, &(buf));
    }
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type);
    printf("buffers queued and streaming.\n");

    int pic_count=0;
    ///CAPTURE
    fd_set fds;
    struct timeval tv;
    int r;
    char out_name[256];
    FILE* fout;

    do {
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        // Timeout.
        tv.tv_sec = 2;
        tv.tv_usec = 0;
        r = select(fd + 1, &fds, NULL, NULL, &tv);
    } while ((r == -1 && (errno == EINTR)));
    if (r == -1) {
        perror("select");
        exit(EXIT_FAILURE);
    }

    clear_memmory(&(buf));
    (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    (buf).memory = V4L2_MEMORY_MMAP;
    xioctl(fd, VIDIOC_DQBUF, &(buf));
    printf("Buff index: %i\n", (buf).index);
    sprintf(out_name, "image%03d.ppm", pic_count);
    fout = fopen(out_name, "w");
    if (!fout) {
        perror("Cannot open image");
        exit(EXIT_FAILURE);
    }
    fprintf(fout, "P6\n%d %d 255\n", width, height);
    fwrite(buffers[(buf).index].start, (buf).bytesused, 1, fout);
    fclose(fout);
    pic_count++;

    clear_memmory(&(buf));
    (buf).type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    (buf).memory = V4L2_MEMORY_MMAP;
    xioctl(fd, VIDIOC_DQBUF, &(buf));
    printf("Buff index: %i\n", (buf).index);
    sprintf(out_name, "image%03d.ppm", pic_count);
    fout = fopen(out_name, "w");
    if (!fout) {
        perror("Cannot open image");
        exit(EXIT_FAILURE);
    }
    fprintf(fout, "P6\n%d %d 255\n", width, height);
    fwrite(buffers[(buf).index].start, (buf).bytesused, 1, fout);
    fclose(fout);
    pic_count++;

    ///xioctl(fd,VIDIOC_QBUF, &(buf));
    return 0;
}
In the line that sets format.fmt.pix.pixelformat, I can choose between V4L2_PIX_FMT_YUYV and V4L2_PIX_FMT_RGB24.
For V4L2_PIX_FMT_RGB24 I get the picture, but when using V4L2_PIX_FMT_YUYV I get this error:
libv4l2: error dequeuing buf: Resource temporarily unavailable
libv4l2: error dequeuing buf: Resource temporarily unavailable
libv4l2: error dequeuing buf: Resource temporarily unavailable
libv4l2: error dequeuing buf: Resource temporarily unavailable
libv4l2: error dequeuing buf: Resource temporarily unavailable
The error lines go on forever until I end the program manually.
Does anyone have an idea what to do? I have spent over 2 weeks on this and I can't move anywhere from here. I would really appreciate any advice.
From what I see you are requesting a FullHD (1920x1080) buffer in YUYV format from a camera. You did not mention the camera type/model/specs, but if it is generic USB-attached hardware, most likely you will not get a raw FullHD YUYV buffer as output, only the MJPEG one (which you can decode to YUV if you hack around with libjpeg) or the decoded RGB buffer (which is pretty much the decoded MJPEG with a YUV->RGB conversion), which is not mmapped.
The exact list of formats with resolutions and frame rates can be requested with the command below, which will probably tell you the camera does not provide 1920x1080 in YUYV, only something smaller, like 640x480:
v4l2-ctl --list-formats-ext
If you need video processing with "true" zero-copy access to raw YUYV camera frames, you need direct access to the hardware, and that specific hardware in the first place. Once you have the USB interface between your software and the camera, you get an extra indirection, and that means the speed goes down. Think for a moment: a YUYV frame at 1920x1080 takes up approximately 4 megabytes of memory. At 30 FPS this is 120 megabytes (or 960 megabits) per second of bus throughput. If you have a USB 2.0 camera, there is just no bandwidth to support this (hence the need for MJPEG). Even at 15 FPS this is 480 megabits, not counting the USB latency and protocol overhead.
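Spelled out: 1920 × 1080 pixels × 2 bytes per pixel (YUYV) is about 4.1 MB per frame; at 30 FPS that is roughly 124 MB/s, or just under 1 Gbit/s, compared with USB 2.0's theoretical 480 Mbit/s.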
To provide some "actionable feedback": I would advise first concentrating on the algorithms you want to apply to the image (you probably just don't want to lose processing speed at the very first step). Don't hesitate to use OpenCV for camera input and basic image processing; later you can switch to some hardware interface and hand-written algorithms.
The easier way of getting raw frames would be to use Android's camera interface and try to process the incoming frames with GLSL shaders using the GL_TEXTURE_EXTERNAL_OES extension, about which there is information and code samples available. There you can connect GL textures to AHardwareBuffer instances and then use the AHardwareBuffer_lock function to get raw pointers. The exact supported formats may also vary across hardware, so do not expect this to be super easy.
I've recently had a similar issue. In my case the camera driver needed the VIDIOC_S_PARM ioctl in order to set the frame rate and initialize the camera for the selected capture mode.
You can try to add this code after the VIDIOC_S_FMT and see if it works for you as well:
struct v4l2_streamparm streamparam;
memset(&streamparam, 0, sizeof(streamparam));
streamparam.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
xioctl(fd, VIDIOC_G_PARM, &streamparam);
streamparam.parm.capture.timeperframe.numerator = 1;
streamparam.parm.capture.timeperframe.denominator = 5;
xioctl(fd, VIDIOC_S_PARM, &streamparam);
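As a usage note on the snippet above: timeperframe is the duration of one frame, so numerator = 1 and denominator = 5 requests roughly 5 frames per second; the driver may round this to the nearest rate the camera actually supports (v4l2-ctl --list-formats-ext shows the valid combinations).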

Xcode app for macOS. This is how I set up to get audio from USB mic input. It worked a year ago, now it doesn't. Why?

Here is my audio init code. My app responds when queue buffers are ready, but all the data in the buffer is zero. Checking sound in System Preferences shows that the USB Audio CODEC in the sound input dialog is active. audioInit() is called right after the app launches.
{
#pragma mark user data struct
typedef struct MyRecorder
{
AudioFileID recordFile;
SInt64 recordPacket;
Float32 *pSampledData;
MorseDecode *pMorseDecoder;
} MyRecorder;
#pragma mark utility functions
void CheckError(OSStatus error, const char *operation)
{
if(error == noErr) return;
char errorString[20];
// see if it appears to be a 4 char code
*(UInt32*)(errorString + 1) = CFSwapInt32HostToBig(error);
if (isprint(errorString[1]) && isprint(errorString[2]) &&
isprint(errorString[3]) && isprint(errorString[4]))
{
errorString[0] = errorString[5] = '\'';
errorString[6] = '\0';
}
else
{
sprintf(errorString, "%d", (int)error);
}
fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
}
OSStatus MyGetDefaultInputDeviceSampleRate(Float64 *outSampleRate)
{
OSStatus error;
AudioDeviceID deviceID = 0;
AudioObjectPropertyAddress propertyAddress;
UInt32 propertySize;
propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(AudioDeviceID);
error = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&propertyAddress,
0,
NULL,
&propertySize,
&deviceID);
if(error)
return error;
propertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(Float64);
error = AudioObjectGetPropertyData(deviceID,
&propertyAddress,
0,
NULL,
&propertySize,
outSampleRate);
return error;
}
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format,
AudioQueueRef queue,
float seconds)
{
int packets, frames, bytes;
frames = (int)ceil(seconds * format->mSampleRate);
if(format->mBytesPerFrame > 0)
{
bytes = frames * format->mBytesPerFrame;
}
else
{
UInt32 maxPacketSize;
if(format->mBytesPerPacket > 0)
{
// constant packet size
maxPacketSize = format->mBytesPerPacket;
}
else
{
// get the largest single packet size possible
UInt32 propertySize = sizeof(maxPacketSize);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterPropertyMaximumOutputPacketSize,
&maxPacketSize,
&propertySize),
"Couldn't get queues max output packet size");
}
if(format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
// worst case scenario: 1 frame in a packet
packets = frames;
// sanity check
if(packets == 0)
packets = 1;
bytes = packets * maxPacketSize;
}
return bytes;
}
extern void bridgeToMainThread(MorseDecode *pDecode);
static int callBacks = 0;
// ---------------------------------------------
static void MyAQInputCallback(void *inUserData,
AudioQueueRef inQueue,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc)
{
MyRecorder *recorder = (MyRecorder*)inUserData;
Float32 *pAudioData = (Float32*)(inBuffer->mAudioData);
recorder->pMorseDecoder->pBuffer = pAudioData;
recorder->pMorseDecoder->bufferSize = inNumPackets;
bridgeToMainThread(recorder->pMorseDecoder);
CheckError(AudioQueueEnqueueBuffer(inQueue,
inBuffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
printf("packets = %ld, bytes = %ld\n",(long)inNumPackets,(long)inBuffer->mAudioDataByteSize);
callBacks++;
//printf("\ncallBacks = %d\n",callBacks);
//if(callBacks == 0)
//audioStop();
}
static AudioQueueRef queue = {0};
static MyRecorder recorder = {0};
static AudioStreamBasicDescription recordFormat;
void audioInit()
{
// set up format
memset(&recordFormat,0,sizeof(recordFormat));
recordFormat.mFormatID = kAudioFormatLinearPCM;
recordFormat.mChannelsPerFrame = 2;
recordFormat.mBitsPerChannel = 32;
recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame = recordFormat.mChannelsPerFrame * sizeof(Float32);
recordFormat.mFramesPerPacket = 1;
//recordFormat.mFormatFlags = kAudioFormatFlagsCanonical;
recordFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
UInt32 propSize = sizeof(recordFormat);
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0,
NULL,
&propSize,
&recordFormat),
"AudioFormatProperty failed");
recorder.pMorseDecoder = MorseDecode::pInstance();
recorder.pMorseDecoder->m_sampleRate = recordFormat.mSampleRate;
// recorder.pMorseDecoder->setCircularBuffer();
//set up queue
CheckError(AudioQueueNewInput(&recordFormat,
MyAQInputCallback,
&recorder,
NULL,
kCFRunLoopCommonModes,
0,
&queue),
"AudioQueueNewInput failed");
UInt32 size = sizeof(recordFormat);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterCurrentOutputStreamDescription,
&recordFormat,
&size), "Couldn't get queue's format");
// set up buffers and enqueue
const int kNumberRecordBuffers = 3;
int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, AUDIO_BUFFER_DURATION);
for(int bufferIndex = 0; bufferIndex < kNumberRecordBuffers; bufferIndex++)
{
AudioQueueBufferRef buffer;
CheckError(AudioQueueAllocateBuffer(queue,
bufferByteSize,
&buffer),
"AudioQueueAllocateBuffer failed");
CheckError(AudioQueueEnqueueBuffer(queue,
buffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
}
}
void audioRun()
{
CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
}
void audioStop()
{
CheckError(AudioQueuePause(queue), "AudioQueuePause failed");
}
}
This sounds like the new macOS 'microphone privacy' setting, which, if set to 'no access' for your app, will cause precisely this behaviour. So:
Open the System Preferences pane.
Click on 'Security and Privacy'.
Select the Privacy tab.
Click on 'Microphone' in the left-hand pane.
Locate your app in the right-hand pane and tick the checkbox next to it.
Then restart your app and test it.
Tedious, no?
Edit: As stated in the comments, you can't directly request microphone access, but you can detect whether it has been granted to your app or not by calling [AVCaptureDevice authorizationStatusForMediaType: AVMediaTypeAudio].
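A small sketch of that check (assuming macOS 10.14+, AVFoundation linked, and an NSMicrophoneUsageDescription entry in the app's Info.plist so the system can show the permission prompt at all):

// Check (but not request) microphone authorization before starting the queue.
AVAuthorizationStatus status =
    [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeAudio];
if (status != AVAuthorizationStatusAuthorized)
{
    // access denied or not yet decided; input buffers will arrive silent
}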

How add Data Stream into MXF(using mpeg2video) file with FFmpeg and C/C++

I'm a little bit stuck here trying to create an MXF file with a data stream in it. I have several MXF video files that follow this layout:
1 video stream:
    Stream #0:0: Video: mpeg2video (4:2:2), yuv422p(tv, bt709, top first), 1920x1080 [SAR 1:1 DAR 16:9], 50000 kb/s, 29.9
16 audio streams:
    Audio: pcm_s24le, 48000 Hz, 1 channels, s32 (24 bit), 1152 kb/s
1 data stream:
    Data: none
This data stream contains personal data inside the video file. I can open this stream and the data is really there; everything is OK. But when I try to create a file exactly like this, every time I call "avformat_write_header" it returns an error.
If I comment out the creation of this data stream, the video file is successfully created.
If I change to "mpegts" with this data stream, the video file is also successfully created.
But I can't use mpegts, and I need this data stream.
I know that MXF with a data stream is possible, because I have these original files with this combination.
So I know that I am missing something in my code.
This is the way I create this data stream:
void CFFmpegVideoWriter::addDataStream(EOutputStream *ost, AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecParameters *par;

    ost->stream = avformat_new_stream(oc, NULL);
    if (ost->stream == NULL)
    {
        fprintf(stderr, "OOooohhh man: avformat_new_stream() failed.\n");
        return;
    }

    par = ost->stream->codecpar;
    ost->stream->index = 17;
    par->codec_id = AV_CODEC_ID_NONE;
    par->codec_type = AVMEDIA_TYPE_DATA;

    ost->stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
The file opening code is this:
CFFMpegVideoWriter::CFFMpegVideoWriter(QString outputfilename) : QThread()
{
    av_register_all();
    avcodec_register_all();

    isOpen = false;
    shouldClose = false;
    frameIndex = 0;

#ifdef __linux__
    QByteArray bFilename = outputfilename.toUtf8();
#else
    QByteArray bFilename = outputfilename.toLatin1();
#endif

    const char* filename = bFilename.data();

    codecContext = NULL;

    //find the desired output format...
    outputFormat = av_guess_format("mp2v", filename, nullptr);
    if (!outputFormat)
    {
        qDebug("Could not find suitable output format\n");
        return;
    }

    //find the codec...
    codec = avcodec_find_encoder(outputFormat->video_codec);
    if (!codec)
    {
        qDebug("Codec not found\n");
        return;
    }

    //allocate the codec context...
    codecContext = avcodec_alloc_context3(codec);
    codecContext->field_order = AV_FIELD_TT;
    codecContext->profile = FF_PROFILE_MPEG2_422;

    //allocate the format context...
    formatContext = avformat_alloc_context();
    formatContext->oformat = outputFormat;

    //allocate the output media context...
    avformat_alloc_output_context2(&formatContext, NULL, NULL, filename);
    if (!formatContext)
    {
        qDebug("Erro");
        return;
    }

    videoStream.tmp_frame = NULL;
    videoStream.swr_ctx = NULL;

    //add the video stream...
    if (outputFormat->video_codec != AV_CODEC_ID_NONE)
    {
        addVideoStream(&videoStream, formatContext, &video_codec, outputFormat->video_codec);
    }

    //add the 16 audio streams...
    if (outputFormat->audio_codec != AV_CODEC_ID_NONE)
    {
        for (int i = 0; i < 16; i++)
        {
            addAudioStream(&audioStream[i], formatContext, &audio_codec, outputFormat->audio_codec);
        }
    }

    addDataStream(&datastream, formatContext, &video_codec, outputFormat->video_codec);

    videoStream.sws_ctx = NULL;
    for (int i = 0; i < 16; i++)
    {
        audioStream[i].sws_ctx = NULL;
    }

    opt = NULL;

    //open the video codec for the video stream...
    initVideoCodec(formatContext, video_codec, &videoStream, opt);

    //open the audio codec for the audio streams...
    for (int i = 0; i < 16; i++)
    {
        initAudioCodec(formatContext, audio_codec, &audioStream[i], opt);
    }

    av_dump_format(formatContext, 0, filename, 1);

    //open the output file...
    if (!(outputFormat->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&formatContext->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            qDebug("Could not open '%s'", filename);
            return;
        }
    }

    //write the file header...
    ret = avformat_write_header(formatContext, &opt);
    if (ret < 0)
    {
        qDebug("Error occurred when opening output file");
        return;
    }

    isOpen = true;

    QThread::start();
}
The code always fails at the "avformat_write_header" call.
But if I remove "datastream" or change the format to mpegts, everything runs fine.
Any idea of what I am doing wrong here?
Thanks for reading this.
Helmuth
After some long hours trying a lot of solutions, I found what was wrong. I had to add a metadata item specifying the data type.
In my case, the data type was "vbi_vanc_smpte_436M", which is supported by MXF.
So I did it with:
av_dict_set(&out_stream->metadata, "data_type", "vbi_vanc_smpte_436M", AV_DICT_IGNORE_SUFFIX);
Then everything works fine.
I hope this can help anyone else with the same problem.
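As a side note (a sketch, not part of the original answer): in the question's addDataStream() the equivalent call would go on the newly created stream, before avformat_write_header() is reached:

// Tag the data stream so the MXF muxer knows what the data track carries.
av_dict_set(&ost->stream->metadata, "data_type", "vbi_vanc_smpte_436M", 0);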

How to play FFMPEG sound sample with OpenAL?

I am using FFmpeg to load audio and video from a file. It works with video, but I don't know how to play the audio samples.
Here is my code to get audio samples:
m_AdotimeBase = (int64_t(m_Adocdec_ctx->time_base.num) * AV_TIME_BASE) / int64_t(m_Adocdec_ctx->time_base.den);

if(!m_Adofmt_ctx)
{
    //AfxMessageBox(L"m_timeBase");
    return FALSE;
}

int64_t seekAdoTarget = int64_t(m_currFrame) * m_AdotimeBase;

if(av_seek_frame(m_Adofmt_ctx, -1, seekAdoTarget, AVSEEK_FLAG_ANY) < 0)
{
    /*CString st;
    st.Format(L"%d",m_currFrame);
    AfxMessageBox(L"av_seek_frame "+st);*/
    m_currFrame = m_totalFrames-1;
    return FALSE;
}

if ((ret = av_read_frame(m_Adofmt_ctx, &packet)) < 0)
    return FALSE;

if (packet.stream_index == 0)
{
    ret = avcodec_decode_audio4(m_Adocdec_ctx, &in_AdeoFrame, &got_frame, &packet);
    if (ret < 0)
    {
        av_free_packet(&packet);
        return FALSE;
    }
}
My problem is that I want to play that sample using OpenAL.
I would appreciate any tutorials or references on the subject.