Xcode app for macOS. This is how I setup to get audio from usb mic input. Worked a year ago, now doesn't. Why - c++

Here is my audio init code. My app responds when queue buffers are ready, but all data in buffer is zero. Checking sound in system preferences shows that USB Audio CODEC in sound input dialog is active. AudioInit() is called right after app launches.
{
#pragma mark user data struct
// Per-recording state handed to the audio queue input callback as user data.
typedef struct MyRecorder
{
AudioFileID recordFile; // destination audio file — unused in the code shown here; TODO confirm
SInt64 recordPacket; // running packet index into recordFile — unused in the code shown; TODO confirm
Float32 *pSampledData; // scratch pointer for sampled data
MorseDecode *pMorseDecoder; // decoder that consumes each filled buffer (see MyAQInputCallback)
} MyRecorder;
#pragma mark utility functions
/*
 * Log a Core Audio error and its operation context to stderr.
 * If `error` looks like a printable four-character code it is shown as 'abcd',
 * otherwise as a decimal integer. Returns immediately on noErr.
 */
void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;

    // Needs at most 7 bytes for 'abcd'\0, or up to 12 for a decimal int; 20 is ample.
    char errorString[20];

    // See if the status appears to be a 4-char code: place the big-endian bytes
    // at indices 1..4 so quotes can wrap them in place.
    *(UInt32*)(errorString + 1) = CFSwapInt32HostToBig(error);
    // FIX: cast to unsigned char before isprint(); passing a possibly-negative
    // plain char is undefined behavior (CERT STR37-C).
    if (isprint((unsigned char)errorString[1]) && isprint((unsigned char)errorString[2]) &&
        isprint((unsigned char)errorString[3]) && isprint((unsigned char)errorString[4]))
    {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    }
    else
    {
        // Not a printable code: show the raw decimal value (bounded write).
        snprintf(errorString, sizeof errorString, "%d", (int)error);
    }
    fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
}
/*
 * Fetch the nominal sample rate of the system's default input device.
 * Writes the rate into *outSampleRate and returns noErr (0) on success,
 * otherwise the first failing Core Audio status.
 */
OSStatus MyGetDefaultInputDeviceSampleRate(Float64 *outSampleRate)
{
    // First query: which device is the default input?
    AudioObjectPropertyAddress addr = {
        kAudioHardwarePropertyDefaultInputDevice,
        kAudioObjectPropertyScopeGlobal,
        0
    };
    AudioDeviceID deviceID = 0;
    UInt32 dataSize = sizeof(AudioDeviceID);
    OSStatus status = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                                 &addr,
                                                 0,
                                                 NULL,
                                                 &dataSize,
                                                 &deviceID);
    if (status)
        return status;

    // Second query: that device's nominal sample rate.
    addr.mSelector = kAudioDevicePropertyNominalSampleRate;
    dataSize = sizeof(Float64);
    status = AudioObjectGetPropertyData(deviceID,
                                        &addr,
                                        0,
                                        NULL,
                                        &dataSize,
                                        outSampleRate);
    return status;
}
// Compute a record-buffer size in bytes large enough to hold `seconds` of audio
// in `format`. Handles constant-bytes-per-frame formats directly, and falls back
// to packet-based sizing for constant-packet and VBR formats.
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format,
AudioQueueRef queue,
float seconds)
{
int packets, frames, bytes;
frames = (int)ceil(seconds * format->mSampleRate);
if(format->mBytesPerFrame > 0)
{
// Constant bytes per frame (e.g. linear PCM): size is a direct multiply.
bytes = frames * format->mBytesPerFrame;
}
else
{
UInt32 maxPacketSize;
if(format->mBytesPerPacket > 0)
{
// constant packet size
maxPacketSize = format->mBytesPerPacket;
}
else
{
// get the largest single packet size possible
// NOTE(review): if this query fails, CheckError only logs and maxPacketSize
// is left uninitialized before the multiply below — TODO confirm intended.
UInt32 propertySize = sizeof(maxPacketSize);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterPropertyMaximumOutputPacketSize,
&maxPacketSize,
&propertySize),
"Couldn't get queues max output packet size");
}
if(format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
// worst case scenario: 1 frame in a packet
packets = frames;
// sanity check
if(packets == 0)
packets = 1;
bytes = packets * maxPacketSize;
}
return bytes;
}
// Implemented elsewhere: forwards the decoder (and its freshly-filled buffer) to the main thread.
extern void bridgeToMainThread(MorseDecode *pDecode);
// Diagnostic count of input-callback invocations.
static int callBacks = 0;
// ---------------------------------------------
// Audio queue input callback: points the Morse decoder at the filled buffer,
// notifies the main thread, then re-enqueues the buffer so capture continues.
// NOTE(review): pBuffer aliases queue-owned memory that is re-enqueued below;
// the decoder presumably copies the samples before the queue overwrites them —
// TODO confirm against MorseDecode.
static void MyAQInputCallback(void *inUserData,
AudioQueueRef inQueue,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc)
{
MyRecorder *recorder = (MyRecorder*)inUserData;
Float32 *pAudioData = (Float32*)(inBuffer->mAudioData);
recorder->pMorseDecoder->pBuffer = pAudioData;
recorder->pMorseDecoder->bufferSize = inNumPackets;
bridgeToMainThread(recorder->pMorseDecoder);
// Hand the buffer back to the queue for refilling.
CheckError(AudioQueueEnqueueBuffer(inQueue,
inBuffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
printf("packets = %ld, bytes = %ld\n",(long)inNumPackets,(long)inBuffer->mAudioDataByteSize);
callBacks++;
//printf("\ncallBacks = %d\n",callBacks);
//if(callBacks == 0)
//audioStop();
}
// File-scope capture state shared by audioInit/audioRun/audioStop.
static AudioQueueRef queue = {0}; // the input (recording) audio queue
static MyRecorder recorder = {0}; // user data delivered to MyAQInputCallback
static AudioStreamBasicDescription recordFormat; // format requested from / reported by the queue
void audioInit()
{
// set up format
memset(&recordFormat,0,sizeof(recordFormat));
recordFormat.mFormatID = kAudioFormatLinearPCM;
recordFormat.mChannelsPerFrame = 2;
recordFormat.mBitsPerChannel = 32;
recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame = recordFormat.mChannelsPerFrame * sizeof(Float32);
recordFormat.mFramesPerPacket = 1;
//recordFormat.mFormatFlags = kAudioFormatFlagsCanonical;
recordFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
UInt32 propSize = sizeof(recordFormat);
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0,
NULL,
&propSize,
&recordFormat),
"AudioFormatProperty failed");
recorder.pMorseDecoder = MorseDecode::pInstance();
recorder.pMorseDecoder->m_sampleRate = recordFormat.mSampleRate;
// recorder.pMorseDecoder->setCircularBuffer();
//set up queue
CheckError(AudioQueueNewInput(&recordFormat,
MyAQInputCallback,
&recorder,
NULL,
kCFRunLoopCommonModes,
0,
&queue),
"AudioQueueNewInput failed");
UInt32 size = sizeof(recordFormat);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterCurrentOutputStreamDescription,
&recordFormat,
&size), "Couldn't get queue's format");
// set up buffers and enqueue
const int kNumberRecordBuffers = 3;
int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, AUDIO_BUFFER_DURATION);
for(int bufferIndex = 0; bufferIndex < kNumberRecordBuffers; bufferIndex++)
{
AudioQueueBufferRef buffer;
CheckError(AudioQueueAllocateBuffer(queue,
bufferByteSize,
&buffer),
"AudioQueueAllocateBuffer failed");
CheckError(AudioQueueEnqueueBuffer(queue,
buffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
}
}
void audioRun()
{
CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
}
void audioStop()
{
CheckError(AudioQueuePause(queue), "AudioQueuePause failed");
}
}

This sounds like the new macOS 'microphone privacy' setting, which, if set to 'no access' for your app, will cause precisely this behaviour. So:
Open the System Preferences pane.
Click on 'Security and Privacy'.
Select the Privacy tab.
Click on 'Microphone' in the left-hand pane.
Locate your app in the right-hand pane and tick the checkbox next to it.
Then restart your app and test it.
Tedious, no?
Edit: As stated in the comments, you can't directly request microphone access, but you can detect whether it has been granted to your app or not by calling [AVCaptureDevice authorizationStatusForMediaType: AVMediaTypeAudio].

Related

Why does adding audio stream to ffmpeg's libavcodec output container cause a crash?

As it stands, my project correctly uses libavcodec to decode a video, where each frame is manipulated (it doesn't matter how) and output to a new video. I've cobbled this together from examples found online, and it works. The result is a perfect .mp4 of the manipulated frames, minus the audio.
My problem is, when I try to add an audio stream to the output container, I get a crash in mux.c that I can't explain. It's in static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt). Where st->internal->priv_pts->val = pkt->dts; is attempted, priv_pts is nullptr.
I don't recall the version number, but this is from a November 4, 2020 ffmpeg build from git.
My MediaContentMgr is much bigger than what I have here. I'm stripping out everything to do with the frame manipulation, so if I'm missing anything, please let me know and I'll edit.
The code that, when added, triggers the nullptr exception, is called out inline
The .h:
#ifndef _API_EXAMPLE_H
#define _API_EXAMPLE_H
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include "glm/glm.hpp"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
#include "shader_s.h"
// Demuxes/decodes an input media container and, when recording, encodes and
// muxes manipulated frames into an output container via libav*.
class MediaContainerMgr {
public:
MediaContainerMgr(const std::string& infile, const std::string& vert, const std::string& frag,
const glm::vec3* extents);
~MediaContainerMgr();
void render();
bool recording() { return m_recording; }
// Major thanks to "shi-yan" who helped make this possible:
// https://github.com/shi-yan/videosamples/blob/master/libavmp4encoding/main.cpp
bool init_video_output(const std::string& video_file_name, unsigned int width, unsigned int height);
bool output_video_frame(uint8_t* buf);
bool finalize_output();
private:
// --- Input-side (demux/decode) state ---
AVFormatContext* m_format_context;
AVCodec* m_video_codec;
AVCodec* m_audio_codec;
AVCodecParameters* m_video_codec_parameters;
AVCodecParameters* m_audio_codec_parameters;
AVCodecContext* m_codec_context; // video decoder context
AVFrame* m_frame; // most recently decoded frame
AVPacket* m_packet; // reusable demux packet
// NOTE(review): declared unsigned but initialized to -1 in the ctor (wraps to
// UINT32_MAX) and compared against int stream_index — TODO confirm intended.
uint32_t m_video_stream_index;
uint32_t m_audio_stream_index;
void init_rendering(const glm::vec3* extents);
int decode_packet();
// For writing the output video:
void free_output_assets();
bool m_recording;
// --- Output-side (encode/mux) state ---
AVOutputFormat* m_output_format;
AVFormatContext* m_output_format_context;
AVCodec* m_output_video_codec;
AVCodecContext* m_output_video_codec_context;
AVFrame* m_output_video_frame;
SwsContext* m_output_scale_context;
AVStream* m_output_video_stream;
AVCodec* m_output_audio_codec;
AVStream* m_output_audio_stream;
AVCodecContext* m_output_audio_codec_context;
};
#endif
And, the hellish .cpp:
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "media_container_manager.h"
// Opens `infile`, reads stream info, remembers the first video and audio streams,
// and allocates/opens a decoder context for the video stream.
// Throws C-string literals on any failure (caller is expected to catch const char*).
// NOTE(review): `vert`, `frag`, and `extents` presumably feed init_rendering(),
// which the author stripped from this excerpt — TODO confirm.
// NOTE(review): m_output_audio_codec / m_output_audio_stream /
// m_output_audio_codec_context are absent from the init list and start
// uninitialized — TODO confirm.
MediaContainerMgr::MediaContainerMgr(const std::string& infile, const std::string& vert, const std::string& frag,
const glm::vec3* extents) :
m_video_stream_index(-1),
m_audio_stream_index(-1),
m_recording(false),
m_output_format(nullptr),
m_output_format_context(nullptr),
m_output_video_codec(nullptr),
m_output_video_codec_context(nullptr),
m_output_video_frame(nullptr),
m_output_scale_context(nullptr),
m_output_video_stream(nullptr)
{
// AVFormatContext holds header info from the format specified in the container:
m_format_context = avformat_alloc_context();
if (!m_format_context) {
throw "ERROR could not allocate memory for Format Context";
}
// open the file and read its header. Codecs are not opened here.
if (avformat_open_input(&m_format_context, infile.c_str(), NULL, NULL) != 0) {
throw "ERROR could not open input file for reading";
}
// NOTE(review): %lld with int64_t fields is not strictly portable; PRId64 is — TODO.
printf("format %s, duration %lldus, bit_rate %lld\n", m_format_context->iformat->name, m_format_context->duration, m_format_context->bit_rate);
//read avPackets (?) from the avFormat (?) to get stream info. This populates format_context->streams.
if (avformat_find_stream_info(m_format_context, NULL) < 0) {
throw "ERROR could not get stream info";
}
// Walk every stream, keeping the first video and first audio stream encountered.
for (unsigned int i = 0; i < m_format_context->nb_streams; i++) {
AVCodecParameters* local_codec_parameters = NULL;
local_codec_parameters = m_format_context->streams[i]->codecpar;
printf("AVStream->time base before open coded %d/%d\n", m_format_context->streams[i]->time_base.num, m_format_context->streams[i]->time_base.den);
printf("AVStream->r_frame_rate before open coded %d/%d\n", m_format_context->streams[i]->r_frame_rate.num, m_format_context->streams[i]->r_frame_rate.den);
printf("AVStream->start_time %" PRId64 "\n", m_format_context->streams[i]->start_time);
printf("AVStream->duration %" PRId64 "\n", m_format_context->streams[i]->duration);
printf("duration(s): %lf\n", (float)m_format_context->streams[i]->duration / m_format_context->streams[i]->time_base.den * m_format_context->streams[i]->time_base.num);
AVCodec* local_codec = NULL;
local_codec = avcodec_find_decoder(local_codec_parameters->codec_id);
if (local_codec == NULL) {
throw "ERROR unsupported codec!";
}
if (local_codec_parameters->codec_type == AVMEDIA_TYPE_VIDEO) {
if (m_video_stream_index == -1) {
m_video_stream_index = i;
m_video_codec = local_codec;
m_video_codec_parameters = local_codec_parameters;
}
// NOTE(review): m_height/m_width are not declared in the header excerpt —
// assumed to be members stripped by the author; TODO confirm.
m_height = local_codec_parameters->height;
m_width = local_codec_parameters->width;
printf("Video Codec: resolution %dx%d\n", m_width, m_height);
}
else if (local_codec_parameters->codec_type == AVMEDIA_TYPE_AUDIO) {
if (m_audio_stream_index == -1) {
m_audio_stream_index = i;
m_audio_codec = local_codec;
m_audio_codec_parameters = local_codec_parameters;
}
printf("Audio Codec: %d channels, sample rate %d\n", local_codec_parameters->channels, local_codec_parameters->sample_rate);
}
printf("\tCodec %s ID %d bit_rate %lld\n", local_codec->name, local_codec->id, local_codec_parameters->bit_rate);
}
// Allocate and open the decoder for the selected video stream.
m_codec_context = avcodec_alloc_context3(m_video_codec);
if (!m_codec_context) {
throw "ERROR failed to allocate memory for AVCodecContext";
}
if (avcodec_parameters_to_context(m_codec_context, m_video_codec_parameters) < 0) {
throw "ERROR failed to copy codec params to codec context";
}
if (avcodec_open2(m_codec_context, m_video_codec, NULL) < 0) {
throw "ERROR avcodec_open2 failed to open codec";
}
// Reusable frame/packet for the decode loop.
m_frame = av_frame_alloc();
if (!m_frame) {
throw "ERROR failed to allocate AVFrame memory";
}
m_packet = av_packet_alloc();
if (!m_packet) {
throw "ERROR failed to allocate AVPacket memory";
}
}
// Releases input-side demux/decode resources and the GL vertex objects.
// NOTE(review): m_VAO/m_VBO are not in the header excerpt — assumed stripped members.
// NOTE(review): output-side (m_output_*) assets are not freed here; presumably
// free_output_assets() handles them — TODO confirm.
MediaContainerMgr::~MediaContainerMgr() {
avformat_close_input(&m_format_context);
av_packet_free(&m_packet);
av_frame_free(&m_frame);
avcodec_free_context(&m_codec_context);
glDeleteVertexArrays(1, &m_VAO);
glDeleteBuffers(1, &m_VBO);
}
// Reads packets until one packet from the selected video stream decodes
// successfully, then returns true. Non-video packets are only logged (the
// re-encode call is commented out).
// NOTE(review): when av_read_frame() fails — which includes normal end-of-file —
// this loop `continue`s forever (the `return false` is commented out), so the
// function never returns at EOF. TODO confirm this busy-loop is intended.
bool MediaContainerMgr::advance_frame() {
while (true) {
if (av_read_frame(m_format_context, m_packet) < 0) {
// Do we actually need to unref the packet if it failed?
av_packet_unref(m_packet);
continue;
//return false;
}
else {
if (m_packet->stream_index == m_video_stream_index) {
//printf("AVPacket->pts %" PRId64 "\n", m_packet->pts);
int response = decode_packet();
av_packet_unref(m_packet);
if (response != 0) {
continue;
//return false;
}
return true;
}
else {
// Not the selected video stream (e.g. audio): trace and keep reading.
printf("m_packet->stream_index: %d\n", m_packet->stream_index);
printf("  m_packet->pts: %lld\n", m_packet->pts);
printf("  mpacket->size: %d\n", m_packet->size);
if (m_recording) {
int err = 0;
//err = avcodec_send_packet(m_output_video_codec_context, m_packet);
printf("  encoding error: %d\n", err);
}
}
}
// We're done with the packet (it's been unpacked to a frame), so deallocate & reset to defaults:
/*
if (m_frame == NULL)
return false;
if (m_frame->data[0] == NULL || m_frame->data[1] == NULL || m_frame->data[2] == NULL) {
printf("WARNING: null frame data");
continue;
}
*/
}
}
/*
 * Feed m_packet to the video decoder and try to receive one decoded frame into
 * m_frame. Returns 0 on success, a negative AVERROR otherwise (EAGAIN/EOF are
 * passed through so the caller can decide to keep reading).
 */
int MediaContainerMgr::decode_packet() {
    // Supply raw packet data as input to a decoder
    // https://ffmpeg.org/doxygen/trunk/group__lavc__decoding.html#ga58bc4bf1e0ac59e27362597e467efff3
    int response = avcodec_send_packet(m_codec_context, m_packet);
    if (response < 0) {
        char buf[256];
        av_strerror(response, buf, 256);
        // FIX: this is the *send* path; the original message wrongly said "receiving".
        printf("Error while sending a packet to the decoder: %s\n", buf);
        return response;
    }
    // Return decoded output data (into a frame) from a decoder
    // https://ffmpeg.org/doxygen/trunk/group__lavc__decoding.html#ga11e6542c4e66d3028668788a1a74217c
    response = avcodec_receive_frame(m_codec_context, m_frame);
    if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
        // Decoder needs more input (or is drained); not an error for the caller.
        return response;
    } else if (response < 0) {
        char buf[256];
        av_strerror(response, buf, 256);
        printf("Error while receiving a frame from the decoder: %s\n", buf);
        return response;
    } else {
        // FIX: pts is int64_t; use PRId64 instead of %lld for portability.
        printf(
            "Frame %d (type=%c, size=%d bytes) pts %" PRId64 " key_frame %d [DTS %d]\n",
            m_codec_context->frame_number,
            av_get_picture_type_char(m_frame->pict_type),
            m_frame->pkt_size,
            m_frame->pts,
            m_frame->key_frame,
            m_frame->coded_picture_number
        );
    }
    return 0;
}
bool MediaContainerMgr::init_video_output(const std::string& video_file_name, unsigned int width, unsigned int height) {
if (m_recording)
return true;
m_recording = true;
advance_to(0L); // I've deleted the implmentation. Just seeks to beginning of vid. Works fine.
if (!(m_output_format = av_guess_format(nullptr, video_file_name.c_str(), nullptr))) {
printf("Cannot guess output format.\n");
return false;
}
int err = avformat_alloc_output_context2(&m_output_format_context, m_output_format, nullptr, video_file_name.c_str());
if (err < 0) {
printf("Failed to allocate output context.\n");
return false;
}
//TODO(P0): Break out the video and audio inits into their own methods.
m_output_video_codec = avcodec_find_encoder(m_output_format->video_codec);
if (!m_output_video_codec) {
printf("Failed to create video codec.\n");
return false;
}
m_output_video_stream = avformat_new_stream(m_output_format_context, m_output_video_codec);
if (!m_output_video_stream) {
printf("Failed to find video format.\n");
return false;
}
m_output_video_codec_context = avcodec_alloc_context3(m_output_video_codec);
if (!m_output_video_codec_context) {
printf("Failed to create video codec context.\n");
return(false);
}
m_output_video_stream->codecpar->codec_id = m_output_format->video_codec;
m_output_video_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
m_output_video_stream->codecpar->width = width;
m_output_video_stream->codecpar->height = height;
m_output_video_stream->codecpar->format = AV_PIX_FMT_YUV420P;
// Use the same bit rate as the input stream.
m_output_video_stream->codecpar->bit_rate = m_format_context->streams[m_video_stream_index]->codecpar->bit_rate;
m_output_video_stream->avg_frame_rate = m_format_context->streams[m_video_stream_index]->avg_frame_rate;
avcodec_parameters_to_context(m_output_video_codec_context, m_output_video_stream->codecpar);
m_output_video_codec_context->time_base = m_format_context->streams[m_video_stream_index]->time_base;
//TODO(P1): Set these to match the input stream?
m_output_video_codec_context->max_b_frames = 2;
m_output_video_codec_context->gop_size = 12;
m_output_video_codec_context->framerate = m_format_context->streams[m_video_stream_index]->r_frame_rate;
//m_output_codec_context->refcounted_frames = 0;
if (m_output_video_stream->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(m_output_video_codec_context, "preset", "ultrafast", 0);
} else if (m_output_video_stream->codecpar->codec_id == AV_CODEC_ID_H265) {
av_opt_set(m_output_video_codec_context, "preset", "ultrafast", 0);
} else {
av_opt_set_int(m_output_video_codec_context, "lossless", 1, 0);
}
avcodec_parameters_from_context(m_output_video_stream->codecpar, m_output_video_codec_context);
m_output_audio_codec = avcodec_find_encoder(m_output_format->audio_codec);
if (!m_output_audio_codec) {
printf("Failed to create audio codec.\n");
return false;
}
I've commented out all of the audio stream init beyond this next line, because this is where
the trouble begins. Creating this output stream causes the null reference I mentioned. If I
uncomment everything below here, I still get the null deref. If I comment out this line, the
deref exception vanishes. (IOW, I commented out more and more code until I found that this
was the trigger that caused the problem.)
I assume that there's something I'm doing wrong in the rest of the commented out code, that,
when fixed, will fix the nullptr and give me a working audio stream.
m_output_audio_stream = avformat_new_stream(m_output_format_context, m_output_audio_codec);
if (!m_output_audio_stream) {
printf("Failed to find audio format.\n");
return false;
}
/*
m_output_audio_codec_context = avcodec_alloc_context3(m_output_audio_codec);
if (!m_output_audio_codec_context) {
printf("Failed to create audio codec context.\n");
return(false);
}
m_output_audio_stream->codecpar->codec_id = m_output_format->audio_codec;
m_output_audio_stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
m_output_audio_stream->codecpar->format = m_format_context->streams[m_audio_stream_index]->codecpar->format;
m_output_audio_stream->codecpar->bit_rate = m_format_context->streams[m_audio_stream_index]->codecpar->bit_rate;
m_output_audio_stream->avg_frame_rate = m_format_context->streams[m_audio_stream_index]->avg_frame_rate;
avcodec_parameters_to_context(m_output_audio_codec_context, m_output_audio_stream->codecpar);
m_output_audio_codec_context->time_base = m_format_context->streams[m_audio_stream_index]->time_base;
*/
//TODO(P2): Free assets that have been allocated.
err = avcodec_open2(m_output_video_codec_context, m_output_video_codec, nullptr);
if (err < 0) {
printf("Failed to open codec.\n");
return false;
}
if (!(m_output_format->flags & AVFMT_NOFILE)) {
err = avio_open(&m_output_format_context->pb, video_file_name.c_str(), AVIO_FLAG_WRITE);
if (err < 0) {
printf("Failed to open output file.");
return false;
}
}
err = avformat_write_header(m_output_format_context, NULL);
if (err < 0) {
printf("Failed to write header.\n");
return false;
}
av_dump_format(m_output_format_context, 0, video_file_name.c_str(), 1);
return true;
}
//TODO(P2): make this a member. (Thanks to https://emvlo.wordpress.com/2016/03/10/sws_scale/)
// Vertically flip a YUV420P frame in place: aim each plane's data pointer at
// its last row and negate the stride. Plane 0 (luma) spans the full height;
// the remaining planes use half height (chroma subsampling).
void PrepareFlipFrameJ420(AVFrame* pFrame) {
    for (int plane = 0; plane < 4; plane++) {
        const int rows = (plane == 0) ? pFrame->height : (pFrame->height >> 1);
        pFrame->data[plane] += pFrame->linesize[plane] * (rows - 1);
        pFrame->linesize[plane] = -pFrame->linesize[plane];
    }
}
This is where we take an altered frame and write it to the output container. This works fine
as long as we haven't set up an audio stream in the output container.
/*
 * Convert an RGB24 buffer `buf` to YUV420P, flip it, stamp it with the current
 * input frame's pts, encode it, and interleave-write any produced packet into
 * the output container. Returns false on allocation/encode failure.
 */
bool MediaContainerMgr::output_video_frame(uint8_t* buf) {
    int err;
    // Lazily allocate the reusable YUV420P output frame.
    if (!m_output_video_frame) {
        m_output_video_frame = av_frame_alloc();
        m_output_video_frame->format = AV_PIX_FMT_YUV420P;
        m_output_video_frame->width = m_output_video_codec_context->width;
        m_output_video_frame->height = m_output_video_codec_context->height;
        err = av_frame_get_buffer(m_output_video_frame, 32);
        if (err < 0) {
            printf("Failed to allocate output frame.\n");
            return false;
        }
    }
    // Lazily create the RGB24 -> YUV420P converter (same dimensions, format change only).
    if (!m_output_scale_context) {
        m_output_scale_context = sws_getContext(m_output_video_codec_context->width, m_output_video_codec_context->height,
                                                AV_PIX_FMT_RGB24,
                                                m_output_video_codec_context->width, m_output_video_codec_context->height,
                                                AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
    }
    int inLinesize[1] = { 3 * m_output_video_codec_context->width };
    sws_scale(m_output_scale_context, (const uint8_t* const*)&buf, inLinesize, 0, m_output_video_codec_context->height,
              m_output_video_frame->data, m_output_video_frame->linesize);
    PrepareFlipFrameJ420(m_output_video_frame);
    //TODO(P0): Switch m_frame to be m_input_video_frame so I don't end up using the presentation timestamp from
    // an audio frame if I threadify the frame reading.
    m_output_video_frame->pts = m_frame->pts;
    // FIX: pts is int64_t; %d was a printf format/argument mismatch (UB). Use PRId64.
    printf("Output PTS: %" PRId64 ", time_base: %d/%d\n", m_output_video_frame->pts,
           m_output_video_codec_context->time_base.num, m_output_video_codec_context->time_base.den);
    err = avcodec_send_frame(m_output_video_codec_context, m_output_video_frame);
    if (err < 0) {
        printf("  ERROR sending new video frame output: ");
        switch (err) {
        case AVERROR(EAGAIN):
            printf("AVERROR(EAGAIN): %d\n", err);
            break;
        case AVERROR_EOF:
            printf("AVERROR_EOF: %d\n", err);
            break;
        case AVERROR(EINVAL):
            printf("AVERROR(EINVAL): %d\n", err);
            break;
        case AVERROR(ENOMEM):
            printf("AVERROR(ENOMEM): %d\n", err);
            break;
        }
        return false;
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;
    pkt.flags |= AV_PKT_FLAG_KEY;
    int ret = 0;
    if ((ret = avcodec_receive_packet(m_output_video_codec_context, &pkt)) == 0) {
        static int counter = 0;
        // FIX: the original format string had no specifier for `counter` (the third
        // argument was silently dropped); add the %d.
        printf("pkt.key: 0x%08x, pkt.size: %d, counter: %d\n", pkt.flags & AV_PKT_FLAG_KEY, pkt.size, counter++);
        // FIX: print the first 8 payload bytes; the original printed index 2 twice
        // and claimed nine values for eight bytes.
        uint8_t* payload = (uint8_t*)pkt.data;
        printf("sizes: %d %d %d %d %d %d %d %d\n", payload[0], payload[1], payload[2], payload[3],
               payload[4], payload[5], payload[6], payload[7]);
        av_interleaved_write_frame(m_output_format_context, &pkt);
    }
    printf("push: %d\n", ret);
    av_packet_unref(&pkt);
    return true;
}
// Drain the video encoder, write the container trailer, and close the output
// file. Returns true on success or when not recording.
// NOTE(review): m_recording is never reset to false here — TODO confirm whether
// a subsequent init_video_output() call is expected.
bool MediaContainerMgr::finalize_output() {
if (!m_recording)
return true;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
// Flush loop: sending nullptr puts the encoder in drain mode; receive until it
// stops producing packets (EAGAIN/EOF breaks the loop).
for (;;) {
avcodec_send_frame(m_output_video_codec_context, nullptr);
if (avcodec_receive_packet(m_output_video_codec_context, &pkt) == 0) {
av_interleaved_write_frame(m_output_format_context, &pkt);
printf("final push:\n");
} else {
break;
}
}
av_packet_unref(&pkt);
av_write_trailer(m_output_format_context);
// Only close the AVIO handle if this muxer actually owns a file.
if (!(m_output_format->flags & AVFMT_NOFILE)) {
int err = avio_close(m_output_format_context->pb);
if (err < 0) {
printf("Failed to close file. err: %d\n", err);
return false;
}
}
return true;
}
EDIT
The call stack on the crash (which I should have included in the original question):
avformat-58.dll!compute_muxer_pkt_fields(AVFormatContext * s, AVStream * st, AVPacket * pkt) Line 630 C
avformat-58.dll!write_packet_common(AVFormatContext * s, AVStream * st, AVPacket * pkt, int interleaved) Line 1122 C
avformat-58.dll!write_packets_common(AVFormatContext * s, AVPacket * pkt, int interleaved) Line 1186 C
avformat-58.dll!av_interleaved_write_frame(AVFormatContext * s, AVPacket * pkt) Line 1241 C
CamBot.exe!MediaContainerMgr::output_video_frame(unsigned char * buf) Line 553 C++
CamBot.exe!main() Line 240 C++
If I move the call to avformat_write_header so it's immediately before the audio stream initialization, I still get a crash, but in a different place. The crash happens on line 6459 of movenc.c, where we have:
/* Non-seekable output is ok if using fragmentation. If ism_lookahead
* is enabled, we don't support non-seekable output at all. */
if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && // CRASH IS HERE
(!(mov->flags & FF_MOV_FLAG_FRAGMENT) || mov->ism_lookahead)) {
av_log(s, AV_LOG_ERROR, "muxer does not support non seekable output\n");
return AVERROR(EINVAL);
}
The exception is a nullptr exception, where s->pb is NULL. The call stack is:
avformat-58.dll!mov_init(AVFormatContext * s) Line 6459 C
avformat-58.dll!init_muxer(AVFormatContext * s, AVDictionary * * options) Line 407 C
[Inline Frame] avformat-58.dll!avformat_init_output(AVFormatContext *) Line 489 C
avformat-58.dll!avformat_write_header(AVFormatContext * s, AVDictionary * * options) Line 512 C
CamBot.exe!MediaContainerMgr::init_video_output(const std::string & video_file_name, unsigned int width, unsigned int height) Line 424 C++
CamBot.exe!main() Line 183 C++
Please note that you should always try to provide a self-contained minimal working example to make it easier for others to help. With the actual code, the matching FFmpeg version, and an input video that triggers the segmentation fault (to be sure), the issue would be a matter of analyzing the control flow to identify why st->internal->priv_pts was not allocated. Without the full scenario, I have to resort to making assumptions that may or may not correspond to your actual code.
Based on your description, I attempted to reproduce the issue by cloning https://github.com/FFmpeg/FFmpeg.git and creating a new branch from commit b52e0d95 (November 4, 2020) to approximate your FFmpeg version.
I recreated your scenario using the provided code snippets by
including the avformat_new_stream() call for the audio stream
keeping the remaining audio initialization commented out
including the original avformat_write_header() call site (unchanged order)
With that scenario, the video write with MP4 video/audio input fails in avformat_write_header():
[mp4 # 0x2b39f40] sample rate not set 0
The call stack of the error location:
#0 0x00007ffff75253d7 in raise () from /lib64/libc.so.6
#1 0x00007ffff7526ac8 in abort () from /lib64/libc.so.6
#2 0x000000000094feca in init_muxer (s=0x2b39f40, options=0x0) at libavformat/mux.c:309
#3 0x00000000009508f4 in avformat_init_output (s=0x2b39f40, options=0x0) at libavformat/mux.c:490
#4 0x0000000000950a10 in avformat_write_header (s=0x2b39f40, options=0x0) at libavformat/mux.c:514
[...]
In init_muxer(), the sample rate in the stream parameters is checked unconditionally:
case AVMEDIA_TYPE_AUDIO:
if (par->sample_rate <= 0) {
av_log(s, AV_LOG_ERROR, "sample rate not set %d\n", par->sample_rate); abort();
ret = AVERROR(EINVAL);
goto fail;
}
That condition has been in effect since 2014-06-18 at the very least (didn't go back any further) and still exists. With a version from November 2020, the check must be active and the parameter must be set accordingly.
If I uncomment the remaining audio initialization, the situation remains unchanged (as expected). So, to satisfy the condition, I added the missing parameter as follows:
m_output_audio_stream->codecpar->sample_rate =
m_format_context->streams[m_audio_stream_index]->codecpar->sample_rate;
With that, the check succeeds, avformat_write_header() succeeds, and the actual video write succeeds.
As you indicated in your question, the segmentation fault is caused by st->internal->priv_pts being NULL at this location:
#0 0x00000000009516db in compute_muxer_pkt_fields (s=0x2b39f40, st=0x2b3a580, pkt=0x7fffffffe2d0) at libavformat/mux.c:632
#1 0x0000000000953128 in write_packet_common (s=0x2b39f40, st=0x2b3a580, pkt=0x7fffffffe2d0, interleaved=1) at libavformat/mux.c:1125
#2 0x0000000000953473 in write_packets_common (s=0x2b39f40, pkt=0x7fffffffe2d0, interleaved=1) at libavformat/mux.c:1188
#3 0x0000000000953634 in av_interleaved_write_frame (s=0x2b39f40, pkt=0x7fffffffe2d0) at libavformat/mux.c:1243
[...]
In the FFmpeg code base, the allocation of priv_pts is handled by init_pts() for all streams referenced by the context. init_pts() has two call sites:
libavformat/mux.c:496:
if (s->oformat->init && ret) {
if ((ret = init_pts(s)) < 0)
return ret;
return AVSTREAM_INIT_IN_INIT_OUTPUT;
}
libavformat/mux.c:530:
if (!s->internal->streams_initialized) {
if ((ret = init_pts(s)) < 0)
goto fail;
}
In both cases, the calls are triggered by avformat_write_header() (indirectly via avformat_init_output() for the first, directly for the second). According to control flow analysis, there's no success case that would leave priv_pts unallocated.
Considering a high probability that our versions of FFmpeg are compatible in terms of behavior, I have to assume that 1) the sample rate must be provided for audio streams and 2) priv_pts is always allocated by avformat_write_header() in the absence of errors. Therefore, two possible root causes come to mind:
Your stream is not an audio stream (unlikely; the type is based on the codec, which in turn is based on the output file extension - assuming mp4)
You do not call avformat_write_header() (unlikely) or do not handle the error in the caller of your C++ member function (the return value of avformat_write_header() is checked but I do not have code corresponding to the caller of the C++ member function; your actual code might differ significantly from the code provided, so it's possible and the only plausible conclusion that can be drawn from available data)
The solution: Ensure that processing does not continue if avformat_write_header() fails. By adding the audio stream, avformat_write_header() starts to fail unless you set the stream sample rate. If the error is ignored, av_interleaved_write_frame() triggers a segmentation fault by accessing the unallocated st->internal->priv_pts.
As mentioned initially, the scenario is incomplete. If you do call avformat_write_header() and stop processing in case of an error (meaning you do not call av_interleaved_write_frame()), more information is needed. As it stands now, that is unlikely. For further analysis, the executable output (stdout, stderr) is required to see your traces and FFmpeg log messages. If that does not reveal new information, a self-contained minimal working example and the video input are needed to get the full picture.

get process inode using netlink

I want to try and correlate an IP packet (using libpcap) to a process. I have had some limited success using the relevant /proc/net/ files but found that on some of the machines i'm using, this file can be many thousands of lines and parsing it is not efficient (caching has alleviated some performance problems).
I read that using sock_diag netlink subsystem could help by directly querying the kernel about the socket I am interested in. I've had limited success with my attempts but have hit a mental block on what is wrong.
For the initial query I have:
if (query_fd_) {
// Netlink request: nlmsghdr immediately followed by the sock_diag payload.
struct {
nlmsghdr nlh;
inet_diag_req_v2 id_req;
} req = {
.nlh = {
.nlmsg_len = sizeof(req),
.nlmsg_type = SOCK_DIAG_BY_FAMILY,
// NLM_F_DUMP requests a (possibly multipart) dump of all sockets
// matching the filter below.
.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP
},
.id_req = {
.sdiag_family = packet.l3_protocol,
.sdiag_protocol = packet.l4_protocol,
.idiag_ext = 0,
.pad = 0,
// -1 = match sockets in any TCP state.
.idiag_states = -1,
.id = {
// NOTE(review): idiag_sport/idiag_dport are expected in network
// byte order — confirm packet.src_port/dst_port already are.
.idiag_sport = packet.src_port,
.idiag_dport = packet.dst_port
}
}
};
//packet ips are just binary data stored as strings!
// Only 4 bytes are copied, so this handles IPv4 addresses only.
memcpy(req.id_req.id.idiag_src, packet.src_ip.c_str(), 4);
memcpy(req.id_req.id.idiag_dst, packet.dst_ip.c_str(), 4);
// Destination: the kernel (nl_pid 0) on the netlink socket.
struct sockaddr_nl nladdr = {
.nl_family = AF_NETLINK
};
struct iovec iov = {
.iov_base = &req,
.iov_len = sizeof(req)
};
struct msghdr msg = {
.msg_name = (void *) &nladdr,
.msg_namelen = sizeof(nladdr),
.msg_iov = &iov,
.msg_iovlen = 1
};
// Send message to kernel
// The loop exists only to retry on EINTR; every other outcome returns.
for (;;) {
if (sendmsg(query_fd_, &msg, 0) < 0) {
if (errno == EINTR)
continue;
perror("sendmsg");
return false;
}
return true;
}
}
return false;
For the receive code I have:
// Receive buffer; declared as long[] so it is suitably aligned for the
// nlmsghdr structures the kernel writes into it.
long buffer[8192];
struct sockaddr_nl nladdr = {
.nl_family = AF_NETLINK
};
struct iovec iov = {
.iov_base = buffer,
.iov_len = sizeof(buffer)
};
struct msghdr msg = {
.msg_name = (void *) &nladdr,
.msg_namelen = sizeof(nladdr),
.msg_iov = &iov,
.msg_iovlen = 1
};
int flags = 0;
for (;;) {
ssize_t rv = recvmsg(query_fd_, &msg, flags);
// error handling
if (rv < 0) {
if (errno == EINTR)
continue;
if ((errno == EAGAIN) || (errno == EWOULDBLOCK))
break;
perror("Failed to recv from netlink socket");
return 0;
}
if (rv == 0) {
printf("Unexpected shutdown of NETLINK socket");
return 0;
}
// Walk every netlink message contained in this datagram.
// NOTE(review): a NLM_F_DUMP reply can be multipart — the loop should keep
// reading until NLMSG_DONE instead of returning on the first match.
for (const struct nlmsghdr* header = reinterpret_cast<const struct nlmsghdr*>(buffer);
rv >= 0 && NLMSG_OK(header, static_cast<uint32_t>(rv));
header = NLMSG_NEXT(header, rv)) {
// The end of multipart message
if (header->nlmsg_type == NLMSG_DONE)
return 0;
if (header->nlmsg_type == NLMSG_ERROR) {
const struct nlmsgerr *err = reinterpret_cast<nlmsgerr*>(NLMSG_DATA(header));
if (err == NULL)
return 100;
// Netlink reports failures as negative errno values.
errno = -err->error;
perror("NLMSG_ERROR");
return 0;
}
if (header->nlmsg_type != SOCK_DIAG_BY_FAMILY) {
printf("unexpected nlmsg_type %u\n", (unsigned)header->nlmsg_type);
continue;
}
// Get the details....
const struct inet_diag_msg* diag = reinterpret_cast<inet_diag_msg*>(NLMSG_DATA(header));
if (header->nlmsg_len < NLMSG_LENGTH(sizeof(*diag))) {
printf("Message too short %d vs %d\n", header->nlmsg_len, NLMSG_LENGTH(sizeof(*diag)));
return 0;
}
if (diag->idiag_family != PF_INET) {
printf("unexpected family %u\n", diag->idiag_family);
return 1;
}
// NOTE(review): the caller must keep this in a 32-bit-or-wider type
// (ino_t/uint32_t); truncating to uint16_t breaks the /proc inode match.
return diag->idiag_inode;
The Problem:
The diag->idiag_inode value doesn't match the one I see in netstat output or in the /proc/net/ files. Is it supposed to? If not, is it possible to use this approach to retrieve the inode number for the process so that I can then query /proc for the corresponding PID?
Another thing I didn't quite understand is the NLMSG_DONE when checking the nlmsg_type in the header. What I am seeing:
1 - TCP 10.0.9.15:51002 -> 192.168.64.11:3128 [15047]
2 - TCP 192.168.64.11:3128 -> 10.0.9.15:51002 [0]
3 - TCP 10.0.9.15:51002 -> 192.168.64.11:3128 [0]
4 - TCP 192.168.64.11:3128 -> 10.0.9.15:51002 [15047]
5 - TCP 10.0.9.15:51002 -> 192.168.64.11:3128 [0]
6 - TCP 192.168.64.11:3128 -> 10.0.9.15:51002 [0]
7 - TCP 10.0.9.15:51002 -> 192.168.64.11:3128 [15047]
So I get an inode number on first query, then some NLMSG_DONE returns (stepping through code confirmed this was the path). Why don't I get the same result for say lines 1 and 3?
Appreciate any help or advice.
Found the answer and posting in case anyone stumbles across it:
I had a uint16_t as the return type from the recv code when in fact it should have been ino_t or uint32_t. I discovered this when I noticed that a few of the inodes matched correctly after a fresh reboot and then after a while stopped matching with no code changed (inode count obviously incrementing). Using the correct type in the function return sorted the problem (so the code I posted is actually correct!)
I was getting multi part messages. I should have looped whilst NLM_F_MULTI was set in the flags and then left the loop when receiving NLMSG_DONE.

How to read h264 stream as a file from the USB webcam directly in c/c++ without using opencv?

I am able to read a video file of h264 format and doing some machine learning inference on top of it. The code works absolutely fine for input from a file. Below code is a sample code from Deepstream SDK
// Opens the input file in binary mode and allocates the load/packet
// scratch buffers. Exits the process if the file cannot be opened.
FileDataProvider(const char *szFilePath, simplelogger::Logger *logger)
: logger_(logger)
{
fp_ = fopen(szFilePath, "rb");
//fp_ = fopen("/dev/video0", "rb");
if (nullptr == fp_) {
LOG_ERROR(logger, "Failed to open file " << szFilePath);
exit(1);
}
// nLoadBuf_ / nPktBuf_ are member sizes declared elsewhere in the class.
pLoadBuf_ = new uint8_t[nLoadBuf_];
pPktBuf_ = new uint8_t[nPktBuf_];
// NOTE(review): new[] throws std::bad_alloc rather than returning nullptr,
// so this assert is effectively redundant — and pPktBuf_ is not checked.
assert(nullptr != pLoadBuf_);
}
// Releases the input stream and both scratch buffers.
// delete[] on a null pointer is a guaranteed no-op, so no guards are needed.
~FileDataProvider() {
    if (fp_ != nullptr) {
        fclose(fp_);
    }
    delete [] pLoadBuf_;
    delete [] pPktBuf_;
}
What is requirement ?
Read from the Logitech c920 webcam instead for video file.
I know How to read from webcam using opencv. But I don't want to use opencv here.
My Research
Using v4l we can get the stream and display it in vlc.
Camera supports below formats.
#ubox:~$ v4l2-ctl --device=/dev/video1 --list-formats
ioctl: VIDIOC_ENUM_FMT Index : 0 Type : Video Capture
Pixel Format: 'YUYV' Name : YUYV 4:2:2
Index : 1 Type : Video Capture Pixel Format: 'H264'
(compressed) Name : H.264
Index : 2 Type : Video Capture Pixel Format: 'MJPG'
(compressed) Name : Motion-JPEG
Reading output of a USB webcam in Linux
vlc v4l2:///dev/video1 --v4l2-chroma=h264 - this displays the video from the webcam.
How to do this?
- Now how to feed this live stream into
above sample code such that it reads from the webcam rather than file?
[update-1]
- In other words, does v4l have an option to write the video stream to disk in h264 format? So that I can read that file like before (above code) while it (v4l) is writing to disk.
[update-2]
- we can use ffmpeg instead of v4l. If any solutions for using ffmpeg to save the video stream into disk continuously, so that other programs reads that file ?
Before using ioctl to capture frames from camera, you need to set the format like below first.
// Open the capture device and select the compressed H.264 pixel format.
// VIDIOC_S_FMT must be issued before any buffers are requested.
fp_ = open("/dev/video0", O_RDWR);
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
// NOTE(review): the return values of open() and ioctl() are unchecked here.
ioctl(fp_, VIDIOC_S_FMT, &fmt);
then, initialize and map buffer
// One mmap'ed capture buffer: pointer into driver memory, payload length,
// and the v4l2 buffer flags captured at dequeue time.
struct Buffer
{
void *start;
unsigned int length;
unsigned int flags;
};
// Number of driver buffers to request, and the table of mappings.
int buffer_count_ = 4;
Buffer *buffers_;
// Requests buffer_count_ mmap-able capture buffers from the driver, maps
// each into our address space, and queues them all so streaming can start.
// Returns false (after unmapping anything already mapped) on any failure.
bool AllocateBuffer()
{
    struct v4l2_requestbuffers req = {0};
    req.count = buffer_count_;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fp_, VIDIOC_REQBUFS, &req) < 0)
    {
        perror("ioctl Requesting Buffer");
        return false;
    }
    buffers_ = new Buffer[buffer_count_];
    for (int i = 0; i < buffer_count_; i++)
    {
        // Query the offset/length of buffer i inside the driver's memory.
        struct v4l2_buffer buf = {0};
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (ioctl(fp_, VIDIOC_QUERYBUF, &buf) < 0)
        {
            perror("ioctl Querying Buffer");
            return false;
        }
        // BUG FIX: the original mmap'ed through fd_, but the descriptor
        // opened on /dev/video0 is fp_ everywhere else in this code
        // (VIDIOC_REQBUFS/QUERYBUF/QBUF above and below); use fp_ here too.
        buffers_[i].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fp_, buf.m.offset);
        buffers_[i].length = buf.length;
        if (MAP_FAILED == buffers_[i].start)
        {
            printf("MAP FAILED: %d\n", i);
            for (int j = 0; j < i; j++)
                munmap(buffers_[j].start, buffers_[j].length);
            return false;
        }
        // Hand the buffer to the driver so it can fill it with a frame.
        if (ioctl(fp_, VIDIOC_QBUF, &buf) < 0)
        {
            perror("ioctl Queue Buffer");
            return false;
        }
    }
    return true;
}
STREAMON to start capturing
// Start streaming: the driver begins filling the queued buffers.
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ioctl(fp_, VIDIOC_STREAMON, &type);
finally read a frame from the mapped buffer. Generally, CaptureImage() will be in the while loop.
Buffer CaptureImage()
{
fd_set fds;
FD_ZERO(&fds);
FD_SET(fd_, &fds);
struct timeval tv = {0};
tv.tv_sec = 1;
tv.tv_usec = 0;
int r = select(fd_ + 1, &fds, NULL, NULL, &tv);
if (r == 0)
{
// timeout
}
struct v4l2_buffer buf = {0};
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
while (ioctl(fp_, VIDIOC_DQBUF, &buf) < 0)
{
perror("Retrieving Frame");
}
struct Buffer buffer = {.start = buffers_[buf.index].start,
.length = buf.bytesused,
.flags = buf.flags};
if (ioctl(fp_, VIDIOC_QBUF, &buf) < 0)
{
perror("Queue buffer");
}
return buffer;
}

Detect USB hardware keylogger

I need to determine whether a hardware keylogger has been plugged into a PC with a USB keyboard. It needs to be done by software, from user-land. However, the wiki says it is impossible to detect an HKL via software, although several methods exist. The best — and I think only — overview on this theme available online is "Detecting Hardware Keyloggers, by Fabian Mihailowitsch - youtube".
Using this overview I am developing a tool to detect USB hardware keyloggers. The sources for detecting PS/2 keyloggers was already shared by author and available here. So my task is to make it worked for USB only.
As suggested I am using libusb library to interfere with USB devices in system.
So, here are the methods I chose in order to detect an HKL:
Find USB keyboard that bugged by HKL. Note that HKL is usually
invisible from device list in system or returned by libusb.
Detect Keyghost HKL by: Interrupt read from USB HID device, send usb reset (libusb_reset_device), read interrupt again. If data returned on last read is not nulls then keylogger detected. It is described on page 45 of Mihailowitsch's presentation
Time measurement. The idea is measure time of send/receive packets using control transfer for original keyboard for thousands times. In case HKL has been plugged, program will measure time again and then compare the time with the original value. For HKL it have to be much(or not so much) greater.
Algorithm is:
Send an output report to Keyboard(as Control transfer) (HID_REPORT_TYPE_OUTPUT 0x02 )
Wait for ACKed packet
Repeat Loop (10.000 times)
Measure time
Below is my code according to steps of detection.
1. Find USB keyboard
// Walks the NULL-terminated libusb device list and returns the first device
// whose descriptor satisfies the predicate; nullptr when none matches (or
// when the list itself has not been populated).
libusb_device * UsbKeyboard::GetSpecifiedDevice(PredicateType pred)
{
    if (_usbDevices == nullptr)
        return nullptr;
    for (int idx = 0; _usbDevices[idx] != NULL; ++idx)
    {
        libusb_device *candidate = _usbDevices[idx];
        struct libusb_device_descriptor desc;
        // Skip devices whose descriptor cannot be read.
        if (libusb_get_device_descriptor(candidate, &desc) >= 0 && pred(desc))
            return candidate;
    }
    return nullptr;
}
// Finds the first USB device whose iProduct string contains "Keyboard"
// (typically 'USB Keyboard'); returns nullptr when no such device exists.
libusb_device * UsbKeyboard::FindKeyboard()
{
    return GetSpecifiedDevice([&](libusb_device_descriptor &desc) {
        libusb_device_handle *handle =
            libusb_open_device_with_vid_pid(_context, desc.idVendor, desc.idProduct);
        if (handle == nullptr)
            return false;
        // product description contains 'Keyboard', usually string is 'USB Keyboard'
        unsigned char product[255] = "";
        bool matches = false;
        if (libusb_get_string_descriptor_ascii(handle, desc.iProduct, product, sizeof(product)) >= 0)
            matches = (strstr((char*)product, "Keyboard") != nullptr);
        libusb_close(handle);
        return matches;
    });
}
Here we're iterating through all USB devices in system and checks their Product string. In my system this string for keyboard is 'USB keyboard' (obviously).
Is it stable way to detect keyboard through Product string? Is there other ways?
2. Detect Keyghost HKL using Interrupt read
// KeyGhost probe: open and claim the keyboard, perform an interrupt IN read
// on endpoint 0x81 and print the reply next to a reference pattern.
// Returns 0 on success, otherwise the (negative) libusb error code.
int UsbKeyboard::DetectKeyghost(libusb_device *kbdev)
{
    int r, i;
    int transferred;
    unsigned char answer[PACKET_INT_LEN];
    unsigned char question[PACKET_INT_LEN];
    // Reference pattern; only printed for comparison, never sent.
    for (i = 0; i < PACKET_INT_LEN; i++) question[i] = 0x40 + i;
    libusb_device_handle *devh = nullptr;
    if ((r = libusb_open(kbdev, &devh)) < 0)
    {
        ShowError("Error open device", r);
        return r;
    }
    r = libusb_set_configuration(devh, 1);
    if (r < 0)
    {
        ShowError("libusb_set_configuration error ", r);
        goto out;
    }
    printf("Successfully set usb configuration 1\n");
    r = libusb_claim_interface(devh, 0);
    if (r < 0)
    {
        ShowError("libusb_claim_interface error ", r);
        goto out;
    }
    // Interrupt read from IN endpoint 0x81 (the keyboard's HID endpoint).
    r = libusb_interrupt_transfer(devh, 0x81 , answer, PACKET_INT_LEN,
                                  &transferred, TIMEOUT);
    if (r < 0)
    {
        ShowError("Interrupt read error ", r);
        goto out;
    }
    if (transferred < PACKET_INT_LEN)
    {
        // NOTE(review): r is >= 0 here, so a short read is still reported as
        // success to the caller — confirm whether that is intended.
        ShowError("Interrupt transfer short read %", r);
        goto out;
    }
    // Dump question/answer bytes side by side, 8 pairs per line.
    for (i = 0; i < PACKET_INT_LEN; i++) {
        if (i % 8 == 0)
            printf("\n");
        printf("%02x, %02x; ", question[i], answer[i]);
    }
    printf("\n");
    r = 0;
out:
    libusb_close(devh);
    // BUG FIX: the original always returned 0 here, masking every failure
    // that jumped to out; propagate the last libusb status instead.
    return r;
}
I've got such error on libusb_interrupt_transfer:
libusb: error [hid_submit_bulk_transfer] HID transfer failed: [5] Access denied
Interrupt read error - Input/Output Error (LIBUSB_ERROR_IO) (GetLastError() - 1168)
No clue why 'access denied', then IO error, and GetLastError() returns 1168, which means - Element not found (What element?). Looking for help here.
Time measurement. Send output report and wait for ACK packet.
// Timing probe: sends a HID OUTPUT report via a control transfer, then reads
// an INPUT report back so callers can measure the round-trip latency.
// Returns 0 on success, otherwise the (negative) libusb error code.
int UsbKeyboard::SendOutputReport(libusb_device *kbdev)
{
    const int PACKET_INT_LEN = 1;
    int r, i;
    unsigned char answer[PACKET_INT_LEN];
    unsigned char question[PACKET_INT_LEN];
    for (i = 0; i < PACKET_INT_LEN; i++) question[i] = 0x30 + i;
    // BUG FIX: the original loop started at i = 1, which never executes when
    // PACKET_INT_LEN == 1 and left answer[0] uninitialized; start at 0.
    for (i = 0; i < PACKET_INT_LEN; i++) answer[i] = 0;
    libusb_device_handle *devh = nullptr;
    if ((r = libusb_open(kbdev, &devh)) < 0)
    {
        ShowError("Error open device", r);
        return r;
    }
    r = libusb_set_configuration(devh, 1);
    if (r < 0)
    {
        ShowError("libusb_set_configuration error ", r);
        goto out;
    }
    printf("Successfully set usb configuration 1\n");
    r = libusb_claim_interface(devh, 0);
    if (r < 0)
    {
        ShowError("libusb_claim_interface error ", r);
        goto out;
    }
    printf("Successfully claim interface\n");
    // SET_REPORT (output) on the default control endpoint...
    r = libusb_control_transfer(devh, CTRL_OUT, HID_SET_REPORT, (HID_REPORT_TYPE_OUTPUT << 8) | 0x00, 0, question, PACKET_INT_LEN, TIMEOUT);
    if (r < 0) {
        ShowError("Control Out error ", r);
        goto out;
    }
    // ...then GET_REPORT (input) as the read-back acknowledgement.
    r = libusb_control_transfer(devh, CTRL_IN, HID_GET_REPORT, (HID_REPORT_TYPE_INPUT << 8) | 0x00, 0, answer, PACKET_INT_LEN, TIMEOUT);
    if (r < 0) {
        ShowError("Control In error ", r);
        goto out;
    }
    r = 0;
out:
    libusb_close(devh);
    // BUG FIX: the original always returned 0 here, masking every failure
    // that jumped to out; propagate the last libusb status instead.
    return r;
}
Error the same as for read interrupt:
Control Out error - Input/Output Error (LIBUSB_ERROR_IO) (GetLastError() - 1168
)
How to fix please? Also how to wait for ACK packet?
Thank you.
UPDATE:
I've spent a day on searching and debbuging. So currently my problem is only to
send Output report via libusb_control_transfer. The 2nd method with interrupt read is unnecessary to implement because of Windows denies access to read from USB device using ReadFile.
It is only libusb stuff left, here is the code I wanted to make work (from 3rd example):
// sending Output report (LED)
// ...
// 65-byte report buffer: report ID in byte 0, payload after it.
unsigned char buf[65];
buf[0] = 1; // First byte is report number
buf[1] = 0x80;
// SET_REPORT(Output) on the control endpoint; wValue high byte = report type
// (0x02), low byte = report ID. Only 2 bytes are sent.
// NOTE(review): wValue's low byte is 0x00 while buf[0] carries report ID 1 —
// confirm which report ID the device actually expects.
r = libusb_control_transfer(devh, CTRL_OUT,
HID_SET_REPORT/*0x9*/, (HID_REPORT_TYPE_OUTPUT/*0x2*/ << 8) | 0x00,
0, buf, (uint16_t)2, 1000);
...
The error I've got:
[ 0.309018] [00001c0c] libusb: debug [_hid_set_report] Failed to Write HID Output Report: [1] Incorrect function
Control Out error - Input/Output Error (LIBUSB_ERROR_IO) (GetLastError() - 1168)
This error occures right after DeviceIoControl call in libusb internals.
What means "Incorrect function" there?

How to read the initial state of a MIDI Foot Controller?

I know MIDI allows me to read the state of a MIDI Foot Controller by catching a MIDI Message indicating a Control Change. But what if the user has not touched/changed the control yet? Am I still able to read the state/value? What would be the way to do that?
This is my code for catching Midi Messages using OSX CoreMIDI
// Creates a CoreMIDI client and input port, then connects every available
// MIDI source to that port; midiInputCallback receives incoming packets.
// Errors are logged but do not stop the remaining setup steps.
void initMidi()
{
    MIDIClientRef midiClient;
    MIDIPortRef inputPort;
    OSStatus status;
    MIDIEndpointRef src;
    status = MIDIClientCreate(CFSTR("testing"), NULL, NULL, &midiClient);
    if (status != noErr)
        // BUG FIX: Objective-C string literals use @"…" — the original
        // #"…" does not compile (likely a transcription artifact).
        NSLog(@"Error creating MIDI client: %d", status);
    status = MIDIInputPortCreate(midiClient, CFSTR("Input"), midiInputCallback, NULL, &inputPort);
    if (status != noErr)
        NSLog(@"Error creating MIDI input port: %d", status);
    ItemCount numOfDevices = MIDIGetNumberOfDevices();
    // just try to connect to every device
    for (ItemCount i = 0; i < numOfDevices; i++) {
        src = MIDIGetSource(i);
        status = MIDIPortConnectSource(inputPort, src, NULL);
    }
}
// CoreMIDI read callback: walks every packet in the list and every MIDI
// message inside each packet, logging Control Change (0xB0) messages.
void midiInputCallback(const MIDIPacketList *list,
                       void *procRef,
                       void *srcRef)
{
    for (UInt32 i = 0; i < list->numPackets; i++) {
        const MIDIPacket *packet = &list->packet[i];
        for (UInt16 j = 0, size = 0; j < packet->length; j += size) {
            UInt8 status = packet->data[j];
            // Message length determined by status byte range.
            // NOTE(review): running status (a leading data byte < 0x80) is
            // not handled — confirm the controller always sends status bytes.
            if (status < 0xC0) size = 3;
            else if (status < 0xE0) size = 2;
            else if (status < 0xF0) size = 3;
            else if (status < 0xF3) size = 3;
            else if (status == 0xF3) size = 2;
            else size = 1;
            switch (status & 0xF0) {
                case 0xb0:
                    // BUG FIX: Objective-C string literals use @"…" — the
                    // original #"…" does not compile.
                    NSLog(@"MIDI Control Changed: %d %d", packet->data[j + 1], packet->data[j + 2]);
                    break;
            }
        }
    }
}
If you did not reset the device, and did not change a control, then your program does not know the state of a control until it receives a message.
Some devices might have vendor-specific commands to read the current state of a control, or to dump the entire state.
The short answer is - No - you cannot know until an event occurs
Other answers are correct, if you have IN and OUT connected to a controller that allows interrogation through SysEx messages (Manufacturer specific)
To be more helpful:
The default state of all controllers (you are wanting to use) should be OFF on startup
e.g. Pitch Bend = centered, Modulation = ZERO, Sustain = OFF etc…
This has been the state of play since 1980's so it is not been a real problem
If you have your foot down (on a pedal) before you start your app you will be in sync the moment you release it
Good luck