Here is my situation:
I'm using Audio Queue Services in order to record sound. When the callback function is called (as soon as the buffer is full), I send the buffer content to an Objective-C object to process it.
void AQRecorder::MyInputBufferHandler(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
NSLog(#"Callback ! Sending buffer content ...");
aqr->objectiveC_Call([NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataBytesCapacity]);
aqr->mRecordPacket += inNumPackets;
}
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
void AQRecorder::objectiveC_Call(NSData *buffer) {
MyObjCObject *myObj = [[MyObjCObject alloc] init];
[myObj process:buffer];
}
The problem here is that I get an EXC_BAD_ACCESS during processing (from myObj's process method), and after some research I guess that it's related to myObj being released.
MyObjCObject's process method loops over the buffer content, and I get the EXC_BAD_ACCESS error even if I just do an NSLog on the buffer values.
-(void)run:(NSData *)bufferReceived {
NSUInteger bufferSize = [bufferReceived length];
self.buffer = (short *)malloc(bufferSize);
memcpy(self.buffer, [bufferReceived bytes], bufferSize);
for(int i= 0; i < bufferSize; i++) {
NSLog("value: %i", buffer[i]);
}
}
Can you please tell me the right way to do this?
PS: My files have the .mm extension, ARC is enabled on the whole project, and the rest of my code seems to work as expected.
Thanks!
You malloc the buffer and cast it to a 'short*', but then you enumerate the buffer using 'bufferSize', which is a count of bytes. Since each iteration moves forward by a 'short' rather than a byte, the 'for' loop eventually attempts to read past the end of the buffer, potentially resulting in the 'EXC_BAD_ACCESS'. You should change the loop to something like:
for(int i= 0; i < bufferSize / sizeof(short); i++) {
NSLog("value: %i", buffer[i]);
}
Either that or change the type of the 'buffer' member variable.
I am trying to build an NDK-based C++ low-latency audio player which needs to handle three operations for multiple audios.
Play from assets.
Stream from an online source.
Play from local device storage.
Starting from one of the Oboe samples provided by Google, I added another function to the NDKExtractor.cpp class to extract audio from a URL and render it to the audio device while still reading from the source at the same time.
int32_t NDKExtractor::decode(char *file, uint8_t *targetData, AudioProperties targetProperties) {
LOGD("Using NDK decoder: %s",file);
// Extract the audio frames
AMediaExtractor *extractor = AMediaExtractor_new();
// Using AMediaExtractor_setDataSource() here instead of AMediaExtractor_setDataSourceFd(), which the rhythm game sample uses for asset files
media_status_t amresult = AMediaExtractor_setDataSource(extractor, file);
if (amresult != AMEDIA_OK) {
LOGE("Error setting extractor data source, err %d", amresult);
return 0;
}
// Specify our desired output format by creating it from our source
AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, 0);
int32_t sampleRate;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &sampleRate)) {
LOGD("Source sample rate %d", sampleRate);
if (sampleRate != targetProperties.sampleRate) {
LOGE("Input (%d) and output (%d) sample rates do not match. "
"NDK decoder does not support resampling.",
sampleRate,
targetProperties.sampleRate);
return 0;
}
} else {
LOGE("Failed to get sample rate");
return 0;
}
int32_t channelCount;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channelCount)) {
LOGD("Got channel count %d", channelCount);
if (channelCount != targetProperties.channelCount) {
LOGE("NDK decoder does not support different "
"input (%d) and output (%d) channel counts",
channelCount,
targetProperties.channelCount);
}
} else {
LOGE("Failed to get channel count");
return 0;
}
const char *formatStr = AMediaFormat_toString(format);
LOGD("Output format %s", formatStr);
const char *mimeType;
if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mimeType)) {
LOGD("Got mime type %s", mimeType);
} else {
LOGE("Failed to get mime type");
return 0;
}
// Obtain the correct decoder
AMediaCodec *codec = nullptr;
AMediaExtractor_selectTrack(extractor, 0);
codec = AMediaCodec_createDecoderByType(mimeType);
AMediaCodec_configure(codec, format, nullptr, nullptr, 0);
AMediaCodec_start(codec);
// DECODE
bool isExtracting = true;
bool isDecoding = true;
int32_t bytesWritten = 0;
while (isExtracting || isDecoding) {
if (isExtracting) {
// Obtain the index of the next available input buffer
ssize_t inputIndex = AMediaCodec_dequeueInputBuffer(codec, 2000);
//LOGV("Got input buffer %d", inputIndex);
// The input index doubles as a status code if it is negative
if (inputIndex < 0) {
if (inputIndex == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
// LOGV("Codec.dequeueInputBuffer try again later");
} else {
LOGE("Codec.dequeueInputBuffer unknown error status");
}
} else {
// Obtain the actual buffer and read the encoded data into it
size_t inputSize;
uint8_t *inputBuffer = AMediaCodec_getInputBuffer(codec, inputIndex,
&inputSize);
//LOGV("Sample size is: %d", inputSize);
ssize_t sampleSize = AMediaExtractor_readSampleData(extractor, inputBuffer,
inputSize);
auto presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
if (sampleSize > 0) {
// Enqueue the encoded data
AMediaCodec_queueInputBuffer(codec, inputIndex, 0, sampleSize,
presentationTimeUs,
0);
AMediaExtractor_advance(extractor);
} else {
LOGD("End of extractor data stream");
isExtracting = false;
// We need to tell the codec that we've reached the end of the stream
AMediaCodec_queueInputBuffer(codec, inputIndex, 0, 0,
presentationTimeUs,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
}
}
}
if (isDecoding) {
// Dequeue the decoded data
AMediaCodecBufferInfo info;
ssize_t outputIndex = AMediaCodec_dequeueOutputBuffer(codec, &info, 0);
if (outputIndex >= 0) {
// Check whether this is set earlier
if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
LOGD("Reached end of decoding stream");
isDecoding = false;
} else {
// Valid index, acquire buffer
size_t outputSize;
uint8_t *outputBuffer = AMediaCodec_getOutputBuffer(codec, outputIndex,
&outputSize);
/*LOGV("Got output buffer index %d, buffer size: %d, info size: %d writing to pcm index %d",
outputIndex,
outputSize,
info.size,
m_writeIndex);*/
// copy the data out of the buffer
memcpy(targetData + bytesWritten, outputBuffer, info.size);
bytesWritten += info.size;
AMediaCodec_releaseOutputBuffer(codec, outputIndex, false);
}
} else {
// The outputIndex doubles as a status return if its value is < 0
switch (outputIndex) {
case AMEDIACODEC_INFO_TRY_AGAIN_LATER:
LOGD("dequeueOutputBuffer: try again later");
break;
case AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED:
LOGD("dequeueOutputBuffer: output buffers changed");
break;
case AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED:
LOGD("dequeueOutputBuffer: output outputFormat changed");
format = AMediaCodec_getOutputFormat(codec);
LOGD("outputFormat changed to: %s", AMediaFormat_toString(format));
break;
}
}
}
}
// Clean up
AMediaFormat_delete(format);
AMediaCodec_delete(codec);
AMediaExtractor_delete(extractor);
return bytesWritten;
}
Now the problem I am facing is that this code first extracts all the audio data and saves it into a buffer, which then becomes part of AFileDataSource, which I derived from the DataSource class in the same sample.
Only after it has finished extracting the whole file does playback start, via the onAudioReady() callback of the Oboe AudioStreamBuilder.
What I need is to start playing while the audio is still being extracted, chunk by chunk.
Optional query: Aside from the main question, this code blocks the UI even though I created a foreground service to communicate with the NDK functions that execute it. Any thoughts on this?
You probably solved this already, but for future readers...
You need a FIFO buffer to store the decoded audio. You can use Oboe's FIFO buffer, e.g. oboe::FifoBuffer.
You can have a low/high watermark for the buffer and a state machine, so you start decoding when the buffer is almost empty and you stop decoding when it's full (you'll figure out the other states that you need).
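To make that concrete, here is a minimal sketch of the decoding-thread loop. It assumes a hypothetical Fifo type exposing framesAvailable()/capacity() and a hypothetical Decoder exposing hasMoreData()/decodeChunkInto(); substitute your real FIFO (e.g. oboe::FifoBuffer) and extractor wrapper, and pick watermarks that suit your latency needs.

#include <chrono>
#include <cstdint>
#include <thread>

// Hypothetical interfaces: Fifo must expose framesAvailable()/capacity(),
// Decoder must expose hasMoreData()/decodeChunkInto(fifo).
template <typename Fifo, typename Decoder>
void decodeLoop(Fifo &fifo, Decoder &decoder) {
    enum class State { Filling, Draining };
    State state = State::Filling;

    const int32_t low  = fifo.capacity() / 4;       // resume decoding below this
    const int32_t high = (fifo.capacity() * 3) / 4; // pause decoding above this

    while (decoder.hasMoreData()) {
        const int32_t buffered = fifo.framesAvailable();
        if (state == State::Draining && buffered < low) {
            state = State::Filling;    // buffer nearly empty: decode again
        } else if (state == State::Filling && buffered > high) {
            state = State::Draining;   // buffer nearly full: stop decoding
        }

        if (state == State::Filling) {
            decoder.decodeChunkInto(fifo);  // decode one small chunk and push it
        } else {
            // Nothing to do until the audio callback has drained some frames.
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
        }
    }
}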
As a side note, I implemented such a player only to find out some time later that the AAC codec is broken on some devices (Xiaomi and Amazon come to mind), so I had to throw away the AMediaCodec/AMediaExtractor parts and use an AAC library instead.
You have to implement a ring buffer (or use the one implemented in the Oboe example, LockFreeQueue.h) and, from the extracting thread, copy the data into buffers that you push onto the ring buffer. On the other end of the ring buffer, the audio thread takes that data from the queue and copies it into the audio buffer. This happens in the onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames) callback that you have to implement in your class (see the Oboe docs). Be sure to follow all the good practices on the audio thread (don't allocate or free memory there, no mutexes, no file I/O, etc.).
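As a minimal, self-contained sketch of that consumer side, assuming 16-bit PCM and a single producer / single consumer (the small ring buffer below stands in for LockFreeQueue.h or oboe::FifoBuffer; all class and member names are illustrative):

#include <oboe/Oboe.h>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <vector>

// Very small single-producer/single-consumer ring buffer of 16-bit samples.
// The decoding thread calls write(), only the audio thread calls read().
class SampleRing {
public:
    explicit SampleRing(size_t capacity) : mData(capacity) {}

    size_t write(const int16_t *src, size_t count) {
        size_t written = 0;
        while (written < count && size() < mData.size()) {
            mData[mWrite % mData.size()] = src[written++];
            mWrite.fetch_add(1, std::memory_order_release);
        }
        return written;
    }

    size_t read(int16_t *dst, size_t count) {
        size_t readCount = 0;
        while (readCount < count && size() > 0) {
            dst[readCount++] = mData[mRead % mData.size()];
            mRead.fetch_add(1, std::memory_order_release);
        }
        return readCount;
    }

    size_t size() const { return mWrite.load() - mRead.load(); }

private:
    std::vector<int16_t> mData;
    std::atomic<size_t> mWrite{0};
    std::atomic<size_t> mRead{0};
};

// Audio callback: pull decoded samples from the ring, pad with silence on underrun.
class StreamingCallback : public oboe::AudioStreamCallback {
public:
    explicit StreamingCallback(SampleRing &ring) : mRing(ring) {}

    oboe::DataCallbackResult onAudioReady(oboe::AudioStream *oboeStream,
                                          void *audioData,
                                          int32_t numFrames) override {
        auto *out = static_cast<int16_t *>(audioData);
        const size_t samplesWanted =
            static_cast<size_t>(numFrames) * oboeStream->getChannelCount();
        const size_t samplesRead = mRing.read(out, samplesWanted);
        if (samplesRead < samplesWanted) {
            std::memset(out + samplesRead, 0,
                        (samplesWanted - samplesRead) * sizeof(int16_t));
        }
        return oboe::DataCallbackResult::Continue;
    }

private:
    SampleRing &mRing;
};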
Optional query: A service does not run in a separate thread, so obviously if you call it from the UI thread it blocks the UI. Look at other types of service: an IntentService, or a service with a Messenger, will run the work on a separate thread on the Java side, or you can create threads on the C++ side using std::thread.
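A trivial sketch of the std::thread option on the native side (the function is a hypothetical JNI-facing entry point; the body is a placeholder):

#include <thread>

// Hypothetical native entry point (e.g. called through JNI from the service):
// run the extract/decode loop on a worker thread so the caller returns immediately.
void startStreamingDecode() {
    std::thread worker([] {
        // run the extractor/decoder loop here, feeding the FIFO / ring buffer
    });
    worker.detach();  // or keep the std::thread around and join() it on shutdown
}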
I need the debugger I am writing to give me the names of the shared libraries that the program being debugged links with or loads dynamically. I get the rendezvous structure as described in link.h (and in answers to other questions) using DT_DEBUG, in the loop over _DYNAMIC[].
First, the debugger never hits the breakpoint set at r_brk.
Then I put a breakpoint in the program being debugged and use link_map to print all loaded libraries. It only prints libraries loaded by the debugger, not by the program being debugged.
It seems that the rendezvous structure I am getting belongs to the debugger itself. If so, could you please tell me how to get the rendezvous structure of the program I am debugging? If what I am doing should work, your confirmation would be helpful, perhaps with some hint as to what else might be needed.
Thank you.
// You need to include <link.h>. All structures are explained
// in elf(5) manual pages.
// Caller has opened "prog_name", the debuggee, and fd is the
// file descriptor. You could pass the name instead and do the open()
// here.
// The debugger is tracing the debuggee, so we are using ptrace().
void getRandezvousStructure(int fd, pid_t pd, r_debug& rendezvous) {
Elf64_Ehdr elfHeader;
char* elfHdrPtr = (char*) &elfHeader;
read(fd, elfHdrPtr, sizeof(elfHeader));
Elf64_Addr debugeeEntry = elfHeader.e_entry; // entry point of the debuggee
// Here, set a break at debugeeEntry, and after "PTRACE_CONT",
// and waitpid(), remove the break, and set rip back to debugeeEntry.
// After that, here it goes.
lseek(fd, elfHeader.e_shoff, SEEK_SET); // offset of section header
Elf64_Shdr secHeader;
elfHdrPtr = (char*) &secHeader;
Elf64_Dyn* dynPtr;
// Keep reading until we get: secHeader.sh_addr.
// That is the address of _DYNAMIC.
for (int i = 0; i < elfHeader.e_shnum; i++) {
read(fd, elfHdrPtr, elfHeader.e_shentsize);
if (secHeader.sh_type == SHT_DYNAMIC) {
dynPtr = (Elf64_Dyn*) secHeader.sh_addr; // address of _DYNAMIC
break;
}
}
// Here, we get "dynPtr->d_un.d_ptr" which points to rendezvous
// structure, r_debug
uint64_t data;
for (;; dynPtr++) {
data = ptrace(PTRACE_PEEKDATA, pd, dynPtr, 0);
if (data == DT_NULL) break;
if (data == DT_DEBUG) {
data = ptrace(PTRACE_PEEKDATA, pd, (uint64_t) dynPtr + 8 , 0);
break;
}
}
// Using ptrace(), we read a sufficiently large chunk of the debuggee's
// memory to copy into rendezvous.
int ren_size = sizeof(rendezvous);
char* buffer = new char[2 * ren_size];
char* p = buffer;
int total = 0;
uint64_t value;
for (;;) {
value = ptrace(PTRACE_PEEKDATA, pd, data, 0);
memcpy(p, &value, sizeof(value));
total += sizeof(value);
if (total > ren_size + sizeof(value)) break;
data += sizeof(data);
p += sizeof(data);
}
// Finally, copy the memory to rendezvous, which was
// passed by reference.
memcpy(&rendezvous, buffer, ren_size);
delete [] buffer;
}
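Once the rendezvous structure has been read, the library names the question asks for can be collected by walking the link_map list it points to, again with PTRACE_PEEKDATA. A rough sketch (error handling omitted; it assumes a 64-bit debuggee, matching the Elf64 code above, and that the dynamic linker has already populated the list, e.g. after the break at the entry point or at r_brk):

#include <link.h>          // struct r_debug, struct link_map
#include <sys/ptrace.h>
#include <sys/types.h>
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Read a NUL-terminated string from the debuggee's memory, word by word.
static std::string readDebuggeeString(pid_t pd, uint64_t addr) {
    std::string s;
    for (;;) {
        uint64_t word = ptrace(PTRACE_PEEKDATA, pd, addr, 0);
        const char* bytes = reinterpret_cast<const char*>(&word);
        for (size_t i = 0; i < sizeof(word); i++) {
            if (bytes[i] == '\0') return s;
            s += bytes[i];
        }
        addr += sizeof(word);
    }
}

// Walk rendezvous.r_map (a list in the debuggee's address space) and collect
// the l_name of every loaded object.
std::vector<std::string> listLoadedLibraries(pid_t pd, const r_debug& rendezvous) {
    std::vector<std::string> names;
    uint64_t mapAddr = (uint64_t) rendezvous.r_map;
    while (mapAddr != 0) {
        link_map map;
        char* dst = (char*) &map;
        // Copy the link_map node out of the debuggee, one word at a time.
        for (size_t off = 0; off < sizeof(map); off += sizeof(uint64_t)) {
            uint64_t word = ptrace(PTRACE_PEEKDATA, pd, mapAddr + off, 0);
            memcpy(dst + off, &word, std::min(sizeof(word), sizeof(map) - off));
        }
        if (map.l_name != nullptr) {
            std::string name = readDebuggeeString(pd, (uint64_t) map.l_name);
            if (!name.empty()) names.push_back(name); // the main executable has an empty name
        }
        mapAddr = (uint64_t) map.l_next;
    }
    return names;
}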
I am building a screen recorder. I am using FFmpeg to build the video out of the frames I get from Google Chrome, and I get a green screen in the output video. I think there is a race condition between the threads, since I am not allowed to use the main thread to do the processing. Here is what the code looks like:
This function runs each time I get a new frame. I suspect the buffers filled by avpicture_fill and vpx_codec_get_cx_data are being overwritten before write_ivf_frame_header and WriteFile are done.
I am thinking of creating a queue onto which this function pushes the pp::VideoFrame object, while another thread, protected by a mutex, dequeues it and does the processing below.
What is the best solution for this problem, and what is the optimal way of debugging it?
void EncoderInstance::OnGetFrame(int32_t result, pp::VideoFrame frame) {
if (result != PP_OK)
return;
const uint8_t* data = static_cast<const uint8_t*>(frame.GetDataBuffer());
pp::Size size;
frame.GetSize(&size);
uint32_t buffersize = frame.GetDataBufferSize();
if (is_recording_) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt;
// copy the pixels into our "raw input" container.
int bytes_filled = avpicture_fill(&pic_raw, data, AV_PIX_FMT_YUV420P, out_width, out_height);
if(!bytes_filled) {
Logger::Log("Cannot fill the raw input buffer");
return;
}
if(vpx_codec_encode(&codec, &raw, frame_cnt, 1, flags, VPX_DL_REALTIME))
die_codec(&codec, "Failed to encode frame");
while( (pkt = vpx_codec_get_cx_data(&codec, &iter)) ) {
switch(pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
glb_app_thread.message_loop().PostWork(callback_factory_.NewCallback(&EncoderInstance::write_ivf_frame_header, pkt));
glb_app_thread.message_loop().PostWork(callback_factory_.NewCallback(&EncoderInstance::WriteFile, pkt));
break;
default:break;
}
}
frame_cnt++;
}
video_track_.RecycleFrame(frame);
if (need_config_) {
ConfigureTrack();
need_config_ = false;
} else {
video_track_.GetFrame(
callback_factory_.NewCallbackWithOutput(
&EncoderInstance::OnGetFrame));
}
}
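For reference, the queue approach described in the question (a mutex plus condition variable around std::queue) could be sketched like this; the Frame type and all names are illustrative, and Frame would hold the pp::VideoFrame or a copy of its pixel data:

#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

// Thread-safe frame queue: the capture callback only pushes, while a worker
// thread pops and runs the avpicture_fill / vpx_codec_encode / write sequence.
template <typename Frame>
class FrameQueue {
public:
    void push(Frame frame) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(std::move(frame));
        }
        cv_.notify_one();
    }

    // Blocks the worker thread until a frame is available.
    Frame pop() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        Frame frame = std::move(queue_.front());
        queue_.pop();
        return frame;
    }

private:
    std::queue<Frame> queue_;
    std::mutex mutex_;
    std::condition_variable cv_;
};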
I'm trying to transcode a video with the help of libavcodec.
When transcoding big video files (an hour or more) I get huge memory leaks in avcodec_encode_video. I have tried to debug it, but with different video files different functions produce leaks, and I have got a little bit confused about that :). Here, FFMPEG with QT memory leak is the same issue that I have, but I have no idea how that person solved it. QtFFmpegwrapper seems to do the same thing I do (or I missed something).
My method is below. I take care of aFrame and aPacket outside, with av_free and av_free_packet.
int
Videocut::encode(
AVStream *anOutputStream,
AVFrame *aFrame,
AVPacket *aPacket
)
{
if (!anOutputStream ||
!aFrame ||
!aPacket)
{
return 1;
/* NOTREACHED */
}
// Only dereference the stream after the NULL check above.
AVCodecContext *outputCodec = anOutputStream->codec;
uint8_t * buffer = (uint8_t *)malloc(
sizeof(uint8_t) * _DefaultEncodeBufferSize
);
if (NULL == buffer) {
return 2;
/* NOTREACHED */
}
int packetSize = avcodec_encode_video(
outputCodec,
buffer,
_DefaultEncodeBufferSize,
aFrame
);
if (packetSize < 0) {
free(buffer);
return 1;
/* NOTREACHED */
}
aPacket->data = buffer;
aPacket->size = packetSize;
return 0;
}
The first step would be to try to reproduce your problem under Valgrind on a Linux box, if you can.
ffmpeg encoders and decoders usually do not dynamically allocate memory; they reuse buffers between calls. Leaks are usually going to be in the frames somewhere.
Note that av_free_packet will only free your dynamically allocated buffer if the packet has a destructor function!
Look at how the function is defined in libavcodec/avpacket.c:
void av_free_packet(AVPacket *pkt)
{
if (pkt) {
if (pkt->destruct) pkt->destruct(pkt);
pkt->data = NULL; pkt->size = 0;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
}
}
If there is no pkt->destruct function, no clean up takes place!
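For the same FFmpeg era as the av_free_packet() shown above (where AVPacket still has a destruct member), one way to let av_free_packet release the encoded data is to allocate the buffer with av_malloc and install av_destruct_packet as the destructor. This is only a sketch under that assumption; check your avcodec.h for the exact declarations.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
}

// Sketch: allocate with av_malloc and set the packet destructor so that
// av_free_packet() (shown above) actually frees the data.
int fillEncodedPacket(AVCodecContext *outputCodec, AVFrame *frame,
                      AVPacket *packet, int bufferSize)
{
    uint8_t *buffer = (uint8_t *) av_malloc(bufferSize);
    if (buffer == NULL)
        return 2;

    int packetSize = avcodec_encode_video(outputCodec, buffer, bufferSize, frame);
    if (packetSize < 0) {
        av_free(buffer);
        return 1;
    }

    packet->data = buffer;
    packet->size = packetSize;
    packet->destruct = av_destruct_packet; // assumption: available in this API era
    return 0;
}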
I'll start with the code:
typedef std::vector<unsigned char> CharBuf;
static const int RCV_BUF_SIZE = 1024;
SOCKET m_socket = a connected and working socket;
// ...
CharBuf buf; // Declare buffer
buf.resize(RCV_BUF_SIZE); // resize buffer to 1024
char* p_buf = reinterpret_cast<char*>(&buf[0]); // change from unsigned char to char
//char p_buf[RCV_BUF_SIZE];
int ret = recv(m_socket, p_buf, RCV_BUF_SIZE, 0); // Does not work
for (int i=0; i<RCV_BUF_SIZE; ++i) // Works (does not crash, so the buffer is ok)
char c = p_buf[i];
//...
Now when I run this code, ret becomes -1 and WSAGetLastError() returns 10014, which means the pointer is bad.
However, I can't see why this shouldn't work. If I comment out the reinterpret_cast line and use the line below it instead, it works!
It could be argued that reinterpret_cast is risky, but I think it should be OK here, since unsigned char and signed char have exactly the same size.
As far as I know, a std::vector's storage should also be safe to address directly in memory.
The funny part is that when I do the same thing with the same vector type in send() it works! Send function:
void SendData(const CharBuf& buf)
{
const char* p_buf = reinterpret_cast<const char*>(&buf[0]); // change from unsigned char to char
int ret = send(m_socket, p_buf, (int)buf.size(), 0); // Works
}
As you can see, there is no difference except CharBuf being const in this case; can that change anything?
Why is recv() more sensitive than send()? How can recv() even know the pointer is invalid (which it obviously isn't)? All it should see is a char array!
As per request, here is my whole receive function (bear in mind that I can't spell out every function in it, but I think they should be fairly self-explanatory).
bool TcpSocket::ReceiveData(CharBuf* pData)
{
if (!CheckInitialized("ReceiveData"))
return false;
if (m_status != CONNECTED_STAT)
{
AddToErrLog("Socket not connected", 1, "ReceiveData");
return false;
}
int ret;
pData->resize(RCV_BUF_SIZE);
char* p_buf = reinterpret_cast<char*>(&pData[0]);
ret = recv(m_socket, p_buf, RCV_BUF_SIZE, 0);
switch (ret)
{
case 0: // Gracefully closed
AddToLog("Connection gracefully closed", 2);
Shutdown(); // The connection is closed, no idea to keep running
return true;
case SOCKET_ERROR: // Error
ret = WSAGetLastError();
if (ret == 10004) // This indicates the socket was closed while we were waiting
AddToLog("Socket was shut down while waiting for data", 1, "ReceiveData(1)");
else
AddToErrLog("Receive data failed with code: " + CStr(ret));
AddToLog("Connection ended with error", 2);
Shutdown();
return false;
default: // Normal operation
pData->resize(ret); // Remove unused space
return true;
}
}
Never mind, I found it while I was pasting the function. As always, you find your error when you try to explain it to someone else :)
I leave it up to the reader to figure out what was wrong, but I'll give &pData[0] as a hint.
Thanks for your help :D
Found the answer myself while pasting the whole function: pData is a pointer to the vector, so pData[0] is the vector object itself rather than its first element, and &pData[0] therefore points at the vector's internal bookkeeping instead of its data. It should be &(*pData)[0].
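For completeness, the corrected fragment of ReceiveData would read:

pData->resize(RCV_BUF_SIZE);
// Dereference the pointer before indexing: pData[0] is the CharBuf object
// itself, not its first element.
char* p_buf = reinterpret_cast<char*>(&(*pData)[0]);
ret = recv(m_socket, p_buf, RCV_BUF_SIZE, 0);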