Unhandled expcetion(Xutility) C++ - c++

I am using VS2012. I am writing a simple network game. Send/receve functions work fine. I don't understand why does program still throw the same exception at the same moment - when waitForOthers() function is completed. I toggled a lot of breakpoints and it looks like the program crashes after while loop is broken in this function BEFORE waitForOthersMove() in update fun.
Expcetion:
"First-chance exception at 0x64D1CCC8 (msvcp110d.dll) in StatkiKlient.exe: 0xC0000005: Access violation reading location 0x004B6CF4.
If there is a handler for this exception, the program may be safely continued."
xutility code which vs2012 points:
// MEMBER FUNCTIONS FOR _Container_base12
inline void _Container_base12::_Orphan_all()
{ // orphan all iterators
#if _ITERATOR_DEBUG_LEVEL == 2
if (_Myproxy != 0)
{ // proxy allocated, drain it
_Lockit _Lock(_LOCK_DEBUG);
for (_Iterator_base12 **_Pnext = &_Myproxy->_Myfirstiter;
HERE -> *_Pnext != 0; *_Pnext = (*_Pnext)->_Mynextiter) <- HERE
(*_Pnext)->_Myproxy = 0;
_Myproxy->_Myfirstiter = 0;
}
#endif /* _ITERATOR_DEBUG_LEVEL == 2 */
}
Here's the code:
"main.cpp":
#include "Client.h"
Client * client;
void clientLoop();
int main()
{
client = new Client();
clientLoop();
return 0;
}
void clientLoop()
{
while(true)
{
client->update();
}
}
"Client.cpp":
#include "Client.h"
Client::Client()
{
initialized = false;
me = new Gamer();
cnet = new ClientNetwork();
}
void Client::update()
{
if(!initialized)
waitForStart(); //here crashes
waitForOthersMove();
}
void Client::waitForOthersMove()
{
Packet packet;
char data[2000];
int iResult = 0;
std::cout<<"Oczekiwanie na przeciwnikow...\n\n";
while(true)
{
iResult = cnet->receveData(data, sizeof(Packet));
if(iResult < 0)
continue;
packet.deserialize(data);
if(packet.packet_type = YOUR_MOVE)
{
//action
break;
}
}
}
void Client::consoleInit() //only for tests
{
Gamer set_up;
Packet packet;
char data[2000];
set_up.nickname = "... " + me->id;
set_up.ships[0].dir = 2;
set_up.ships[0].size = 2;
set_up.ships[0].x = 0;
set_up.ships[0].y = 0;
set_up.ships[0].dir = 1;
set_up.ships[0].size = 2;
set_up.ships[0].x = 10;
set_up.ships[0].y = 15;
set_up.ships[0].dir = 2;
set_up.ships[0].size = 2;
set_up.ships[0].x = 0;
set_up.ships[0].y = 0;
set_up.ships[0].dir = 2;
set_up.ships[0].size = 2;
set_up.ships[0].x = 0;
set_up.ships[0].y = 0;
set_up.ships[0].dir = 2;
set_up.ships[0].size = 2;
set_up.ships[0].x = 0;
set_up.ships[0].y = 0;
set_up.ships[0].dir = 2;
set_up.ships[0].size = 2;
set_up.ships[0].x = 0;
set_up.ships[0].y = 0;
packet.player = set_up;
packet.serialize(data);
cnet->sendMessage(data, sizeof(Packet));
}
void Client::gameInit()
{
Packet packet;
char packet_data[2000];
initialized = true;
std::cout<<"My id: "<<me->id<<"\n";
packet.packet_type = GAME_INIT_R;
packet.serialize(packet_data);
cnet->sendMessage(packet_data, sizeof(Packet));
consoleInit();
std::cout<<"wyslano game_init_r\n";
}
void Client::waitForStart()
{
Packet packet;
int messageLength = 0;
printf("\nPołączono z serwerem. Oczekiwanie na pozostałych graczy...\n");
cnet->testConnection();
while(true)
{
messageLength = cnet->receveData(dataBuff, sizeof(Packet));
if(messageLength < 0)
continue;
packet.deserialize(dataBuff);
if(packet.packet_type = GAME_INIT)
{
me->id = packet.player.id;
gameInit();
printf("Initialized...");
break;
}
}
} - after this parenthesis exception is thorw(i toggled breakpoint here and after this in update function)

Related

rapid TS fragment ffmpeg decoding - memory leak

Environment:
Ubuntu 16.04 (x64)
C++
ffmpeg
Use-case
Multiple MPEG-TS fragments are rapidly decoded ( numerous every sec )
The format of the TS fragments is dynamic and can't be known ahead of time
The first A/V frames of each fragment are needed to be extracted
Problem statement
The code bellow successfully decodes A/V, BUT, has a huge memory leak ( MBytes/sec )
According to the docs seems all memory is freed as it should ( does it... ? )
Why do I get this huge mem leak, what am I missing in the following code snap ?
struct MEDIA_TYPE {
ffmpeg::AVMediaType eType;
union {
struct {
ffmpeg::AVPixelFormat colorspace;
int width, height;
float fFPS;
} video;
struct : WAVEFORMATEX {
short sSampleFormat;
} audio;
} format;
};
struct FRAME {
enum { MAX_PALNES = 3 + 1 };
int iStrmId;
int64_t pts; // Duration in 90Khz clock resolution
uint8_t** ppData; // Null terminated
int32_t* pStride;// Zero terminated
};
HRESULT ProcessTS(IN Operation op, IN uint8_t* pTS, IN uint32_t uiBytes, bool(*cb)(IN const MEDIA_TYPE& mt, IN FRAME& frame, IN PVOID pCtx), IN PVOID pCbCtx)
{
uiBytes -= uiBytes % 188;// align to 188 packet size
struct CONTEXT {
uint8_t* pTS;
uint32_t uiBytes;
int32_t iPos;
} ctx = { pTS, uiBytes, 0 };
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this);
ffmpeg::AVFormatContext* pFmtCtx = 0;
if (0 == (pFmtCtx = ffmpeg::avformat_alloc_context()))
return E_OUTOFMEMORY;
ffmpeg::AVIOContext* pIoCtx = ffmpeg::avio_alloc_context(pTS, uiBytes, 0, &ctx
, [](void *opaque, uint8_t *buf, int buf_size)->int {
auto pCtx = (CONTEXT*)opaque;
int size = pCtx->uiBytes;
if (pCtx->uiBytes - pCtx->iPos < buf_size)
size = pCtx->uiBytes - pCtx->iPos;
if (size > 0) {
memcpy(buf, pCtx->pTS + pCtx->iPos, size);
pCtx->iPos += size;
}
return size;
}
, 0
, [](void* opaque, int64_t offset, int whence)->int64_t {
auto pCtx = (CONTEXT*)opaque;
switch (whence)
{
case SEEK_SET:
pCtx->iPos = offset;
break;
case SEEK_CUR:
pCtx->iPos += offset;
break;
case SEEK_END:
pCtx->iPos = pCtx->uiBytes - offset;
break;
case AVSEEK_SIZE:
return pCtx->uiBytes;
}
return pCtx->iPos;
});
pFmtCtx->pb = pIoCtx;
int iRet = ffmpeg::avformat_open_input(&pFmtCtx, "fakevideo.ts", m_pInputFmt, 0);
if (ERROR_SUCCESS != iRet) {
assert(false);
pFmtCtx = 0;// a user-supplied AVFormatContext will be freed on failure.
return E_FAIL;
}
struct DecodeContext {
ffmpeg::AVStream* pStream;
ffmpeg::AVCodec* pDecoder;
int iFramesProcessed;
};
HRESULT hr = S_OK;
int iStreamsProcessed = 0;
bool bVideoFound = false;
int64_t ptsLast = 0;
int64_t dtsLast = 0;
auto pContext = (DecodeContext*)alloca(sizeof(DecodeContext) * pFmtCtx->nb_streams);
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++) {
assert(pFmtCtx->streams[i]->index == i);
pContext[i].pStream = pFmtCtx->streams[i];
pContext[i].pDecoder = ffmpeg::avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
pContext[i].iFramesProcessed= 0;
if (0 == pContext[i].pDecoder)
continue;
if ((iRet = ffmpeg::avcodec_open2(pFmtCtx->streams[i]->codec, pContext[i].pDecoder, NULL)) < 0) {
_ASSERT(FALSE);
hr = E_FAIL;
goto ErrExit;
}
}
while (S_OK == hr) {
ffmpeg::AVFrame* pFrame = 0;
ffmpeg::AVPacket pkt;
ffmpeg::av_init_packet(&pkt);
if (ERROR_SUCCESS != (iRet = ffmpeg::av_read_frame(pFmtCtx, &pkt))) {
hr = E_FAIL;
break;
}
if ((0 == dtsLast) && (0 != pkt.dts))
dtsLast = pkt.dts;
if ((0 == ptsLast) && (0 != pkt.pts))
ptsLast = pkt.pts;
DecodeContext& ctx = pContext[pkt.stream_index];
if (Operation::DECODE_FIRST_FRAME_OF_EACH_STREAM == op) {
if (iStreamsProcessed == pFmtCtx->nb_streams) {
hr = S_FALSE;
goto Next;
}
if (ctx.iFramesProcessed > 0)
goto Next;
iStreamsProcessed++;
}
if (0 == ctx.pDecoder)
goto Next;
if (0 == (pFrame = ffmpeg::av_frame_alloc())) {
hr = E_OUTOFMEMORY;
goto Next;
}
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x, decode, S:%d, T:%d\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this, pkt.stream_index, ctx.pStream->codec->codec_type);
int bGotFrame = false;
int iBytesUsed = 0;
MEDIA_TYPE mt;
memset(&mt, 0, sizeof(mt));
mt.eType = ctx.pStream->codec->codec_type;
switch (mt.eType) {
case ffmpeg::AVMediaType::AVMEDIA_TYPE_AUDIO:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
// FFMPEG AAC decoder oddity, first call to 'avcodec_decode_audio4' results mute audio where the second result the expected audio
bGotFrame = false;
if ((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (false == bGotFrame)
goto Next;
iBytesUsed = ctx.pStream->codec->frame_size;
mt.format.audio.nChannels = ctx.pStream->codec->channels;
mt.format.audio.nSamplesPerSec = ctx.pStream->codec->sample_rate;
mt.format.audio.wBitsPerSample = ffmpeg::av_get_bytes_per_sample(ctx.pStream->codec->sample_fmt) * 8;
mt.format.audio.nBlockAlign = mt.format.audio.nChannels * mt.format.audio.wBitsPerSample / 8;
mt.format.audio.sSampleFormat = (short)pFrame->format;
break;
case ffmpeg::AVMediaType::AVMEDIA_TYPE_VIDEO:
if ((iRet = ffmpeg::avcodec_decode_video2(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
break;
}
if (false == bGotFrame)
goto Next;
assert(ffmpeg::AVPixelFormat::AV_PIX_FMT_YUV420P == ctx.pStream->codec->pix_fmt);// Thats is the only color space currently supported
iBytesUsed = (ctx.pStream->codec->width * ctx.pStream->codec->height * 3) / 2;
mt.format.video.width = ctx.pStream->codec->width;
mt.format.video.height = ctx.pStream->codec->height;
mt.format.video.colorspace = ctx.pStream->codec->pix_fmt;
mt.format.video.fFPS = (float)ctx.pStream->codec->framerate.num / ctx.pStream->codec->framerate.den;
bVideoFound = true;
break;
default:
goto Next;
}
ctx.iFramesProcessed++;
{
FRAME f = { ctx.pStream->index, ((0 == ptsLast) ? dtsLast : ptsLast), (uint8_t**)pFrame->data, (int32_t*)pFrame->linesize };
if ((iRet > 0) && (false == cb(mt, f, pCbCtx)))
hr = S_FALSE;// Breaks the loop
}
Next:
ffmpeg::av_free_packet(&pkt);
if (0 != pFrame) {
//ffmpeg::av_frame_unref(pFrame);
ffmpeg::av_frame_free(&pFrame);
pFrame = 0;
}
}
ErrExit:
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++)
ffmpeg::avcodec_close(pFmtCtx->streams[i]->codec);
pIoCtx->buffer = 0;// We have allocated the buffer, no need for ffmpeg to free it 4 us
pFmtCtx->pb = 0;
ffmpeg::av_free(pIoCtx);
ffmpeg::avformat_close_input(&pFmtCtx);
ffmpeg::avformat_free_context(pFmtCtx);
return hr;
}
You need to unref the packets before reusing them. And there's no need to allocate and deallocate them all the time.
Here's how I do it which might help you:
// Initialise a packet queue
std::list<AVPacket *> packets;
...
for (int c = 0; c < MAX_PACKETS; c++) {
ff->packets.push_back(av_packet_alloc());
}
while (!quit) {
... get packet from queue
int err = av_read_frame(ff->context, packet);
... process packet (audio, video, etc)
av_packet_unref(packet); // add back to queue for reuse
}
// Release packets
while (ff->packets.size()) { // free packets
AVPacket *packet = ff->packets.front();
av_packet_free(&packet);
ff->packets.pop_front();
}
In your code you've freed a packet which wasn't allocated in the first place.

Cannot submit cmdbuffer using image ...

I'm trying to clear the the screen with a color, but I'm always getting an error
"Cannot submit cmd buffer using image (...) with layout VK_IMAGE_LAYOUT_UNDEFINED when first use is VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL.
And Actully i tried to modify some undefine values to depth_stenci_attachment optimal, but The only thing is I get is more of these errors. So is there a field,that is incorrectly filled, or I forget to fill?
So here is my full main.cpp Because I have no idea where the error could be.
#include<stdio.h>
#include<string.h>
#include<vector>
#define DEBUG
#ifdef _WIN32
#define VK_USE_PLATFORM_WIN32_KHR
#endif
#define KNOCH_JULIA 42
#include"window.h"
using namespace std;
#ifdef DEBUG
#include<iostream>
using namespace std;
VkDebugReportCallbackEXT report;
void init_debug(vulkan *vulk);
PFN_vkCreateDebugReportCallbackEXT fvkCreateDebugReportCallbackEXT = VK_NULL_HANDLE;
PFN_vkDestroyDebugReportCallbackEXT fvkDestroyDebugReportCallbackEXT = VK_NULL_HANDLE;
VKAPI_ATTR VkBool32 VKAPI_CALL callback(VkDebugReportFlagsEXT flag, VkDebugReportObjectTypeEXT obj_t, uint64_t src_obj, size_t loc, int32_t msg_code, const char* layer_pref, const char* msg, void* user_data) {
switch (flag) {
case VK_DEBUG_REPORT_ERROR_BIT_EXT:
cout<<"error!"<<" "<< flag<<" source:"<<src_obj<<"location: "<<loc<<": "<< msg<<endl;
break;
case VK_DEBUG_REPORT_WARNING_BIT_EXT:
cout << "warning!" << obj_t << ": " << msg << endl;
break;
}
return false;
}
void init_debug(vulkan *vulk) {
fvkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(vulk->inst, "vkCreateDebugReportCallbackEXT");
fvkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(vulk->inst, "vkDestroyDebugReportCallbackEXT");
if (nullptr == fvkCreateDebugReportCallbackEXT || nullptr == fvkDestroyDebugReportCallbackEXT) {
exit(-5);
}
VkDebugReportCallbackCreateInfoEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
info.pfnCallback = callback;
info.pNext = nullptr;
info.flags = VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT;
fvkCreateDebugReportCallbackEXT(vulk->inst, &info, nullptr, &report);
}
#endif
FILE *fileptr;
void initInstance(vulkan *vulk){
vector<char*> ext;
ext.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
ext.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.apiVersion = VK_MAKE_VERSION(1, 0, 39);
app_info.engineVersion = VK_MAKE_VERSION(0, 0, 1);
app_info.pApplicationName = "szar";
app_info.pEngineName = "yayitstarts";
app_info.pNext = nullptr;
VkInstanceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
#ifdef DEBUG
vector<char*>layers;
layers.push_back("VK_LAYER_LUNARG_object_tracker");
layers.push_back("VK_LAYER_LUNARG_core_validation");
layers.push_back("VK_LAYER_LUNARG_parameter_validation");
//layers.push_back("VK_LAYER_LUNARG_vktrace");
layers.push_back("VK_LAYER_LUNARG_swapchain");
layers.push_back("VK_LAYER_LUNARG_image");
ext.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
#endif
#ifdef DEBUG
info.enabledLayerCount = layers.size();
info.ppEnabledLayerNames = layers.data();
#else
info.enabledLayerCount = 0;
info.ppEnabledLayerNames = nullptr;
#endif
info.pApplicationInfo = &app_info;
info.enabledExtensionCount = ext.size();
info.ppEnabledExtensionNames = ext.data();
info.flags = 0;
info.pNext = nullptr;
vkCreateInstance(&info, nullptr, &(vulk->inst));
}
void getGPU(vulkan *vulk) {
uint32_t dev_c=0;
vkEnumeratePhysicalDevices(vulk->inst,&dev_c,nullptr);
VkPhysicalDevice *gpus=(VkPhysicalDevice*)malloc(sizeof(VkPhysicalDevice)*dev_c);
vkEnumeratePhysicalDevices(vulk->inst, &dev_c, gpus);
vulk->gpu = gpus[0];
}
void createDevice(vulkan *vulk) {
VkPhysicalDeviceFeatures features;
vkGetPhysicalDeviceFeatures(vulk->gpu, &features);
float prior[] = { 1.0f };
uint32_t prop_c;
vkGetPhysicalDeviceQueueFamilyProperties(vulk->gpu, &prop_c, nullptr);
VkQueueFamilyProperties *props = (VkQueueFamilyProperties*)malloc(sizeof(VkQueueFamilyProperties)*prop_c);
vkGetPhysicalDeviceQueueFamilyProperties(vulk->gpu, &prop_c, props);
uint32_t index = -1;
for (int i = 0; i < prop_c; i++) {
VkBool32 supported;
vkGetPhysicalDeviceSurfaceSupportKHR(vulk->gpu, i, vulk->surface_struct.surface, &supported);
if (props[i].queueFlags&VK_QUEUE_GRAPHICS_BIT&&supported) {
index = i;
}
}
if (index == -1) {
printf("no graphic queue family found");
exit(-1);
}
#ifdef DEBUG
uint32_t count;
vkEnumerateInstanceLayerProperties(&count, nullptr);
vector<VkLayerProperties>layers_access(count);
vkEnumerateInstanceLayerProperties(&count, layers_access.data());
for (int i = 0; i < count; i++) {
printf("%s\n", layers_access[i].layerName);
}
uint32_t dev_count;
vkEnumerateDeviceLayerProperties(vulk->gpu, &dev_count, nullptr);
vector<VkLayerProperties>layers_access_dev(count);
vkEnumerateDeviceLayerProperties(vulk->gpu, &dev_count, layers_access.data());
for (int i = 0; i < dev_count; i++) {
printf("dev: %s\n", layers_access_dev[i].layerName);
}
#endif
vulk->queue_fam_ind = index;
int queue_count=1;
vector<char*> ext;
ext.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
#ifdef DEBUG
vector<char*> layers;
layers.push_back("VK_LAYER_LUNARG_object_tracker");
layers.push_back("VK_LAYER_LUNARG_core_validation");
#endif
VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pQueuePriorities = prior;
queue_info.queueCount = queue_count;
queue_info.queueFamilyIndex = index;
queue_info.flags = 0;
queue_info.pNext = nullptr;
VkDeviceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
#ifdef DEBUG
info.enabledLayerCount = layers.size();
info.ppEnabledLayerNames = layers.data();
#else
info.enabledLayerCount = 0;
info.ppEnabledLayerNames = nullptr;
#endif
info.pEnabledFeatures = &features;
info.enabledExtensionCount = ext.size();
info.ppEnabledExtensionNames = ext.data();
info.pQueueCreateInfos = &queue_info;
info.queueCreateInfoCount = 1;
info.pNext = nullptr;
if (VK_SUCCESS != vkCreateDevice(vulk->gpu, &info, nullptr, &(vulk->device))) {
exit(-1);
}
vulk->queue = (VkQueue*)malloc(sizeof(VkQueue)*queue_count);
vkGetDeviceQueue(vulk->device, index, 0, &(vulk->queue[0]));
}
void createSwapchain(vulkan *vulk) {
VkSurfaceCapabilitiesKHR capabilities;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vulk->gpu, vulk->surface_struct.surface,&capabilities);
uint32_t format_c;
vkGetPhysicalDeviceSurfaceFormatsKHR(vulk->gpu, vulk->surface_struct.surface, &format_c, nullptr);
VkSurfaceFormatKHR *formats = (VkSurfaceFormatKHR*)malloc(sizeof(VkSurfaceFormatKHR)*format_c);
vkGetPhysicalDeviceSurfaceFormatsKHR(vulk->gpu, vulk->surface_struct.surface, &format_c, formats);
uint32_t pres_mode_c;
vkGetPhysicalDeviceSurfacePresentModesKHR(vulk->gpu, vulk->surface_struct.surface, &pres_mode_c, nullptr);
VkPresentModeKHR *pres_modes = (VkPresentModeKHR*)malloc(sizeof(VkPresentModeKHR)*pres_mode_c);
vkGetPhysicalDeviceSurfacePresentModesKHR(vulk->gpu, vulk->surface_struct.surface, &pres_mode_c, pres_modes);
int pres_mode_i = 0;
for (int i = 0; i < pres_mode_c; i++) {
if (pres_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR) {
pres_mode_i = i;
}
}
vulk->surface_struct.extent = capabilities.currentExtent;
vulk->image.color_format= formats[0].format;
VkSwapchainCreateInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
info.clipped = VK_TRUE;
info.compositeAlpha = (VkCompositeAlphaFlagBitsKHR)capabilities.supportedCompositeAlpha;
info.flags = 0;
info.imageArrayLayers=1;
info.imageColorSpace = formats[0].colorSpace;
info.imageExtent = capabilities.currentExtent;
info.imageFormat = formats[0].format;
info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
info.minImageCount = capabilities.minImageCount;
info.oldSwapchain =VK_NULL_HANDLE;
info.pNext = nullptr;
info.pQueueFamilyIndices =&(vulk->queue_fam_ind);
info.presentMode = pres_modes[pres_mode_i];
info.preTransform = capabilities.currentTransform;
info.queueFamilyIndexCount = 1;
info.surface = vulk->surface_struct.surface;
VkResult not_VK_SUCCESS = vkCreateSwapchainKHR(vulk->device, &info, nullptr, &(vulk->swapchain_struct.swapchain));
if (not_VK_SUCCESS != VK_SUCCESS) {
exit(-1);
}
vulk->swapchain_struct.format = formats[0].format;
}
void createImages(vulkan *vulk,Memory *depth_img_memory) {
uint32_t img_c;
vkGetSwapchainImagesKHR(vulk->device, vulk->swapchain_struct.swapchain, &img_c, nullptr);
vulk->image.color_images = (VkImage*)malloc(sizeof(VkImage)*img_c);
vkGetSwapchainImagesKHR(vulk->device, vulk->swapchain_struct.swapchain, &img_c, vulk->image.color_images);
vulk->image_c = img_c;
vulk->image.depth_images = (VkImage*)malloc(sizeof(VkImage));
vulk->image.color_image_views=(VkImageView*)malloc(sizeof(VkImageView)*img_c);
vulk->image.depth_image_views=(VkImageView*)malloc(sizeof(VkImageView));
VkComponentMapping mapping = {};
mapping.r = VK_COMPONENT_SWIZZLE_R;
mapping.g = VK_COMPONENT_SWIZZLE_G;
mapping.b = VK_COMPONENT_SWIZZLE_B;
mapping.a = VK_COMPONENT_SWIZZLE_A;
vulk->image.color_range = (VkImageSubresourceRange*)malloc(sizeof(VkImageSubresourceRange)*img_c);
VkImageSubresourceRange range = {};
range.aspectMask =VK_IMAGE_ASPECT_COLOR_BIT;
range.baseArrayLayer = 0;
range.baseMipLevel = 0;
range.layerCount = 1;
range.levelCount = 1;
for (int i = 0; i < img_c; i++) {
VkImageViewCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
info.components = mapping;
info.flags = 0;
info.format = vulk->swapchain_struct.format;
info.image = (vulk->image.color_images)[i];
info.pNext = nullptr;
info.subresourceRange = range;
info.viewType = VK_IMAGE_VIEW_TYPE_2D;
vulk->image.color_range[i] = range;
vkCreateImageView(vulk->device, &info, nullptr, &(vulk->image.color_image_views)[i]);
}
vulk->image.depth_range = (VkImageSubresourceRange*)malloc(sizeof(VkImageSubresourceRange));
vulk->image.depth_range[0] = range;
vector<VkFormat> depth_formats{
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_D32_SFLOAT,
VK_FORMAT_D24_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM
};
VkFormat depth_format;
for (int i = 0; i < depth_formats.size(); i++) {
VkFormatProperties props;
vkGetPhysicalDeviceFormatProperties(vulk->gpu, depth_formats[i], &props);
if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
depth_format = depth_formats[i];
break;
}
}
vulk->image.depth_format = depth_format;
VkImageCreateInfo img_info = {};
img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
img_info.arrayLayers = 1;
img_info.extent.width = vulk->surface_struct.extent.width;
img_info.extent.height = vulk->surface_struct.extent.height;
img_info.extent.depth = 1;
img_info.flags = 0;
img_info.format = depth_format;
img_info.imageType = VK_IMAGE_TYPE_2D;
img_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img_info.mipLevels = 1;
img_info.pNext = nullptr;
img_info.pQueueFamilyIndices = &(vulk->queue_fam_ind);
img_info.queueFamilyIndexCount = 1;
img_info.samples = VK_SAMPLE_COUNT_1_BIT;
img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
img_info.tiling = VK_IMAGE_TILING_OPTIMAL;
img_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
if (VK_SUCCESS!=vkCreateImage(vulk->device, &img_info, nullptr, &(vulk->image.depth_images)[0])) {
printf("It not works");
}
VkMemoryRequirements req;
vkGetImageMemoryRequirements(vulk->device, (vulk->image.depth_images)[0], &req);
vkGetPhysicalDeviceMemoryProperties(vulk->gpu, &(depth_img_memory->props));
uint32_t mem_index=-2;
for (int i = 0; i < depth_img_memory->props.memoryTypeCount; i++) {
if (req.memoryTypeBits & (1 << i)) {
if ((depth_img_memory->props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
mem_index = i;
break;
}
}
}
if (mem_index == -2) {
printf("no supported memorytype");
exit(-2);
}
VkMemoryAllocateInfo mem_info = {};
mem_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_info.pNext = nullptr;
mem_info.allocationSize = req.size;
mem_info.memoryTypeIndex = mem_index;
vkAllocateMemory(vulk->device, &mem_info, nullptr, &(depth_img_memory->dev_mem));
vkBindImageMemory(vulk->device, (vulk->image.depth_images)[0], depth_img_memory->dev_mem, 0);
VkComponentMapping mapping_d = {
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
};
range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
VkImageViewCreateInfo img_view_info_d = {};
img_view_info_d.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
img_view_info_d.components = mapping_d;
img_view_info_d.flags = 0;
img_view_info_d.format = depth_format;
img_view_info_d.image = (vulk->image.depth_images)[0];
img_view_info_d.pNext = nullptr;
img_view_info_d.subresourceRange = range;
img_view_info_d.viewType = VK_IMAGE_VIEW_TYPE_2D;
if (VK_SUCCESS != vkCreateImageView(vulk->device, &img_view_info_d, nullptr, &(vulk->image.depth_image_views)[0])) {
printf("huge pile of shit!!!");
exit(-1);
}
}
void createCommandPool(vulkan vulk,cmd_pool *pool, uint32_t cmd_buff_c) {
VkCommandPoolCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT|VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
info.pNext = nullptr;
info.queueFamilyIndex = vulk.queue_fam_ind;
vkCreateCommandPool(vulk.device, &info, nullptr, &(pool->pool));
pool->cmd_buff_c = cmd_buff_c;
pool->cmd_buffs = (VkCommandBuffer*)malloc(sizeof(VkCommandBuffer) * cmd_buff_c);
VkCommandBufferAllocateInfo cmd_info = {};
cmd_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd_info.commandPool = pool->pool;
cmd_info.commandBufferCount = cmd_buff_c ;
cmd_info.pNext = nullptr;
vkAllocateCommandBuffers(vulk.device, &cmd_info, pool->cmd_buffs);
}
VkClearValue *clear;
void createFramebuffer(vulkan *vulk,VkExtent2D extent) {
vulk->fbo = (VkFramebuffer*)malloc(sizeof(VkFramebuffer)*vulk->image_c);
for (int i = 0; i < vulk->image_c; i++) {
VkImageView *img_views = (VkImageView*)malloc(sizeof(VkImageView) * 2);
img_views[0] = vulk->image.color_image_views[i];
img_views[1] = vulk->image.depth_image_views[0];
VkFramebufferCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
info.attachmentCount = 2;
info.pAttachments = img_views;
info.width = extent.height;
info.height = extent.width;
info.layers = 1;
info.renderPass = vulk->render_pass;
info.flags = 0;
info.pNext = nullptr;
if (VK_SUCCESS != vkCreateFramebuffer(vulk->device, &info, nullptr, &(vulk->fbo[i]))) {
printf("could not create framebuffer");
}
}
}
VkSemaphore *semaphores;
void createSemaphore(vulkan *vulk ,VkSemaphore *semaphore) {
VkSemaphoreCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
info.pNext = nullptr;
info.flags = 0;
vkCreateSemaphore(vulk->device, &info, nullptr, semaphore);
}
void createRenderPass(vulkan *vulk) {
VkAttachmentDescription *descr = (VkAttachmentDescription*)malloc(sizeof(VkAttachmentDescription) * 2);
VkAttachmentDescription color_descr = {};
color_descr.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
color_descr.format = vulk->image.color_format;
color_descr.samples = VK_SAMPLE_COUNT_1_BIT;
color_descr.initialLayout= VK_IMAGE_LAYOUT_UNDEFINED;
color_descr.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
color_descr.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
color_descr.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
color_descr.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
color_descr.flags = 0;
VkAttachmentDescription depth_descr = {};
depth_descr.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depth_descr.format = vulk->image.depth_format;
depth_descr.samples = VK_SAMPLE_COUNT_1_BIT;
depth_descr.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
depth_descr.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depth_descr.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
depth_descr.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
depth_descr.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
depth_descr.flags = 0;
*descr = color_descr;
*(descr + 1) = depth_descr;
VkAttachmentReference color_ref = {};
color_ref.attachment = 0;
color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentReference depth_ref = {};
depth_ref.attachment = 1;
depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkSubpassDescription subp_descr = {};
subp_descr.colorAttachmentCount = 1;
subp_descr.pColorAttachments=&color_ref;
subp_descr.pDepthStencilAttachment = &depth_ref;
subp_descr.inputAttachmentCount = 0;
subp_descr.pInputAttachments = nullptr;
subp_descr.preserveAttachmentCount = 0;
subp_descr.pPreserveAttachments = nullptr;
subp_descr.pResolveAttachments = nullptr;
subp_descr.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subp_descr.flags = 0;
VkRenderPassCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
info.attachmentCount = 2;
info.pAttachments = descr;
info.dependencyCount = 0;
info.pDependencies = VK_NULL_HANDLE;
info.subpassCount = 1;
info.pSubpasses = &subp_descr;
info.flags = 0;
info.pNext = nullptr;
if (VK_SUCCESS != vkCreateRenderPass(vulk->device, &info, nullptr, &(vulk->render_pass))) {
printf("Could not create render pass.");
}
}
VkFence fence;
void createFences(vulkan *vulk) {
VkFenceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
info.pNext = nullptr;
info.flags = 0;
vkCreateFence(vulk->device, &info, nullptr, &fence);
}
int main(int argc,char** argv) {
vulkan vulk;
Memory depth_memory;
cmd_pool pool;
initInstance(&vulk);
getGPU(&vulk);
Window window = Window();
window.open(&vulk);
createDevice(&vulk);
VkViewport viewport = {};
viewport.width = window.extent.width;
viewport.height = window.extent.height;
viewport.x = 0;
viewport.y = 0;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
init_debug(&vulk);
createSwapchain(&vulk);
createImages(&vulk, &depth_memory);
createRenderPass(&vulk);
createFramebuffer(&vulk,window.extent);
semaphores= (VkSemaphore*) malloc(sizeof(VkSemaphore)*2);
createSemaphore(&vulk, &semaphores[0]);
createSemaphore(&vulk, &semaphores[1]);
createFences(&vulk);
createCommandPool(vulk,&pool,2);
uint32_t img_pres;
VkResult result;
VkPresentInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
info.pImageIndices = &img_pres;
info.pResults = &result;
info.swapchainCount = 1;
info.pSwapchains = &vulk.swapchain_struct.swapchain;
info.waitSemaphoreCount =0;
info.pWaitSemaphores = nullptr;
info.pNext = nullptr;
VkCommandBufferBeginInfo beg = {};
beg.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beg.pInheritanceInfo = nullptr;
beg.pNext = nullptr;
beg.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
VkClearValue val[2];
val[1] = { 0.0f,1.0f,1.0f,1.0f };
val[0] = { 0.0f,0 };
VkRenderPassBeginInfo render = {};
render.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render.clearValueCount = 2;
render.framebuffer = vulk.fbo[0];
render.pClearValues = val;
render.pNext = nullptr;
render.renderArea.offset = { 0,0 };
render.renderArea.extent = { window.extent.height, window.extent.width };
render.renderPass = vulk.render_pass;
vkBeginCommandBuffer(pool.cmd_buffs[0], &beg);
vkCmdBeginRenderPass(pool.cmd_buffs[0], &render, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(pool.cmd_buffs[0]);
vkEndCommandBuffer(pool.cmd_buffs[0]);
VkRenderPassBeginInfo render_2 = {};
render_2.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_2.clearValueCount = 2;
render_2.framebuffer = vulk.fbo[1];
render_2.pClearValues = val;
render_2.pNext = nullptr;
render_2.renderArea = { 0,0,window.extent.height,window.extent.width };
render_2.renderPass = vulk.render_pass;
vkBeginCommandBuffer(pool.cmd_buffs[1], &beg);
vkCmdBeginRenderPass(pool.cmd_buffs[1], &render_2, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(pool.cmd_buffs[1]);
vkEndCommandBuffer(pool.cmd_buffs[1]);
VkSubmitInfo sub = {};
sub.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
sub.commandBufferCount = 1;
sub.pNext = nullptr;
sub.pSignalSemaphores = nullptr;
sub.pWaitDstStageMask = nullptr;
sub.pWaitSemaphores = nullptr;
sub.signalSemaphoreCount = 0;
sub.waitSemaphoreCount = 0;
VkResult res=VK_ERROR_DEVICE_LOST;
sub.pCommandBuffers = &pool.cmd_buffs[0];
vkQueueSubmit(vulk.queue[0], 1, &sub, VK_NULL_HANDLE);
sub.pCommandBuffers = &pool.cmd_buffs[1];
vkQueueSubmit(vulk.queue[0], 1, &sub, VK_NULL_HANDLE);
while (window.running) {
if (VK_SUCCESS != vkAcquireNextImageKHR(vulk.device, vulk.swapchain_struct.swapchain, UINT64_MAX, VK_NULL_HANDLE, fence, &img_pres)) {
return -2;
}
vkWaitForFences(vulk.device, 1, &fence, VK_TRUE, UINT64_MAX);
vkResetFences(vulk.device, 1, &fence);vkQueueWaitIdle(vulk.queue[0]);
sub.pCommandBuffers = &pool.cmd_buffs[img_pres];
if (res == vkQueueSubmit(vulk.queue[0], 1, &sub, VK_NULL_HANDLE)) {
printf("img: %d\n",res);
}
cout << hex << vulk.image.depth_images[0] << endl;
vkQueuePresentKHR(vulk.queue[0], &info);
window.run();
}
return 0;
}
1) Update your SDK. There is even no VK_LAYER_LUNARG_image layer since 1.0.42.0. And you know, there was probably a bunchload of bugfixes.
2) You are enabling layers in wrong order. Use the VK_LAYER_LUNARG_standard_validation meta-layer instead of doing it manually (also avoids problem in 1 trying to use obsoleted layer).
3) I see many errors in your code. Layers do not necessarily have full coverage yet (and another uncought error may cause to show another error down the road that does not make sense without the context).
E.g. no synchronization (your semaphores are unused there), many memory leaks (due to C style programming), assuming there will be at least two swapchain images, not checking VkResults...
4) I cannot reproduce it with your code. Firstly I get a problem with having only one swapchain image and the code not expecting it (mentioned in 3). After fixing that I get error about vkAcquireNextImageKHR getting more images than allowed (driver layer bug if using VkPresentInfoKHR::pResults). Workarounding that I get no error messages.
I haven't checked your code exhaustively, but the renderpass says that both attachments are initially in UNDEFINED layout, transition to COLOR_ATTACHMENT_OPTIMAL/DEPTH_STENCIL_ATTACHMENT_OPTIMAL, and then finally to PRESENT_SRC/DEPTH_STENCIL_ATTACHMENT_OPTIMAL. That seems correct, and the validation layers appear to be ignoring the renderpass initialLayout settings. If you're seeing this on the most recent version of the SDK, please file a bug at https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers.

CoreAudio Stuff From C++ to Swift

How to convert the code below to swift? Someone help.
I want to convert this cpp code to swift, from project FreeStreamer.
But some C++ struct and some C callback drive me crazy.
Help.
Here is the code from audio_stream.h and audio_stream.cpp
// var
queued_packet_t *m_queuedHead;
queued_packet_t *m_queuedTail;
queued_packet_t *m_playPacket;
std::list <queued_packet_t*> m_processedPackets;
//struct
typedef struct queued_packet {
UInt64 identifier;
AudioStreamPacketDescription desc;
struct queued_packet *next;
char data[];
} queued_packet_t;
//function one
OSStatus Audio_Stream::encoderDataCallback(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData) {
Audio_Stream *THIS = (Audio_Stream *)inUserData;
pthread_mutex_trylock(&THIS->m_packetQueueMutex);
// Dequeue one packet per time for the decoder
queued_packet_t *front = THIS->m_playPacket;
if (!front) {
/* Don't deadlock */
AS_LOCK_TRACE("encoderDataCallback 2: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
pthread_mutex_trylock(&THIS->m_streamStateMutex);
THIS->m_converterRunOutOfData = true;
pthread_mutex_unlock(&THIS->m_streamStateMutex);
*ioNumberDataPackets = 0;
ioData->mBuffers[0].mDataByteSize = 0;
return noErr;
}
*ioNumberDataPackets = 1;
ioData->mBuffers[0].mData = front->data;
ioData->mBuffers[0].mDataByteSize = front->desc.mDataByteSize;
ioData->mBuffers[0].mNumberChannels = THIS->m_srcFormat.mChannelsPerFrame;
if (outDataPacketDescription) {
*outDataPacketDescription = &front->desc;
}
THIS->m_playPacket = front->next;
THIS->m_processedPackets.push_front(front);
AS_LOCK_TRACE("encoderDataCallback 5: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
return noErr;
}
//function two
void Audio_Stream::streamDataCallback(void *inClientData, UInt32 inNumberBytes, UInt32 inNumberPackets, const void *inInputData, AudioStreamPacketDescription *inPacketDescriptions) {
AS_TRACE("%s: inNumberBytes %u, inNumberPackets %u\n", __FUNCTION__, (unsigned int)inNumberBytes, (unsigned int)inNumberPackets);
Audio_Stream *THIS = static_cast<Audio_Stream*>(inClientData);
if (!THIS->m_audioStreamParserRunning) {
AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
return;
}
for (int i = 0; i < inNumberPackets; i++) {
/* Allocate the packet */
UInt32 size = inPacketDescriptions[i].mDataByteSize;
queued_packet_t *packet = (queued_packet_t *)malloc(sizeof(queued_packet_t) + size);
packet->identifier = THIS->m_packetIdentifier;
// If the stream didn't provide bitRate (m_bitRate == 0), then let's calculate it
if (THIS->m_bitRate == 0 && THIS->m_bitrateBufferIndex < kAudioStreamBitrateBufferSize) {
// Only keep sampling for one buffer cycle; this is to keep the counters (for instance) duration
// stable.
THIS->m_bitrateBuffer[THIS->m_bitrateBufferIndex++] = 8 * inPacketDescriptions[i].mDataByteSize / THIS->m_packetDuration;
if (THIS->m_bitrateBufferIndex == kAudioStreamBitrateBufferSize) {
if (THIS->m_delegate) {
THIS->m_delegate->bitrateAvailable();
}
}
}
AS_LOCK_TRACE("streamDataCallback: lock\n");
pthread_mutex_trylock(&THIS->m_packetQueueMutex);
/* Prepare the packet */
packet->next = NULL;
packet->desc = inPacketDescriptions[i];
packet->desc.mStartOffset = 0;
memcpy(packet->data, (const char *)inInputData + inPacketDescriptions[i].mStartOffset,
size);
if (THIS->m_queuedHead == NULL) {
THIS->m_queuedHead = THIS->m_queuedTail = THIS->m_playPacket = packet;
} else {
THIS->m_queuedTail->next = packet;
THIS->m_queuedTail = packet;
}
THIS->m_cachedDataSize += size;
THIS->m_packetIdentifier++;
AS_LOCK_TRACE("streamDataCallback: unlock\n");
pthread_mutex_unlock(&THIS->m_packetQueueMutex);
}
THIS->determineBufferingLimits();
}
All the FreeStreamer project has been rewrite wieth Swift 3.0 in here FreePlayer
Answer can be found here AudioStream

Encoding AAC with ffmpeg (c++)

I'm working on video encoding that will be used in a Unity plugin. I have made image encoding work, but now I'm at the audio. So trying only with the audio in to a mp4 file with AAC encoding. And I'm stuck. The resulting file does not contain anything. Also, from what I understand, AAC in ffmpeg only supports AV_SAMPLE_FMT_FLTP, that's why I use it. Here's my code:
Setup:
int initialize_encoding_audio(const char *filename)
{
int ret;
AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
avcodec_register_all();
av_register_all();
aud_codec = avcodec_find_encoder(aud_codec_id);
avcodec_register(aud_codec);
if (!aud_codec)
return COULD_NOT_FIND_AUD_CODEC;
aud_codec_context = avcodec_alloc_context3(aud_codec);
if (!aud_codec_context)
return CONTEXT_CREATION_ERROR;
aud_codec_context->bit_rate = 192000;
aud_codec_context->sample_rate = select_sample_rate(aud_codec);
aud_codec_context->sample_fmt = sample_fmt;
aud_codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
aud_codec_context->channels = av_get_channel_layout_nb_channels(aud_codec_context->channel_layout);
aud_codec_context->codec = aud_codec;
aud_codec_context->codec_id = aud_codec_id;
ret = avcodec_open2(aud_codec_context, aud_codec, NULL);
if (ret < 0)
return COULD_NOT_OPEN_AUD_CODEC;
outctx = avformat_alloc_context();
ret = avformat_alloc_output_context2(&outctx, NULL, "mp4", filename);
outctx->audio_codec = aud_codec;
outctx->audio_codec_id = aud_codec_id;
audio_st = avformat_new_stream(outctx, aud_codec);
audio_st->codecpar->bit_rate = aud_codec_context->bit_rate;
audio_st->codecpar->sample_rate = aud_codec_context->sample_rate;
audio_st->codecpar->channels = aud_codec_context->channels;
audio_st->codecpar->channel_layout = aud_codec_context->channel_layout;
audio_st->codecpar->codec_id = aud_codec_id;
audio_st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
audio_st->codecpar->format = sample_fmt;
audio_st->codecpar->frame_size = aud_codec_context->frame_size;
audio_st->codecpar->block_align = aud_codec_context->block_align;
audio_st->codecpar->initial_padding = aud_codec_context->initial_padding;
outctx->streams = new AVStream*[1];
outctx->streams[0] = audio_st;
av_dump_format(outctx, 0, filename, 1);
if (!(outctx->oformat->flags & AVFMT_NOFILE))
{
if (avio_open(&outctx->pb, filename, AVIO_FLAG_WRITE) < 0)
return COULD_NOT_OPEN_FILE;
}
ret = avformat_write_header(outctx, NULL);
aud_frame = av_frame_alloc();
aud_frame->nb_samples = aud_codec_context->frame_size;
aud_frame->format = aud_codec_context->sample_fmt;
aud_frame->channel_layout = aud_codec_context->channel_layout;
int buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
aud_codec_context->sample_fmt, 0);
av_frame_get_buffer(aud_frame, buffer_size / aud_codec_context->channels);
if (!aud_frame)
return COULD_NOT_ALLOCATE_FRAME;
aud_frame_counter = 0;
return 0;
}
Encoding:
int encode_audio_samples(uint8_t **aud_samples)
{
int ret;
int buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
aud_codec_context->sample_fmt, 0);
for (size_t i = 0; i < buffer_size / aud_codec_context->channels; i++)
{
aud_frame->data[0][i] = aud_samples[0][i];
aud_frame->data[1][i] = aud_samples[1][i];
}
aud_frame->pts = aud_frame_counter++;
ret = avcodec_send_frame(aud_codec_context, aud_frame);
if (ret < 0)
return ERROR_ENCODING_SAMPLES_SEND;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
fflush(stdout);
while (true)
{
ret = avcodec_receive_packet(aud_codec_context, &pkt);
if (!ret)
{
av_packet_rescale_ts(&pkt, aud_codec_context->time_base, audio_st->time_base);
pkt.stream_index = audio_st->index;
av_write_frame(outctx, &pkt);
av_packet_unref(&pkt);
}
if (ret == AVERROR(EAGAIN))
break;
else if (ret < 0)
return ERROR_ENCODING_SAMPLES_RECEIVE;
else
break;
}
return 0;
}
Finish encoding:
int finish_audio_encoding()
{
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
fflush(stdout);
int ret = avcodec_send_frame(aud_codec_context, NULL);
if (ret < 0)
return ERROR_ENCODING_FRAME_SEND;
while (true)
{
ret = avcodec_receive_packet(aud_codec_context, &pkt);
if (!ret)
{
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, aud_codec_context->time_base, audio_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, aud_codec_context->time_base, audio_st->time_base);
av_write_frame(outctx, &pkt);
av_packet_unref(&pkt);
}
if (ret == -AVERROR(AVERROR_EOF))
break;
else if (ret < 0)
return ERROR_ENCODING_FRAME_RECEIVE;
}
av_write_trailer(outctx);
}
Main:
void get_audio_frame(float_t *left_samples, float_t *right_samples, int frame_size, float* t, float* tincr, float* tincr2)
{
int j, i;
float v;
for (j = 0; j < frame_size; j++)
{
v = sin(*t);
*left_samples = v;
*right_samples = v;
left_samples++;
right_samples++;
*t += *tincr;
*tincr += *tincr2;
}
}
int main()
{
int frame_rate = 30; // this should be like 96000 / 1024 or somthing i guess?
float t, tincr, tincr2;
initialize_encoding_audio("audio.mp4");
int sec = 50;
float_t** aud_samples;
int src_samples_linesize;
int src_nb_samples = 1024;
int src_channels = 2;
int ret = av_samples_alloc_array_and_samples((uint8_t***)&aud_samples, &src_samples_linesize, src_channels,
src_nb_samples, AV_SAMPLE_FMT_FLTP, 0);
t = 0;
tincr = 0;
tincr2 = 0;
for (size_t i = 0; i < frame_rate * sec; i++)
{
get_audio_frame(aud_samples[0], aud_samples[1], src_nb_samples, &t, &tincr, &tincr2);
encode_audio_samples((uint8_t **)aud_samples);
}
finish_audio_encoding();
//cleanup();
return 0;
}
I guess the first thing that I would want to make sure I got right is the synthetic sound generation and how I transfer that to the AVFrame. Are my conversions correct? But feel free to point out anything that might be wrong.
Thanks in advance!
Edit: the whole source: http://pastebin.com/jYtmkhek
Edit2: Added initialization of tincr & tincr2
Unless I'm missing something from the pastebin, you forgot to initialize a few variables. You're using garbage to generate your samples.
float t, tincr, tincr2;
[...]
get_audio_frame(aud_samples[0], aud_samples[1], src_nb_samples, &t, &tincr, &tincr2);
You probably want to start with t=0 and increment by 2 * PI * frequency / sample rate for a sine wave.
Also, avformat_new_stream() creates the stream for you, don't do it with new.
Update:
I removed all the c++ stuff to test this. Here's the code that works: pastebin
And here's the resulting file: audio.mp4
ffmpeg -i audio.mp4 -filter_complex "showwaves=s=640x120:mode=line:colors=white" -frames:v 1 wave.jpg
Diff:
1,6d0
< #include "encoder.h"
< #include <algorithm>
< #include <iterator>
<
< extern "C"
< {
14a9
> #include <math.h>
40,41c35,36
< SwsContext *sws_ctx;
< SwrContext *swr_ctx = NULL;
---
> struct SwsContext *sws_ctx;
> struct SwrContext *swr_ctx = NULL;
76,77c71,72
< AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
< AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
---
> enum AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
> enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
125,126c120,121
< outctx->streams = new AVStream*[1];
< outctx->streams[0] = audio_st;
---
> //outctx->streams = new AVStream*[1];
> //outctx->streams[0] = audio_st;
182c177
< while (true)
---
> while (1)
216c211
< while (true)
---
> while (1)
291c286
< float t, tincr, tincr2;
---
> float t = 0, tincr = 2 * M_PI * 440.0 / 96000, tincr2 = 0;
317d311
< }

Some new version of opencv_performance for opencv_traincascade?

I have noted that the cascades trained with the program opencv_traincascade does not run with the current version of opencv_performance. I've tried to convert the old performance cpp file to load the new types of cascades, but without success. The code is here:
#include "cv.h"
#include "highgui.h"
#include <cstdio>
#include <cmath>
#include <ctime>
#include <math.h>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#ifndef PATH_MAX
#define PATH_MAX 512
#endif /* PATH_MAX */
/*typedef struct HidCascade {
int size;
int count;
} HidCascade;
*/
typedef struct ObjectPos {
float x;
float y;
float width;
int found; /* for reference */
int neghbors;
} ObjectPos;
using namespace std;
using namespace cv;
int main(int argc, char* argv[]) {
int i, j;
char* classifierdir = NULL;
//char* samplesdir = NULL;
int saveDetected = 1;
double scale_factor = 1.1;
float maxSizeDiff = 1.5F;
float maxPosDiff = 1.1F;
/* number of stages. if <=0 all stages are used */
//int nos = -1, nos0;
int width = 25;
int height = 15;
int rocsize;
FILE* info;
FILE* resultados;
char* infoname;
char fullname[PATH_MAX];
//char detfilename[PATH_MAX];
char* filename;
//char detname[] = "det-";
CascadeClassifier cascade;
double totaltime;
if (!(resultados = fopen("resultados.txt", "w"))) {
printf("Cannot create results file.\n");
exit(-1);
}
infoname = (char*) "";
rocsize = 20;
if (argc == 1) {
printf("Usage: %s\n -data <classifier_directory_name>\n"
" -info <collection_file_name>\n"
" [-maxSizeDiff <max_size_difference = %f>]\n"
" [-maxPosDiff <max_position_difference = %f>]\n"
" [-sf <scale_factor = %f>]\n"
" [-ni]\n"
" [-rs <roc_size = %d>]\n"
" [-w <sample_width = %d>]\n"
" [-h <sample_height = %d>]\n", argv[0], maxSizeDiff,
maxPosDiff, scale_factor, rocsize, width, height);
return 0;
}
for (i = 1; i < argc; i++) {
if (!strcmp(argv[i], "-data")) {
classifierdir = argv[++i];
} else if (!strcmp(argv[i], "-info")) {
infoname = argv[++i];
} else if (!strcmp(argv[i], "-maxSizeDiff")) {
maxSizeDiff = (float) atof(argv[++i]);
} else if (!strcmp(argv[i], "-maxPosDiff")) {
maxPosDiff = (float) atof(argv[++i]);
} else if (!strcmp(argv[i], "-sf")) {
scale_factor = atof(argv[++i]);
} else if (!strcmp(argv[i], "-ni")) {
saveDetected = 0;
} else if (!strcmp(argv[i], "-rs")) {
rocsize = atoi(argv[++i]);
} else if (!strcmp(argv[i], "-w")) {
width = atoi(argv[++i]);
} else if (!strcmp(argv[i], "-h")) {
height = atoi(argv[++i]);
}
}
if (!cascade.load(classifierdir)) {
printf("Unable to load classifier from %s\n", classifierdir);
return 1;
}
strcpy(fullname, infoname);
filename = strrchr(fullname, '\\');
if (filename == NULL) {
filename = strrchr(fullname, '/');
}
if (filename == NULL) {
filename = fullname;
} else {
filename++;
}
info = fopen(infoname, "r");
totaltime = 0.0;
if (info != NULL) {
int x, y, width, height;
Mat img;
int hits, missed, falseAlarms;
int totalHits, totalMissed, totalFalseAlarms;
int found;
float distance;
int refcount;
ObjectPos* ref;
int detcount;
ObjectPos* det;
int error = 0;
int* pos;
int* neg;
pos = (int*) cvAlloc(rocsize * sizeof(*pos));
neg = (int*) cvAlloc(rocsize * sizeof(*neg));
for (i = 0; i < rocsize; i++) {
pos[i] = neg[i] = 0;
}
printf("+================================+======+======+======+\n");
printf("| File Name | Hits |Missed| False|\n");
printf("+================================+======+======+======+\n");
fprintf(resultados,
"+================================+======+======+======+\n");
fprintf(resultados,
"| File Name | Hits |Missed| False|\n");
fprintf(resultados,
"+================================+======+======+======+\n");
//fprintf (resultados, "%d\n",framesCnt);
totalHits = totalMissed = totalFalseAlarms = 0;
while (!feof(info)) {
fscanf(info, "%s %d", filename, &refcount);
img = imread(fullname);
if (!img.data) {
cout << "ow" << endl;
return -1;
}
ref = (ObjectPos*) cvAlloc(refcount * sizeof(*ref));
for (i = 0; i < refcount; i++) {
error = (fscanf(info, "%d %d %d %d", &x, &y, &width, &height)
!= 4);
if (error)
break;
ref[i].x = 0.5F * width + x;
ref[i].y = 0.5F * height + y;
ref[i].width = sqrt(0.5F * (width * width + height * height));
ref[i].found = 0;
ref[i].neghbors = 0; //in the new cascade, where to get the neighbors?
}
vector<Rect> obj_detectados;
Rect retang;
if (!error) {
totaltime -= time(0);
cascade.detectMultiScale(img, obj_detectados, scale_factor, 4, 0
//|CV_HAAR_FIND_BIGGEST_OBJECT
// |CV_HAAR_DO_ROUGH_SEARCH
| CV_HAAR_SCALE_IMAGE, Size(25, 15));
totaltime += time(0);
if (obj_detectados.size() == 0) {
detcount = 0;
} else {
detcount = obj_detectados.size();
}
det = (detcount > 0) ?
((ObjectPos*) cvAlloc(detcount * sizeof(*det))) : NULL;
hits = missed = falseAlarms = 0;
for (vector<Rect>::const_iterator r = obj_detectados.begin();
r != obj_detectados.end(); r++, i++) {
Point r1, r2;
r1.x = (r->x);
r1.y = (r->y);
r2.x = (r->x + r->width);
r2.y = (r->y + r->height);
retang.x = r1.x;
retang.y = r1.y;
retang.width = abs(r2.x - r1.x);
retang.height = abs(r2.y - r1.y);
if (saveDetected) {
rectangle(img, retang, Scalar(0, 0, 255), 3, CV_AA);
}
det[i].x = 0.5F*r->width + r->x;
det[i].y = 0.5F*r->height + r->y;
det[i].width = sqrt(0.5F * (r->width * r->width
+ r->height * r->height));
det[i].neghbors = 1; // i don't know if it will work...
// det[i].neghbors = r.neighbors; --- how to do it in the new version??
found = 0;
for (j = 0; j < refcount; j++) {
distance = sqrtf( (det[i].x - ref[j].x) * (det[i].x - ref[j].x) +
(det[i].y - ref[j].y) * (det[i].y - ref[j].y) );
//cout << distance << endl;
if( (distance < ref[j].width * maxPosDiff) &&
(det[i].width > ref[j].width / maxSizeDiff) &&
(det[i].width < ref[j].width * maxSizeDiff) )
{
ref[j].found = 1;
ref[j].neghbors = MAX( ref[j].neghbors, det[i].neghbors );
found = 1;
}
}
if (!found) {
falseAlarms++;
neg[MIN(det[i].neghbors, rocsize - 1)]++;
//neg[MIN(0, rocsize - 1)]++;
}
}
//imshow("teste", img);
if (saveDetected) {
//strcpy(detfilename, detname);
//strcat(detfilename, filename);
//strcpy(filename, detfilename);
imwrite(fullname, img);
//cvvSaveImage(fullname, img);
}
for (j = 0; j < refcount; j++) {
if (ref[j].found) {
hits++;
//pos[MIN(0, rocsize - 1)]++;
pos[MIN(ref[j].neghbors, rocsize - 1)]++;
} else {
missed++;
}
}
totalHits += hits;
totalMissed += missed;
totalFalseAlarms += falseAlarms;
printf("|%32.64s|%6d|%6d|%6d|\n", filename, hits, missed,
falseAlarms);
//printf("+--------------------------------+------+------+------+\n");
fprintf(resultados, "|%32.64s|%6d|%6d|%6d|\n", filename, hits,
missed, falseAlarms);
//fprintf(resultados,
// "+--------------------------------+------+------+------+\n");
fflush(stdout);
if (det) {
cvFree( &det);
det = NULL;
}
} /* if( !error ) */
//char c = (char) waitKey(10);
// if (c == 27)
// exit(0);
cvFree( &ref);
}
fclose(info);
printf("|%32.32s|%6d|%6d|%6d|\n", "Total", totalHits, totalMissed,
totalFalseAlarms);
fprintf(resultados, "|%32.32s|%6d|%6d|%6d|\n", "Total", totalHits,
totalMissed, totalFalseAlarms);
printf("+================================+======+======+======+\n");
fprintf(resultados,
"+================================+======+======+======+\n");
//printf("Number of stages: %d\n", nos);
//printf("Number of weak classifiers: %d\n", numclassifiers[nos - 1]);
printf("Total time: %f\n", totaltime);
fprintf(resultados, "Total time: %f\n", totaltime);
/* print ROC to stdout */
for (i = rocsize - 1; i > 0; i--) {
pos[i - 1] += pos[i];
neg[i - 1] += neg[i];
}
//fprintf(stderr, "%d\n", nos);
for (i = 0; i < rocsize; i++) {
fprintf(stderr, "\t%d\t%d\t%f\t%f\n", pos[i], neg[i],
((float) pos[i]) / (totalHits + totalMissed),
((float) neg[i]) / (totalHits + totalMissed));
}
cvFree( &pos);
cvFree( &neg);
}
return 0;
}
My doubt is about the det[i].neghbors = r.neighbors; in the old performance.cpp. How I retrieve the neighbors in this new version?
Anyone could help me to convert opencv_performance to run the new cascades from opencv_traincascade?
Many thanks!