Why would vkCreateSwapchainKHR result in an access violation at 0? - c++

I'm trying to learn Vulkan by following the great tutorials from vulkan-tutorial.com, but I'm having some trouble at the point where I must create the swap chain. As stated in the title, the call to vkCreateSwapchainKHR produces the following error: Access violation executing location 0x0000000000000000.
The tutorial suggests this might be a conflict with the Steam overlay. That is not the case for me, since copying the whole code from the tutorial works.
I'm trying to figure out what went wrong with my code, and to learn how to debug such issues, as I will not have reference code in the future. The offending line looks like this:
if (vkCreateSwapchainKHR(device, &swapChainCreateInfo, nullptr, &swapChain) != VK_SUCCESS) {
    throw std::runtime_error("Could not create swap chain");
}
I set a breakpoint at this line to compare the values of the arguments in my code with the values from the reference code. As far as I can tell, there is no difference (the addresses, of course, differ).
Where should I look for a problem in my code? The variable swapChain is NULL, as expected. A badly formed swapChainCreateInfo should not make vkCreateSwapchainKHR crash; it would merely make it return something other than VK_SUCCESS. And device was created without a problem:
if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) != VK_SUCCESS) {
    throw std::runtime_error("Failed to create logical device");
}
EDIT - I am using the validation layer VK_LAYER_LUNARG_standard_validation, and my swap chain creation code is the following.
// Useful functions and structures
VkPhysicalDevice physicalDevice;
VkSurfaceKHR surface;
VkSwapchainKHR swapChain;

struct QueueFamilyIndices {
    std::optional<uint32_t> graphicsFamily;
    std::optional<uint32_t> presentationFamily;

    bool isComplete() {
        return graphicsFamily.has_value() && presentationFamily.has_value();
    }
};

struct SwapChainSupportDetails {
    VkSurfaceCapabilitiesKHR surfaceCapabilities;
    std::vector<VkSurfaceFormatKHR> formats;
    std::vector<VkPresentModeKHR> presentModes;
};

SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice physicalDevice) {
    SwapChainSupportDetails swapChainSupportDetails;
    vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, &swapChainSupportDetails.surfaceCapabilities);

    uint32_t formatCount = 0;
    vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &formatCount, nullptr);
    if (formatCount != 0) {
        swapChainSupportDetails.formats.resize(formatCount);
        vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &formatCount, swapChainSupportDetails.formats.data());
    }

    uint32_t presentModeCount = 0;
    vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, &presentModeCount, nullptr);
    if (presentModeCount != 0) {
        swapChainSupportDetails.presentModes.resize(presentModeCount);
        vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, &presentModeCount, swapChainSupportDetails.presentModes.data());
    }

    return swapChainSupportDetails;
}

VkSurfaceFormatKHR chooseSwapChainSurfaceFormat(const std::vector<VkSurfaceFormatKHR> & availableFormats) {
    if (availableFormats.size() == 1 && availableFormats[0].format == VK_FORMAT_UNDEFINED) {
        return { VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR };
    }
    for (const auto & availableFormat : availableFormats) {
        if (availableFormat.format == VK_FORMAT_B8G8R8A8_UNORM && availableFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
            return availableFormat;
        }
    }
    return availableFormats[0];
}

VkPresentModeKHR chooseSwapChainPresentMode(const std::vector<VkPresentModeKHR> & availablePresentModes) {
    VkPresentModeKHR bestMode = VK_PRESENT_MODE_FIFO_KHR;
    for (const auto & availablePresentMode : availablePresentModes) {
        if (availablePresentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
            return availablePresentMode;
        }
        else if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
            bestMode = availablePresentMode;
        }
    }
    return bestMode;
}

VkExtent2D chooseSwapChainExtent2D(const VkSurfaceCapabilitiesKHR & surfaceCapabilities) {
    if (surfaceCapabilities.currentExtent.width != std::numeric_limits<uint32_t>::max()) {
        return surfaceCapabilities.currentExtent;
    }
    else {
        VkExtent2D actualExtent = { WIDTH, HEIGHT };
        actualExtent.width = std::max(std::min(surfaceCapabilities.maxImageExtent.width, actualExtent.width), surfaceCapabilities.minImageExtent.width);
        actualExtent.height = std::max(std::min(surfaceCapabilities.maxImageExtent.height, actualExtent.height), surfaceCapabilities.minImageExtent.height);
        return actualExtent;
    }
}
// Swap Chain creation code
SwapChainSupportDetails swapChainSupportDetails = querySwapChainSupport(physicalDevice);
VkSurfaceFormatKHR surfaceFormat = chooseSwapChainSurfaceFormat(swapChainSupportDetails.formats);
VkPresentModeKHR presentMode = chooseSwapChainPresentMode(swapChainSupportDetails.presentModes);
VkExtent2D extent = chooseSwapChainExtent2D(swapChainSupportDetails.surfaceCapabilities);
uint32_t imageCount = swapChainSupportDetails.surfaceCapabilities.minImageCount + 1;
if (swapChainSupportDetails.surfaceCapabilities.maxImageCount > 0 && imageCount > swapChainSupportDetails.surfaceCapabilities.maxImageCount) {
    imageCount = swapChainSupportDetails.surfaceCapabilities.maxImageCount; // clamp to the supported maximum
}
VkSwapchainCreateInfoKHR swapChainCreateInfo = {};
swapChainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapChainCreateInfo.surface = surface;
swapChainCreateInfo.minImageCount = imageCount;
swapChainCreateInfo.imageFormat = surfaceFormat.format;
swapChainCreateInfo.imageColorSpace = surfaceFormat.colorSpace;
swapChainCreateInfo.imageExtent = extent;
swapChainCreateInfo.imageArrayLayers = 1;
swapChainCreateInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
QueueFamilyIndices familyIndices = findQueueFamilies(physicalDevice);
uint32_t queueFamilyIndices[] = { familyIndices.graphicsFamily.value(), familyIndices.presentationFamily.value() };
if (familyIndices.graphicsFamily != familyIndices.presentationFamily) {
    swapChainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
    swapChainCreateInfo.queueFamilyIndexCount = 2;
    swapChainCreateInfo.pQueueFamilyIndices = queueFamilyIndices;
}
else {
    swapChainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    swapChainCreateInfo.queueFamilyIndexCount = 0;
    swapChainCreateInfo.pQueueFamilyIndices = nullptr;
}
swapChainCreateInfo.preTransform = swapChainSupportDetails.surfaceCapabilities.currentTransform;
swapChainCreateInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
swapChainCreateInfo.presentMode = presentMode;
swapChainCreateInfo.clipped = VK_TRUE;
swapChainCreateInfo.oldSwapchain = VK_NULL_HANDLE;
if (vkCreateSwapchainKHR(device, &swapChainCreateInfo, nullptr, &swapChain) != VK_SUCCESS) {
    throw std::runtime_error("Could not create swap chain");
}
I get the resulting structure:

Well, when creating the logical device, one needs to set enabledExtensionCount to the actual number of required extensions, and not to 0, if one expects extensions to work. In my case it was a simple editing mistake. Here is the gem in my code:
createInfo.enabledExtensionCount = static_cast<uint32_t>(deviceExtensions.size());
createInfo.ppEnabledExtensionNames = deviceExtensions.data();
createInfo.enabledExtensionCount = 0;
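The fix is simply to drop that stray last line so the count set from deviceExtensions survives until vkCreateDevice is called:
createInfo.enabledExtensionCount = static_cast<uint32_t>(deviceExtensions.size());
createInfo.ppEnabledExtensionNames = deviceExtensions.data();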
I figured it out by replacing every function in my code with the one from the reference code, one at a time, until it worked. I'm a bit disappointed that the validation layers didn't catch this. Did I set them up wrong? Is this something they should be catching?
EDIT: As pointed out by LIANG LIU, here is the initialization for deviceExtensions:
const std::vector<const char*> deviceExtensions = {
    VK_KHR_SWAPCHAIN_EXTENSION_NAME
};

Enable VK_KHR_SWAPCHAIN_EXTENSION_NAME when creating VkDevice
void VKRenderer::createVkLogicalDevice()
{
    // device extensions
    vector<const char*>::type deviceExtensionNames = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };

    // priorities
    float queuePrioritys[2] = { 1.f, 1.f };

    // graphics queue
    VkDeviceQueueCreateInfo queueCreateInfos = {};
    queueCreateInfos.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueCreateInfos.pNext = nullptr;
    queueCreateInfos.queueFamilyIndex = getGraphicsQueueFamilyIndex();
    queueCreateInfos.queueCount = 1;
    queueCreateInfos.pQueuePriorities = &queuePrioritys[0];

    // device features
    VkPhysicalDeviceFeatures deviceFeatures = {};

    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.pQueueCreateInfos = &queueCreateInfos;
    createInfo.queueCreateInfoCount = 1;
    createInfo.pEnabledFeatures = &deviceFeatures;
    createInfo.enabledExtensionCount = static_cast<uint32_t>(deviceExtensionNames.size());
    createInfo.ppEnabledExtensionNames = deviceExtensionNames.data();

    // create logical device and retrieve graphics queue
    if (VK_SUCCESS == vkCreateDevice(m_vkPhysicalDevice, &createInfo, nullptr, &m_vkDevice))
    {
        vkGetDeviceQueue(m_vkDevice, getGraphicsQueueFamilyIndex(), 0, &m_vkGraphicsQueue);
        vkGetDeviceQueue(m_vkDevice, getPresentQueueFamilyIndex(), 0, &m_vkPresentQueue);
    }
    else
    {
        EchoLogError("Failed to create vulkan logical device!");
    }
}

It looks like you are calling vkCreateDevice at the end of your code segment for creating the swap chain and passing the VkSwapchainCreateInfoKHR into it. Perhaps you want to call vkCreateSwapchainKHR instead, like:
if (vkCreateSwapchainKHR(device, &swapChainCreateInfo, nullptr, &swapChain) != VK_SUCCESS) {
    throw std::runtime_error("failed to create swap chain");
}
If you are actually calling vkCreateSwapchainKHR, could you edit your question to indicate this?

Related

E_INVALIDARG when calling CreateGraphicsPipelineState

I've been getting a weird error when calling CreateGraphicsPipelineState().
The function returns E_INVALIDARG even though the description is all set up.
The description worked before; then I tried to add index buffers to my pipeline. I didn't even touch any of the code for the PSO or shaders, and now the PSO creation is all messed up.
The issue is that I don't get any DX error messages from the driver when enabling the debug layer. I only get this:
"Microsoft C++ exception: _com_error at memory location ..."
when I step through the function.
It feels like it is some pointer error or similar, but I can't figure out what the error is. Perhaps one of you can see an obvious mistake that I made?
Here's my code:
CGraphicsPSO* pso = new CGraphicsPSO();
D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
// Input Layout
std::vector<D3D12_INPUT_ELEMENT_DESC> elements;
if (aPSODesc.inputLayout != nullptr)
{
auto& ilData = aPSODesc.inputLayout->desc;
for (auto& element : ilData)
{
// All Data here is correct when breaking
D3D12_INPUT_ELEMENT_DESC elementDesc;
elementDesc.SemanticName = element.mySemanticName;
elementDesc.SemanticIndex = element.mySemanticIndex;
elementDesc.InputSlot = element.myInputSlot;
elementDesc.AlignedByteOffset = element.myAlignedByteOffset;
elementDesc.InputSlotClass = _ConvertInputClassificationDX12(element.myInputSlotClass);
elementDesc.Format = _ConvertFormatDX12(element.myFormat);
elementDesc.InstanceDataStepRate = element.myInstanceDataStepRate;
elements.push_back(elementDesc);
}
D3D12_INPUT_LAYOUT_DESC inputLayout = {};
inputLayout.NumElements = (UINT)elements.size();
inputLayout.pInputElementDescs = elements.data();
psoDesc.InputLayout = inputLayout;
}
// TOPOLOGY
switch (aPSODesc.topology)
{
default:
case EPrimitiveTopology::TriangleList:
psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; // <--- Always this option
break;
case EPrimitiveTopology::PointList:
psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
break;
case EPrimitiveTopology::LineList:
psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
break;
//case EPrimitiveTopology::Patch:
// psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH;
// break;
}
// Shaders
if (aPSODesc.vs != nullptr)
{
D3D12_SHADER_BYTECODE vertexShaderBytecode = {};
vertexShaderBytecode.BytecodeLength = aPSODesc.vs->myByteCodeSize;
vertexShaderBytecode.pShaderBytecode = aPSODesc.vs->myByteCode;
psoDesc.VS = vertexShaderBytecode;
}
if (aPSODesc.ps != nullptr)
{
D3D12_SHADER_BYTECODE pixelShaderBytecode = {};
pixelShaderBytecode.BytecodeLength = aPSODesc.ps->myByteCodeSize;
pixelShaderBytecode.pShaderBytecode = aPSODesc.ps->myByteCode;
psoDesc.PS = pixelShaderBytecode;
}
psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM; // format of the render target
DXGI_SAMPLE_DESC sampleDesc = {};
sampleDesc.Count = 1;
sampleDesc.Quality = 0;
psoDesc.DepthStencilState.DepthEnable = FALSE;
psoDesc.DepthStencilState.StencilEnable = FALSE;
psoDesc.SampleDesc = sampleDesc; // must be the same sample description as the swapchain and depth/stencil buffer
psoDesc.SampleMask = UINT_MAX; // the sample mask is used for multi-sampling; 0xffffffff means no samples are masked out
psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC(D3D12_DEFAULT); // a default rasterizer state.
psoDesc.BlendState = CD3DX12_BLEND_DESC(D3D12_DEFAULT); // a default blend state.
psoDesc.NumRenderTargets = 1; // we are only binding one render target
psoDesc.pRootSignature = myGraphicsRootSignature;
psoDesc.Flags = D3D12_PIPELINE_STATE_FLAG_NONE;
ID3D12PipelineState* pipelineState;
HRESULT hr = myDevice->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&pipelineState));
pso->myPipelineState = pipelineState;
if (FAILED(hr))
{
delete pso;
return nullptr;
}
return pso;
So I just found the error.
It seems the way I parsed the semantic names for my input layout gave me an invalid pointer.
Thus the memory at that address was invalid, giving the DX12 device incorrect descriptions.
So what I did was store the semantic names locally within my CreatePSO function until the PSO was created, and now it all works.
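For anyone hitting the same thing, a minimal sketch of that fix, assuming ilData has the same fields as in the question's code and that element.mySemanticName is (convertible to) a std::string: keep the strings alive in a local container for as long as psoDesc is used.
std::vector<std::string> semanticNames;               // owns the name storage
std::vector<D3D12_INPUT_ELEMENT_DESC> elements;
semanticNames.reserve(ilData.size());                 // no reallocation, so the c_str() pointers stay valid
for (auto& element : ilData)
{
    semanticNames.push_back(element.mySemanticName);
    D3D12_INPUT_ELEMENT_DESC elementDesc = {};
    elementDesc.SemanticName = semanticNames.back().c_str(); // valid until semanticNames goes out of scope
    // ... fill in the remaining fields exactly as before ...
    elements.push_back(elementDesc);
}
// semanticNames and elements must still be alive when CreateGraphicsPipelineState is called.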
Looks to me like the pointers to storage you promised are going out of scope.
..
D3D12_INPUT_LAYOUT_DESC inputLayout = {};
..
psoDesc.InputLayout = inputLayout;
}

MFXVideoDECODE_Init fail with MFX_ERR_MEMORY_ALLOC

I'm trying to use the Intel Media SDK decoder for h.264 videos. Here is my code for initializing the decoder:
mfxStatus decoder::initDecoder(HWND window, mfxBitstream *Header) {
mfxStatus sts = MFX_ERR_NONE;
mfxVersion ver = { { 0, 1 } };
mfxVideoParam mfxVideoParams;
mfxFrameAllocator mfxAllocator;
mfxFrameAllocResponse mfxResponse;
sts = m_mfxSession.Init(MFX_IMPL_AUTO_ANY, &ver); //sts = MFX_ERR_NONE
if (sts == MFX_ERR_NONE) {
sts = m_mfxSession.SetHandle(MFX_HANDLE_DIRECT3D_DEVICE_MANAGER9,
m_renderer.initD3d(GetIntelDeviceAdapterNum(), window)); //sts = MFX_ERR_NONE
if (sts == MFX_ERR_NONE) {
mfxAllocator.pthis = m_mfxSession;
sts = m_mfxSession.SetFrameAllocator(&mfxAllocator); //sts = MFX_ERR_NONE
if (sts == MFX_ERR_NONE) {
MFXVideoDECODE mfxDEC(m_mfxSession);
m_mfxVideoDecode = mfxDEC;
memset(&mfxVideoParams, 0, sizeof(mfxVideoParams));
mfxVideoParams.mfx.CodecId = MFX_CODEC_AVC;
mfxVideoParams.IOPattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
sts = m_mfxVideoDecode.DecodeHeader(Header, &mfxVideoParams); //sts = MFX_ERR_NONE
if (sts == MFX_ERR_NONE) {
memset(&m_mfxRequest, 0, sizeof(m_mfxRequest));
sts = m_mfxVideoDecode.QueryIOSurf(&mfxVideoParams, &m_mfxRequest); //sts = MFX_ERR_NONE
if (sts == MFX_ERR_NONE) {
sts = m_renderer.allocSurfaces(mfxAllocator.pthis, &m_mfxRequest, &mfxResponse);
if (sts == MFX_ERR_NONE) {
m_pmfxSurfaces = new mfxFrameSurface1 *[m_mfxRequest.NumFrameSuggested];
for (int i = 0; i < m_mfxRequest.NumFrameSuggested; i++) {
m_pmfxSurfaces[i] = new mfxFrameSurface1;
memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
memcpy(&(m_pmfxSurfaces[i]->Info), &(mfxVideoParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
// MID (memory id) represents one video NV12 surface
m_pmfxSurfaces[i]->Data.MemId = mfxResponse.mids[i];
};
sts = m_mfxVideoDecode.Init(&mfxVideoParams); //sts = MFX_ERR_MEMORY_ALLOC
}
}
}
}
}
}
return sts;
}
So as you can see, MFXVideoDECODE::Init(mfxVideoParam*) (which internally calls MFXVideoDECODE_Init) returns MFX_ERR_MEMORY_ALLOC, and the strange thing here is that this document says this function does not have this return value.
Here is some debug information about mfxVideoParams :
AllocId = 0, AsyncDepth = 0, IOPattern = 16, mfx.CodecId = 541283905,
mfx.CodecProfile = 77, mfx.CodecLevel = 30, vpp.In.FourCC = 842094158,
vpp.In.Width = 864, vpp.In.Height = 480, vpp.In.CropW = 854,
vpp.In.CropH = 480, vpp.In.BufferSize = 31458144, vpp.In.AspectRatioW
= 1, vpp.In.AspectRatioH = 1, vpp.In.PicStruct = 1, vpp.In.ChromaFormat = 1
Here are some member definitions from the header that are used here:
MFXVideoSession m_mfxSession;
MFXVideoDECODE m_mfxVideoDecode;
mfxFrameAllocRequest m_mfxRequest;
mfxFrameSurface1** m_pmfxSurfaces;
And here is some information about my current working device that might relate to this problem:
Operating System: Windows 8.1
Processor: Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
System type: 64-bit operating system, x64-based processor
Installed memory (RAM): 8.00 GB
And finally, to reproduce the exact same situation, I downloaded the video named big_buck_bunny_1080p_h264.mov from this site, extracted the raw h264 stream with ffmpeg, and used it in my program.
You need to initialize the mfxFrameAllocator callback functions (Alloc, Free, GetHDL, ...) with proper implementations.
For example:
//static member
mfxStatus decoder::gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL* handle)
{
pthis; // To avoid warning for this unused parameter
if (handle == 0) return MFX_ERR_INVALID_HANDLE;
*handle = mid;
return MFX_ERR_NONE;
}
mfxStatus decoder::initDecoder(HWND window, mfxBitstream *Header) {
//blah blah
mfxAllocator.pthis = m_mfxSession;
mfxAllocator.GetHDL = gethdl;
//define for these too
//mfxAllocator.Alloc = alloc;
//mfxAllocator.Free = free;
//mfxAllocator.Lock = lock;
//mfxAllocator.Unlock = unlock;
//rest of your code
}
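For reference, the remaining callbacks need the following signatures (a sketch only; the bodies have to allocate, lock, and release the actual D3D9 surfaces, as the Media SDK sample allocators do):
// additional static members of decoder, mirroring gethdl above (bodies omitted)
static mfxStatus alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response);
static mfxStatus lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
static mfxStatus unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
static mfxStatus free(mfxHDL pthis, mfxFrameAllocResponse* response);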

Custom C++ ASP .NET Membership Login

Does anyone know how I can hash the user's password using the salt provided by ASP.NET Membership?
I'm developing a C++ Linux application and I have only access to the SQL Server.
Thanks,
Here is the encoding algorithm used by ASP.NET Membership, written in C#.
It uses System.Security.Cryptography, so you might want to look at Mono if you want to run it on Linux.
Note: I'm not familiar with Mono.
private string EncodePassword(string pass, int passwordFormat, string salt)
{
if (passwordFormat == 0) // MembershipPasswordFormat.Clear
return pass;
byte[] bIn = Encoding.Unicode.GetBytes(pass);
byte[] bSalt = Convert.FromBase64String(salt);
byte[] bRet = null;
if (passwordFormat == 1)
{ // MembershipPasswordFormat.Hashed
HashAlgorithm hm = GetHashAlgorithm();
if (hm is KeyedHashAlgorithm)
{
KeyedHashAlgorithm kha = (KeyedHashAlgorithm)hm;
if (kha.Key.Length == bSalt.Length)
{
kha.Key = bSalt;
}
else if (kha.Key.Length < bSalt.Length)
{
byte[] bKey = new byte[kha.Key.Length];
Buffer.BlockCopy(bSalt, 0, bKey, 0, bKey.Length);
kha.Key = bKey;
}
else
{
byte[] bKey = new byte[kha.Key.Length];
for (int iter = 0; iter < bKey.Length; )
{
int len = Math.Min(bSalt.Length, bKey.Length - iter);
Buffer.BlockCopy(bSalt, 0, bKey, iter, len);
iter += len;
}
kha.Key = bKey;
}
bRet = kha.ComputeHash(bIn);
}
else
{
byte[] bAll = new byte[bSalt.Length + bIn.Length];
Buffer.BlockCopy(bSalt, 0, bAll, 0, bSalt.Length);
Buffer.BlockCopy(bIn, 0, bAll, bSalt.Length, bIn.Length);
bRet = hm.ComputeHash(bAll);
}
}
else
{
byte[] bAll = new byte[bSalt.Length + bIn.Length];
Buffer.BlockCopy(bSalt, 0, bAll, 0, bSalt.Length);
Buffer.BlockCopy(bIn, 0, bAll, bSalt.Length, bIn.Length);
bRet = EncryptPassword(bAll, _LegacyPasswordCompatibilityMode);
}
return Convert.ToBase64String(bRet);
}
private string GenerateSalt()
{
byte[] buf = new byte[SALT_SIZE];
(new RNGCryptoServiceProvider()).GetBytes(buf);
return Convert.ToBase64String(buf);
}
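If you only need the default "Hashed" format (passwordFormat == 1 with the default SHA-1 algorithm, which is not a KeyedHashAlgorithm and therefore takes the concatenation branch above), the C++ side reduces to SHA-1 over the salt bytes followed by the UTF-16LE password bytes, Base64-encoded. A minimal sketch using OpenSSL follows; the helper names are illustrative, and it assumes an ASCII password so the UTF-16LE widening is just inserting zero bytes.
#include <openssl/sha.h>
#include <openssl/evp.h>
#include <string>
#include <vector>

static std::vector<unsigned char> Base64Decode(const std::string& in) {
    std::vector<unsigned char> out(3 * (in.size() / 4) + 3);
    int len = EVP_DecodeBlock(out.data(),
                              reinterpret_cast<const unsigned char*>(in.data()),
                              static_cast<int>(in.size()));
    // EVP_DecodeBlock treats '=' padding as data, so trim the padding back off.
    for (size_t i = in.size(); i > 0 && in[i - 1] == '='; --i) --len;
    out.resize(len > 0 ? static_cast<size_t>(len) : 0);
    return out;
}

static std::string Base64Encode(const unsigned char* data, size_t len) {
    std::string out(4 * ((len + 2) / 3) + 1, '\0');
    int n = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(&out[0]), data, static_cast<int>(len));
    out.resize(n);
    return out;
}

// Equivalent of EncodePassword(pass, 1 /* Hashed */, salt) for the default SHA-1 provider.
std::string EncodePasswordSha1(const std::string& pass, const std::string& saltBase64) {
    std::vector<unsigned char> buf = Base64Decode(saltBase64);   // salt bytes come first
    for (char c : pass) {                                        // Encoding.Unicode == UTF-16LE
        buf.push_back(static_cast<unsigned char>(c));
        buf.push_back(0);
    }
    unsigned char digest[SHA_DIGEST_LENGTH];
    SHA1(buf.data(), buf.size(), digest);
    return Base64Encode(digest, sizeof digest);                  // compare against the stored password column
}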

Not able to add certificate policy extension using openssl APIs in c++

I tried using the following syntax:
add_ext(x509OutCertificate, NID_certificate_policies, "Policy: 2.16.840.1.113733.1.7.54 ,CPS: https://www.verisign.com/cps");
add_ext(x509OutCertificate, NID_certificate_policies, "2.16.840.1.113733.1.7.54,https://www.verisign.com/cps");
and many more combinations,
but I am not able to add this extension to the certificate. Any clue what is wrong?
Thanks in advance.
This is really a comment, but a comment does not have the space for it.
$ grep -R NID_certificate_policies *
crypto/objects/obj_dat.h: NID_certificate_policies,3,&(lvalues[512]),0},
crypto/objects/objects.h:#define NID_certificate_policies 89
crypto/objects/obj_mac.h:#define NID_certificate_policies 89
crypto/x509v3/v3_cpols.c:NID_certificate_policies, 0,ASN1_ITEM_ref(CERTIFICATEPOLICIES),
crypto/x509v3/pcy_cache.c: ext_cpols = X509_get_ext_d2i(x, NID_certificate_policies, &i, NULL);
crypto/x509v3/v3_purp.c: NID_certificate_policies, /* 89 */
Looking at v3_cpols.c, there's an ominous warning:
/* Certificate policies extension support: this one is a bit complex... */
Here's how it's declared:
const X509V3_EXT_METHOD v3_cpols = {
NID_certificate_policies, 0,ASN1_ITEM_ref(CERTIFICATEPOLICIES),
0,0,0,0,
0,0,
0,0,
(X509V3_EXT_I2R)i2r_certpol,
(X509V3_EXT_R2I)r2i_certpol,
NULL
};
ASN1_ITEM_TEMPLATE(CERTIFICATEPOLICIES) =
ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, CERTIFICATEPOLICIES, POLICYINFO)
ASN1_ITEM_TEMPLATE_END(CERTIFICATEPOLICIES)
IMPLEMENT_ASN1_FUNCTIONS(CERTIFICATEPOLICIES)
v3_cpols is then used in ext_dat.h:
static const X509V3_EXT_METHOD *standard_exts[] = {
&v3_nscert,
&v3_ns_ia5_list[0],
&v3_ns_ia5_list[1],
&v3_ns_ia5_list[2],
&v3_ns_ia5_list[3],
&v3_ns_ia5_list[4],
&v3_ns_ia5_list[5],
&v3_ns_ia5_list[6],
...
&v3_cpols,
...
};
There does not appear to be documentation or clear usage. The two books I have on OpenSSL lack a treatment of it. It looks like you are in muddy waters.
Perhaps the folks at the OpenSSL user's list can help out. I suggest it because some folks on the list can probably answer it (SH, DT, VD, etc), but I have not seen them on Stack Overflow's site.
It's been a long time since this question was asked, but I looked into the OpenSSL 1.0.2k source code, and I found that it does not support adding the CPS extension directly:
static STACK_OF(POLICYINFO) *r2i_certpol(X509V3_EXT_METHOD *method,
X509V3_CTX *ctx, char *value)
{
WriteLogToFile("In r2i_certpol");
STACK_OF(POLICYINFO) *pols = NULL;
char *pstr;
POLICYINFO *pol;
ASN1_OBJECT *pobj;
STACK_OF(CONF_VALUE) *vals;
CONF_VALUE *cnf;
int i, ia5org;
pols = sk_POLICYINFO_new_null();
if (pols == NULL) {
X509V3err(X509V3_F_R2I_CERTPOL, ERR_R_MALLOC_FAILURE);
return NULL;
}
WriteLogToFile("Before X509V3_parse_list");
vals = X509V3_parse_list(value);
WriteLogToFile("After X509V3_parse_list");
if (vals == NULL) {
X509V3err(X509V3_F_R2I_CERTPOL, ERR_R_X509V3_LIB);
goto err;
}
ia5org = 0;
for (i = 0; i < sk_CONF_VALUE_num(vals); i++) {
cnf = sk_CONF_VALUE_value(vals, i);
if (cnf->value || !cnf->name) {
char str[1000];
sprintf(str, "cnf->value: %s, cnf->name: %s", cnf->value, cnf->name);
WriteLogToFile(str);
X509V3err(X509V3_F_R2I_CERTPOL,
X509V3_R_INVALID_POLICY_IDENTIFIER);
X509V3_conf_err(cnf);
goto err;
}
pstr = cnf->name;
WriteLogToFile(pstr);
if (!strcmp(pstr, "ia5org")) {
ia5org = 1;
continue;
} else if (*pstr == '#') {
STACK_OF(CONF_VALUE) *polsect;
polsect = X509V3_get_section(ctx, pstr + 1);
if (!polsect) {
X509V3err(X509V3_F_R2I_CERTPOL, X509V3_R_INVALID_SECTION);
X509V3_conf_err(cnf);
goto err;
}
pol = policy_section(ctx, polsect, ia5org);
X509V3_section_free(ctx, polsect);
if (!pol)
goto err;
} else {
if (!(pobj = OBJ_txt2obj(cnf->name, 0))) {
X509V3err(X509V3_F_R2I_CERTPOL,
X509V3_R_INVALID_OBJECT_IDENTIFIER);
X509V3_conf_err(cnf);
goto err;
}
pol = POLICYINFO_new();
if (pol == NULL) {
X509V3err(X509V3_F_R2I_CERTPOL, ERR_R_MALLOC_FAILURE);
goto err;
}
pol->policyid = pobj;
}
if (!sk_POLICYINFO_push(pols, pol)) {
POLICYINFO_free(pol);
X509V3err(X509V3_F_R2I_CERTPOL, ERR_R_MALLOC_FAILURE);
goto err;
}
}
sk_CONF_VALUE_pop_free(vals, X509V3_conf_free);
return pols;
err:
sk_CONF_VALUE_pop_free(vals, X509V3_conf_free);
sk_POLICYINFO_pop_free(pols, POLICYINFO_free);
return NULL;
}
The "CPS" has to be in section part, which is configured by openssl.conf file, so anyone met this problem has to put cps in that configure file, and tell openssl to search that part, like the code below:
bool AddX509ExtensionFromFile(X509* cert, X509* issuer, int nid, char* value,char* extFile)
{
if (extFile)
{
long errorline = -1;
X509V3_CTX ctx2;
CONF* extconf = NCONF_new(NULL);
if (!NCONF_load(extconf, extFile, &errorline))
{
if (errorline <= 0)
{
printf("NCONF_load error\n");
}
else
{
printf("error on line %ld of config file '%s'\n", errorline, extFile);
}
}
char* extsect = "default";
X509V3_set_ctx_test(&ctx2);
X509V3_set_nconf(&ctx2, extconf);
if (!X509V3_EXT_add_nconf(extconf, &ctx2, extsect, NULL))
{
printf("error loading extension section %s\n", extsect);
}
X509V3_set_ctx(&ctx2, issuer, cert, NULL, NULL, 0);
X509_EXTENSION* ex = X509V3_EXT_conf_nid(NULL, &ctx2, nid, value);
if (!ex) {
return false;
}
int result = X509_add_ext(cert, ex, -1);
X509_EXTENSION_free(ex);
return (result != 0); // X509_add_ext returns 1 on success, 0 on failure
}
return false;
}
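For reference, the policy section that this expects in the configuration file could look roughly like the following (the section names and the "default" test section mirror the code above; the OID and CPS URL are the ones from the question). The value string passed to AddX509ExtensionFromFile would then be something like "ia5org,@polsect", so that r2i_certpol resolves the section:
[ default ]
certificatePolicies = ia5org, @polsect

[ polsect ]
policyIdentifier = 2.16.840.1.113733.1.7.54
CPS.1 = "https://www.verisign.com/cps"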

How to write a Live555 FramedSource to allow me to stream H.264 live

I've been trying to write a class that derives from FramedSource in Live555 that will allow me to stream live data from my D3D9 application to an MP4 or similar.
What I do each frame is grab the backbuffer into system memory as a texture, then convert it from RGB -> YUV420P, then encode it using x264, then ideally pass the NAL packets on to Live555. I made a class called H264FramedSource that derived from FramedSource basically by copying the DeviceSource file. Instead of the input being an input file, I've made it a NAL packet which I update each frame.
I'm quite new to codecs and streaming, so I could be doing everything completely wrong. In each doGetNextFrame() should I be grabbing the NAL packet and doing something like
memcpy(fTo, nal->p_payload, nal->i_payload)
I assume that the payload is my frame data in bytes? If anybody has an example of a class derived from FramedSource that is at least close to what I'm trying to do, I would love to see it; this is all new to me and a little tricky to figure out. Live555's documentation is pretty much the code itself, which doesn't exactly make it easy to work out.
Ok, I finally got some time to spend on this and got it working! I'm sure there are others who will be begging to know how to do it, so here it is.
You will need your own FramedSource to take each frame, encode it, and prepare it for streaming; I provide some of the source code for this below.
Essentially, throw your FramedSource into an H264VideoStreamDiscreteFramer, then throw this into an H264VideoRTPSink. Something like this:
scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
framedSource = H264FramedSource::createNew(*env, 0,0);
h264VideoStreamDiscreteFramer
= H264VideoStreamDiscreteFramer::createNew(*env, framedSource);
// initialise the RTP Sink stuff here, look at
// testH264VideoStreamer.cpp to find out how
videoSink->startPlaying(*h264VideoStreamDiscreteFramer, NULL, videoSink);
env->taskScheduler().doEventLoop();
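The "RTP Sink stuff" glossed over in the comment follows testH264VideoStreamer.cpp; a trimmed sketch (the multicast address and port are arbitrary here, and the RTCP instance is omitted):
struct in_addr destinationAddress;
destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
const Port rtpPort(18888);                       // any even port will do
const unsigned char ttl = 255;
Groupsock* rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPort, ttl);
rtpGroupsock->multicastSendOnly();               // we are an SSM source
OutPacketBuffer::maxSize = 100000;               // leave room for large H.264 NAL units
RTPSink* videoSink = H264VideoRTPSink::createNew(*env, rtpGroupsock, 96 /* dynamic payload type */);
// an RTCPInstance would normally be created here as well; see the test program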
Now, in your main render loop, hand the backbuffer you've saved to system memory over to your FramedSource so it can be encoded, etc. For more info on how to set up the encoding, check out this answer: How does one encode a series of images into H264 using the x264 C API?
My implementation is very much in a hacky state and is yet to be optimised at all; my D3D application runs at around 15 fps due to the encoding, ouch, so I will have to look into that. But for all intents and purposes this Stack Overflow question is answered, because I was mostly after how to stream it. I hope this helps other people.
As for my FramedSource, it looks a little something like this:
concurrent_queue<x264_nal_t> m_queue;
SwsContext* convertCtx;
x264_param_t param;
x264_t* encoder;
x264_picture_t pic_in, pic_out;
EventTriggerId H264FramedSource::eventTriggerId = 0;
unsigned H264FramedSource::FrameSize = 0;
unsigned H264FramedSource::referenceCount = 0;
int W = 720;
int H = 960;
H264FramedSource* H264FramedSource::createNew(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
{
return new H264FramedSource(env, preferredFrameSize, playTimePerFrame);
}
H264FramedSource::H264FramedSource(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
: FramedSource(env),
fPreferredFrameSize(fMaxSize),
fPlayTimePerFrame(playTimePerFrame),
fLastPlayTime(0),
fCurIndex(0)
{
if (referenceCount == 0)
{
}
++referenceCount;
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_threads = 1;
param.i_width = 720;
param.i_height = 960;
param.i_fps_num = 60;
param.i_fps_den = 1;
// Intra refresh:
param.i_keyint_max = 60;
param.b_intra_refresh = 1;
//Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = 25;
param.rc.f_rf_constant_max = 35;
param.i_sps_id = 7;
//For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");
encoder = x264_encoder_open(&param);
pic_in.i_type = X264_TYPE_AUTO;
pic_in.i_qpplus1 = 0;
pic_in.img.i_csp = X264_CSP_I420;
pic_in.img.i_plane = 3;
x264_picture_alloc(&pic_in, X264_CSP_I420, 720, 960);
convertCtx = sws_getContext(720, 960, PIX_FMT_RGB24, 720, 960, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (eventTriggerId == 0)
{
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}
H264FramedSource::~H264FramedSource()
{
--referenceCount;
if (referenceCount == 0)
{
// Reclaim our 'event trigger'
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
void H264FramedSource::AddToBuffer(uint8_t* buf, int surfaceSizeInBytes)
{
uint8_t* surfaceData = (new uint8_t[surfaceSizeInBytes]);
memcpy(surfaceData, buf, surfaceSizeInBytes);
int srcstride = W*3;
sws_scale(convertCtx, &surfaceData, &srcstride,0, H, pic_in.img.plane, pic_in.img.i_stride);
x264_nal_t* nals = NULL;
int i_nals = 0;
int frame_size = -1;
frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
static bool finished = false;
if (frame_size >= 0)
{
static bool alreadydone = false;
if(!alreadydone)
{
x264_encoder_headers(encoder, &nals, &i_nals);
alreadydone = true;
}
for(int i = 0; i < i_nals; ++i)
{
m_queue.push(nals[i]);
}
}
delete [] surfaceData;
surfaceData = NULL;
envir().taskScheduler().triggerEvent(eventTriggerId, this);
}
void H264FramedSource::doGetNextFrame()
{
deliverFrame();
}
void H264FramedSource::deliverFrame0(void* clientData)
{
((H264FramedSource*)clientData)->deliverFrame();
}
void H264FramedSource::deliverFrame()
{
x264_nal_t nalToDeliver;
if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
// Remember the play time of this data:
fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
fDurationInMicroseconds = fLastPlayTime;
} else {
// We don't know a specific play time duration for this data,
// so just record the current time as being the 'presentation time':
gettimeofday(&fPresentationTime, NULL);
}
if(!m_queue.empty())
{
m_queue.wait_and_pop(nalToDeliver);
uint8_t* newFrameDataStart = (uint8_t*)0xD15EA5E;
newFrameDataStart = (uint8_t*)(nalToDeliver.p_payload);
unsigned newFrameSize = nalToDeliver.i_payload;
// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
}
else {
fFrameSize = newFrameSize;
}
memcpy(fTo, nalToDeliver.p_payload, nalToDeliver.i_payload);
FramedSource::afterGetting(this);
}
}
Oh, and for those who want to know what my concurrent queue is, here it is, and it works brilliantly: http://www.justsoftwaresolutions.co.uk/threading/implementing-a-thread-safe-queue-using-condition-variables.html
Enjoy and good luck!
The deliverFrame method lacks the following check at its start:
if (!isCurrentlyAwaitingData()) return;
see DeviceSource.cpp in LIVE
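With the rest of the code above unchanged, the top of the method would then become:
void H264FramedSource::deliverFrame()
{
    if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
    x264_nal_t nalToDeliver;
    ...
}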