I am trying to clip a raster using a polygon with GDAL. At the moment I get a read access violation when initializing the WarpOperation. I can open my shapefile and check the feature count, so the access seems fine. I can also access my raster data (GetProjectionRef). All files are in the same CRS. Is there a way to use GDALWarp with a cutline?
const char* inputPath = "input.tif";
const char* outputPath = "output.tif";
//clipper Polygon
std::wstring w_read_filenamePoly = L"Polygon.shp";
char* read_filenamePoly = new char[w_read_filenamePoly.length() + 1];
wcstombs(read_filenamePoly, w_read_filenamePoly.c_str(), w_read_filenamePoly.length() + 1);
GDALDataset* hSrcDS;
GDALDataset* hDstDS;
GDALAllRegister();
hSrcDS =(GDALDataset *) GDALOpen(inputPath, GA_Update);
hDstDS = (GDALDataset*)GDALOpen(outputPath, GA_Update);
const char* proj = hSrcDS->GetProjectionRef();
const char* proj2 = hDstDS->GetProjectionRef();
//clipper Layer
GDALDataset* poDSClipper;
poDSClipper = (GDALDataset*)GDALOpenEx(read_filenamePoly, GDAL_OF_UPDATE, NULL, NULL, NULL);
Assert::IsNotNull(poDSClipper);
delete[]read_filenamePoly;
OGRLayer* poLayerClipper;
poLayerClipper = poDSClipper->GetLayerByName("Polygon");
int numClip = poLayerClipper->GetFeatureCount();
//setup warp options
GDALWarpOptions* psWarpOptions = GDALCreateWarpOptions();
psWarpOptions->hSrcDS = hSrcDS;
psWarpOptions->hDstDS = hDstDS;
psWarpOptions->nBandCount = 1;
psWarpOptions->panSrcBands = (int *) CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
psWarpOptions->panSrcBands[0] = 1;
psWarpOptions->panDstBands = (int*)CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
psWarpOptions->panDstBands[0] = 1;
psWarpOptions->pfnProgress = GDALTermProgress;
psWarpOptions->hCutline = poLayerClipper;
// Establish reprojection transformer.
psWarpOptions->pTransformerArg = GDALCreateGenImgProjTransformer(hSrcDS,proj, hDstDS, proj2, FALSE, 0.0, 1);
psWarpOptions->pfnTransformer = GDALGenImgProjTransform;
GDALWarpOperation oOperation;
oOperation.Initialize(psWarpOptions);
oOperation.ChunkAndWarpImage(0, 0, GDALGetRasterXSize(hDstDS), GDALGetRasterYSize(hDstDS));
GDALDestroyGenImgProjTransformer(psWarpOptions->pTransformerArg);
GDALDestroyWarpOptions(psWarpOptions);
GDALClose(hDstDS);
GDALClose(hSrcDS);
Your psWarpOptions->hCutline should be a polygon, not a layer.
Also the cutline should be in source pixel/line coordinates.
Check TransformCutlineToSource from gdalwarp_lib.cpp; you can probably simply take the code from there. The core coordinate mapping is sketched below.
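For illustration, the heart of that conversion for a simple north-up raster could look like this (a minimal sketch only: the real TransformCutlineToSource also densifies the geometry and handles errors; geom stands for the cutline polygon taken from the layer's first feature):
// Minimal sketch: map a cutline polygon from georeferenced coordinates to
// source pixel/line coordinates via the inverted geotransform. Assumes the
// cutline is in the raster's CRS and has no interior rings.
double gt[6], inv[6];
hSrcDS->GetGeoTransform(gt);
GDALInvGeoTransform(gt, inv); // check the return values in real code
OGRLinearRing* ring = ((OGRPolygon*)geom)->getExteriorRing();
for (int i = 0; i < ring->getNumPoints(); i++)
{
double X = ring->getX(i), Y = ring->getY(i);
// pixel = inv[0] + inv[1]*X + inv[2]*Y; line = inv[3] + inv[4]*X + inv[5]*Y
ring->setPoint(i, inv[0] + inv[1] * X + inv[2] * Y, inv[3] + inv[4] * X + inv[5] * Y);
}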
This particular GDAL operation, when called from C++, is so full of pitfalls - and there are so many open questions about it here - that I am reproducing a full working example:
Warping (reprojecting) a raster image with a polygon mask (cutline):
#include <gdal/gdal.h>
#include <gdal/gdal_priv.h>
#include <gdal/gdalwarper.h>
#include <gdal/ogrsf_frmts.h>
int main() {
const char *inputPath = "input.tif";
const char *outputPath = "output.tif";
// clipper Polygon
// THIS FILE MUST BE IN PIXEL/LINE COORDINATES or otherwise one should
// copy the function gdalwarp_lib.cpp:TransformCutlineToSource()
// from GDAL's sources
// It is expected that it contains a single polygon feature
const char *read_filenamePoly = "cutline.json";
GDALDataset *hSrcDS;
GDALDataset *hDstDS;
GDALAllRegister();
auto poDriver = GetGDALDriverManager()->GetDriverByName("GTiff");
hSrcDS = (GDALDataset *)GDALOpen(inputPath, GA_ReadOnly);
hDstDS = (GDALDataset *)poDriver->CreateCopy(
outputPath, hSrcDS, 0, nullptr, nullptr, nullptr);
// Without this step the cutline is useless - because the background
// will be carried over from the original image
CPLErr e = hDstDS->GetRasterBand(1)->Fill(0);
const char *src_srs = hSrcDS->GetProjectionRef();
const char *dst_srs = hDstDS->GetProjectionRef();
// clipper Layer
GDALDataset *poDSClipper;
poDSClipper = (GDALDataset *)GDALOpenEx(
read_filenamePoly, GDAL_OF_UPDATE, NULL, NULL, NULL);
auto poLayerClipper = poDSClipper->GetLayer(0);
auto geom = poLayerClipper->GetNextFeature()->GetGeometryRef();
// setup warp options
GDALWarpOptions *psWarpOptions = GDALCreateWarpOptions();
psWarpOptions->hSrcDS = hSrcDS;
psWarpOptions->hDstDS = hDstDS;
psWarpOptions->nBandCount = 1;
psWarpOptions->panSrcBands =
(int *)CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
psWarpOptions->panSrcBands[0] = 1;
psWarpOptions->panDstBands =
(int *)CPLMalloc(sizeof(int) * psWarpOptions->nBandCount);
psWarpOptions->panDstBands[0] = 1;
psWarpOptions->pfnProgress = GDALTermProgress;
psWarpOptions->hCutline = geom;
// Establish reprojection transformer.
psWarpOptions->pTransformerArg = GDALCreateGenImgProjTransformer(
hSrcDS, src_srs, hDstDS, dst_srs, TRUE, 1000, 1);
psWarpOptions->pfnTransformer = GDALGenImgProjTransform;
GDALWarpOperation oOperation;
oOperation.Initialize(psWarpOptions);
oOperation.ChunkAndWarpImage(
0, 0, GDALGetRasterXSize(hDstDS), GDALGetRasterYSize(hDstDS));
GDALDestroyGenImgProjTransformer(psWarpOptions->pTransformerArg);
GDALDestroyWarpOptions(psWarpOptions);
GDALClose(hDstDS);
GDALClose(hSrcDS);
}
I'm currently working with the GDAL C/C++ API and I'm facing an issue with the warp-region-to-buffer functionality (WarpRegionToBuffer).
When my destination dataset is not strictly contained within the frame of my source dataset, the area that should hold no-data values is filled with random data (see out_code.tif enclosed). However, the gdalwarp command-line tool, which also uses WarpRegionToBuffer, does not seem to have this problem.
1/ Here is the code I use:
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>
#include "gdal.h"
#include "gdalwarper.h"
#include "cpl_conv.h"
int main(void)
{
std::string pathSrc = "in.dt1";
//these values will be provided on the command line
std::string pathDst = "out_code.tif";
double resolutionx = 0.000833333;
double resolutiony = 0.000833333;
//destination corner coordinates: top left (tl) bottom right (br)
float_t xtl = -1;
float_t ytl = 45;
float_t xbr = 2;
float_t ybr = 41;
//tile size defined by user
int tilesizex = 256;
int tilesizey = 256;
float width = std::ceil((xbr - xtl)/resolutionx);
float height = std::ceil((ytl - ybr)/resolutiony);
double adfDstGeoTransform[6] = {xtl, resolutionx, 0, ytl, 0, -resolutiony};
GDALDatasetH hSrcDS, hDstDS;
// Open input file
GDALAllRegister();
hSrcDS = GDALOpen(pathSrc.c_str(), GA_ReadOnly);
GDALDataType eDT = GDALGetRasterDataType(GDALGetRasterBand(hSrcDS,1));
// Create output file, using same spatial reference as input image, but new geotransform
GDALDriverH hDriver = GDALGetDriverByName( "GTiff" );
hDstDS = GDALCreate( hDriver, pathDst.c_str(), width, height, GDALGetRasterCount(hSrcDS), eDT, NULL );
OGRSpatialReference oSRS;
char *pszWKT = NULL;
//force geo projection
oSRS.SetWellKnownGeogCS( "WGS84" );
oSRS.exportToWkt( &pszWKT );
GDALSetProjection( hDstDS, pszWKT );
//Fetches the coefficients for transforming between pixel/line (P,L) raster space,
//and projection coordinates (Xp,Yp) space.
GDALSetGeoTransform( hDstDS, adfDstGeoTransform );
// Setup warp options
GDALWarpOptions *psWarpOptions = GDALCreateWarpOptions();
psWarpOptions->hSrcDS = hSrcDS;
psWarpOptions->hDstDS = hDstDS;
psWarpOptions->nBandCount = 1;
psWarpOptions->panSrcBands = (int *) CPLMalloc(sizeof(int) * psWarpOptions->nBandCount );
psWarpOptions->panSrcBands[0] = 1;
psWarpOptions->panDstBands = (int *) CPLMalloc(sizeof(int) * psWarpOptions->nBandCount );
psWarpOptions->panDstBands[0] = 1;
psWarpOptions->pfnProgress = GDALTermProgress;
//these values will be computed in order to warp tile by tile
//current tile size
int cursizex = 0;
int cursizey = 0;
double nbtilex = std::ceil(width/tilesizex);
double nbtiley = std::ceil(height/tilesizey);
int starttilex = 0;
int starttiley = 0;
// Establish reprojection transformer
psWarpOptions->pTransformerArg =
GDALCreateGenImgProjTransformer(hSrcDS,
GDALGetProjectionRef(hSrcDS),
hDstDS,
GDALGetProjectionRef(hDstDS),
FALSE, 0.0, 1);
psWarpOptions->pfnTransformer = GDALGenImgProjTransform;
// Initialize and execute the warp operation on region
GDALWarpOperation oOperation;
oOperation.Initialize(psWarpOptions);
for (int ty = 0; ty < nbtiley; ty++) {
//if this is the last tile, shrink its size; otherwise keep tilesize
for (int tx = 0; tx < nbtilex; tx++) {
starttiley = ty * tilesizey;
starttilex = tx * tilesizex;
cursizex = std::min(starttilex + tilesizex, (int)width) - starttilex;
cursizey = std::min(starttiley + tilesizey, (int)height) - starttiley;
float * buffer = new float[cursizex*cursizey];
memset(buffer, 0, cursizex*cursizey*sizeof(float));
//warp source
CPLErr ret = oOperation.WarpRegionToBuffer(
starttilex, starttiley, cursizex, cursizey,
buffer,
eDT);
if (ret != 0) {
std::cerr << CPLGetLastErrorMsg() << std::endl;
throw std::runtime_error("warp error");
}
//write the warped tile into the destination
ret = GDALRasterIO(GDALGetRasterBand(hDstDS,1),
GF_Write,
starttilex, starttiley, cursizex, cursizey,
buffer, cursizex, cursizey,
eDT,
0, 0);
if (ret != 0) {
CEA_SIMONE_ERROR("raster io write error");
throw std::runtime_error("raster io write error");
}
delete[] buffer;
}
}
// Clean memory
GDALDestroyGenImgProjTransformer( psWarpOptions->pTransformerArg );
GDALDestroyWarpOptions( psWarpOptions );
GDALClose( hDstDS );
GDALClose( hSrcDS );
return 0;
}
The result:
output image of the previous code sample (attached as PNG, since I can't attach TIF images)
The gdalwarp command line:
gdalwarp -te -1 41 2 45 -tr 0.000833333 0.000833333 in.dt1 out_cmd_line.tif
The command line result:
output image of the previous command line (attached as PNG, since I can't attach TIF images)
Can you please help me find what is wrong with my use of the GDAL C/C++ API, so that I get the same behaviour as the gdalwarp command line? There is probably an algorithm in gdalwarp that computes a mask of useful pixels in the destination frame before calling WarpRegionToBuffer, but I didn't find it.
I would really appreciate help on this problem!
Best regards
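One difference worth checking: gdalwarp initializes each destination chunk before warping through the INIT_DEST warp option, which the code above never sets. A minimal sketch of what setting it could look like (an untested suggestion; CSLSetNameValue needs cpl_string.h, and -32767 is merely the usual DTED nodata value):
// Ask the warper to initialize destination chunks itself rather than rely
// on the caller's buffer initialization.
psWarpOptions->papszWarpOptions = CSLSetNameValue(
psWarpOptions->papszWarpOptions, "INIT_DEST", "NO_DATA"); // or "0"
// Propagate the source nodata value, if any:
psWarpOptions->padfSrcNoDataReal = (double *) CPLMalloc(sizeof(double) * psWarpOptions->nBandCount);
psWarpOptions->padfSrcNoDataReal[0] = -32767.0; // hypothetical nodata value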
I've been trying to sample a YCbCr image in Vulkan, but I keep getting incorrect results, and I was hoping someone might be able to spot my mistake.
I have an NV12 YCbCr image which I want to render onto two triangles forming a quad. If I understand correctly, the VkFormat that corresponds to NV12 is VK_FORMAT_G8_B8R8_2PLANE_420_UNORM. Below is the code that I would expect to work, but I'll try to explain what I'm trying to do as well:
Create a VkSampler with a VkSamplerYcbcrConversion (with the correct format) in pNext
Read NV12 data into staging buffer
Create VkImage with the correct format and specify that the planes are disjoint
Get memory requirements (and offset for plane 1) for each plane (0 and 1)
Allocate device local memory for the image data
Bind each plane to the correct location in memory
Copy staging buffer to image memory
Create VkImageView with the same format as the VkImage and the same VkSamplerYcbcrConversionInfo as the VkSampler in pNext.
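For orientation, here is the NV12 memory layout that the buffer offsets and copy regions below rely on (standard NV12 definition):
// NV12 layout for a W x H image:
//   Plane 0 (Y):    W * H bytes, one luma byte per pixel
//   Plane 1 (CbCr): (W/2) * (H/2) interleaved Cb,Cr byte pairs = W * H / 2 bytes
// Total size: W * H * 3 / 2 bytes, which is why the code computes the
// plane-1 offset as (buffer_size / 3) * 2.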
Code:
VkSamplerYcbcrConversion ycbcr_sampler_conversion;
VkSamplerYcbcrConversionInfo ycbcr_info;
VkSampler ycbcr_sampler;
VkImage image;
VkDeviceMemory image_memory;
VkDeviceSize memory_offset_plane0, memory_offset_plane1;
VkImageView image_view;
enum YCbCrStorageFormat
{
NV12
};
unsigned char* ReadYCbCrFile(const std::string& filename, YCbCrStorageFormat storage_format, VkFormat vulkan_format, uint32_t* buffer_size, uint32_t* buffer_offset_plane1, uint32_t* buffer_offset_plane2)
{
std::ifstream file;
file.open(filename.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
if (!file.is_open()) { ELOG("Failed to open YCbCr image"); }
*buffer_size = file.tellg();
file.seekg(0);
unsigned char* data;
switch (storage_format)
{
case NV12:
{
if (vulkan_format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM)
{
ILOG("A 1:1 relationship doesn't exist between NV12 and 420, exiting");
exit(1);
}
*buffer_offset_plane1 = (*buffer_size / 3) * 2;
*buffer_offset_plane2 = 0; //Not used
data = new unsigned char[*buffer_size];
file.read((char*)(data), *buffer_size);
break;
}
default:
ELOG("A YCbCr storage format is required");
data = nullptr;
break;
}
file.close();
return data;
}
VkFormatProperties format_properties;
vkGetPhysicalDeviceFormatProperties(physical_device, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, &format_properties);
bool cosited = false, midpoint = false;
if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT)
{
cosited = true;
}
else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT)
{
midpoint = true;
}
if (!cosited && !midpoint)
{
ELOG("Nither VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT nor VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT is supported for VK_FORMAT_G8_B8R8_2PLANE_420_UNORM");
}
VkSamplerYcbcrConversionCreateInfo conversion_info = {};
conversion_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
conversion_info.pNext = NULL;
conversion_info.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
conversion_info.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
conversion_info.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
conversion_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
if (cosited)
{
conversion_info.xChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
conversion_info.yChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
}
else
{
conversion_info.xChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
conversion_info.yChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
}
conversion_info.chromaFilter = VK_FILTER_LINEAR;
conversion_info.forceExplicitReconstruction = VK_FALSE;
VkResult res = vkCreateSamplerYcbcrConversion(logical_device, &conversion_info, NULL, &ycbcr_sampler_conversion);
CHECK_VK_RESULT(res, "Failed to create YCbCr conversion sampler");
ILOG("Successfully created YCbCr conversion");
ycbcr_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
ycbcr_info.pNext = NULL;
ycbcr_info.conversion = ycbcr_sampler_conversion;
VkSamplerCreateInfo sampler_info = {};
sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampler_info.pNext = &ycbcr_info;
sampler_info.flags = 0;
sampler_info.magFilter = VK_FILTER_LINEAR;
sampler_info.minFilter = VK_FILTER_LINEAR;
sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.mipLodBias = 0.0f;
sampler_info.anisotropyEnable = VK_FALSE;
//sampler_info.maxAnisotropy IGNORED
sampler_info.compareEnable = VK_FALSE;
//sampler_info.compareOp = IGNORED
sampler_info.minLod = 0.0f;
sampler_info.maxLod = 1.0f;
sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
sampler_info.unnormalizedCoordinates = VK_FALSE;
res = vkCreateSampler(logical_device, &sampler_info, NULL, &ycbcr_sampler);
CHECK_VK_RESULT(res, "Failed to create YUV sampler");
ILOG("Successfully created sampler with YCbCr in pNext");
std::string filename = "tree_nv12_1920x1080.yuv";
uint32_t width = 1920, height = 1080;
VkFormat format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
uint32_t buffer_size, buffer_offset_plane1, buffer_offset_plane2;
unsigned char* ycbcr_data = ReadYCbCrFile(filename, NV12, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, &buffer_size, &buffer_offset_plane1, &buffer_offset_plane2);
//Load image into staging buffer
VkDeviceMemory stage_buffer_memory;
VkBuffer stage_buffer = create_vk_buffer(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, stage_buffer_memory);
void* stage_memory_ptr;
vkMapMemory(logical_device, stage_buffer_memory, 0, buffer_size, 0, &stage_memory_ptr);
memcpy(stage_memory_ptr, ycbcr_data, buffer_size);
vkUnmapMemory(logical_device, stage_buffer_memory);
delete[] ycbcr_data;
//Create image
VkImageCreateInfo img_info = {};
img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
img_info.flags = VK_IMAGE_CREATE_DISJOINT_BIT;
img_info.imageType = VK_IMAGE_TYPE_2D;
img_info.extent.width = width;
img_info.extent.height = height;
img_info.extent.depth = 1;
img_info.mipLevels = 1;
img_info.arrayLayers = 1;
img_info.format = format;
img_info.tiling = VK_IMAGE_TILING_LINEAR;//VK_IMAGE_TILING_OPTIMAL;
img_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
img_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
img_info.samples = VK_SAMPLE_COUNT_1_BIT;
img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult result = vkCreateImage(logical_device, &img_info, NULL, &image);
CHECK_VK_RESULT(result, "vkCreateImage failed to create image handle");
ILOG("Image created!");
//Get memory requirements for each plane and combine
//Plane 0
VkImagePlaneMemoryRequirementsInfo image_plane_info = {};
image_plane_info.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
image_plane_info.pNext = NULL;
image_plane_info.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkImageMemoryRequirementsInfo2 image_info2 = {};
image_info2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
image_info2.pNext = &image_plane_info;
image_info2.image = image;
VkImagePlaneMemoryRequirementsInfo memory_plane_requirements = {};
memory_plane_requirements.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
memory_plane_requirements.pNext = NULL;
memory_plane_requirements.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkMemoryRequirements2 memory_requirements2 = {};
memory_requirements2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
memory_requirements2.pNext = &memory_plane_requirements;
vkGetImageMemoryRequirements2(logical_device, &image_info2, &memory_requirements2);
VkDeviceSize image_size = memory_requirements2.memoryRequirements.size;
uint32_t image_bits = memory_requirements2.memoryRequirements.memoryTypeBits;
//Set offsets
memory_offset_plane0 = 0;
memory_offset_plane1 = image_size;
//Plane 1
image_plane_info.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
memory_plane_requirements.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
vkGetImageMemoryRequirements2(logical_device, &image_info2, &memory_requirements2);
image_size += memory_requirements2.memoryRequirements.size;
image_bits = image_bits | memory_requirements2.memoryRequirements.memoryTypeBits;
//Allocate image memory
VkMemoryAllocateInfo allocate_info = {};
allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocate_info.allocationSize = image_size;
allocate_info.memoryTypeIndex = get_device_memory_type(image_bits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
result = vkAllocateMemory(logical_device, &allocate_info, NULL, &image_memory);
CHECK_VK_RESULT(result, "vkAllocateMemory failed to allocate image memory");
//Bind each image plane to memory
std::vector<VkBindImageMemoryInfo> bind_image_memory_infos(2);
//Plane 0
VkBindImagePlaneMemoryInfo bind_image_plane0_info = {};
bind_image_plane0_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
bind_image_plane0_info.pNext = NULL;
bind_image_plane0_info.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkBindImageMemoryInfo& bind_image_memory_plane0_info = bind_image_memory_infos[0];
bind_image_memory_plane0_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_memory_plane0_info.pNext = &bind_image_plane0_info;
bind_image_memory_plane0_info.image = image;
bind_image_memory_plane0_info.memory = image_memory;
bind_image_memory_plane0_info.memoryOffset = memory_offset_plane0;
//Plane 1
VkBindImagePlaneMemoryInfo bind_image_plane1_info = {};
bind_image_plane1_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
bind_image_plane1_info.pNext = NULL;
bind_image_plane1_info.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
VkBindImageMemoryInfo& bind_image_memory_plane1_info = bind_image_memory_infos[1];
bind_image_memory_plane1_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_memory_plane1_info.pNext = &bind_image_plane1_info;
bind_image_memory_plane1_info.image = image;
bind_image_memory_plane1_info.memory = image_memory;
bind_image_memory_plane1_info.memoryOffset = memory_offset_plane1;
vkBindImageMemory2(logical_device, bind_image_memory_infos.size(), bind_image_memory_infos.data());
context.transition_vk_image_layout(image, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
//Copy staging buffer to device local buffer
VkCommandBuffer tmp_cmd_buffer = begin_tmp_vk_cmd_buffer();
std::vector<VkBufferImageCopy> plane_regions(2);
plane_regions[0].bufferOffset = 0;
plane_regions[0].bufferRowLength = 0;
plane_regions[0].bufferImageHeight = 0;
plane_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT;
plane_regions[0].imageSubresource.mipLevel = 0;
plane_regions[0].imageSubresource.baseArrayLayer = 0;
plane_regions[0].imageSubresource.layerCount = 1;
plane_regions[0].imageOffset = { 0, 0, 0 };
plane_regions[0].imageExtent = { width, height, 1 };
plane_regions[1].bufferOffset = buffer_offset_plane1;
plane_regions[1].bufferRowLength = 0;
plane_regions[1].bufferImageHeight = 0;
plane_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT;
plane_regions[1].imageSubresource.mipLevel = 0;
plane_regions[1].imageSubresource.baseArrayLayer = 0;
plane_regions[1].imageSubresource.layerCount = 1;
plane_regions[1].imageOffset = { 0, 0, 0 };
plane_regions[1].imageExtent = { width / 2, height / 2, 1 };
vkCmdCopyBufferToImage(tmp_cmd_buffer, stage_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, plane_regions.size(), plane_regions.data());
end_tmp_vk_cmd_buffer(tmp_cmd_buffer); //Submit and waits
vkFreeMemory(logical_device, stage_buffer_memory, NULL);
vkDestroyBuffer(logical_device, stage_buffer, NULL);
transition_vk_image_layout(image, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
VkImageViewCreateInfo image_view_info = {};
image_view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_info.pNext = &ycbcr_info;
image_view_info.flags = 0;
image_view_info.image = image;
image_view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_info.format = format;
image_view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
image_view_info.subresourceRange.baseMipLevel = 0;
image_view_info.subresourceRange.levelCount = 1;
image_view_info.subresourceRange.baseArrayLayer = 0;
image_view_info.subresourceRange.layerCount = 1;
res = vkCreateImageView(logical_device, &image_view_info, NULL, &image_view);
CHECK_VK_RESULT(res, "Failed to create image view");
ILOG("Successfully created image, allocated image memory and created image view");
I receive one validation error: vkCmdCopyBufferToImage() parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized enumerator. But from inspecting the validation code, it seems the layer is just a bit outdated, so this shouldn't be an issue.
The rest of the code just sets up regular descriptor layouts/pools and allocates and updates them accordingly (I've verified this with a regular RGB texture).
The fragment shader is as follows:
vec2 uv = vec2(gl_FragCoord.x / 1024.0, 1.0 - (gl_FragCoord.y / 1024.0));
out_color = vec4(texture(ycbcr_image, uv).rgb, 1.0f);
When I run my program I only get the red component (the image is essentially rendered as greyscale). From a little testing, it seems that the VkSamplerYcbcrConversion setup has no effect at all: removing it from both VkSamplerCreateInfo.pNext and VkImageViewCreateInfo.pNext doesn't change anything.
I've also looked at the Khronos YCbCr tests, but I can't find any real mistake.
Solution: according to the spec, sec. 12.1, Conversion must be fixed at pipeline creation time, through use of a combined image sampler with an immutable sampler in VkDescriptorSetLayoutBinding.
By adding the ycbcr_sampler to pImmutableSamplers when setting up the descriptor set layout binding, it now works:
VkDescriptorSetLayoutBinding image_binding = {};
image_binding.binding = 0;
image_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
image_binding.descriptorCount = 1;
image_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
image_binding.pImmutableSamplers = &ycbcr_sampler;
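For completeness, a minimal sketch of feeding that binding into the layout (set_layout is a hypothetical name; error handling omitted). Note that ycbcr_sampler must already be created when the layout is:
VkDescriptorSetLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layout_info.bindingCount = 1;
layout_info.pBindings = &image_binding;
VkDescriptorSetLayout set_layout;
res = vkCreateDescriptorSetLayout(logical_device, &layout_info, NULL, &set_layout);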
I am using a ZED camera to get depth (https://www.stereolabs.com/).
I want to get the data in C++ (using the OpenCV library).
I took the code from here:
https://www.stereolabs.com/blog/index.php/2015/06/28/zed-with-opencv/
The code on the website is not working, because one line does not compile:
sl::zed::ERRCODE err = zed->init(sl::zed::MODE::PERFORMANCE, 0, true);
I get 2 errors:
initial value of reference to non-const must be an lvalue
too many arguments in function call.
I looked at the function; its signature is:
ERRCODE init(InitParams &parameters);
I would appreciate your help.
Yes, as you can see, the parameter was changed to InitParams.
sl::zed::InitParams params;
params.verbose = true;
sl::zed::ERRCODE err = camera->init(params);
You can do something similar to this.
m_pZed = new sl::Camera();
sl::InitParameters zedInit;
zedInit.camera_buffer_count_linux = 4;
zedInit.camera_disable_self_calib = false;
zedInit.camera_fps = m_zedFPS;
zedInit.camera_image_flip = m_bZedFlip;
zedInit.camera_linux_id = 0;
zedInit.camera_resolution = (sl::RESOLUTION) m_zedResolution;
zedInit.coordinate_system = sl::COORDINATE_SYSTEM::COORDINATE_SYSTEM_IMAGE;
zedInit.coordinate_units = sl::UNIT::UNIT_METER;
zedInit.depth_minimum_distance = m_zedMinDist;
zedInit.depth_mode = (sl::DEPTH_MODE) m_zedDepthMode;
zedInit.sdk_gpu_id = -1;
zedInit.sdk_verbose = true;
sl::ERROR_CODE err = m_pZed->open(zedInit);
if (err != sl::SUCCESS)
{
LOG(ERROR) << "ZED Error code: " << sl::errorCode2str(err) << std::endl;
return false;
}
m_pZed->setConfidenceThreshold(m_zedConfidence);
m_pZed->setDepthMaxRangeValue((float) m_zedMaxDist);
// Set runtime parameters after opening the camera
m_zedRuntime.sensing_mode = (sl::SENSING_MODE) m_zedSenseMode;
m_zedRuntime.enable_depth = true;
m_zedRuntime.enable_point_cloud = false;
m_zedRuntime.move_point_cloud_to_world_frame = false;
// Create sl and cv Mat to get ZED left image and depth image
sl::Resolution zedImgSize = m_pZed->getResolution();
// Initialize color image and depth
m_width = zedImgSize.width;
m_height = zedImgSize.height;
m_centerH = m_width / 2;
m_centerV = m_height / 2;
// Best way of sharing sl::Mat and cv::Mat :
// Create a sl::Mat and then construct a cv::Mat using the ptr to sl::Mat data.
m_pzDepth = new sl::Mat(zedImgSize, sl::MAT_TYPE_32F_C1, sl::MEM_GPU);
m_gDepth = slMat2cvGpuMat(*m_pzDepth);
m_gDepth2 = GpuMat(m_gDepth.size(), m_gDepth.type());
More details can be found here: https://github.com/yankailab/OpenKAI/blob/master/src/Vision/_ZED.cpp
CGAL provides a way to instantiate an Image_3 object via its read function, i.e. by reading an existing image file from disk. I'm wondering whether I can instead instantiate it from height, width, depth and a data pointer, like this:
CGAL::Image_3 im;
int Height = 512;
int Width = 512;
int Depth = 100;
int* dataptr = new int [Height*Width*Depth];
memset(dataptr, 0, sizeof(int)*Height*Width*Depth);
MyCreate(im, Height, Width, Depth, dataptr); // <== my function to instantiate this object.
So how to do that?
You can use the following undocumented piece of code (not really tested):
// create and fill the _image struct
_image* image = ::_initImage();
image->vectMode = VM_SCALAR;
image->xdim = Height;
image->ydim = Width;
image->zdim = Depth;
image->vdim = 1;
image->vx = 1.;
image->vy = 1.;
image->vz = 1.;
image->endianness = ::_getEndianness();
image->wdim = sizeof(int);
image->wordKind = WK_FIXED;
image->sign = SGN_SIGNED;
image->data = dataptr;
// then create the CGAL::Image_3 object:
CGAL::Image_3 im(image);
I have adapted the code from the function Image_3::read_vtk_image_data (in CGAL-4.5/src/CGAL_ImageIO/Image_3.cpp).
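Wrapped into the question's MyCreate, the same (equally untested) sketch becomes:
void MyCreate(CGAL::Image_3& im, int Height, int Width, int Depth, int* dataptr)
{
// create and fill the _image struct, then hand it to CGAL::Image_3
_image* image = ::_initImage();
image->vectMode = VM_SCALAR;
image->xdim = Height;
image->ydim = Width;
image->zdim = Depth;
image->vdim = 1;
image->vx = image->vy = image->vz = 1.;
image->endianness = ::_getEndianness();
image->wdim = sizeof(int);
image->wordKind = WK_FIXED;
image->sign = SGN_SIGNED;
image->data = dataptr;
im = CGAL::Image_3(image);
}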
I'm trying to make a video from an OpenGL context.
I'm using glReadPixels; to be sure the RGB buffer data is OK, I save it into a BMP file, which I can read correctly.
My .h264 video is encoded, but there are some artifacts and I don't understand why.
I tried a lot of different parameters for the x264_param_t, but nothing works better!
Bitmap saved (the real OpenGL data): Bitmap from OpenGL (1 MB)
Raw h264 with the error: Raw h264 video (1 MB)
The OpenGL glReadPixels call:
int nSize = ClientHeight * ClientWidth * 3;
GLubyte *inBuff = new GLubyte[nSize];
glReadBuffer(GL_FRONT);
glReadPixels(0, 0, ClientWidth, ClientHeight, GL_BGR, GL_UNSIGNED_BYTE, inBuff);
The parameter setup:
x264_param_default(&mX264_param_t);
x264_param_default_preset(&mX264_param_t, "placebo", "film");
mX264_param_t.i_csp = X264_CSP_BGR;
mX264_param_t.i_threads = 6;
mX264_param_t.i_width = mWidth;
mX264_param_t.i_height = mHeight;
mX264_param_t.i_fps_num = mFps;
mX264_param_t.i_fps_den = 1;
// Intra refres:
mX264_param_t.i_keyint_max = mFps;
mX264_param_t.b_intra_refresh = 1;
//Rate control:
mX264_param_t.rc.i_rc_method = X264_RC_CRF;
mX264_param_t.rc.f_rf_constant = 25;
mX264_param_t.rc.f_rf_constant_max = 35;
int bps = 5000;
mX264_param_t.rc.i_bitrate = bps;
mX264_param_t.rc.i_vbv_max_bitrate = bps;
mX264_param_t.i_bframe = 2;
mX264_param_t.i_keyint_min = mFps / 4;
//For streaming:
mX264_param_t.b_repeat_headers = 1;
mX264_param_t.b_annexb = 1;
mX264_param_t.i_log_level = X264_LOG_DEBUG;
x264_param_apply_profile(&mX264_param_t, "baseline");
mpEncoder = x264_encoder_open(&mX264_param_t);
x264_encoder_parameters(mpEncoder, &mX264_param_t);
mpPictureOut = new x264_picture_t();
mpPictureIn = new x264_picture_t();
x264_picture_alloc(mpPictureIn, X264_CSP_BGR | X264_CSP_VFLIP, mWidth, mHeight);
Then the encoding loop:
mpPictureIn->img.i_csp = X264_CSP_BGR;
mpPictureIn->img.i_plane = 1;
mpPictureIn->img.i_stride[0] = 3 * mWidth;
mpPictureIn->img.plane[0] = rgbframe;
mpPictureIn->i_pts = mFrameCount;
mpPictureIn->i_type = X264_TYPE_AUTO;
mpPictureOut->i_pts = mFrameCount;
int i_nals;
x264_nal_t* nals;
int frame_size = x264_encoder_encode(mpEncoder, &nals, &i_nals, mpPictureIn, mpPictureOut);
if(frame_size > 0)
{
mpFileOut->write_frame(nals[0].p_payload, frame_size, mpPictureOut);
mFrameCount++;
}
The frame-writing function:
int TVideoFileWriter::write_frame(uint8_t *p_nalu, int i_size, x264_picture_t *p_picture)
{
if(fwrite(p_nalu, i_size, 1, mFileHandle))
return i_size;
return -1;
}
You opened your output file in text mode (not binary mode), so every 0x0A byte was replaced with 0x0D 0x0A.
Here is your output with this replacement reverted: out_fixed.h264
And it plays fine.
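If the handle comes from fopen (the question doesn't show how mFileHandle is opened, so this is an assumption), requesting binary mode avoids the substitution:
// "wb" instead of "w": no 0x0A -> 0x0D 0x0A translation on Windows
mFileHandle = fopen(outputPath.c_str(), "wb"); // outputPath is a hypothetical name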