Custom resolution on GCP VM (with T4 GPU on Windows Server 2019) - C++

I am currently searching for a way to set a fully custom resolution, in C++, on a Windows Server 2019 VM with a GPU (a T4 with a GRID licence and the virtual workstation GRID drivers).
I have tried different ways to achieve this; I can make it work on my laptop, but there seem to be some limitations on GCP VMs (or a Windows Server limitation).
I first tried ChangeDisplaySettings/ChangeDisplaySettingsEx (winuser.h): I can change to a known resolution, but I can't make it work with a custom one (not even with CDS_ENABLE_UNSAFE_MODES).
DWORD deviceIndex = 0;
DISPLAY_DEVICE displayDevice = { 0 };
displayDevice.cb = sizeof(DISPLAY_DEVICE);
while (EnumDisplayDevices(NULL, deviceIndex, &displayDevice, 0)) {
    deviceIndex++;
    DEVMODE dm = { 0 };
    dm.dmSize = sizeof(DEVMODE);
    DEVMODE finalDm = { 0 };
    finalDm.dmSize = sizeof(DEVMODE);
    // Check if we are able to retrieve the current settings
    if (!EnumDisplaySettings(displayDevice.DeviceName, ENUM_CURRENT_SETTINGS, &dm)) {
        continue;
    }
    // Check whether the resolution list differs when UNSAFE_MODES is enabled or not (it does not seem to change anything)
    LONG result = ChangeDisplaySettingsEx(displayDevice.DeviceName, &dm, 0, CDS_DISABLE_UNSAFE_MODES, NULL);
    std::cout << "CDS_DISABLE_UNSAFE_MODES" << std::endl;
    if (result == DISP_CHANGE_SUCCESSFUL) {
        for (int i = 0; EnumDisplaySettings(displayDevice.DeviceName, i, &dm) != 0; i++) {
            if (dm.dmBitsPerPel == 32) {
                std::cout << i << ". Found available resolution : " << dm.dmPelsWidth << " x " << dm.dmPelsHeight << " x " << dm.dmBitsPerPel << " # " << dm.dmDisplayFrequency << std::endl;
            }
        }
    }
    result = ChangeDisplaySettingsEx(displayDevice.DeviceName, &dm, 0, CDS_ENABLE_UNSAFE_MODES, NULL);
    std::cout << "CDS_ENABLE_UNSAFE_MODES" << std::endl;
    if (result == DISP_CHANGE_SUCCESSFUL) {
        for (int i = 0; EnumDisplaySettings(displayDevice.DeviceName, i, &dm) != 0; i++) {
            if (dm.dmBitsPerPel == 32) {
                std::cout << i << ". Found available resolution : " << dm.dmPelsWidth << " x " << dm.dmPelsHeight << " x " << dm.dmBitsPerPel << " # " << dm.dmDisplayFrequency << std::endl;
            }
        }
    }
    std::cout << "Please enter width : ";
    int width, height;
    std::cin >> width;
    std::cout << "Please enter height : ";
    std::cin >> height;
    dm.dmPelsWidth = width;
    dm.dmPelsHeight = height;
    if (width > height) {
        dm.dmDisplayOrientation = DMDO_DEFAULT;
    }
    else {
        dm.dmDisplayOrientation = DMDO_90;
    }
    dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYORIENTATION;
    //result = ChangeDisplaySettingsEx(displayDevice.DeviceName, &dm, NULL, CDS_TEST, NULL);
    result = ChangeDisplaySettingsEx(displayDevice.DeviceName, &dm, NULL, 0, NULL);
    if (result != DISP_CHANGE_SUCCESSFUL) {
        std::cout << "Impossible to ChangeDisplaySettings" << std::endl;
    }
    else {
        std::cout << "OK" << std::endl;
    }
    break;
}
I then took a look at NVAPI, and it is the same story: I can make it work on my PC, but still nothing on the GCP VMs. I found a way to make NVAPI create and use a custom resolution on my local PC, but once again I can't make it work on the GCP VM (code example found here).
NvAPI_Status result = NVAPI_ERROR;
NvU32 primaryDisplayId = 0;
// Testing resolution
int horizontal = 1920, vertical = 1090;
result = NvAPI_Initialize();
if (result != NVAPI_OK) {
    printf("Could not initialize NvAPI");
    return false;
}
MONITORINFOEX monInfo;
HMONITOR hMon;
const POINT ptZero = { 0, 0 };
// Determine the location of the primary monitor
hMon = MonitorFromPoint(ptZero, MONITOR_DEFAULTTOPRIMARY);
ZeroMemory(&monInfo, sizeof(monInfo));
monInfo.cbSize = sizeof(monInfo);
GetMonitorInfo(hMon, &monInfo);
result = NvAPI_DISP_GetGDIPrimaryDisplayId(&primaryDisplayId);
if (result != NVAPI_OK) {
    printf("Could not get display ID from device");
    NvAPI_Unload();
    return false;
}
NvU32 deviceCount = 0;
NV_CUSTOM_DISPLAY cd[NVAPI_MAX_DISPLAYS] = { 0 };
float refreshRate = 60;
// Timing computation (to get a timing that suits the changes made)
NV_TIMING_FLAG flag = { 0 };
NV_TIMING_INPUT timing = { 0 };
timing.version = NV_TIMING_INPUT_VER;
timing.height = vertical;
timing.width = horizontal;
timing.rr = refreshRate;
timing.flag = flag;
timing.type = NV_TIMING_OVERRIDE_CVT_RB;
result = NvAPI_DISP_GetTiming(primaryDisplayId, &timing, &cd[0].timing);
if (result != NVAPI_OK) {
    printf("Failed to get timing for display"); // failed to get custom display timing
    NvAPI_Unload();
    return false;
}
cd[0].width = horizontal;
cd[0].height = vertical;
cd[0].xRatio = 1;
cd[0].yRatio = 1;
cd[0].srcPartition = { 0, 0, 1.0, 1.0 };
cd[0].depth = 32;
cd[0].version = NV_CUSTOM_DISPLAY_VER;
cd[0].colorFormat = NV_FORMAT_A8R8G8B8;
// Returns NVAPI_ERROR on GCP but NVAPI_OK on my laptop
result = NvAPI_DISP_TryCustomDisplay(&primaryDisplayId, 1, cd);
if (result != NVAPI_OK) {
    printf("Could not set custom resolution");
    NvAPI_DISP_RevertCustomDisplayTrial(&primaryDisplayId, 1);
    NvAPI_Unload();
    return false;
}
else {
    NvAPI_DISP_SaveCustomDisplay(&primaryDisplayId, 1, true, true);
}
This part works perfectly well on my laptop; I can use a new dynamic resolution (it works with 1920x400, 1920x500, 1920x600), but not on my GCP VM, where this call:
NvAPI_DISP_TryCustomDisplay(&primaryDisplayId, 1, cd);
always returns NVAPI_ERROR.
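To get more than the bare status code, the failure can be translated into a readable message with NvAPI_GetErrorMessage; a small sketch of the kind of logging I mean, reusing the variables from the snippet above:
NvAPI_Status status = NvAPI_DISP_TryCustomDisplay(&primaryDisplayId, 1, cd);
if (status != NVAPI_OK) {
    // NvAPI_GetErrorMessage fills a short string describing the status code.
    NvAPI_ShortString message = { 0 };
    NvAPI_GetErrorMessage(status, message);
    printf("NvAPI_DISP_TryCustomDisplay failed: %s\n", message);
}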
I have found another trick: I can edit this registry entry: HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Video\{RANDOM_ID}\0001\NV_Modes
(Here is an old PDF; after some testing, it seems it still works this way.)
If I add a resolution using NVAPI, I can then set that resolution through ChangeDisplaySettingsEx (a GPU driver restart, or a Windows restart, is needed before a freshly added resolution can be used).
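For reproducibility, this is roughly how the NV_Modes value can be read programmatically. The GUID in the key path is a placeholder (it differs on every machine), and the snippet only illustrates the registry access, not the format of the value:
#include <windows.h>
#include <iostream>
#include <vector>

int main() {
    // {PLACEHOLDER-GUID} must be replaced by the real adapter GUID on the target machine.
    const wchar_t* subKey =
        L"SYSTEM\\CurrentControlSet\\Control\\Video\\{PLACEHOLDER-GUID}\\0001";
    HKEY hKey = nullptr;
    if (RegOpenKeyExW(HKEY_LOCAL_MACHINE, subKey, 0, KEY_READ, &hKey) != ERROR_SUCCESS) {
        std::wcerr << L"Could not open the video key\n";
        return 1;
    }
    DWORD type = 0, size = 0;
    // First call retrieves the size, second call the data.
    if (RegQueryValueExW(hKey, L"NV_Modes", nullptr, &type, nullptr, &size) == ERROR_SUCCESS) {
        std::vector<BYTE> data(size);
        RegQueryValueExW(hKey, L"NV_Modes", nullptr, &type, data.data(), &size);
        std::wcout << L"NV_Modes: " << size << L" bytes, registry type " << type << L"\n";
    }
    RegCloseKey(hKey);
    return 0;
}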
But I also need to be able to rotate the screen by playing with dmDisplayOrientation, and that does not seem to work on the GCP VM either: if I allow 1920x1090, for example, I can set the resolution to it, but I cannot set 1090x1920 with dmDisplayOrientation = DMDO_90 (even if I allow 1090x1920 too...).
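For completeness, the rotation pattern I am referring to is sketched below (the helper name is just for illustration): dmPelsWidth/dmPelsHeight have to describe the rotated desktop, so they are swapped relative to the landscape mode before applying DMDO_90.
#include <windows.h>
#include <iostream>
#include <utility>

// Rotate the primary display by 90 degrees, swapping width and height.
bool rotatePrimary90()
{
    DEVMODE dm = {};
    dm.dmSize = sizeof(DEVMODE);
    if (!EnumDisplaySettings(nullptr, ENUM_CURRENT_SETTINGS, &dm))
        return false;

    std::swap(dm.dmPelsWidth, dm.dmPelsHeight);   // e.g. 1920x1090 -> 1090x1920
    dm.dmDisplayOrientation = DMDO_90;
    dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYORIENTATION;

    LONG res = ChangeDisplaySettingsEx(nullptr, &dm, nullptr, 0, nullptr);
    if (res != DISP_CHANGE_SUCCESSFUL)
        std::cout << "Rotation failed with code " << res << std::endl;
    return res == DISP_CHANGE_SUCCESSFUL;
}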
So if anyone has found a way, or has any idea how to do this, that would be great; I am running out of ideas right now...

Related

How to use the Windows API to print line by line

I want to use the Windows API to make the printer print line by line.
For example, if there is only one message at the moment, print that one message; the printer then waits and the paper stops. When more information comes in, printing continues on the same paper.
With the Windows API I can only print page by page, in the following way. I don't know how to print by line, so that after one line is finished the printing is suspended and the next line is printed once a new task arrives.
bool CMFCApplication1Dlg::printTicket(CString& szPrinter, CString& szContent)
{
    static DOCINFO di = { sizeof(DOCINFO), (LPTSTR)TEXT("printer"), NULL };
    HDC hdcPrint = CreateDC(nullptr, szPrinter.GetBuffer(), nullptr, nullptr);
    if (hdcPrint != 0)
    {
        if (StartDoc(hdcPrint, &di) > 0)
        {
            StartPage(hdcPrint);
            SaveDC(hdcPrint);
            int xDistance = 20;
            int yDistance = 20;
            LOGFONT logFont = { 0 };
            logFont.lfCharSet = DEFAULT_CHARSET;
            logFont.lfPitchAndFamily = DEFAULT_PITCH;
            logFont.lfWeight = FW_NORMAL;
            logFont.lfHeight = 60;
            logFont.lfWeight = 36;
            HFONT hFont = CreateFontIndirect(&logFont);
            SelectObject(hdcPrint, hFont);
            TextOut(hdcPrint, xDistance, yDistance, szContent.GetBuffer(), szContent.GetLength());
            RestoreDC(hdcPrint, -1);
            EndPage(hdcPrint);
            EndDoc(hdcPrint);
        }
        else
        {
            cout << "StartDoc failed!" << endl;
            string errorCode = to_string(GetLastError());
            cout << "Error code is:" << errorCode << endl;
            return false;
        }
        DeleteDC(hdcPrint);
    }
    else
    {
        cout << "CreateDC failed!" << endl;
        string errorCode = to_string(GetLastError());
        cout << "Error code is :" << errorCode << endl;
        return false;
    }
    return true;
}

bool CMFCApplication1Dlg::SetPrinterParameters(CString& szPrinter)
{
    HANDLE hPrinter = nullptr;
    PRINTER_INFO_2* pi2 = nullptr;
    DEVMODE* pDevMode = nullptr;
    PRINTER_DEFAULTS pd;
    DWORD dwNeeded = 0;
    BOOL bFlag;
    LONG lFlag;
    WCHAR szDevName[MAX_PATH] = L"";
    DWORD dwLength = MAX_PATH;
    if (!GetDefaultPrinter(szDevName, &dwLength))
    {
        return false;
    }
    szPrinter = szDevName;
    return true;
}
Maybe this has something to do with the printer driver?
I also tried the C# winspool library and couldn't meet this requirement with it either; maybe I just don't know how to use it.
I hope you can help.
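Edit: to clarify the direction I am considering with winspool, the sketch below sends a single line of RAW bytes straight to the spooler, so nothing is paginated by GDI. I have not confirmed that my printer driver accepts RAW data (this typically works for ESC/POS-style ticket printers), so treat it only as an illustration; it links against winspool.lib.
#include <windows.h>
#include <string>

// Send one line of raw bytes to the named printer queue.
bool printRawLine(const std::wstring& printerName, const std::string& line)
{
    HANDLE hPrinter = nullptr;
    if (!OpenPrinterW(const_cast<LPWSTR>(printerName.c_str()), &hPrinter, nullptr))
        return false;

    DOC_INFO_1W docInfo = {};
    docInfo.pDocName = const_cast<LPWSTR>(L"ticket line");
    docInfo.pDatatype = const_cast<LPWSTR>(L"RAW");   // bypass GDI page rendering

    bool ok = false;
    if (StartDocPrinterW(hPrinter, 1, reinterpret_cast<LPBYTE>(&docInfo)) != 0)
    {
        if (StartPagePrinter(hPrinter))
        {
            std::string data = line + "\n";
            DWORD written = 0;
            ok = WritePrinter(hPrinter, const_cast<char*>(data.data()),
                              static_cast<DWORD>(data.size()), &written) != 0;
            EndPagePrinter(hPrinter);
        }
        EndDocPrinter(hPrinter);
    }
    ClosePrinter(hPrinter);
    return ok;
}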

vkDestroyDevice reports that the VkDevice has not been destroyed, and EXC_BAD_ACCESS

Running macOS Catalina and VS Code.
This is how I initialize my device:
void Renderer::InitDevice() {
    {
        uint32_t gpu_count = 0;
        vkEnumeratePhysicalDevices(Instance, &gpu_count, nullptr);
        std::vector<VkPhysicalDevice> gpu_list(gpu_count);
        vkEnumeratePhysicalDevices(Instance, &gpu_count, gpu_list.data()); // populates gpu_list with all gpu handles
        GPU = gpu_list[0]; // grab first gpu. debug and figure out a ranking system
        vkGetPhysicalDeviceProperties(GPU, &GPU_device_properties);
    }
    {
        uint32_t family_count = 0;
        vkGetPhysicalDeviceQueueFamilyProperties(GPU, &family_count, nullptr);
        std::vector<VkQueueFamilyProperties> family_properties(family_count);
        vkGetPhysicalDeviceQueueFamilyProperties(GPU, &family_count, family_properties.data());
        bool found_graphics_bit = false;
        for (u_int32_t i = 0; i < family_count; i++) {
            if (family_properties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                found_graphics_bit = true;
                graphics_family_index = i;
            }
        }
        if (!found_graphics_bit) {
            assert(1 && "Vulkan Error: Queue family supporting graphics card not found");
            std::exit(-1);
        }
    }
    {
        uint32_t layer_count = 0;
        vkEnumerateInstanceLayerProperties(&layer_count, nullptr);
        std::vector<VkLayerProperties> layer_properties(layer_count);
        vkEnumerateInstanceLayerProperties(&layer_count, layer_properties.data()); // something about system layers
        std::cout << "Instance Layers: \n";
        for (auto &layer_property : layer_properties) {
            std::cout << "\t" << layer_property.layerName << "\n";
            std::cout << "\t\t" << layer_property.description << "\n";
        }
        std::cout << "\n";
    }
    {
        uint32_t layer_count = 0;
        vkEnumerateDeviceLayerProperties(GPU, &layer_count, nullptr);
        std::vector<VkLayerProperties> layer_properties(layer_count);
        vkEnumerateDeviceLayerProperties(GPU, &layer_count, layer_properties.data()); // something about system layers
        std::cout << "Device Layers: \n";
        for (auto &layer_property : layer_properties) {
            std::cout << "\t" << layer_property.layerName << "\n";
            std::cout << "\t\t" << layer_property.description << "\n";
        }
        std::cout << "\n";
    }
    float queue_priorities[] { 1.0f };
    VkDeviceQueueCreateInfo device_queue_info = {};
    // need to understand queue properties
    device_queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    device_queue_info.queueFamilyIndex = graphics_family_index;
    device_queue_info.queueCount = 1;
    device_queue_info.pQueuePriorities = queue_priorities;
    VkDeviceCreateInfo device_info{};
    device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_info.queueCreateInfoCount = 1;
    device_info.pQueueCreateInfos = &device_queue_info;
    device_info.enabledLayerCount = device_layers.size();
    device_info.ppEnabledLayerNames = device_layers.data();
    device_info.enabledExtensionCount = device_extensions.size();
    device_info.ppEnabledExtensionNames = device_extensions.data();
    auto err = vkCreateDevice(GPU, &device_info, nullptr, &Device);
    if (err != VK_SUCCESS) {
        assert(1 && "Vulkan Error: Device Creation Failed");
        std::exit(-1);
    }
}
Vulkan device creation returns VK_SUCCESS when this error occurs.
And this is how I destroy it:
void Renderer::DeinitDevice() {
    vkDeviceWaitIdle(Device);
    vkDestroyDevice(Device, nullptr); // uncommenting this causes the program to crash.
    Device = nullptr;
}
On vkDeviceWaitIdle(Device); (or, if that is removed, on vkDestroyDevice(Device, nullptr);) I get EXC_BAD_ACCESS and:
#"2020-05-17 15:06:54.832625-0400 main[4816:29494] flock failed to lock maps file: errno = 35\r\n"
#"UNASSIGNED-ObjectTracker-ObjectLeak(ERROR / SPEC): msgNum: 699204130 - Validation Error: [ UNASSIGNED-ObjectTracker-ObjectLeak ] Object 0: handle = 0x10104e018, type = VK_OBJECT_TYPE_DEVICE; | MessageID = 0x29ad0222 | OBJ ERROR : VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT object VkDevice 0x10104e018[] has not been destroyed.\r\n"
#" Objects: 1\r\n"
#" [0] 0x10104e018, type: 3, name: NULL\r\n"
I was following vulkan-tutorial and couldn't really understand it, but I found a few videos that explain the things the tutorial doesn't really go over. So far I've put this together, but I'm not sure what's causing the error. I've seen other similar errors but can't seem to find out how they were fixed.
The code I didn't post, which actually contained the issue, was this:
Renderer::~Renderer() {
    DestroyInstance();
    DeinitDevice();
}
I accidentally transposed these two calls from the tutorial I was following; it should be this:
Renderer::~Renderer() {
    DeinitDevice();
    DestroyInstance();
}

Moving third-party windows in macOS

I am trying to support the program on macOS. The program must move one specific window of another program (which has more than one window open) to a specified monitor and expand it to full screen. For Windows and Linux this was implemented using the native APIs, but for macOS I have not found an API to change the windows of another application. I could find a way to get the window ID, but not to change its state.
m_currentWindow->mac = 0;
CGWindowListOption option = kCGWindowListOptionAll;
CGWindowID id = 0;
CFArrayRef windows = CGWindowListCreate(option, id);
if(windows == nullptr)
{
qCCritical(actOp) << "windows is null";
return;
}
CFArrayRef desc = CGWindowListCreateDescriptionFromArray(windows);
if(desc == nullptr)
{
qCCritical(actOp) << "windows description is null";
return;
}
CFIndex count = CFArrayGetCount(desc);
qCDebug(actOp) << "finded " << count << " window";
QList<quint32> allWindows;
for(CFIndex i=0; i<count; i++)
{
CFDictionaryRef dictionary = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(desc, i));
quint32 id;
CFNumberGetValue(static_cast<CFNumberRef>(CFDictionaryGetValue(dictionary, kCGWindowNumber)),
kCFNumberSInt32Type, &id);
allWindows << id;
}
CFRelease(desc);
CFRelease(windows);
do
{
QThread::currentThread()->msleep(100);
windows = CGWindowListCreate(option, id);
desc = CGWindowListCreateDescriptionFromArray(windows);
count = CFArrayGetCount(desc);
for(CFIndex i=0; i<count; i++)
{
CFDictionaryRef dictionary = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(desc, i));
quint32 id;
CFNumberGetValue(static_cast<CFNumberRef>(CFDictionaryGetValue(dictionary, kCGWindowNumber)),
kCFNumberSInt32Type, &id);
if(allWindows.contains(id))
continue;
QString name = QString::fromCFString(static_cast<CFStringRef>(
CFDictionaryGetValue(dictionary, kCGWindowOwnerName)));
qCDebug(actOp) << static_cast<CFNumberRef>(CFDictionaryGetValue(dictionary, kCGWindowNumber))
<< " "
<< static_cast<CFStringRef>(CFDictionaryGetValue(dictionary, kCGWindowOwnerName));
if(name.contains(m_browserTitle))
{
m_currentWindow->mac = id;
qCDebug(actOp) << "window is finded";
return;
}
else
allWindows << id;
}
CFRelease(desc);
CFRelease(windows);
}
while(m_currentWindow->mac == 0 && !timer.hasExpired(maxTime));
I tried looking in the direction of QWindow, but could not find how to get an NSView for QWindow::fromWinId when all I have is the CGWindowID of the target window.
Tell me how you would implement such a task. Thanks.
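One direction that might work (I have not verified it in this exact setup) is the Accessibility API: the CGWindowList dictionary also contains the owner PID (kCGWindowOwnerPID), and with an AXUIElement for that application the position and size of its windows can be set, provided the user has granted accessibility permission to the process. Mapping a specific CGWindowID to an AX window is the awkward part; the sketch below simply takes the application's first window, and the function name is illustrative only.
#include <ApplicationServices/ApplicationServices.h>

// Move the first AX window of the application owning 'pid' to 'target'.
// Requires the accessibility permission (System Preferences > Security & Privacy).
bool moveFirstWindowOfApp(pid_t pid, CGPoint target)
{
    AXUIElementRef app = AXUIElementCreateApplication(pid);
    if (!app)
        return false;

    CFArrayRef windows = nullptr;
    bool moved = false;
    if (AXUIElementCopyAttributeValue(app, kAXWindowsAttribute,
                                      (CFTypeRef *)&windows) == kAXErrorSuccess
        && windows && CFArrayGetCount(windows) > 0)
    {
        AXUIElementRef window = (AXUIElementRef)CFArrayGetValueAtIndex(windows, 0);
        AXValueRef position = AXValueCreate(kAXValueTypeCGPoint, &target);
        if (position)
        {
            moved = AXUIElementSetAttributeValue(window, kAXPositionAttribute, position)
                    == kAXErrorSuccess;
            CFRelease(position);
        }
    }
    if (windows)
        CFRelease(windows);
    CFRelease(app);
    return moved;
}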

gss_init_sec_context returns "No credentials cache found" (Windows, C++)

I am trying to use gssapi32.dll in my application, but I get an exception when the app starts.
The service name is something like 'HTTP/proxy.domain.com#domain.com' - I saw this name in the Kerberos ticket tools - but I receive "No credentials cache found".
Has anybody already had a similar problem and can help?
Windows 7 (x64)
MSVS C++ 2010 Express
Thank you for your advice, and sorry for my English.
char* cHttp::getNegotiateToken(const char *service, const char *server) {
    char *token = 0;
    OM_uint32 major, minor;
    gss_buffer_desc gss_buffer;
    gss_buffer_desc gss_buffer_user;
    gss_name_t gss_name;
    gss_name_t gss_user_name;
    gss_ctx_id_t gss_context = GSS_C_NO_CONTEXT;
    gss_buffer_desc gss_input_token = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc gss_output_token = GSS_C_EMPTY_BUFFER;
    OM_uint32 req_flags = GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG;
    static gss_OID_desc gss_krb5_mech_oid_desc =
        { 9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" };
    if (!service || !server) {
        Logger.writeLogHead(llError) << "Service and server values cannot be NULL!" << EndOfLine;
        return 0;
    }
    gss_buffer.length = 28; // strlen(service) + strlen(server) + 2;
    gss_buffer.value = malloc(gss_buffer.length);
    gss_buffer_user.length = 26;
    gss_buffer_user.value = malloc(gss_buffer_user.length);
    Logger.writeLogHead(llError) << "service \"" << service << "\" server \"" << server << "\" length " << (int)gss_buffer.length << EndOfLine;
    if (!gss_buffer.value) {
        Logger.writeLogHead(llError) << "malloc failed" << EndOfLine;
        return 0;
    }
    sprintf((char *)gss_buffer.value, "%s", "HTTP/proxy.domain.com#domain.com");
    major = gss_import_name(&minor, &gss_buffer, GSS_C_NT_HOSTBASED_SERVICE, &gss_name);
    free(gss_buffer.value);
    if (major != GSS_S_COMPLETE) {
        logGssError(major, minor, "gss_import_name");
        return 0;
    }
    gss_buffer_desc out_name;
    major = gss_display_name(&minor, gss_name, &out_name, NULL);
    if (major != GSS_S_COMPLETE) {
        logGssError(major, minor, "gss_display_name");
        return 0;
    }
    Logger.writeLogHead(llWarning) << "Service name : " << (const char*)out_name.value << EndOfLine;
    major = gss_init_sec_context(
        &minor,
        GSS_C_NO_CREDENTIAL,
        &gss_context,
        gss_name,
        &gss_krb5_mech_oid_desc,
        req_flags,
        GSS_C_INDEFINITE,
        GSS_C_NO_CHANNEL_BINDINGS,
        &gss_input_token,
        NULL,
        &gss_output_token,
        NULL,
        NULL);
    if (major == GSS_S_NO_CRED) {
        Logger.writeLogHead(llError) << "gss_init_sec_context GSS_S_NO_CRED" << EndOfLine;
    }
    if (major != GSS_S_COMPLETE) {
        logGssError(major, minor, "gss_init_sec_context");
        return 0;
    }
    if (gss_output_token.length == 0) {
        Logger.writeLogHead(llError) << "Token doesn't need to be sent." << EndOfLine;
        return 0;
    }
    // TODO: Need to make SPNEGO token (spnegohelp)
    token = base64_encode((const char *)gss_output_token.value, gss_output_token.length);
    major = gss_delete_sec_context(&minor, &gss_context, GSS_C_NO_BUFFER);
    if (major != GSS_S_COMPLETE) {
        logGssError(major, minor, "gss_delete_sec_context");
        return 0;
    }
    major = gss_release_name(&minor, &gss_name);
    if (major != GSS_S_COMPLETE) {
        logGssError(major, minor, "gss_release_name");
        return 0;
    }
    return token;
}
-- edited
Thank you, friend; I think I will go with the proposed way.
In my case I need to use the SPNEGO mechanism instead of gss_krb5_mech_oid_desc:
static gss_OID_desc gss_spnego_mech_oid_desc =
    { 6, (void *) "\x2b\x06\x01\x05\x05\x02" };
    //{ 9, (void*) "\x06\x06\x2b\x06\x01\x05\x05\x02\xa0" };
gss_OID mMechOID;
mMechOID = &gss_spnego_mech_oid_desc;
Do you really need the GSS-API under Windows? You should rather use SSPI. The GSS-API does not have direct access to the ticket cache in memory. Check my answer here.
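For reference, a minimal sketch of the SSPI route suggested above: acquire an outbound credential for the Negotiate package and request the first token for the SPN. The SPN string is a placeholder, error handling is trimmed, and the program must be linked against secur32.lib.
#define SECURITY_WIN32
#include <windows.h>
#include <security.h>

// Obtain an initial Negotiate (SPNEGO) token for a proxy SPN using SSPI.
bool getNegotiateTokenSspi()
{
    wchar_t package[] = L"Negotiate";
    wchar_t spn[] = L"HTTP/proxy.domain.com";   // placeholder SPN

    CredHandle cred;
    TimeStamp expiry;
    if (AcquireCredentialsHandleW(nullptr, package, SECPKG_CRED_OUTBOUND,
                                  nullptr, nullptr, nullptr, nullptr,
                                  &cred, &expiry) != SEC_E_OK)
        return false;

    SecBuffer outBuf = { 0, SECBUFFER_TOKEN, nullptr };
    SecBufferDesc outDesc = { SECBUFFER_VERSION, 1, &outBuf };
    CtxtHandle ctxt;
    ULONG attrs = 0;
    SECURITY_STATUS status = InitializeSecurityContextW(
        &cred, nullptr, spn,
        ISC_REQ_MUTUAL_AUTH | ISC_REQ_ALLOCATE_MEMORY,
        0, SECURITY_NATIVE_DREP, nullptr, 0,
        &ctxt, &outDesc, &attrs, &expiry);

    bool ok = (status == SEC_E_OK || status == SEC_I_CONTINUE_NEEDED);
    if (ok) {
        // outBuf.pvBuffer / outBuf.cbBuffer hold the token that would be
        // base64-encoded into a "Proxy-Authorization: Negotiate ..." header.
        if (outBuf.pvBuffer)
            FreeContextBuffer(outBuf.pvBuffer);
        DeleteSecurityContext(&ctxt);
    }
    FreeCredentialsHandle(&cred);
    return ok;
}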

Enable display programmatically

I am trying to enable a secondary monitor in C++. What I have seems to try to change the display settings, but nothing actually happens. Can anyone tell me where I am going wrong?
std::wstring devName(L"Intel(R) HD Graphics Family");
std::wstring dispName(L"\\\\.\\DISPLAY3");
DISPLAY_DEVICE theDisplay;
theDisplay.cb = sizeof(theDisplay);
DWORD dev = 0;
while (EnumDisplayDevices(0, dev, &theDisplay, 0))
{
    if (devName.compare(theDisplay.DeviceString) == 0 && dispName.compare(theDisplay.DeviceName) == 0)
    {
        // found the display adapter we're looking for
        if (theDisplay.StateFlags & DISPLAY_DEVICE_ATTACHED_TO_DESKTOP)
        {
            // Display is part of the desktop, turn all other monitors off
            cout << "Display is part of desktop\n";
        }
        else
        {
            // Display is off, turn it on
            DEVMODE dm;
            memset(&dm, 0, sizeof(DEVMODE));
            dm.dmSize = sizeof(DEVMODE);
            dm.dmFields = DM_POSITION;
            dm.dmPosition.x = 3361;
            dm.dmPosition.y = 0;
            dm.dmPelsWidth = 1920;
            dm.dmPelsHeight = 1080;
            LONG ret = ChangeDisplaySettingsEx(theDisplay.DeviceName, &dm, NULL, CDS_UPDATEREGISTRY, NULL);
            if (ret != DISP_CHANGE_SUCCESSFUL)
            {
                cout << "failed";
            }
        }
    }
    dev++;
}
system("pause");
return 0;
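A pattern that is often suggested for attaching a display that is not yet part of the desktop is to stage the change with CDS_UPDATEREGISTRY | CDS_NORESET and then apply all staged changes with a second, parameterless call. A sketch of that variant (the device name and position are placeholders):
// Attach \\.\DISPLAY3 to the desktop by giving it a position, without resetting yet.
DEVMODE dm = {};
dm.dmSize = sizeof(DEVMODE);
dm.dmFields = DM_POSITION;          // a desktop position is what attaches the monitor
dm.dmPosition.x = 1920;             // placeholder: to the right of a 1920-wide primary
dm.dmPosition.y = 0;

LONG ret = ChangeDisplaySettingsEx(L"\\\\.\\DISPLAY3", &dm, NULL,
                                   CDS_UPDATEREGISTRY | CDS_NORESET, NULL);
if (ret == DISP_CHANGE_SUCCESSFUL)
{
    // A second call with all NULLs applies every change staged with CDS_NORESET.
    ret = ChangeDisplaySettingsEx(NULL, NULL, NULL, 0, NULL);
}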