I'm implementing the following Microsoft example of launching a file via a concurrency task:
int main(int argc, char* argv[])
{
    auto installFolder = Windows::ApplicationModel::Package::Current->InstalledLocation;
    concurrency::task<Windows::Storage::StorageFile^> getFileOperation(installFolder->GetFileAsync("images\\test.png"));
    getFileOperation.then([](Windows::Storage::StorageFile^ file)
    {
        if (file != nullptr)
        {
            // Set the option to show the picker
            auto launchOptions = ref new Windows::System::LauncherOptions();
            launchOptions->DisplayApplicationPicker = true;

            // Launch the retrieved file
            concurrency::task<bool> launchFileOperation(Windows::System::Launcher::LaunchFileAsync(file, launchOptions));
            launchFileOperation.then([](bool success)
            {
                if (success)
                {
                    // File launched
                }
                else
                {
                    // File launch failed
                }
            });
        }
        else
        {
            // Could not find file
        }
    });
}
I don't know why, but this code doesn't open a file.
If I use .wait() on the .then() continuations, it throws an invalid_operation exception: "Illegal to wait on a task in a Windows Runtime STA."
How can I use concurrency without .wait() if I run on an STA?
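For reference, here is a minimal sketch (my adaptation, not part of the Microsoft sample) of the same flow written as one chained continuation; because nothing blocks, no .wait() is ever needed on the STA:

#include <ppltasks.h> // concurrency::create_task / task::then

void LaunchFileWithoutWaiting()
{
    auto installFolder = Windows::ApplicationModel::Package::Current->InstalledLocation;

    // Each .then runs only when the previous async operation completes,
    // so the STA thread is never blocked by wait()/get().
    concurrency::create_task(installFolder->GetFileAsync("images\\test.png"))
        .then([](Windows::Storage::StorageFile^ file)
    {
        // (null check from the original sample omitted for brevity)
        auto launchOptions = ref new Windows::System::LauncherOptions();
        launchOptions->DisplayApplicationPicker = true;
        // Returning the inner task keeps the chain flat instead of nesting .then calls.
        return concurrency::create_task(Windows::System::Launcher::LaunchFileAsync(file, launchOptions));
    })
        .then([](bool success)
    {
        // React to success/failure here instead of waiting on the task.
    });
}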
I wrote a small test app to use the Boost interprocess message_queue to send data between two processes. That all worked correctly, and I was able to print the data I sent.
I moved my test code into my main project, and now the main project is not waking from its receives. The main project runs as SYSTEM, so I tracked down one error and saw that the message_queue was not checking the same memory location for some reason. I defined BOOST_INTERPROCESS_SHARED_DIR_PATH, and then the main project was able to open the queue created by the test app. But when the test app sends, the main project never wakes from its receive. The main project should be running as SYSTEM and the test app as a user, but I figured that since they share the memory location it should work correctly?
If I open the queue in the test app again, it wakes and receives all of the messages right away. Am I missing something, or is this a limitation of the Boost message_queue?
The code from the test app:
MessageQueue::MessageQueue(int, boost::interprocess::permissions perm) :
    mq(boost::interprocess::create_only, "TestChannel", 100, sizeof(QueueData), perm)
{
}

MessageQueue::MessageQueue(bool) :
    mq(boost::interprocess::open_only, "TestChannel")
{
}

MessageQueue::~MessageQueue()
{
    int num = mq.get_num_msg();
    wprintf(_T("sent: %d\n"), num);
    boost::interprocess::message_queue::remove("TestChannel");
}

void MessageQueue::SetCommand(int i)
{
    QueueData qd;
    qd.fakeInfo = i;
    qd.exit = false;
    CoCreateGuid(&qd.msgGuid);
    mq.send(&qd, sizeof(qd), 0);

    OLECHAR* uidOleStr;
    if (StringFromCLSID(qd.msgGuid, &uidOleStr) != S_OK)
        throw std::runtime_error("Unknown error occurred when trying to convert a GUID to string!");

    // Copy the OLE string into a CString and then free it immediately, so we don't have to worry about it.
    CString guidString(uidOleStr);
    CoTaskMemFree(uidOleStr);
    wprintf(_T("sent: %d, %s\n"), qd.fakeInfo, guidString);
}

void MessageQueue::WaitForCommand()
{
    while (true)
    {
        QueueData qd;
        size_t size;
        unsigned int pri; // receive() takes the priority as unsigned int, not size_t
        mq.receive(&qd, sizeof(qd), size, pri);
        if (qd.fakeInfo == 2)
            sendExit();

        OLECHAR* uidOleStr;
        if (StringFromCLSID(qd.msgGuid, &uidOleStr) != S_OK)
            throw std::runtime_error("Unknown error occurred when trying to convert a GUID to string!");

        // Copy the OLE string into a CString and then free it immediately, so we don't have to worry about it.
        CString guidString(uidOleStr);
        CoTaskMemFree(uidOleStr);
        wprintf(_T("Received: %d, %s\n"), qd.fakeInfo, guidString);
        if (qd.exit)
            break;
    }
}

void MessageQueue::sendExit()
{
    QueueData qd;
    qd.fakeInfo = 0; // avoid reading uninitialized fakeInfo on the receiving side
    qd.exit = true;
    mq.send(&qd, sizeof(qd), 0);
    wprintf(_T("Sent Exit\n"));
}
.h file:
#pragma once

// Must be defined before including message_queue.hpp so that both processes
// agree on the shared directory.
#define BOOST_INTERPROCESS_SHARED_DIR_PATH "C:\\Program Files (x86)\\Users"

#include <boost/interprocess/ipc/message_queue.hpp>
#include <boost/interprocess/permissions.hpp>

class QueueData
{
public:
    int fakeInfo;
    GUID msgGuid;
    bool exit;
};

class MessageQueue
{
public:
    MessageQueue(int, boost::interprocess::permissions perm);
    MessageQueue(bool);
    ~MessageQueue();

    boost::interprocess::message_queue mq;

    void SetCommand(int);
    void WaitForCommand();
    void sendExit();
};
The test app's driver code (I have been using breakpoints):
void waiter()
{
    MessageQueue mq(true);
    mq.WaitForCommand();
}

void sender()
{
    boost::interprocess::permissions perm;
    perm.set_unrestricted();
    try
    {
        boost::interprocess::message_queue::remove("TestChannel");
        MessageQueue mq(2, perm);
        mq.SetCommand(1);
        mq.SetCommand(1);
        mq.SetCommand(2);
    }
    catch (const boost::interprocess::interprocess_exception& e) // catch by reference
    {
    }
}

int main() {
    waiter();
    sender();
}
The code from the main project (to test, I also had it use the WaitForCommand code above, and still nothing):
void MemoryChannel::WaitForCmd( const std::function< void ( MemoryChannelCmd cmd, const char *pData, TCHAR *tempPath, GUID msgGuid ) > func )
{
    QueueData mcObject;
    size_t size;
    unsigned int pri; // priority must be unsigned int for timed_receive()
    while (true)
    {
        // Note: the bool result of timed_receive is ignored here, so after a
        // 30-second timeout mcObject is left unchanged.
        pMCD->dataQueue.timed_receive(&mcObject, sizeof(mcObject), size, pri, boost::posix_time::microsec_clock::universal_time() + boost::posix_time::milliseconds(30000));
        size_t num = pMCD->dataQueue.get_num_msg();
        //func(MemoryChannelCmd::MEMORY_CHANNEL_RUN_SQL_SELECT, "", _T(""), mcObject.msgGuid);
    }
}
It doesn't seem to be a code issue, since it works in the test app but not in the main project, even though they share code.
I am at a loss.
For this kind of inter-process communication, the higher-privilege process must start first. Only then can lower-privilege processes connect.
In your example the SYSTEM process should create the queue, the test app then connects, and then they can communicate. This is why it works when you restart the test app.
It's designed this way to prevent lower-privilege users from accessing a higher-privilege user's memory without permission.
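A minimal sketch of that start order, reusing the names from the question (the SYSTEM-side process must run its part first):

// In the SYSTEM (higher-privilege) process -- must run first:
boost::interprocess::permissions perm;
perm.set_unrestricted();
boost::interprocess::message_queue::remove("TestChannel");
boost::interprocess::message_queue serviceQueue(
    boost::interprocess::create_only, "TestChannel",
    100, sizeof(QueueData), perm);

// In the user-level (lower-privilege) process -- started afterwards:
boost::interprocess::message_queue userQueue(
    boost::interprocess::open_only, "TestChannel");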
I'm having difficulty getting my custom exception handler to work.
This is the Enclave code:
#include "Enclave_DivideZero_t.h"
#include "sgx_trts_exception.h"
#include "sgx_trts.h"
#include <string>
static char buf[200] = "Handler not called";
int divide_by_zero_handler(sgx_exception_info_t* info) {
buf[0] = '1';
return EXCEPTION_CONTINUE_EXECUTION;
}
void Enclave_DivideByZero() {
Ocall_printf(buf);
if (sgx_register_exception_handler(1, divide_by_zero_handler) == NULL) {
Ocall_printf("register failed");
} else {
Ocall_printf("register success");
}
int a(1);
int b(3/(a-a));
(void) a;
(void) b;
Ocall_printf(buf);
}
We used buf as an indication of whether the handler has actually been executed. However, the output is this:
Enclave created!
[Ocall printf] - Handler not called
[Ocall printf] - register success
[Ocall printf] - Handler not called <-- should be: "1andler not called" ("1andler" instead of "Handler")
Also, here is the App code (i.e. the untrusted code)
#include "stdafx.h"
#include "sgx_urts.h"
#include "Enclave_DivideZero_u.h"
#define ENCLAVE_FILE _T("Enclave_DivideZero.signed.dll")
sgx_status_t createEnclave(sgx_enclave_id_t *eid) {
sgx_status_t ret = SGX_SUCCESS;
sgx_launch_token_t token = {0};
int updated = 0;
ret = sgx_create_enclave(ENCLAVE_FILE, SGX_DEBUG_FLAG, &token, &updated, eid, NULL);
return ret;
}
void Ocall_printf( char* str) {
printf("[Ocall printf] - %s\n", str);
}
int _tmain(int argc, _TCHAR* argv[]) {
sgx_enclave_id_t eid;
sgx_status_t res = createEnclave(&eid);
if (res != SGX_SUCCESS) {
printf("App: error-, failed to create enclave.\n");
return -1;
} else {
printf("Enclave created!\n");
}
Enclave_DivideByZero(eid);
return 0;
}
The problem: as the output indicates, the handler is registered successfully, but it is never executed.
1. Why doesn't the registered handler run?
2. I tried to put the registration in the App code, and for that I had to add the handler to the EDL. The problem there is passing the sgx_exception_info_t* info parameter, since it is not clear which flags it needs (i.e. [in, .. <'flags'>]). So, is it correct to define it inside the enclave?
P.S. I ran the code in Prerelease mode.
[EDIT] I documented the project and the code of my SGX experiments here.
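For reference, the OCALL used above would be declared in the project's EDL roughly like this (a sketch following the standard EDL syntax; [in, string] makes the edge routine copy the NUL-terminated string out of the enclave):

enclave {
    trusted {
        public void Enclave_DivideByZero();
    };
    untrusted {
        void Ocall_printf([in, string] char* str);
    };
};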
So the question is a bit old now, but I got stuck at the same place, so maybe someone can still use my insight.
In trts_veh.cpp, where it is checked whether the exception was handled, a comment says that the "instruction triggering the exception will be executed again". This leads to the exception-handling loop you noticed yourself.
However, you can increase the rip yourself, as it is stored in the sgx_exception_info_t* passed to the handler, by using "info->cpu_context.rip += 2;". Adding this inside your exception handler should do the trick.
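Concretely, the handler from the question would become something like this sketch (the += 2 is an assumption about the encoded length of the faulting divide instruction; check the actual length in a disassembler):

int divide_by_zero_handler(sgx_exception_info_t* info) {
    buf[0] = '1';
    // Skip past the faulting instruction so it is not re-executed,
    // which would otherwise raise the same exception over and over.
    // The +2 assumes a 2-byte instruction encoding; adjust for your build.
    info->cpu_context.rip += 2;
    return EXCEPTION_CONTINUE_EXECUTION;
}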
OK, I've been researching this issue for a few days now, so let me go over what I know so far, which leads me to believe this might be an issue with NVIDIA's driver and not my code.
Basically my game starts stuttering after running a few seconds (random frames take 70ms instead of 16ms, on a fairly regular pattern). This ONLY happens if a setting called "Threaded Optimization" is enabled in the NVIDIA control panel (latest drivers, Windows 10). Unfortunately this setting is enabled by default, and I'd rather not make people tweak their settings to get an enjoyable experience.
The game is not CPU or GPU intensive (2ms a frame without vsync on). It's not calling any OpenGL functions that need to synchronize data, and it's not streaming any buffers or reading data back from the GPU or anything. It's about the simplest possible renderer.
The problem was always there; it only started becoming noticeable when I added fmod for audio. fmod is not the cause of this (more on that later in the post).
Trying to debug the problem with NVIDIA Nsight made the problem go away: "Start Collecting Data" instantly causes the stuttering to stop. No dice there.
In the profiler, a lot of CPU time is spent in nvoglv32.dll. This thread only spawns if Threaded Optimization is on. I suspected a synchronization issue, so I debugged with Visual Studio's Concurrency Visualizer.
A-HA!
Investigating these blocks of CPU time on the NVIDIA thread, the earliest named function I can get in their call stack is CreateToolhelp32Snapshot, followed by a lot of time spent in Thread32Next. I had noticed Thread32Next in the profiler when looking at CPU times earlier, so this does seem like I'm on the right track.
So it looks like the NVIDIA driver is periodically grabbing a snapshot of the whole process for some reason. What could the reason possibly be, why is it doing this, and how do I stop it?
This also explains why the problem started becoming noticeable once I added fmod: the driver grabs info for all of the process's threads, and fmod spawns a lot of threads.
Any help? Is this just a bug in NVIDIA's driver, or is there something I can do to fix it other than telling people to disable Threaded "Optimization"?
Edit 1: The same issue occurs with current NVIDIA drivers on my laptop too. So I'm not crazy.
Edit 2: The same issue occurs on version 362 (the previous major version) of NVIDIA's driver.
... or is there something I can do to fix it other than telling people to disable Threaded "Optimization"?
Yes.
You can create a custom "Application Profile" for your game using NVAPI and disable the "Threaded Optimization" setting in it.
There is a PDF file on NVIDIA's site with some help and code examples regarding NVAPI usage.
In order to see and manage all your NVIDIA profiles, I recommend using NVIDIA Inspector. It is more convenient than the default NVIDIA Control Panel.
Also, here is my code example, which creates an "Application Profile" with "Threaded Optimization" disabled:
#include <stdlib.h>
#include <stdio.h>
#include <nvapi.h>
#include <NvApiDriverSettings.h>

const wchar_t* profileName      = L"Your Profile Name";
const wchar_t* appName          = L"YourGame.exe";
const wchar_t* appFriendlyName  = L"Your Game Casual Name";
const bool threadedOptimization = false;

void CheckError(NvAPI_Status status)
{
    if (status == NVAPI_OK)
        return;

    NvAPI_ShortString szDesc = {0};
    NvAPI_GetErrorMessage(status, szDesc);
    printf("NVAPI error: %s\n", szDesc);
    exit(-1);
}

void SetNVUstring(NvAPI_UnicodeString& nvStr, const wchar_t* wcStr)
{
    for (int i = 0; i < NVAPI_UNICODE_STRING_MAX; i++)
        nvStr[i] = 0;

    int i = 0;
    while (wcStr[i] != 0)
    {
        nvStr[i] = wcStr[i];
        i++;
    }
}

int main(int argc, char* argv[])
{
    NvAPI_Status status;
    NvDRSSessionHandle hSession;

    status = NvAPI_Initialize();
    CheckError(status);

    status = NvAPI_DRS_CreateSession(&hSession);
    CheckError(status);

    status = NvAPI_DRS_LoadSettings(hSession);
    CheckError(status);

    // Fill Profile Info
    NVDRS_PROFILE profileInfo = {0}; // zero-initialize so unset fields don't hold garbage
    profileInfo.version = NVDRS_PROFILE_VER;
    profileInfo.isPredefined = 0;
    SetNVUstring(profileInfo.profileName, profileName);

    // Create Profile
    NvDRSProfileHandle hProfile;
    status = NvAPI_DRS_CreateProfile(hSession, &profileInfo, &hProfile);
    CheckError(status);

    // Fill Application Info
    NVDRS_APPLICATION app = {0};
    app.version = NVDRS_APPLICATION_VER_V1;
    app.isPredefined = 0;
    SetNVUstring(app.appName, appName);
    SetNVUstring(app.userFriendlyName, appFriendlyName);
    SetNVUstring(app.launcher, L"");
    SetNVUstring(app.fileInFolder, L"");

    // Create Application
    status = NvAPI_DRS_CreateApplication(hSession, hProfile, &app);
    CheckError(status);

    // Fill Setting Info
    NVDRS_SETTING setting = {0};
    setting.version = NVDRS_SETTING_VER;
    setting.settingId = OGL_THREAD_CONTROL_ID;
    setting.settingType = NVDRS_DWORD_TYPE;
    setting.settingLocation = NVDRS_CURRENT_PROFILE_LOCATION;
    setting.isCurrentPredefined = 0;
    setting.isPredefinedValid = 0;
    setting.u32CurrentValue    = threadedOptimization ? OGL_THREAD_CONTROL_ENABLE : OGL_THREAD_CONTROL_DISABLE;
    setting.u32PredefinedValue = threadedOptimization ? OGL_THREAD_CONTROL_ENABLE : OGL_THREAD_CONTROL_DISABLE;

    // Set Setting
    status = NvAPI_DRS_SetSetting(hSession, hProfile, &setting);
    CheckError(status);

    // Apply (or save) our changes to the system
    status = NvAPI_DRS_SaveSettings(hSession);
    CheckError(status);

    printf("Success.\n");

    NvAPI_DRS_DestroySession(hSession);
    return 0;
}
Thanks to subGlitch for his answer. Based on that proposal, here is a safer variant, which lets you cache the current thread optimization setting, change it, and restore it afterwards.
The code is below:
#include <stdlib.h>
#include <stdio.h>
#include <nvapi.h>
#include <NvApiDriverSettings.h>

enum NvThreadOptimization {
    NV_THREAD_OPTIMIZATION_AUTO       = 0,
    NV_THREAD_OPTIMIZATION_ENABLE     = 1,
    NV_THREAD_OPTIMIZATION_DISABLE    = 2,
    NV_THREAD_OPTIMIZATION_NO_SUPPORT = 3
};

bool NvAPI_OK_Verify(NvAPI_Status status)
{
    if (status == NVAPI_OK)
        return true;

    NvAPI_ShortString szDesc = {0};
    NvAPI_GetErrorMessage(status, szDesc);
    printf("NVAPI error: %s\n", szDesc);
    return false;
}

NvThreadOptimization GetNVidiaThreadOptimization()
{
    NvAPI_Status status;
    NvDRSSessionHandle hSession;
    NvThreadOptimization threadOptimization = NV_THREAD_OPTIMIZATION_NO_SUPPORT;

    status = NvAPI_Initialize();
    if (!NvAPI_OK_Verify(status))
        return threadOptimization;

    status = NvAPI_DRS_CreateSession(&hSession);
    if (!NvAPI_OK_Verify(status))
        return threadOptimization;

    status = NvAPI_DRS_LoadSettings(hSession);
    if (!NvAPI_OK_Verify(status))
    {
        NvAPI_DRS_DestroySession(hSession);
        return threadOptimization;
    }

    NvDRSProfileHandle hProfile;
    status = NvAPI_DRS_GetBaseProfile(hSession, &hProfile);
    if (!NvAPI_OK_Verify(status))
    {
        NvAPI_DRS_DestroySession(hSession);
        return threadOptimization;
    }

    NVDRS_SETTING originalSetting = {0};
    originalSetting.version = NVDRS_SETTING_VER;
    status = NvAPI_DRS_GetSetting(hSession, hProfile, OGL_THREAD_CONTROL_ID, &originalSetting);
    if (NvAPI_OK_Verify(status))
    {
        threadOptimization = (NvThreadOptimization)originalSetting.u32CurrentValue;
    }

    NvAPI_DRS_DestroySession(hSession);
    return threadOptimization;
}

void SetNVidiaThreadOptimization(NvThreadOptimization threadedOptimization)
{
    NvAPI_Status status;
    NvDRSSessionHandle hSession;

    if (threadedOptimization == NV_THREAD_OPTIMIZATION_NO_SUPPORT)
        return;

    status = NvAPI_Initialize();
    if (!NvAPI_OK_Verify(status))
        return;

    status = NvAPI_DRS_CreateSession(&hSession);
    if (!NvAPI_OK_Verify(status))
        return;

    status = NvAPI_DRS_LoadSettings(hSession);
    if (!NvAPI_OK_Verify(status))
    {
        NvAPI_DRS_DestroySession(hSession);
        return;
    }

    NvDRSProfileHandle hProfile;
    status = NvAPI_DRS_GetBaseProfile(hSession, &hProfile);
    if (!NvAPI_OK_Verify(status))
    {
        NvAPI_DRS_DestroySession(hSession);
        return;
    }

    NVDRS_SETTING setting = {0};
    setting.version = NVDRS_SETTING_VER;
    setting.settingId = OGL_THREAD_CONTROL_ID;
    setting.settingType = NVDRS_DWORD_TYPE;
    setting.u32CurrentValue = (EValues_OGL_THREAD_CONTROL)threadedOptimization;

    status = NvAPI_DRS_SetSetting(hSession, hProfile, &setting);
    if (!NvAPI_OK_Verify(status))
    {
        NvAPI_DRS_DestroySession(hSession);
        return;
    }

    status = NvAPI_DRS_SaveSettings(hSession);
    NvAPI_OK_Verify(status);

    NvAPI_DRS_DestroySession(hSession);
}
Based on the two interfaces (Get/Set) above, you can save the original setting and restore it when your application exits. That way, disabling thread optimization only affects your own application.
static NvThreadOptimization s_OriginalNVidiaThreadOptimization = NV_THREAD_OPTIMIZATION_NO_SUPPORT;

// Set
s_OriginalNVidiaThreadOptimization = GetNVidiaThreadOptimization();
if (   s_OriginalNVidiaThreadOptimization != NV_THREAD_OPTIMIZATION_NO_SUPPORT
    && s_OriginalNVidiaThreadOptimization != NV_THREAD_OPTIMIZATION_DISABLE)
{
    SetNVidiaThreadOptimization(NV_THREAD_OPTIMIZATION_DISABLE);
}

// Restore
if (   s_OriginalNVidiaThreadOptimization != NV_THREAD_OPTIMIZATION_NO_SUPPORT
    && s_OriginalNVidiaThreadOptimization != NV_THREAD_OPTIMIZATION_DISABLE)
{
    SetNVidiaThreadOptimization(s_OriginalNVidiaThreadOptimization);
}
I hate to state the obvious, but I feel like it needs to be said.
Threaded optimization is notorious for causing stuttering in many games, even those that take advantage of multithreading. Unless your application works well with the threaded optimization setting, the only logical answer is to tell your users to disable it. If users are stubborn and don't want to do that, that's their fault.
The only bug in recent memory I can think of is that older versions of the NVIDIA driver caused applications with threaded optimization running in Wine to crash, but that's unrelated to the stuttering issue you describe.
Building off of subGlitch's answer, the following checks whether an application profile already exists and, if so, updates the existing profile instead of creating a new one. It is also encapsulated in a function which can be called and which bypasses the logic if the NVIDIA API is not found on the system (AMD/Intel users), or if an issue is encountered which prevents modifying the profile:
#include <iostream>
#include <nvapi.h>
#include <NvApiDriverSettings.h>

const wchar_t* profileName      = L"Application for testing nvidia api";
const wchar_t* appName          = L"nvapi.exe";
const wchar_t* appFriendlyName  = L"Nvidia api test";
const bool threadedOptimization = false;

bool nvapiStatusOk(NvAPI_Status status)
{
    if (status != NVAPI_OK)
    {
        // will need to not print these in prod, just return false
        // full list of codes in nvapi_lite_common.h line 249
        std::cout << "Status Code:" << status << std::endl;
        NvAPI_ShortString szDesc = { 0 };
        NvAPI_GetErrorMessage(status, szDesc);
        printf("NVAPI Error: %s\n", szDesc);
        return false;
    }
    return true;
}

void setNVUstring(NvAPI_UnicodeString& nvStr, const wchar_t* wcStr)
{
    for (int i = 0; i < NVAPI_UNICODE_STRING_MAX; i++)
        nvStr[i] = 0;

    int i = 0;
    while (wcStr[i] != 0)
    {
        nvStr[i] = wcStr[i];
        i++;
    }
}

void initNvidiaApplicationProfile()
{
    NvAPI_Status status;

    // if status does not equal NVAPI_OK (0) after initialization,
    // either the system does not use an nvidia gpu, or something went
    // so wrong that we're unable to use the nvidia api...therefore do nothing
    /*
    if (!nvapiStatusOk(NvAPI_Initialize()))
        return;
    */
    // for debugging; use ^ in prod
    if (!nvapiStatusOk(NvAPI_Initialize()))
    {
        std::cout << "Unable to initialize Nvidia api" << std::endl;
        return;
    }
    else
    {
        std::cout << "Nvidia api initialized successfully" << std::endl;
    }

    // initialize session
    NvDRSSessionHandle hSession;
    if (!nvapiStatusOk(NvAPI_DRS_CreateSession(&hSession)))
        return;

    // load settings
    if (!nvapiStatusOk(NvAPI_DRS_LoadSettings(hSession)))
        return;

    // check if application already exists
    NvDRSProfileHandle hProfile;
    NvAPI_UnicodeString nvAppName;
    setNVUstring(nvAppName, appName);

    NVDRS_APPLICATION app = {0};
    app.version = NVDRS_APPLICATION_VER_V1;

    // documentation states this will return ::NVAPI_APPLICATION_NOT_FOUND, however I cannot
    // find where that is defined anywhere in the headers...so not sure what's going to happen with this?
    //
    // This is returning NVAPI_EXECUTABLE_NOT_FOUND, which might be what it's supposed to return when it can't
    // find an existing application, and the documentation is just outdated?
    status = NvAPI_DRS_FindApplicationByName(hSession, nvAppName, &hProfile, &app);
    if (!nvapiStatusOk(status))
    {
        // if status does not equal NVAPI_EXECUTABLE_NOT_FOUND, then something bad happened and we should not proceed
        if (status != NVAPI_EXECUTABLE_NOT_FOUND)
        {
            NvAPI_Unload();
            return;
        }

        // create application as it does not already exist

        // Fill Profile Info
        NVDRS_PROFILE profileInfo = {0};
        profileInfo.version = NVDRS_PROFILE_VER;
        profileInfo.isPredefined = 0;
        setNVUstring(profileInfo.profileName, profileName);

        // Create Profile
        //NvDRSProfileHandle hProfile;
        if (!nvapiStatusOk(NvAPI_DRS_CreateProfile(hSession, &profileInfo, &hProfile)))
        {
            NvAPI_Unload();
            return;
        }

        // Fill Application Info, can't re-use app variable for some reason
        NVDRS_APPLICATION app2 = {0};
        app2.version = NVDRS_APPLICATION_VER_V1;
        app2.isPredefined = 0;
        setNVUstring(app2.appName, appName);
        setNVUstring(app2.userFriendlyName, appFriendlyName);
        setNVUstring(app2.launcher, L"");
        setNVUstring(app2.fileInFolder, L"");

        // Create Application
        if (!nvapiStatusOk(NvAPI_DRS_CreateApplication(hSession, hProfile, &app2)))
        {
            NvAPI_Unload();
            return;
        }
    }

    // update profile settings
    NVDRS_SETTING setting = {0};
    setting.version = NVDRS_SETTING_VER;
    setting.settingId = OGL_THREAD_CONTROL_ID;
    setting.settingType = NVDRS_DWORD_TYPE;
    setting.settingLocation = NVDRS_CURRENT_PROFILE_LOCATION;
    setting.isCurrentPredefined = 0;
    setting.isPredefinedValid = 0;
    setting.u32CurrentValue    = threadedOptimization ? OGL_THREAD_CONTROL_ENABLE : OGL_THREAD_CONTROL_DISABLE;
    setting.u32PredefinedValue = threadedOptimization ? OGL_THREAD_CONTROL_ENABLE : OGL_THREAD_CONTROL_DISABLE;

    // set the setting
    if (!nvapiStatusOk(NvAPI_DRS_SetSetting(hSession, hProfile, &setting)))
    {
        NvAPI_Unload();
        return;
    }

    // save changes
    if (!nvapiStatusOk(NvAPI_DRS_SaveSettings(hSession)))
    {
        NvAPI_Unload();
        return;
    }

    // disable in prod
    std::cout << "Nvidia application profile updated successfully" << std::endl;

    NvAPI_DRS_DestroySession(hSession);
    // unload the api as we're done with it
    NvAPI_Unload();
}

int main()
{
    // if building for anything other than windows, we'll need to not call this AND have
    // some preprocessor logic to not include any of the api code. No linux love apparently...so
    // that's going to be a thing we'll have to figure out down the road -_-
    initNvidiaApplicationProfile();

    std::cin.get();
    return 0;
}
I'm working on a project which has to mount Samba shares on Ubuntu. This project will be used by non-root users. Right now I'm using an application called gvfs-mount, because it doesn't require the root password for mounting.
My application runs that executable with specific command-line arguments, and it works, but error checking is difficult. I'm using a library called pstreams to launch gvfs-mount and to write to and read from its stdin/stdout, but I can't predict when the application will write something to stdout. That is a problem, because if I want to read something from gvfs-mount's output but the application hasn't written anything, the host application will block, waiting for something that will never come.
I know that I could use the mount function from sys/mount.h, but that would require root privileges. My question is: is there any API, library or tutorial about this topic in C++?
Edit:
As filmor mentioned, I had a look at gvfs-mount's source code and converted it to C++. Here is my very basic code:
#include <gtkmm.h>
#include <stdexcept>
#include <iostream>

Glib::RefPtr<Gio::File> file;
Glib::RefPtr<Glib::MainLoop> main_loop;

void on_async_ready(Glib::RefPtr<Gio::AsyncResult>& result)
{
    file->mount_enclosing_volume_finish(result);
    main_loop->quit();
}

int main()
{
    Gio::init();
    Glib::init();
    main_loop = Glib::MainLoop::create(false);
    file = Gio::File::create_for_commandline_arg("smb://192.168.1.3/Memory\\ core");
    Glib::RefPtr<Gio::MountOperation> mount_operation = Gio::MountOperation::create();
    mount_operation->set_domain("domain");
    mount_operation->set_username("user");
    mount_operation->set_password("password");
    try
    {
        file->mount_enclosing_volume(mount_operation, &on_async_ready);
    }
    catch (const Glib::Error& ex)
    {
        std::cerr << ex.what() << std::endl;
    }
    main_loop->run();
    return 0;
}
The problem is that when I run this code as a normal user, I get this output:
(process:5816): glibmm-CRITICAL **:
unhandled exception (type Glib::Error) in signal handler:
domain: g-io-error-quark
code : 0
what : Failed to mount Windows share: Operation not permitted
When I run it with sudo, I get this:
(process:5862): glibmm-CRITICAL **:
unhandled exception (type Glib::Error) in signal handler:
domain: g-io-error-quark
code : 15
what : volume doesn't implement mount
Any suggestions for solving this? The code should work with normal user privileges.
Edit 2:
I updated the source code, because there was an error in the URI. I found that if I run gvfs-mount with sudo, I get the same error message as in my application, so my idea is that something is wrong with permissions. My username belongs to the fuse group, if that matters.
#include <gtkmm.h>
#include <iostream>

Glib::RefPtr<Gio::File> file;
Glib::RefPtr<Glib::MainLoop> main_loop;

void on_async_ready(Glib::RefPtr<Gio::AsyncResult>& result)
{
    try
    {
        file->mount_enclosing_volume_finish(result);
    }
    catch (const Glib::Error& ex)
    {
        std::cerr << ex.what() << std::endl;
    }
    main_loop->quit();
}

int main()
{
    Gio::init();
    Glib::init();
    main_loop = Glib::MainLoop::create(false);
    file = Gio::File::create_for_commandline_arg("smb://192.168.1.3/Memory core");
    Glib::RefPtr<Gio::MountOperation> mount_operation = Gio::MountOperation::create();
    mount_operation->set_domain("domain");
    mount_operation->set_username("user");
    mount_operation->set_password("password");
    file->mount_enclosing_volume(mount_operation, &on_async_ready);
    main_loop->run();
    return 0;
}
I was able to resolve this problem in my Rust application, which at first showed the same behaviour as reported in this question.
The solution was to register a callback for the ask-password signal, use this code path to fill in the credentials, and then, most importantly, call reply on the mount operation with the Handled flag.
PoC in Rust attached; it should transfer easily to C++, too:
use gio::prelude::*;
use glib::{self, clone};
use futures::prelude::*;
use gio::{AskPasswordFlags, MountMountFlags, MountOperation, MountOperationResult};

// read_file taken from https://github.com/gtk-rs/gtk-rs-core/blob/master/examples/gio_futures_await/main.rs#L29
async fn read_file(file: gio::File) -> Result<(), String> {
    // Try to open the file.
    let strm = file
        .read_future(glib::PRIORITY_DEFAULT)
        .map_err(|err| format!("Failed to open file: {}", err))
        .await?;

    // If opening the file succeeds, we asynchronously loop and
    // read the file in up to 64 byte chunks and re-use the same
    // vec for each read.
    let mut buf = vec![0; 64];
    let mut idx = 0;
    loop {
        let (b, len) = strm
            .read_future(buf, glib::PRIORITY_DEFAULT)
            .map_err(|(_buf, err)| format!("Failed to read from stream: {}", err))
            .await?;

        // Once 0 is returned, we know that we're done with reading, otherwise
        // loop again and read another chunk.
        if len == 0 {
            break;
        }

        buf = b;
        println!("line {}: {:?}", idx, std::str::from_utf8(&buf[0..len]).unwrap());
        idx += 1;
    }

    // Asynchronously close the stream in the end.
    let _ = strm
        .close_future(glib::PRIORITY_DEFAULT)
        .map_err(|err| format!("Failed to close stream: {}", err))
        .await?;

    Ok(())
}

// one could probably also use glib to drive the futures
// but this was more familiar to me
#[tokio::main]
async fn main() {
    env_logger::init();
    let c = glib::MainContext::default();

    let file = gio::File::for_uri("smb://host/users/username/Desktop/test.txt");

    // check whether the surrounding share is already mounted
    let cancellable = gio::Cancellable::new();
    if file.find_enclosing_mount(Some(&cancellable)).is_err() {
        log::info!("Enclosing share not mounted, trying to mount it");

        let mount_op = MountOperation::new();
        mount_op.connect_ask_password(|op, msg, default_user, default_domain, flags| {
            op.set_anonymous(false);
            if flags.contains(AskPasswordFlags::NEED_USERNAME) {
                op.set_username(Some("my-user"));
            }
            if flags.contains(AskPasswordFlags::NEED_PASSWORD) {
                op.set_password(Some("my-password"));
            }
            if flags.contains(AskPasswordFlags::NEED_DOMAIN) {
                op.set_domain(Some(default_domain)); // should not be required, let's see
            }
            // this is the important part!
            op.reply(MountOperationResult::Handled);
        });

        let mount_result = file.mount_enclosing_volume_future(MountMountFlags::empty(), Some(&mount_op));
        let mount_result = c.block_on(mount_result);
        if let Err(err) = mount_result {
            log::error!("Failed to mount: {}", err);
            return;
        }
    }

    let future = async {
        match read_file(file).await {
            Ok(()) => (),
            Err(err) => eprintln!("Got error: {}", err),
        }
    };
    c.block_on(future);
}
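For completeness, here is a rough giomm sketch of the same approach for C++ (untested; the credentials and share URI are placeholders, and the signal/enum names follow the giomm 2.4 API):

#include <giomm.h>
#include <iostream>

Glib::RefPtr<Gio::File> file;
Glib::RefPtr<Glib::MainLoop> main_loop;
Glib::RefPtr<Gio::MountOperation> mount_operation;

// Fill in the credentials when GIO asks for them, then report the request as handled.
void on_ask_password(const Glib::ustring& /*message*/,
                     const Glib::ustring& /*default_user*/,
                     const Glib::ustring& default_domain,
                     Gio::AskPasswordFlags flags)
{
    if (flags & Gio::ASK_PASSWORD_NEED_USERNAME)
        mount_operation->set_username("user");     // placeholder
    if (flags & Gio::ASK_PASSWORD_NEED_PASSWORD)
        mount_operation->set_password("password"); // placeholder
    if (flags & Gio::ASK_PASSWORD_NEED_DOMAIN)
        mount_operation->set_domain(default_domain);
    // The important part: without this reply the mount never proceeds.
    mount_operation->reply(Gio::MOUNT_OPERATION_HANDLED);
}

void on_async_ready(Glib::RefPtr<Gio::AsyncResult>& result)
{
    try
    {
        file->mount_enclosing_volume_finish(result);
    }
    catch (const Glib::Error& ex)
    {
        std::cerr << ex.what() << std::endl;
    }
    main_loop->quit();
}

int main()
{
    Gio::init();
    main_loop = Glib::MainLoop::create(false);
    file = Gio::File::create_for_commandline_arg("smb://192.168.1.3/Memory core"); // placeholder URI
    mount_operation = Gio::MountOperation::create();
    mount_operation->signal_ask_password().connect(sigc::ptr_fun(&on_ask_password));
    file->mount_enclosing_volume(mount_operation, &on_async_ready);
    main_loop->run();
    return 0;
}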
I want to know how I can create a file and append data to it in a C++ addon (.cc) file for Node.js.
I have used the code below to do this, but I'm not able to find the file "data.txt" on my Ubuntu machine. (The reason may be that the code below is not the correct way to create a file, but strangely I haven't received any error or warning at compile time.)
FILE* pFileTXT = fopen("data.txt", "a+"); // note: a relative path is resolved against the process's current working directory
if (pFileTXT != NULL)
{
    const char* c = localReq->strResponse.c_str();
    fprintf(pFileTXT, "%s", c); // pass the data as an argument, never as the format string
    fclose(pFileTXT);
}
Node.js relies on libuv, a C library that handles the I/O (asynchronous or not). This allows you to use the event loop.
You'd be interested in this free online book/introduction to libuv: http://nikhilm.github.com/uvbook/index.html
Specifically, there is a chapter dedicated to reading/writing files.
// Note: this uses the pre-1.0 libuv API that the book describes
// (single-argument uv_run, req->errorno, uv_last_error).
#include <stdio.h>
#include <fcntl.h>
#include <uv.h>

static uv_fs_t open_req, write_req, close_req;
static char buffer[] = "data to append"; // placeholder contents

void on_open(uv_fs_t *req);
void on_write(uv_fs_t *req);

int main(int argc, char **argv) {
    // Open the file in write-only and execute the "on_open" callback when it's ready
    uv_fs_open(uv_default_loop(), &open_req, argv[1], O_WRONLY, 0, on_open);
    // Run the event loop.
    uv_run(uv_default_loop());
    return 0;
}

// on_open callback called when the file is opened
void on_open(uv_fs_t *req) {
    if (req->result != -1) {
        // Write the buffer to the file we just opened (req->result is its
        // descriptor), specifying the "on_write" callback as the last argument
        uv_fs_write(uv_default_loop(), &write_req, req->result, buffer, sizeof(buffer) - 1, -1, on_write);
    }
    else {
        fprintf(stderr, "error opening file: %d\n", req->errorno);
    }
    // Don't forget to cleanup
    uv_fs_req_cleanup(req);
}

void on_write(uv_fs_t *req) {
    uv_fs_req_cleanup(req);
    if (req->result < 0) {
        fprintf(stderr, "Write error: %s\n", uv_strerror(uv_last_error(uv_default_loop())));
    }
    else {
        // Close the handle once you're done with it
        uv_fs_close(uv_default_loop(), &close_req, open_req.result, NULL);
    }
}
Spend some time reading the book if you want to write C++ for node.js. It's worth it.