Windows: How to control the adaptive brightness? - c++

I want to control (on/off) adaptive brightness programmatically, like the Power Options setting
Enable adaptive brightness
via an API on Windows 10. I guess the API is included in the Power Management Functions: https://msdn.microsoft.com/en-us/library/windows/desktop/aa373163(v=vs.85).aspx
But I cannot find the right function. Could someone give me some suggestions or directions? Thanks a lot!

I found the solution myself, and I'm sharing it for anyone who needs it!
// Link with PowrProf.lib (declarations are in powersetting.h / powrprof.h).
GUID *guidScheme = nullptr;
DWORD dwResult = ERROR_SUCCESS;          // the Power* functions return a DWORD, not a bool
DWORD enableFunction = 0x1;              // set 0x0 to disable

// Get the currently active power scheme.
dwResult = PowerGetActiveScheme(NULL, &guidScheme);
if (dwResult != ERROR_SUCCESS) {
    // error message
}

// GUID of the "Display" subgroup and of the "Enable adaptive brightness" setting.
GUID guidSubVideo    = { 0x7516b95f, 0xf776, 0x4464, { 0x8c, 0x53, 0x06, 0x16, 0x7f, 0x40, 0xcc, 0x99 } };
GUID guidAdaptBright = { 0xfbd9aa66, 0x9553, 0x4097, { 0xba, 0x44, 0xed, 0x6e, 0x9d, 0x65, 0xea, 0xb8 } };

// Write the on-battery (DC) value for the setting.
dwResult = PowerWriteDCValueIndex(NULL, guidScheme, &guidSubVideo, &guidAdaptBright, enableFunction);
if (dwResult != ERROR_SUCCESS) {
    // error message
}
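Note that PowerWriteDCValueIndex only sets the on-battery (DC) value. If you also want the plugged-in (AC) value changed and the new setting applied immediately, the same Power Management API provides PowerWriteACValueIndex and PowerSetActiveScheme. A minimal sketch (untested), reusing the variables from the snippet above:

// Also write the plugged-in (AC) value, then re-apply the scheme so the change takes effect.
dwResult = PowerWriteACValueIndex(NULL, guidScheme, &guidSubVideo, &guidAdaptBright, enableFunction);
if (dwResult != ERROR_SUCCESS) {
    // error message
}
dwResult = PowerSetActiveScheme(NULL, guidScheme);
if (dwResult != ERROR_SUCCESS) {
    // error message
}
LocalFree(guidScheme); // PowerGetActiveScheme allocates the GUID; free it when done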

Why does BCryptDeriveKeyPBKDF2 return STATUS_INVALID_PARAMETER?

#include <iostream>
#include <Windows.h>
#include <bcrypt.h>
#include <ntstatus.h>
#include <string>
#include <vector>
#pragma comment(lib, "bcrypt.lib")

void test_status(NTSTATUS return_val)
{
    switch (return_val)
    {
    case STATUS_SUCCESS:
        std::cout << "STATUS_SUCCESS\n";
        break;
    case STATUS_BUFFER_TOO_SMALL:
        std::cout << "STATUS_BUFFER_TOO_SMALL\n";
        break;
    case STATUS_INVALID_HANDLE:
        std::cout << "STATUS_INVALID_HANDLE\n";
        break;
    case STATUS_INVALID_PARAMETER:
        std::cout << "STATUS_INVALID_PARAMETER\n";
        break;
    case STATUS_NOT_SUPPORTED:
        std::cout << "STATUS_NOT_SUPPORTED\n";
        break;
    }
}

int main()
{
    BCRYPT_ALG_HANDLE phAlgorithm = nullptr;
    BCRYPT_HASH_HANDLE phHash = nullptr;
    LPCWSTR pszAlgId = TEXT("XTS-AES");
    LPCWSTR pszImplementation = TEXT("Advanced Encryption Standard");
    PUCHAR pbHashObject = nullptr;
    std::vector<BYTE> pbSalt = { 0x77, 0x1f, 0x5b, 0x30, 0x2c, 0xf7, 0xc5, 0x31,
                                 0xa9, 0x86, 0x46, 0x52, 0xe2, 0xff, 0x4a, 0x17,
                                 0xab, 0xd0, 0x02, 0xdd, 0x4f, 0xb0, 0x2f, 0x71,
                                 0x0f, 0xe5, 0xa8, 0x1a, 0xfe, 0xe7, 0x9c, 0x6b }; // 771f5b302cf7c531a9864652e2ff4a17abd002dd4fb02f710fe5a81afee79c6b

    NTSTATUS status = BCryptOpenAlgorithmProvider(
        &phAlgorithm,
        BCRYPT_PBKDF2_ALGORITHM,
        NULL,
        NULL
    );
    test_status(status);

    PUCHAR pbOutput = nullptr;
    ULONG pcbResult = NULL;
    status = BCryptGetProperty(
        phAlgorithm,
        BCRYPT_OBJECT_LENGTH,
        pbOutput,
        sizeof(DWORD),
        &pcbResult,
        NULL
    );
    test_status(status);

    PUCHAR DerivedKey = nullptr;
    DWORD cbDerivedKey = NULL;
    std::string pbPassword = "MySecretPass";
    std::string DerivedKeyString;
    status = BCryptDeriveKeyPBKDF2(
        phAlgorithm,
        (BYTE*)pbPassword.data(),
        pbPassword.length(),
        (BYTE*)pbSalt.data(),
        sizeof(BYTE),
        10000,
        (PUCHAR)DerivedKeyString.c_str(),
        64,
        0);
    test_status(status);

    status = BCryptCloseAlgorithmProvider(
        phAlgorithm,
        NULL
    );
    test_status(status);
}
This code should derive the key from the password, but at the key-derivation step I get STATUS_INVALID_PARAMETER. Why is this happening and how do I fix it?
I tried changing the algorithm and passing the parameters in different ways, but none of that helped or it produced the same STATUS_INVALID_PARAMETER error.
There are two errors:
First, PBKDF2 is built on an HMAC hash computation, so BCryptOpenAlgorithmProvider must be opened with a hash algorithm identifier and the HMAC flag.
In BCryptOpenAlgorithmProvider, replace BCRYPT_PBKDF2_ALGORITHM with BCRYPT_SHA256_ALGORITHM and pass BCRYPT_ALG_HANDLE_HMAC_FLAG in the fourth (dwFlags) parameter.
The documentation for these two parameters is as follows:
https://learn.microsoft.com/en-us/windows/win32/seccng/cng-algorithm-identifiers
https://learn.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptopenalgorithmprovider
Second, do not use a std::string for the pbDerivedKey output parameter, or it will corrupt memory.
Replace std::string DerivedKeyString with BYTE DerivedKeyString[64].
In the call to BCryptDeriveKeyPBKDF2, replace (PUCHAR)DerivedKeyString.c_str() with DerivedKeyString.
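Putting the two fixes together, here is a minimal sketch of the corrected calls (untested; the rest of the program stays as in the question). Note that the question also passes sizeof(BYTE), i.e. 1, as the salt length; pbSalt.size() is presumably what was intended.

NTSTATUS status = BCryptOpenAlgorithmProvider(
    &phAlgorithm,
    BCRYPT_SHA256_ALGORITHM,       // a hash algorithm, not BCRYPT_PBKDF2_ALGORITHM
    NULL,
    BCRYPT_ALG_HANDLE_HMAC_FLAG);  // PBKDF2 needs an HMAC-capable handle
test_status(status);

BYTE DerivedKeyString[64] = {};    // a real output buffer instead of std::string
status = BCryptDeriveKeyPBKDF2(
    phAlgorithm,
    (PUCHAR)pbPassword.data(),
    (ULONG)pbPassword.length(),
    pbSalt.data(),
    (ULONG)pbSalt.size(),          // full salt length (the question passed sizeof(BYTE) == 1)
    10000,
    DerivedKeyString,
    sizeof(DerivedKeyString),
    0);
test_status(status);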

Missing something when decompressing HTTP gzipped response

I have recently been setting up various testing environments, and in this case I need to read and decode a gzip response from an HTTP server. I know what I have so far works, as I have tested it with Wireshark and hardcoded data as outlined below. My question is: what is wrong with how I am handling the gzipped data from an HTTP server?
Here is what I'm using:
From this thread http://www.qtcentre.org/threads/30031-qUncompress-data-from-gzip I am using the gzipDecompress function with the data provided, and it works.
QByteArray gzipDecompress( QByteArray compressData )
{
    // Hardcoded sample data
    const char dat[40] = {
        0x1F, 0x8B, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xAA, 0x2E, 0x2E, 0x49, 0x2C, 0x29,
        0x2D, 0xB6, 0x4A, 0x4B, 0xCC, 0x29, 0x4E, 0xAD, 0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x03, 0x00,
        0x2A, 0x63, 0x18, 0xC5, 0x0E, 0x00, 0x00, 0x00};
    compressData = QByteArray::fromRawData( dat, 40);

    // decompress GZIP data
    // strip header and trailer
    compressData.remove(0, 10);
    compressData.chop(12);

    const int buffersize = 16384;
    quint8 buffer[buffersize];

    z_stream cmpr_stream;
    cmpr_stream.next_in = (unsigned char *)compressData.data();
    cmpr_stream.avail_in = compressData.size();
    cmpr_stream.total_in = 0;
    cmpr_stream.next_out = buffer;
    cmpr_stream.avail_out = buffersize;
    cmpr_stream.total_out = 0;
    cmpr_stream.zalloc = Z_NULL;
    cmpr_stream.zfree = Z_NULL;
    cmpr_stream.opaque = Z_NULL;

    if( inflateInit2(&cmpr_stream, -8 ) != Z_OK) {
        qDebug() << "cmpr_stream error!";
    }

    QByteArray uncompressed;
    do {
        int status = inflate( &cmpr_stream, Z_SYNC_FLUSH );
        if(status == Z_OK || status == Z_STREAM_END) {
            uncompressed.append(QByteArray::fromRawData((char *)buffer, buffersize - cmpr_stream.avail_out));
            cmpr_stream.next_out = buffer;
            cmpr_stream.avail_out = buffersize;
        } else {
            inflateEnd(&cmpr_stream);
        }
        if(status == Z_STREAM_END) {
            inflateEnd(&cmpr_stream);
            break;
        }
    } while(cmpr_stream.avail_out == 0);

    return uncompressed;
}
When the data is hardcoded as in that example, the string is decompressed correctly. However, when I read the response from an HTTP server and store it in a QByteArray, it cannot be uncompressed. I am reading the response as follows, and I can see it works when comparing the results in Wireshark:
//Read that length of encoded data
char EncodedData[ LengthToRead ];
memset( EncodedData, 0, LengthToRead );
recv( socketDesc, EncodedData, LengthToRead, 0 );
EndOfData = true;
//EncodedDataBytes = QByteArray((char*)EncodedData);
EncodedDataBytes = QByteArray::fromRawData(EncodedData, LengthToRead );
I assume I am missing some header or byte order when reading the response, but at the moment I have no idea what. Any help is very welcome!
EDIT: I have been looking at this a little more over the weekend. At the moment I'm trying to test the encoding and decoding of the given hex string, which is "{status:false}" in plain text. I have tried online gzip encoders such as http://www.txtwizard.net/compression, but they return ASCII text that does not match the hex string in the code above. When I use PHP's gzcompress("{status:false}", 1) it gives me values that I cannot copy/paste to test, since they are not ASCII. So I am wondering if there is any standard reference for gzip encoding/decoding? It is definitely not some special encoding, since both Firefox and Wireshark can decode the packets, but my software cannot.
So the issue was with my gzip function; the correct function I found via this link: uncompress error when using zlib.
As mentioned above by Cornstalks, the inflateInit2 call needs to take MAX_WBITS+16 as its window-bits argument; I think that was the issue. If anybody knows any libraries or plugins to handle this, please post them here! I am surprised that this has to be coded manually when it is so commonly used by HTTP clients/servers.
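For reference, a minimal sketch of the relevant change inside gzipDecompress (assuming the rest of the function stays as above): with MAX_WBITS+16, zlib parses the gzip header and trailer itself, so the manual stripping is no longer needed.

    // compressData.remove(0, 10);  // not needed: zlib now handles the gzip header
    // compressData.chop(12);       // not needed: zlib now handles the gzip trailer
    if (inflateInit2(&cmpr_stream, MAX_WBITS + 16) != Z_OK) {  // +16 = expect a gzip wrapper
        qDebug() << "cmpr_stream error!";
    }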

Newbie C/C++ struct array pointing to other struct array elements

I stumbled upon a neat trick that I've started using to write binary files into (flash) memory on Arduino/ESP8266, using a library someone posted to one of the ESP8266 forums. I've been trying a number of ways to expand upon it. Most recently I've been minifying and compressing my web content files and compiling them in with sketches on my ESP.
The script he posted first uses the output of the Unix command xxd -i to write the binary file into an array of hex. The second part uses a struct to combine the file details with a pointer to the array, which you can reference from the code whenever the server gets a URI request that matches an entry in the array.
What I would like to do is create a second array of these things with 'default' tools already pre-compressed, so I don't have to go through it every time and/or modify my script that builds the header file any time I create a new server sketch. Basically, compress and xxd stuff like jquery.js, bootstrap.css and bootstrap.js (or, more often, their smaller counterparts like backbone or barekit).
Currently once a file is dumped to hex, for example:
FLASH_ARRAY(uint8_t, __js__simple_js,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4b, 0x2b,
0xcd, 0x4b, 0x2e, 0xc9, 0xcc, 0xcf, 0x53, 0xc8, 0xad, 0xf4, 0xcf, 0xf3,
0xc9, 0x4f, 0x4c, 0xd1, 0xd0, 0xac, 0x4e, 0xcc, 0x49, 0x2d, 0x2a, 0xd1,
0x50, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0x00, 0xa2, 0xc4, 0x3c, 0x85, 0xfc,
0xbc, 0x1c, 0xa0, 0x94, 0x42, 0x6e, 0x6a, 0x71, 0x71, 0x62, 0x7a, 0xaa,
0x92, 0xa6, 0x75, 0x51, 0x6a, 0x49, 0x69, 0x51, 0x9e, 0x42, 0x49, 0x51,
0x69, 0x6a, 0x2d, 0x00, 0x16, 0xa6, 0x25, 0xe5, 0x43, 0x00, 0x00, 0x00);
The existing code added them all at once along with the struct definition:
struct t_websitefiles {
    const char* path;
    const char* mime;
    const unsigned int len;
    const char* enc;
    const _FLASH_ARRAY<uint8_t>* content;
} files[] = {
    {
        .path = "/js/simple.js",
        .mime = "application/javascript",
        .len = 84,
        .enc = "gzip",
        .content = &__js__simple_js,
    },
    {
        /* details for file2 ... */
    },
    {
        /* details for file3 ... */
    }
};
Building an array of the structs representing the various files.
My questions amount to newbie questions about the language syntax. Can I assume that I can use an identically populated struct in place of what is inside the curly brackets? For example, if I had a second header file with my regularly used libraries, and jquery was compressed into an array called 'default_files' at position 3, could I use something like &default_files[3] in place of { /* definitions */ }? Such as:
struct t_websitefiles {
    const char* path;
    const char* mime;
    const unsigned int len;
    const char* enc;
    const _FLASH_ARRAY<uint8_t>* content;
} files[] = {
    {
        .path = "/js/simple.js",
        .mime = "application/javascript",
        .len = 84,
        .enc = "gzip",
        .content = &__js__simple_js,
    },
    &default_files[1],
    &default_files[3],
    {
        .path = "/text/readme.txt",
        .mime = "text/text",
        .len = 112,
        .enc = "",
        .content = &__text__readme_txt,
    }
};
(I'm guessing based on what I've learned thus far it needs the & in front of it?)
I also assume that, rather than writing the struct definition out twice, I could do it as a typedef and then just do:
t_websitefiles files[] = { {/*definitions*/},{ /*stuffs*/ } };
Is that correct? Any help is appreciated. It's hard sometimes to find details on the syntax for specific use cases in documentation covering the basics. (I would just try it, but I'm not conveniently in front of a compiler at the moment, nor do I have direct access to my codebase, but I want to work on it later when I might not have access to the net.)
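For concreteness, a minimal sketch of the typedef form the question is describing (untested; _FLASH_ARRAY and the field list are taken from the code above):

typedef struct {
    const char* path;
    const char* mime;
    const unsigned int len;
    const char* enc;
    const _FLASH_ARRAY<uint8_t>* content;
} t_websitefiles;

// The struct body is written once; both arrays reuse the typedef.
t_websitefiles default_files[] = { /* pre-compressed library entries */ };
t_websitefiles files[]         = { /* per-sketch entries */ };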
From what I understand, you want to create an array of structs that contains both compound literals and items from another array, all defined in header information.
I don't think this is possible - or at least not in the exact way you suggest. I'll try and provide an alternative though.
Can I assume that I can use an identically populated struct in place of what is inside the curly brackets?
No - you're mixing your types. 'files' is defined as an array of 'struct t_websitefiles'.
The code
struct t_websitefiles files[] = {
    ...
    &default_files[1],
    ...
}
won't compile, as you are mixing your types. files is defined as an array of struct t_websitefiles, but &default_files[1] is a pointer. C makes a distinction between pointers and non-pointers; they are separate types.
The obvious option that I can see to do what you want is to use pointers. This will allow you to define everything in header information.
#include <stdio.h>

struct t_websitefiles default_files[] = {
    ....
};

struct t_websitefiles files[] = {
    ....
};

// An array of pointers
struct t_websitefiles *files_combined[] = {
    &files[0],
    &files[1],
    &default_files[0],
    // Or whatever values you want here
    ...
};

// Example main, just iterates through the combined list of files
int main(int argc, char* argv[]) {
    int i;
    // Divide by the size of an element (a pointer), since files_combined holds pointers
    int files_combined_len = sizeof(files_combined) / sizeof(files_combined[0]);
    for (i = 0; i < files_combined_len; i++) {
        printf("File %s\r\n", files_combined[i]->path);
    }
    return 0;
}
Hope this helps.

SizeOfImage member causing program crash

I'm trying to look for BYTE patterns in programs, but for some reason, when I assign the value from MINFO.SizeOfImage to ModuleSize, it causes the program I injected the DLL into to crash.
DWORD FindPattern(const BYTE* Pattern, SIZE_T PatternSize)
{
    DWORD ModuleBase = (DWORD)GetModuleHandle(NULL);
    DWORD ModuleSize = 0;
    MODULEINFO MINFO;

    HANDLE hProcess = OpenProcess(PROCESS_ALL_ACCESS, 0, GetCurrentProcessId());
    if (hProcess)
    {
        GetModuleInformation(hProcess, GetModuleHandle(NULL), &MINFO, sizeof(MODULEINFO));
        CloseHandle(hProcess);
        ModuleSize = MINFO.SizeOfImage;
    }
    else
        return 0;

    for (int i = 0; i < ModuleSize; i++)
    {
        if (memcmp((void*)(ModuleBase + i), Pattern, PatternSize) == 0)
            return ModuleBase + i;
    }
    return 0;
}
Your code worked just fine when I compiled and injected it. I even tested it against the current FindPattern I am using and didn't get any errors. Here's my code and yours:
bool Compare(const BYTE* pData, const BYTE* bMask, const char* szMask)
{
    for (; *szMask; ++szMask, ++pData, ++bMask)
        if (*szMask == 'x' && *pData != *bMask) return 0;
    return (*szMask) == NULL;
}

DWORD FindPattern(DWORD dwAddress, DWORD dwLen, BYTE *bMask, char *szMask)
{
    for (DWORD i = 0; i < dwLen; i++)
        if (Compare((BYTE*)(dwAddress + i), bMask, szMask)) return (DWORD)(dwAddress + i);
    return 0;
}
And then when I run this through it:
uint8 DecryptNeedle[] = {0x56, 0x8B, 0x74, 0x24, 0x08, 0x89, 0x71, 0x10,
0x0F, 0xB6, 0x16, 0x0F, 0xB6, 0x46, 0x01, 0x03,
0xC2, 0x8B, 0x51, 0x28, 0x25, 0xFF, 0x00, 0x00,
0x00, 0x89, 0x41, 0x04, 0x0F, 0xB6, 0x04, 0x10};
char DecryptMask[] = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
DWORD addrDecrypt = FindPattern(dwModuleStartAddr, 0xA000, DecryptNeedle, DecryptMask);
DWORD decrypt2 = YourFindPattern(DecryptNeedle, 32);
The output is identical in both.
I would double-check your injection code and check what else could be causing the error. Also, do a quick error check:
if (hProcess)
{
    if (!GetModuleInformation(hProcess, GetModuleHandle(NULL), &MINFO, sizeof(MODULEINFO)))
    {
        // error
    }
    CloseHandle(hProcess);
    ModuleSize = MINFO.SizeOfImage;
}

Invalid Algorithm Specified CryptoAPI

I am trying to decrypt something using 128-bit AES decryption. When I attempt to call CryptDecrypt, I get an error stating "Invalid Algorithm Specified". I get the same problem when using the library posted here: http://www.codeproject.com/KB/security/WinAES.aspx
What can cause this error?
I am using CryptoAPI on Vista 64-bit with Visual Studio 2008. I checked in the registry and the AES library is there...
EDIT
BYTE*& encryptedData /* get data length */

HCRYPTPROV cryptoHandle = NULL;
HCRYPTKEY aesKeyHandle = NULL;

hr = InitWinCrypt(cryptoHandle);
if (FAILED(hr))
{
    return hr;
}

AesKeyOffering aesKey = { { PLAINTEXTKEYBLOB, CUR_BLOB_VERSION, 0, CALG_AES_128 }, 16,
                          { 0xFF, 0x00, 0xFF, 0x1C, 0x1D, 0x1E, 0x03, 0x04, 0x05, 0x0F, 0x20, 0x21, 0xAD, 0xAF, 0xA4, 0x04 } };

if (CryptImportKey(cryptoHandle, (CONST BYTE*)&aesKey, sizeof(AesKeyOffering), NULL, 0, &aesKeyHandle) == FALSE)
{
    // DO error
    return HRESULT_FROM_WIN32(GetLastError());
}

// CryptSetKeyParam takes a pointer to the IV bytes
BYTE iv[16] = { 0xFF, 0x00, 0xFF, 0x1C, 0x1D, 0x1E, 0x03, 0x04, 0x05, 0x0F, 0x20, 0x21, 0xAD, 0xAF, 0xA4, 0x04 };
if (CryptSetKeyParam(aesKeyHandle, KP_IV, iv, 0) == FALSE)
{
    return HRESULT_FROM_WIN32(GetLastError());
}

BYTE blah2 = CRYPT_MODE_CBC;
// set block mode
if (CryptSetKeyParam(aesKeyHandle, KP_MODE, &blah2, 0) == FALSE)
{
    //
    return HRESULT_FROM_WIN32(GetLastError());
}

DWORD lol = dataLength / 16 + 1;
DWORD lol2 = lol * 16;
if (CryptDecrypt(aesKeyHandle, 0, TRUE, 0, encryptedData, &lol2) == FALSE)
{
    return HRESULT_FROM_WIN32(GetLastError());
}
InitWinCrypt function:
if (!CryptAcquireContextW(&cryptoHandle, NULL, L"Microsoft Enhanced RSA and AES Cryptographic Provider", PROV_RSA_AES, CRYPT_VERIFYCONTEXT))
{
    if (!CryptAcquireContextW(&cryptoHandle, NULL, L"Microsoft Enhanced RSA and AES Cryptographic Provider", PROV_RSA_AES, 0))
    {
        return HRESULT_FROM_WIN32(GetLastError());
    }
    else
    {
        return S_OK;
    }
}
return S_OK;
AesKeyOffering struct:
struct AesKeyOffering
{
    BLOBHEADER m_Header;
    DWORD m_KeyLength;
    BYTE Key[16];
};
EDIT2
After rebooting my computer and removing the CBC chunk, I am now getting "Bad Data" errors. The data decrypts fine in C#, but I need to do this using WinCrypt.
Are you passing cryptoHandle by reference to InitWinCrypt? If not, your code
if(!CryptAcquireContextW(&cryptoHandle, ...
would only modify InitWinCrypt's copy of cryptoHandle.
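To make that concrete, a minimal sketch of a by-reference signature for the question's InitWinCrypt (the function name and provider string are the question's own; the body is abbreviated):

HRESULT InitWinCrypt(HCRYPTPROV& cryptoHandle)   // '&' so the caller's handle is filled in
{
    if (!CryptAcquireContextW(&cryptoHandle, NULL,
                              L"Microsoft Enhanced RSA and AES Cryptographic Provider",
                              PROV_RSA_AES, CRYPT_VERIFYCONTEXT))
    {
        // fall back / error handling as in the question
    }
    return S_OK;
}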
EDIT: Given that it does, try getting rid of the CryptSetKeyParam call which sets CRYPT_MODE_CBC