Undefined behavior using gettimeofday - c++

#include <iostream>
#include <sys/time.h>

static long elapsedTime(struct timeval &then)
{
    struct timeval when;
    gettimeofday(&when, NULL);
    long result = (when.tv_sec - then.tv_sec) * 1000;
    result += (when.tv_usec - then.tv_usec) / 1000;
    then = when;
    return (result > 0) ? result : 0;
}

int main()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    long elapsed, everyMinute = 0;
    while (true) {
        elapsed = elapsedTime(now);
        everyMinute += elapsed;
        if (everyMinute % 1000 == 0)
            std::cout << "After: " << everyMinute << std::endl;
    }
    return 0;
}
I am trying to get this loop to print every minute, but gettimeofday gives me unexpected behavior. For example, adding the line std::cout << "Before: " << everyMinute << std::endl; before the if makes it work, but without it I just get zeros. Here is the snippet, but it is better to paste it into your own compiler.
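The likely cause of the zeros: the loop body runs far more often than once per millisecond, so elapsedTime() usually returns 0, everyMinute stays at 0, and 0 % 1000 == 0 is true on every iteration, which prints zeros continuously; the extra std::cout presumably slows each iteration enough for non-zero values to accumulate. A modulo test also only fires when the accumulated total lands exactly on a multiple of 1000. Below is a minimal sketch of one way to print once per minute by comparing against a threshold instead; the 60000 ms constant and the accumulatedMs name are illustrative, not from the original code.

#include <iostream>
#include <sys/time.h>

static long elapsedTime(struct timeval &then)
{
    struct timeval when;
    gettimeofday(&when, NULL);
    long result = (when.tv_sec - then.tv_sec) * 1000;   // whole seconds -> ms
    result += (when.tv_usec - then.tv_usec) / 1000;     // microseconds -> ms
    then = when;
    return (result > 0) ? result : 0;
}

int main()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    long accumulatedMs = 0;                  // illustrative name, not from the question
    while (true) {
        accumulatedMs += elapsedTime(now);   // a short sleep here would avoid burning a full core
        if (accumulatedMs >= 60000) {        // fire once a full minute has elapsed
            std::cout << "After: " << accumulatedMs << std::endl;
            accumulatedMs -= 60000;          // keep the remainder for the next minute
        }
    }
    return 0;
}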


C++ program crashes when attempting to call get() on a future stored in a vector

I am currently working on a project that requires me to store a vector of futures so I can measure the execution time accurately. However, when I call
posterTotal += posterDurationFutures[i].get();
the program just crashes with no error message. Wrapping the line in a try/catch block also does not help, as it doesn't seem to be throwing an exception.
Here is my code (please bear in mind that this is a work in progress):
#include <iostream>
#include <ctime>
#include <chrono>
#include <thread>
#include <future>
#include <queue>
#include "TCPClient.h"
#include "ThreadPool.h"
#include "RequestGenerator.h"
#include "ResponseVerifier.h"
#include "Storage.h"
#define DEFAULT_PORT 12345
double readRequest(TCPClient client, int threadIndex, double timeDurationSecs);
double postRequest(TCPClient client, int threadIndex, double timeDurationSecs);
string sendViaClient(TCPClient client, string request);
unsigned int readRequests = 0;
unsigned int postRequests = 0;
Storage* db = new Storage();
ResponseVerifier* responseVerifier = new ResponseVerifier();
RequestGenerator* requestGenerator = new RequestGenerator();
int main(int argc, char **argv)
{
    // Default parameters
    unsigned int posterCount = 5;
    unsigned int readerCount = 0;
    double timeDurationSecs = 10;
    bool throttle = false;
    string serverIp = "127.0.0.1";

    // Validate the parameters
    if (argc != 6) {
        std::cout << "\nUsage (required parameters): server_IP number_of_poster_threads number_of_reader_threads time_duration throttle(0|1)\n\n";
        std::cout << "server_IP - IP of the server\n";
        std::cout << "number_of_poster_threads - number of threads performing POST operations\n";
        std::cout << "number_of_reader_threads - number of threads performing READ operations\n";
        std::cout << "time_duration - duration of test execution in seconds\n";
        std::cout << "throttle(0|1) - 0: do not throttle message speed\n";
        std::cout << "\t\t1: throttle message speed\n\n";
        std::cout << "\nDefault Parameters:\n";
        std::cout << "\tserver_IP - " << serverIp << "\n";
        std::cout << "\tnumber_of_poster_threads - " << posterCount << "\n";
        std::cout << "\tnumber_of_reader_threads - " << readerCount << "\n";
        std::cout << "\ttime_duration - " << timeDurationSecs << "s\n";
        std::cout << "\tthrottle - " << (throttle ? "true" : "false") << "\n\n";
        std::cout << "Enter dev mode using default parameters?\n";
        system("pause");
    }
    else
    {
        serverIp = argv[1];
        posterCount = (int)argv[2];
        readerCount = (int)argv[3];
        timeDurationSecs = (int)argv[4];
        throttle = (int)argv[5];
    }

    //std::queue<future<bool>> posterVerificationQueue;
    //std::queue<bool> readerVerificationQueue;
    ThreadPool posterPool(posterCount);
    //ThreadPool readerPool(readerCount);
    //vector<thread> posterThreads;
    //vector<thread> readerThreads;
    vector<future<double>> posterDurationFutures;

    TCPClient client(serverIp, DEFAULT_PORT);
    client.OpenConnection();

    chrono::high_resolution_clock::time_point endTime;
    chrono::high_resolution_clock::time_point startTime = chrono::high_resolution_clock::now();

    for (int i = 0; i < posterCount; i++)
    {
        //posterThreads.emplace_back(postRequest, client, i, timeDurationSecs);
        //std::future<double> durationFut = std::async(std::launch::async, postRequest, client, i, timeDurationSecs);
        //posterDurationFutures.push_back(async(launch::async, postRequest, client, i, timeDurationSecs));
        posterDurationFutures.push_back(posterPool.enqueue(postRequest, client, i, timeDurationSecs));
    }

    double posterTotal = 0;
    for (int i = 0; i < posterDurationFutures.size(); i++)
    {
        bool valid = posterDurationFutures[i].valid();
        //posterDurationFutures[i].wait();
        posterTotal += posterDurationFutures[i].get();
    }
    cout << posterTotal;

    for (int i = 0; i < readerCount; i++)
    {
        //readerThreads.emplace_back();
    }

    client.CloseConnection();

    delete db;
    delete responseVerifier;
    delete requestGenerator;

    system("pause");
    return 0;
}
inline string sendViaClient(TCPClient client, string request)
{
    return client.send(request);
}

double postRequest(TCPClient client, int threadIndex, double timeDurationSecs)
{
    int threadPostCount = 0;
    chrono::high_resolution_clock::time_point endTime;
    chrono::high_resolution_clock::time_point startTime = chrono::high_resolution_clock::now();
    do
    {
        string request = requestGenerator->generateWriteRequest();
        string response = client.send(request);
        bool isValidResponse = db->addPosterValue(request, response);
        endTime = chrono::high_resolution_clock::now();
        postRequests++;
        threadPostCount++;
    } while (chrono::duration_cast<chrono::duration<double>>(endTime - startTime).count() < timeDurationSecs);
    double totalRunTime = (endTime - startTime).count();
    double posterRequestsPerSecond = threadPostCount / totalRunTime;
    std::cout << "Thread: " << threadIndex << endl;
    std::cout << "Average post requests per second: " << posterRequestsPerSecond << endl;
    return totalRunTime;
}

double readRequest(TCPClient client, int threadIndex, double timeDurationSecs)
{
    return 0.0;
}
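One thing worth noting, separate from the crash itself: in the argument-parsing branch, casts like posterCount = (int)argv[2]; store the pointer value of the argument string (truncated to int), not the number it spells, so the counts and duration become garbage whenever the program is run with arguments. Below is a minimal sketch of parsing those arguments with std::stoi/std::stod; the Options struct and parseOptions helper are illustrative names, not part of the original project.

#include <iostream>
#include <string>

// Hypothetical helper for illustration: parse the numeric command-line
// arguments instead of casting the char* pointers to int.
struct Options {
    std::string serverIp;
    unsigned int posterCount;
    unsigned int readerCount;
    double timeDurationSecs;
    bool throttle;
};

static Options parseOptions(char **argv)
{
    Options opt;
    opt.serverIp         = argv[1];
    opt.posterCount      = std::stoi(argv[2]);        // "5"  -> 5
    opt.readerCount      = std::stoi(argv[3]);
    opt.timeDurationSecs = std::stod(argv[4]);        // "10" -> 10.0
    opt.throttle         = std::stoi(argv[5]) != 0;   // "0"/"1" -> false/true
    return opt;
}

int main(int argc, char **argv)
{
    if (argc != 6) {
        std::cerr << "expected 5 arguments\n";
        return 1;
    }
    Options opt = parseOptions(argv);
    std::cout << opt.serverIp << " " << opt.posterCount << " " << opt.timeDurationSecs << "\n";
    return 0;
}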

C++ thread request

I'm new to C++. In my application, there is a method getOnlineStatus():
int getOnlineStatus(int num);
This method comes from a third-party DLL; it can't be modified.
I call this method to check each number's status, like this:
int num = 123456;
for (int i = 0; i < 10000000; i++) {
    num = num + 1;
    int nRet = getOnlineStatus(num);
    if (nRet > 0) {
        cout << num << "status online" << endl;
    }
    else if (nRet == 0) {
        cout << num << "status offline" << endl;
    }
    else {
        cout << num << "check fail" << endl;
    }
}
But every call takes 2 seconds to return nRet, so if I check lots of numbers it takes a very long time.
I also tried std::async, but it doesn't help: the results still come back one by one, 2 seconds each.
int num = 123456;
for (int i = 0; i < 10000000; i++) {
    num = num + 1;
    future<int> fuRes = std::async(std::launch::async, getOnlineStatus, num);
    int result = fuRes.get();
    if (result > 0) {
        cout << num << "status online" << endl;
    }
    else if (result == 0) {
        cout << num << "status offline" << endl;
    }
    else {
        cout << num << "check fail" << endl;
    }
}
Is there any way to open multiple threads to make it show results faster?
This largely depends on your third-party DLL: does it even support requests from multiple threads? And if it does, do those requests share resources, like the same internet connection or socket?
If you simplify the question and assume that getOnlineStatus() just sleeps for 2 seconds, then yes, you can benefit greatly from issuing multiple requests on different threads and waiting in parallel.
Here is how you can set up a reasonable number of threads to share the workload:
#include <iostream>
#include <vector>
#include <thread>
#include <chrono>
#include <cstdlib>   // srand, rand

int status[10'000]{};

int getOnlineStatus(int n) {
    std::this_thread::sleep_for(std::chrono::seconds(1));
    return rand();
}

void getStatus(int low, int high) {
    for (int i = low; i < high; i++) {
        status[i] = getOnlineStatus(i);
    }
}

int main()
{
    srand(0);
    const int count = std::thread::hardware_concurrency();

    auto start = std::chrono::high_resolution_clock::now();
    std::vector<std::thread> threads;
    for (int i = 0, low = 0, high = 10; i < count; ++i, low += 10, high += 10)
        threads.emplace_back(std::thread(getStatus, low, high));
    for (auto& thread : threads)
        thread.join();
    auto stop = std::chrono::high_resolution_clock::now();
    std::cout << count << " threads: " << std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count() << " ms" << std::endl;

    start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < 10 * count; ++i)
        status[i] = getOnlineStatus(i);
    stop = std::chrono::high_resolution_clock::now();
    std::cout << "single thread: " << std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count() << " ms" << std::endl;
}
I get this result:
12 threads: 10075 ms
single thread: 120720 ms
NOTE: if those worker threads really do nothing but wait, you can run many more of them, reducing the total time significantly.
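For completeness: the std::async attempt in the question is still serial because fuRes.get() is called right after launching each task, so every iteration waits for its own result before the next request starts. Below is a minimal sketch of launching a batch of tasks first and only then collecting the results; it assumes the DLL tolerates concurrent calls, and the mock getOnlineStatus, the batch size of 8 and the other constants are illustrative.

#include <algorithm>
#include <chrono>
#include <future>
#include <iostream>
#include <thread>
#include <vector>

// Mock standing in for the DLL call so the sketch compiles on its own;
// replace it with the real getOnlineStatus from the third-party DLL.
int getOnlineStatus(int num)
{
    std::this_thread::sleep_for(std::chrono::seconds(2));
    return num % 3 - 1;   // pretend result: -1, 0 or 1
}

int main()
{
    const int firstNum = 123457;
    const int total = 32;        // how many numbers to check (small, for the demo)
    const int batchSize = 8;     // arbitrary; tune to what the DLL/network can handle

    for (int base = 0; base < total; base += batchSize) {
        std::vector<std::future<int>> futures;
        const int n = std::min(batchSize, total - base);

        // Launch the whole batch before touching any result...
        for (int j = 0; j < n; ++j)
            futures.push_back(std::async(std::launch::async, getOnlineStatus, firstNum + base + j));

        // ...and only then call get(), so the 2-second waits overlap.
        for (int j = 0; j < n; ++j) {
            const int num = firstNum + base + j;
            const int ret = futures[j].get();
            if (ret > 0)
                std::cout << num << " status online\n";
            else if (ret == 0)
                std::cout << num << " status offline\n";
            else
                std::cout << num << " check fail\n";
        }
    }
    return 0;
}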

Correct way to get Windows CPU utilization for multiprocessor

What I want to achieve is to get each CPU's utilization (I have 4 CPU cores, so I expect to get all of them, like in resmon.exe) as well as the total (like in Task Manager).
I have read in some sources that I can get processor utilization by doing some math on the CPU times. I tried to use NtQuerySystemInformation to get the necessary data.
#include <windows.h>
#include <winternl.h>
#include <vector>
#include <iostream>
#include <cstdint>

#pragma comment(lib, "Ntdll.lib")

typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R {
    LARGE_INTEGER IdleTime;
    LARGE_INTEGER KernelTime;
    LARGE_INTEGER UserTime;
    LARGE_INTEGER DpcTime;
    LARGE_INTEGER InterruptTime;
    ULONG InterruptCount;
} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R;

static long long toInteger(LARGE_INTEGER const & integer)
{
#ifdef INT64_MAX // Does the compiler natively support 64-bit integers?
    return integer.QuadPart;
#else
    return (static_cast<long long>(integer.HighPart) << 32) | integer.LowPart;
#endif
}

class CPU
{
public:
    uint64_t prev_idle = 0;
    uint64_t prev_ker = 0;
    uint64_t prev_user = 0;
    uint64_t cur_idle = 0;
    uint64_t cur_ker = 0;
    uint64_t cur_user = 0;

    double get()
    {
        SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R *a = new SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R[4];
        // 4 is the total of CPU (4 cores)
        NtQuerySystemInformation(SystemProcessorPerformanceInformation, a, sizeof(SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R) * 4, NULL);

        prev_idle = cur_idle;
        prev_ker = cur_ker;
        prev_user = cur_user;

        cur_idle = 0;
        cur_ker = 0;
        cur_user = 0;

        // 4 is the total of CPU (4 cores)
        // Sum up the SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R array so I can get the utilization from all of the CPU
        for (int i = 0; i < 4; ++i)
        {
            SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R b = a[i];
            cur_idle += toInteger(b.IdleTime);
            cur_ker += toInteger(b.KernelTime);
            cur_user += toInteger(b.UserTime);
        }

        std::cout << "Cur idle " << cur_idle << '\n';
        std::cout << "Cur ker " << cur_ker << '\n';
        std::cout << "Cur user " << cur_user << '\n';

        uint64_t delta_idle = cur_idle - prev_idle;
        uint64_t delta_kernel = cur_ker - prev_ker;
        uint64_t delta_user = cur_user - prev_user;

        std::cout << "Delta idle " << delta_idle << '\n';
        std::cout << "Delta ker " << delta_kernel << '\n';
        std::cout << "Delta user " << delta_user << '\n';

        uint64_t total_sys = delta_kernel + delta_user;
        uint64_t kernel_total = delta_kernel - delta_idle;

        delete[] a;

        // return (total_sys - delta_idle) * 100.0 / total_sys;
        return (kernel_total + delta_user) * 100.0 / total_sys;
    }
};

int main()
{
    CPU a;
    std::cout << "starting" << '\n';
    while (1)
    {
        std::cout << a.get() << '\n';
        Sleep(1000);
    }
    return 0;
}
And to get an individual CPU's utilization, I don't have to sum all of the CPUs, just pick one element of the SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION_R array.
My questions are:
Am I doing it right? When I compare my program against the CPU utilization shown in Task Manager, it is somewhat different.
Is there any better approach than using NtQuerySystemInformation? (Microsoft mentions that "NtQuerySystemInformation may be altered or unavailable in future versions of Windows. Applications should use the alternate functions listed in this topic.")
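Regarding the second question: one documented alternative is the Performance Data Helper (PDH) API, which reads the "% Processor Time" counters that perfmon shows, both per core ("\Processor(0)\% Processor Time", and so on) and combined ("\Processor(_Total)\% Processor Time"). Below is a minimal sketch for the total value, assuming Windows Vista or later for PdhAddEnglishCounterW; it is not taken from the question's code. Rate counters need two samples, so the first usable value only appears after the second PdhCollectQueryData.

#include <windows.h>
#include <pdh.h>
#include <iostream>

#pragma comment(lib, "pdh.lib")

int main()
{
    PDH_HQUERY query = nullptr;
    PDH_HCOUNTER totalCounter = nullptr;

    // Open a query and add the "% Processor Time" counter for all CPUs combined.
    // Per-core values can be added the same way with "\\Processor(0)\\% Processor Time", etc.
    if (PdhOpenQuery(nullptr, 0, &query) != ERROR_SUCCESS)
        return 1;
    if (PdhAddEnglishCounterW(query, L"\\Processor(_Total)\\% Processor Time", 0, &totalCounter) != ERROR_SUCCESS)
        return 1;

    // Rate counters need two samples; the first collect only primes the counter.
    PdhCollectQueryData(query);

    for (int i = 0; i < 10; ++i)
    {
        Sleep(1000);
        PdhCollectQueryData(query);

        PDH_FMT_COUNTERVALUE value;
        if (PdhGetFormattedCounterValue(totalCounter, PDH_FMT_DOUBLE, nullptr, &value) == ERROR_SUCCESS)
            std::cout << "Total CPU: " << value.doubleValue << " %\n";
    }

    PdhCloseQuery(query);
    return 0;
}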

How to use ctime to mark starting and ending point of the program?

time_t start;
time_t no_infection;
bool time_already_set = 0;
bool infected_or_not = 0;
int not_infected_t = 0;
I have these variables in a struct, and I want to mark the starting and ending points of the objects, then calculate the difference.
void bacteria::set_no_infection() {
    if (infected_or_not == 0)
        no_infection = clock();
}
For the start I have
void bacteria::set_time() {
    if (infected_or_not == 1 && time_already_set != 1) {
        start = clock();
        time_already_set = 1;
    }
}
It seems that the time variables do not change during the program. I test it using these get functions:
double bacteria::get_time() {
    if (infected_or_not == 1)
        return ((clock() - start) / CLOCKS_PER_SEC);
    else
        return -1;
}

int bacteria::get_no_infection() {
    if (infected_or_not = 0)
        return ((clock() - no_infection) / CLOCKS_PER_SEC);
    else
        return -1;
}
In the main program I test it like this:
while (1) {
    for (int i = 0; i < b.size() - 1; i++) {
        bactpop[i].set_no_infection();
        bactpop[i].inf(phagepop[i], bactpop[i], p);
        bactpop[i].kill_the_bacteria(b, i);
        cout << " " << b[i].start << " " << b[i].no_infection << endl;
    }
    cout << p.size() << " " << b.size() << endl;
}
I am not sure, but I guess your functions are called several times per second, and since (clock() - start) / CLOCKS_PER_SEC is an integer division that yields whole seconds, the difference will be 0 whenever the calls happen within the same second.
Use, for example, gettimeofday, which has microsecond resolution, or std::chrono::high_resolution_clock::now() if using C++11 is okay.
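To make the chrono suggestion concrete, here is a minimal sketch using std::chrono::steady_clock (chosen here because it is monotonic; the high_resolution_clock mentioned above works the same way). The Timer struct is illustrative, not part of the bacteria class; the real code would store the time_point as a member.

#include <chrono>
#include <iostream>
#include <thread>

// Illustrative struct; the real code would keep the time_point inside bacteria.
struct Timer {
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

    // Elapsed time in seconds, with sub-second precision preserved.
    double elapsed_seconds() const {
        return std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
    }
};

int main()
{
    Timer t;
    std::this_thread::sleep_for(std::chrono::milliseconds(250));
    std::cout << t.elapsed_seconds() << " s\n";   // prints roughly 0.25
    return 0;
}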

Get time difference in milliseconds in c++

I'm trying to get the time difference between two timestamps. The thread sleeps between the two timestamps, but when I get the difference, it doesn't reflect the time the thread was sleeping. My code is below.
#include <iostream>
#include <locale>
#include <string>
#include <ctime>
#include <sys/time.h>
#include <cstdlib>
#include <unistd.h>
using namespace std;
int timeInMilli();
int main()
{
    timeval t;
    timeval t2;
    gettimeofday(&t, NULL);
    gettimeofday(&t2, NULL);
    std::string buf(20, '\0');
    std::strftime(&buf[0], buf.size(), "%H:%M:%S:", localtime(&t.tv_sec));
    std::string hr = buf.substr(0, 2);
    std::string min = buf.substr(3, 2);
    std::string sec = buf.substr(6, 2);
    /*std::cout << hr << '\n';
    std::cout << min << '\n';
    std::cout << std::atoi(sec.c_str()) << '\n';*/
    int a = timeInMilli();
    usleep(10);
    int b = timeInMilli();
    cout << b - a << endl;
}
int timeInMilli()
{
    timeval t;
    gettimeofday(&t, NULL);
    string buf(20, '\0');
    strftime(&buf[0], buf.size(), "%H:%M:%S:", localtime(&t.tv_sec));
    string str_hr = buf.substr(0, 2);
    string str_min = buf.substr(3, 2);
    string str_sec = buf.substr(6, 2);
    int hr = atoi(str_hr.c_str());
    int min = atoi(str_min.c_str());
    int sec = atoi(str_sec.c_str());
    int milli = t.tv_usec / 1000;
    /*cout << hr << endl;
    cout << min << endl;
    cout << sec << endl;
    cout << milli << endl;*/
    int timeInMilli = (((hr * 60) + min) * 60 + sec) * 1000 + milli;
    return timeInMilli;
}
usleep(10);
means that you pause for 10 µs, not 10 ms, since usleep works in microseconds. Try
usleep(10000);
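As a side note, the hour/minute/second string parsing in timeInMilli() is not needed: the timeval already holds seconds and microseconds, so a millisecond value can be computed directly from its fields. A minimal sketch of that simplification (the function name is kept; the rest is illustrative):

#include <iostream>
#include <sys/time.h>
#include <unistd.h>

// Milliseconds since the Unix epoch, computed straight from the timeval fields.
static long long timeInMilli()
{
    timeval t;
    gettimeofday(&t, NULL);
    return static_cast<long long>(t.tv_sec) * 1000 + t.tv_usec / 1000;
}

int main()
{
    long long a = timeInMilli();
    usleep(10000);                                // 10,000 µs = 10 ms
    long long b = timeInMilli();
    std::cout << b - a << " ms" << std::endl;     // prints roughly 10
    return 0;
}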