Considering this structure:
struct Book {
int id;
string title;
};
And this vector:
vector<Book> books;
How can I use MPI_Send in order to send the elements of vector books?
I have been trying to find a way to do this all day, but with no results.
Method 1
One way to do this is to set the title to a constant length. You can then build an MPI data type around your struct, like so:
#include "mpi.h"
#include <iostream>
#include <string>
#include <vector>
const int MAX_TITLE_LENGTH = 256;
// A book record with a fixed-size character array for the title, so the
// struct has a flat, pointer-free memory layout that MPI can describe
// directly with a derived datatype (unlike std::string).
struct Book {
int id;
char title[MAX_TITLE_LENGTH];
};
int main(int argc, char *argv[]){
MPI_Init(&argc, &argv);
std::vector<Book> books(343);
MPI_Datatype BookType;
MPI_Datatype type[2] = { MPI_INTEGER, MPI_CHAR };
int blocklen[2] = { 1, MAX_TITLE_LENGTH };
MPI_Aint disp[2];
disp[0] = 0;
disp[1] = sizeof(int);
MPI_Type_create_struct(2, blocklen, disp, type, &BookType);
MPI_Type_commit(&BookType);
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
if (myrank == 0) {
books[3].id = 4;
MPI_Send(books.data(), 343, BookType, 1, 123, MPI_COMM_WORLD);
} else if (myrank == 1) {
MPI_Status status;
MPI_Recv(books.data(), 343, BookType, 0, 123, MPI_COMM_WORLD, &status);
std::cout<<books[3].id<<std::endl;
}
MPI_Finalize();
return 0;
}
Method 2
MPI is best used for quickly exchanging numbers across grids of known size. But it can also work as a handy communication layer. To do so, we can use the Cereal library to serialize arbitrary C++ objects and then send the serialized representations using MPI, as follows. This is slower than using MPI as designed because there are more intermediate copies, but it provides the full flexibility of C++.
#include "mpi.h"
#include <cereal/types/vector.hpp>
#include <cereal/types/string.hpp>
#include <cereal/archives/binary.hpp>
#include <sstream>
#include <string>
// A book record.  'title' is a std::string, which cannot be described by a
// flat MPI datatype, so the struct is instead made serializable by Cereal
// through the member serialize() hook below.
struct Book {
int id;
std::string title;
// Called by Cereal for both saving and loading; archives both members.
template <class Archive>
void serialize( Archive & ar ) { ar(id,title); }
};
/// Serialize an arbitrary Cereal-serializable object and send it to 'dest'
/// as a byte stream.  Returns the MPI error code of the underlying send.
template<class T>
int MPI_Send(const T &data, int dest, int tag, MPI_Comm comm){
  std::stringstream ss;
  { // Scope forces the archive destructor to flush everything into 'ss' (Cereal RAII).
    cereal::BinaryOutputArchive archive( ss );
    archive( data );
  }
  const auto serialized = ss.str();
  // Bug fix: honour the 'comm' argument instead of hard-coding MPI_COMM_WORLD,
  // and cast the size_t byte count to the int that MPI expects.
  return MPI_Send(serialized.data(), static_cast<int>(serialized.size()), MPI_CHAR, dest, tag, comm);
}
/// Receive an object of arbitrary Cereal-serializable type.  The message is
/// probed first so that a buffer of exactly the right size can be allocated
/// before the actual receive.  Returns the MPI error code of the receive.
template<class T>
int MPI_Recv(T &data, int source, int tag, MPI_Comm comm, MPI_Status *status){
  // Get number of bytes in the incoming message.
  // Bug fix: honour the 'comm' argument instead of hard-coding MPI_COMM_WORLD
  // (both here and in the inner MPI_Recv below).
  MPI_Probe(source, tag, comm, status);
  int num_incoming;
  MPI_Get_count(status, MPI_CHAR, &num_incoming);

  // Allocate a buffer of appropriate size.
  std::vector<char> incoming(num_incoming);

  // Receive the raw bytes.
  auto ret = MPI_Recv(incoming.data(), num_incoming, MPI_CHAR, source, tag, comm, status);

  std::stringstream ss;
  ss.write(incoming.data(), num_incoming);

  // Unpack the bytes back into 'data'.
  {
    cereal::BinaryInputArchive archive(ss);
    archive(data);
  }
  return ret;
}
// Round-trip a vector of Books from rank 0 to rank 1 via the Cereal-based
// send/receive wrappers above.  Run with at least two ranks.
int main(int argc, char **argv){
    MPI_Init(&argc, &argv);

    std::vector<Book> books(343);

    int rank = -1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        books[3].id    = 4;
        books[3].title = "Hello, world!";
        MPI_Send(books, 1, 123, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Status status;
        MPI_Recv(books, 0, 123, MPI_COMM_WORLD, &status);
        std::cout << books[3].id << " " << books[3].title << std::endl;
    }

    MPI_Finalize();
    return 0;
}
With title being a char[N] array of fixed size N, you could've created a new datatype and use it in MPI_Send. Unfortunately, this approach won't work with std::string as a data member. But you can send std::vector<Book> element-by-element.
For example:
// Sender side: transmit the vector length first, then each Book as
// (id, title-length, title-bytes).  The '...' stands for the usual
// destination/tag/communicator arguments — this fragment is illustrative,
// not compilable as-is.
std::vector<Book> books;
// ...
const unsigned long long size = books.size();
MPI_Send(&size, 1, MPI_UNSIGNED_LONG_LONG, ...);
for (const auto& book : books) {
MPI_Send(&book.id, 1, MPI_INT, ...);
const unsigned long long len = book.title.length();
MPI_Send(&len, 1, MPI_UNSIGNED_LONG_LONG, ...);
MPI_Send(book.title.data(), len, MPI_CHAR, ...);
}
and
// Receiver side: mirror image of the sender — read the vector length,
// resize, then receive each Book field-by-field.  The title bytes arrive in
// a temporary char buffer and are copied into the std::string.  Again the
// '...' stands for source/tag/communicator/status arguments.
std::vector<Book> books;
unsigned long long size;
MPI_Recv(&size, 1, MPI_UNSIGNED_LONG_LONG, ...);
books.resize(size);
for (auto& book : books) {
MPI_Recv(&book.id, 1, MPI_INT, ...);
unsigned long long len;
MPI_Recv(&len, 1, MPI_UNSIGNED_LONG_LONG, ...);
std::vector<char> str(len);
MPI_Recv(str.data(), len, MPI_CHAR, ...);
book.title.assign(str.begin(), str.end());
}
// ...
Related
I have a c++ application that spawns a child process. I am trying to setup a shared memory segment between the parent and child. I would like the shared segment to be an array of structs.
// Fixed-size record intended to live inside an MPI shared-memory window.
// Every member has a compile-time size (std::array / std::bitset), so the
// struct contains no pointers and its bytes are meaningful across processes.
// NOTE(review): std::bitset is trivially copyable on mainstream
// implementations, but that is not guaranteed by the standard — verify on
// your toolchain before sharing it across processes.
struct SharedSegment
{
    bool m_Status;
    std::array<std::array<std::array<char, 600>, 2>, 3> m_Array1;
    std::array<std::bitset<3>, 3> m_Array2;
    std::array<std::array<std::array<char, 600>, 3>, 3> m_Array3;
    std::array<std::array<char, 500>, 2> m_Array4;
    std::array<std::array<char, 500>, 2> m_Array5;
    std::array<std::array<char, 500>, 2> m_Array6;
};  // Bug fix: a struct definition must end with a semicolon.
In the parent application I declare the array of size 2 std::array<SharedSegment, 2> my_SharedSegment;
int main(int argc, char* argv[])
{
std::array<SharedSegment, 2> my_SharedSegment;
MPI_Aint bufferAllocationSize = sizeof(my_SharedSegment);
int provided, n, rank, size, k;
MPI_Comm intercomm, universe;
MPI_Win win;
int disp = 1;
MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
assert(provided == MPI_THREAD_MULTIPLE);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_spawn("./child_exe", MPI_ARGV_NULL, 1,
MPI_INFO_NULL, 0, MPI_COMM_WORLD,
&intercomm, MPI_ERRCODES_IGNORE);
MPI_Intercomm_merge(intercomm, 0, &universe);
MPI_Comm_size(universe, &k);
assert(k == 2);
MPI_Win_allocate_shared(bufferAllocationSize, 1, MPI_INFO_NULL, universe, &my_SharedSegment, &win);
//Manipulate/Initialize segment here
MPI_Barrier(universe);
MPI_Barrier(universe);
MPI_Finalize();
return 0;
}
The child code:
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
std::array<SharedSegment, 2> my_SharedSegment;
MPI_Aint bufferAllocationSize = sizeof(my_SharedSegment);
MPI_Comm parent, universe;
int rank, disp;
MPI_Win win;
MPI_Aint asize;
MPI_Comm_get_parent(&parent);
assert(parent != MPI_COMM_NULL);
MPI_Intercomm_merge(parent, 0, &universe);
MPI_Win_allocate_shared(0, 1, MPI_INFO_NULL, universe, &my_SharedSegment, &win);
MPI_Win_shared_query(win, MPI_PROC_NULL, &asize, &disp, &my_SharedSegment);
MPI_Barrier(universe);
SharedSegment struct1 = my_SharedSegment[0]; //
SharedSegment struct2 = my_SharedSegment[1];
MPI_Barrier(universe);
MPI_Finalize();
}
I cannot get this to work. The child process is not reading the segment correctly (I expect struct1 and struct2 to have the values I initialized in the parent). Is sharing arrays of complex types not allowed? What am I doing wrong?
You are doing Win_shared_query on the null process. I'm surprised that this doesn't crash your code. You need to call Win_shared_query to get the pointer from process zero.
I want to write the integer b in the output file. I try to provide the address of a, but it only outputs the first several chars, there is not number after the chars.
Here is my code:
#include <mpi.h>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
int main(int argc, char *argv[])
{
int rank;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPIO_Request request;
MPI_Status status;
MPI_Offset offset = 0;
MPI_File fh;
std::string str1 = "output";
const char *na = str1.c_str();
std::string str2 = "#!TDV112";
const char *version = str2.c_str();
MPI_File_open(MPI_COMM_WORLD, na, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
if (rank == 0)
{
MPI_File_seek(fh, offset, MPI_SEEK_SET);
// header !version number
MPI_File_write(fh, version, 8, MPI_CHAR, &status);
// INTEGER 1
int a = 1;
MPI_File_write(fh, &a, 1, MPI_INT, &status);
}
return 0;
}
I define a new MPI data type in the main function in my code, but it seems that it can't be used in other functions.
// One sparse-matrix entry: a (row, col) coordinate and its value.
typedef struct {
int row;
int col;
double val;
} unit;
// Sends a single 'unit' to rank 1 with tag 0.
// NOTE(review): 'valUnit' is declared inside main(), so it is not visible
// here — this is the "'valUnit' was not declared in this scope" compile
// error the question asks about.  Declaring 'MPI_Datatype valUnit;' at file
// scope (right after the typedef) fixes it.
void sendTest() {
unit val;
val.row = val.col = val.val = 1;
MPI_Send(&val, 1, valUnit, 1, 0, MPI_COMM_WORLD);
}
// Receives a single 'unit' from rank 0 with tag 0.
// NOTE(review): same scope problem as sendTest() — 'valUnit' must be a
// file-scope variable for this to compile.
void recvTest() {
unit val;
MPI_Recv(&val, 1, valUnit, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
// Builds an MPI struct datatype matching 'unit' (using offsetof so any
// compiler padding is accounted for), then exchanges one value between
// ranks 0 and 1.
int main(int argc, char* argv[]) {
int comm_sz,my_rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
int blockcount[3]={1,1,1};
MPI_Aint offsets[3] = {offsetof(unit, row), offsetof(unit, col), offsetof(unit, val)};
MPI_Datatype dataType[3] = {MPI_INT, MPI_INT, MPI_DOUBLE};
// NOTE(review): declared locally, so sendTest()/recvTest() cannot see it;
// move this declaration to file scope and keep only the construction here.
MPI_Datatype valUnit;
MPI_Type_create_struct(3, blockcount, offsets, dataType, &valUnit);
MPI_Type_commit(&valUnit);
if(my_rank == 0)
sendTest();
else
recvTest();
MPI_Finalize();
return 0;
}
When I compile the program, I got an error:
error: ‘valUnit’ was not declared in this scope
I was wondering how to define the new mpi data type once and can be used in all scope?
Simply declare valUnit as a global variable (e.g. right after the typedef ... declaration).
Note that send() and recv() are glibc functions, so you should rename those subroutines in your program; otherwise you might experience some really weird side effects.
I've made an application that counts number of characters using MS MPI, but it causes 4 breakpoints at:
MPI_File_get_size, MPI_File_set_view, MPI_File_read and again at MPI_File_get_size(fh, &size).
Do you know what may cause them? Full code below:
#include "stdafx.h"
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
// NOTE(review): this first version has several defects, which is why it
// breaks at the MPI-IO calls:
//  * MPI_File_read writes to &buffer[100], which is one element past the end
//    of the array — an out-of-bounds write.
//  * In the MPI_Recv below, the loop index 'i' is passed as the *count*
//    argument while the source is hard-coded to 0, so rank 0 tries to
//    receive from itself.
//  * The send uses tag 3 but the receive expects tag 1 — they can never
//    match, so the exchange would deadlock even if the rest were right.
int main(int argc, char *argv[])
{
int numprocs, rank, buffer[100];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
// Out-of-bounds destination (see note above).
MPI_File_read(fh, &buffer[100], 1, MPI_CHAR, &status);
// This assigns the *return code* of MPI_File_get_size, not a character count.
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
if (rank == 0) {
for (int i = 0; i < numprocs; i++) {
// 'i' should be the source, not the count; tag 1 never matches tag 3.
MPI_Recv(&char_number, i, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
}
}
else {
MPI_Send(&char_number, 0, MPI_INT, 0, 3, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
EDIT: Got rid of breakpoints, but not receiving any output now:
#include "stdafx.h"
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
// NOTE(review): this edited version fixes the out-of-bounds read but still
// produces no output because the message exchange is broken:
//  * The receive loop still passes the loop index 'i' as the *count* and
//    hard-codes the source to 0, so rank 0 "receives" from itself.
//  * The send uses tag 3 while the receive expects tag 1 — no match.
//  * char_number still holds MPI_File_get_size's return code, not a count.
//  * MPI_File_read reads size/numprocs bytes into a 100-byte buffer with no
//    bound check — overflows for files larger than 100*numprocs bytes.
int main(int argc, char *argv[])
{
int numprocs, rank;
char buffer[100] = { 0 };
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
MPI_File_read(fh, buffer, (size/numprocs), MPI_CHAR, &status);
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
if (rank == 0) {
for (int i = 0; i < numprocs; i++) {
// Count/source/tag are all wrong here (see note above).
MPI_Recv(&char_number, i, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
}
cout << "There is: " << char_number << " characters in file.txt";
}
else {
MPI_Send(&char_number, 0, MPI_INT, 0, 3, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
It might be your breakpoints are still on the list:
I deleted a breakpoint, but I continue to hit it when I start
debugging again
If you deleted a breakpoint while debugging, in some
cases you may hit the breakpoint again the next time you start
debugging. To stop hitting this breakpoint, make sure all the
instances of the breakpoint are removed from the Breakpoints window.
Source: https://msdn.microsoft.com/en-us/library/5557y8b4.aspx
I remember similar situation in the past with some executables. Removing breakpoint from the source code was not enough.
It also might be that you are trying to access non-existing file.
It also might be that your code is not quite OK.
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
int main(int argc, char *argv[])
{
int numprocs, rank;
// buffer will keep input data
char buffer[100] = { 0 };
// initialization of MPI world
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
// name of the file with data
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
// we have to be careful here
// in this sample we have strong assumption that size of data
// divides without the reminder by number of processors!
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
MPI_File_read(fh, buffer, (size/numprocs), MPI_CHAR, &status);
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
printf("My rank: %d - my data: %s\n", rank, buffer);
if (rank == 0) {
for (int i = 1; i < numprocs; i++) {
MPI_Recv(&buffer[i*(size / numprocs)], (size/numprocs), MPI_CHAR, i, 0, MPI_COMM_WORLD, &status);
}
printf("I have collected data: %s\n",buffer);
}
else {
MPI_Send(&buffer[0], rank*(size / numprocs), MPI_CHAR, 0, 0, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
For input file
> cat file.txt
abcdefgh
works as expected:
> mpirun -np 2 ./simple2
My rank: 0 - my data: abcd
I have collected data: abcdefgh
My rank: 1 - my data: efgh
The question is whether this is what you are looking for.
From OpenMPI docs: C++ syntax
Request Comm::Irecv(void* buf, int count, const Datatype&
datatype, int source, int tag) const
So I imagine I do something like:
MPI::Request req;
req = MPI_Irecv(&ballChallenges[i], 2, MPI_INT, i, TAG_AT_BALL, MPI_COMM_WORLD);
But it complains:
error: too few arguments to function ‘int MPI_Irecv(void*, int, MPI_Datatype, int, int, MPI_Comm, ompi_request_t**)’
Seems like I am missing ompi_request_t**, but its not documented? Tried
MPI_Irecv(&ballChallenges[i], 2, MPI_INT, i, TAG_AT_BALL, MPI_COMM_WORLD, &req);
But fails with
error: cannot convert ‘MPI::Request*’ to ‘ompi_request_t**’ for argument ‘7’ to ‘int MPI_Irecv(void*, int, MPI_Datatype, int, int, MPI_Comm, ompi_request_t**)’
So what's with the ompi_request_t part?
This works (C):
#include <stdio.h>
#include <string.h>
#include <mpi.h>
int main(int argc, char **argv) {
int rank;
const char *msg="Hello!";
const int len=strlen(msg)+1;
char buf[len];
MPI_Request isreq, irreq;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
MPI_Isend((void*)msg, len, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &isreq);
MPI_Irecv(buf, len, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &irreq);
MPI_Cancel(&irreq);
MPI_Cancel(&isreq);
}
MPI_Finalize();
return 0;
}
Or this works (C++)
#include <cstring>
#include <mpi.h>
using namespace MPI;
// Same example with the (long-deprecated, removed in MPI-3) C++ bindings:
// COMM_WORLD.Isend/Irecv return an MPI::Request object by value.
int main(int argc, char **argv) {
  const char *msg="Hello!";
  const int len=strlen(msg)+1;
  char *buf = new char[len];
  Init(argc, argv);
  int rank = COMM_WORLD.Get_rank();
  if (rank == 0) {
    Request isreq = COMM_WORLD.Isend(msg, len, MPI_CHAR, 0, 0);
    Request irreq = COMM_WORLD.Irecv(buf, len, MPI_CHAR, 0, 0);
    isreq.Cancel();
    irreq.Cancel();
    // Bug fix: cancelled requests must still be completed to be freed.
    isreq.Wait();
    irreq.Wait();
  }
  Finalize();
  delete[] buf;   // bug fix: the buffer was leaked
  return 0;
}