I took this example from https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node425.htm
Each rank creates an MPI datatype that holds the offset addresses of the struct member variables.
Rank 1 sends its data to rank 0.
Rank 0 receives the data from rank 1 together with its own data, so that both structs end up in one array.
// This example is based on https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node425.htm
// Each rank creates an MPI datatype that holds the offset addresses of the struct member variables.
// Rank 1 sends its data to rank 0.
// Rank 0 receives the data from rank 1 together with its own data, so that both structs end up in one array.
#include <iostream>
#include <array>
#include <vector>   // std::vector
#include <cstring>  // strcpy
#include <cstdlib>  // EXIT_SUCCESS / EXIT_FAILURE
#include <mpi.h>

int my_rank;
int nThreads;

struct basetype_t {
    double volstep = 0;
    double volTot = 0;
};

// Inheritance is not allowed
struct type_t {
    char ID[50];
    int i = 0;
    float x = 0;
    // A container such as std::vector between the members appears to work here, since only the
    // offsets listed in the MPI datatype are transmitted; the vector itself is not sent.
    // In Fortran this is explicitly not allowed.
    // std::vector<int> unused;
    bool l = false;
    double d = 0;
    basetype_t base;
};
// Check an MPI error code and abort on failure
void check(int ierr) {
    if (ierr != MPI_SUCCESS) {
        char err_recvbuffer[MPI_MAX_ERROR_STRING];
        int resultlen;
        MPI_Error_string(ierr, err_recvbuffer, &resultlen);
        std::cerr << err_recvbuffer << "\n";
        // Abort the whole job; continuing after a failed MPI call would be undefined.
        MPI_Abort(MPI_COMM_WORLD, ierr);
    }
}
// Create a new MPI datatype based on the addresses of the member variables of the type we want to send
MPI_Datatype createMPItyp() {
    type_t foo;
    MPI_Aint base;
    check(MPI_Get_address(&foo, &base));
    // For every member variable that is to be sent, record its MPI type, block length and address
    const int nMembervarsToSend = 7;
    std::array<MPI_Datatype, nMembervarsToSend> types;
    std::array<int, nMembervarsToSend> blocklen;
    std::array<MPI_Aint, nMembervarsToSend> disp;
    types[0] = MPI_INT;
    blocklen[0] = 1;
    check(MPI_Get_address(&foo.i, &disp[0]));
    types[1] = MPI_FLOAT;
    blocklen[1] = 1;
    check(MPI_Get_address(&foo.x, &disp[1]));
    types[2] = MPI_CXX_BOOL; // MPI_LOGICAL is the Fortran type; MPI_CXX_BOOL matches a C++ bool
    blocklen[2] = 1;
    check(MPI_Get_address(&foo.l, &disp[2]));
    types[3] = MPI_DOUBLE;
    blocklen[3] = 1;
    check(MPI_Get_address(&foo.d, &disp[3]));
    types[4] = MPI_CHAR;
    blocklen[4] = sizeof(foo.ID);
    check(MPI_Get_address(&foo.ID, &disp[4]));
    types[5] = MPI_DOUBLE;
    blocklen[5] = 1;
    check(MPI_Get_address(&foo.base.volstep, &disp[5]));
    types[6] = MPI_DOUBLE;
    blocklen[6] = 1;
    check(MPI_Get_address(&foo.base.volTot, &disp[6]));
    if (my_rank == 0) {
        std::cout << "Base Address " << std::hex << base << "\n";
        std::cout << "Addresses ";
        for (auto& x : disp) {
            std::cout << " " << std::hex << x;
        }
        std::cout << std::dec << "\n";
    }
    // Convert the absolute addresses into offsets relative to the start of the struct
    for (auto& x : disp) {
        x -= base;
    }
    if (my_rank == 0) {
        std::cout << "Displacement";
        for (auto& x : disp) {
            std::cout << " " << x;
        }
        std::cout << "\n";
    }
    MPI_Datatype newMPItype;
    check(MPI_Type_create_struct(nMembervarsToSend, blocklen.data(), disp.data(), types.data(), &newMPItype));
    check(MPI_Type_commit(&newMPItype));
    return newMPItype;
}
void doRank0(MPI_Datatype newMPItype) {
    // Fill this rank's own struct
    type_t sendbuffer;
    strcpy(sendbuffer.ID, "Kreis100");
    sendbuffer.i = 10;
    sendbuffer.x = 1.2f;
    sendbuffer.d = 1.23;
    sendbuffer.l = true;
    sendbuffer.base.volstep = 1.34;
    sendbuffer.base.volTot = 1.56;
    // One element per rank; displacements are counted in multiples of the extent of newMPItype
    std::vector<int> displacements(nThreads), counts(nThreads);
    std::vector<type_t> recvbuffer(2);
    std::cout << my_rank << " Receiving...\n";
    int root_rank = 0;
    displacements[0] = 0;
    displacements[1] = 1;
    counts[0] = 1;
    counts[1] = 1;
    // MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)
    check(MPI_Gatherv(&sendbuffer, 1, newMPItype, recvbuffer.data(), counts.data(), displacements.data(), newMPItype, root_rank, MPI_COMM_WORLD));
    std::cout << my_rank << " Done receiving\n";
    std::cout << my_rank << " content of struct:\n";
    for (const type_t& buf : recvbuffer) {
        std::cout << "ID " << buf.ID << "\n";
        std::cout << "i " << buf.i << "\n";
        std::cout << "x " << buf.x << "\n";
        std::cout << "d " << buf.d << "\n";
        std::cout << "l " << buf.l << "\n";
        std::cout << "volstep " << buf.base.volstep << "\n";
        std::cout << "volTot " << buf.base.volTot << "\n\n";
    }
}
void doRank1(MPI_Datatype newMPItype) {
    // Fill this rank's struct and send it to the root
    type_t sendbuffer;
    strcpy(sendbuffer.ID, "Kreis200");
    sendbuffer.i = 20;
    sendbuffer.x = 2.2f;
    sendbuffer.d = 2.23;
    sendbuffer.l = true;
    sendbuffer.base.volstep = 2.34;
    sendbuffer.base.volTot = 2.56;
    std::cout << my_rank << " Sending...\n";
    // MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)
    // The receive arguments are ignored on non-root ranks, so they can all be NULL.
    int root_rank = 0;
    check(MPI_Gatherv(&sendbuffer, 1, newMPItype, NULL, NULL, NULL, newMPItype, root_rank, MPI_COMM_WORLD));
    std::cout << my_rank << " Done sending\n";
}
int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);
    // Get the number of processes and check that exactly 2 are used
    MPI_Comm_size(MPI_COMM_WORLD, &nThreads);
    if (nThreads != 2) {
        std::cout << "Start with 2 processes.\n";
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Datatype newMPItype = createMPItyp();
    switch (my_rank) {
        case 0: doRank0(newMPItype); break;
        case 1: doRank1(newMPItype); break;
    }
    MPI_Type_free(&newMPItype);
    MPI_Finalize();
    return EXIT_SUCCESS;
}
$ mpic++ -g -ggdb -Wall test_MPI_struct.cpp
$ mpiexec -n 2 ./a.out
1 Sending...
1 Done sending
Base Address 7ffd83f7e180
Addresses 7ffd83f7e1b4 7ffd83f7e1b8 7ffd83f7e1bc 7ffd83f7e1c0 7ffd83f7e180 7ffd83f7e1c8 7ffd83f7e1d0
Displacement 52 56 60 64 0 72 80
0 Receiving...
0 Done receiving
0 content of struct:
ID Kreis100
i 10
x 1.2
d 1.23
l 1
volstep 1.34
volTot 1.56

ID Kreis200
i 20
x 2.2
d 2.23
l 1
volstep 2.34
volTot 2.56
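
Note on the receive side: the displacement of 1 that rank 0 passes to MPI_Gatherv is measured in multiples of the extent of newMPItype, so gathering straight into a std::vector<type_t> only lines up if that extent equals sizeof(type_t). With the layout printed above (the last double ends at offset 88, which here matches sizeof(type_t)) the two happen to agree; for other member orders, or when several structs per rank are sent, the extent can be forced to sizeof(type_t) with MPI_Type_create_resized. A minimal sketch, reusing type_t and check() from the listing above; resizeToStructExtent is a hypothetical helper, not part of the original program:

// Sketch: give the struct datatype an extent of exactly sizeof(type_t), so that
// element i of a type_t array starts i extents after element 0.
MPI_Datatype resizeToStructExtent(MPI_Datatype t) {
    MPI_Aint lb, extent;
    check(MPI_Type_get_extent(t, &lb, &extent));
    if (extent == static_cast<MPI_Aint>(sizeof(type_t))) {
        return t; // extent already lines up with the array elements, nothing to do
    }
    MPI_Datatype resized;
    check(MPI_Type_create_resized(t, 0, sizeof(type_t), &resized));
    check(MPI_Type_commit(&resized));
    check(MPI_Type_free(&t));
    return resized;
}

createMPItyp() could then return resizeToStructExtent(newMPItype) instead of newMPItype; the gather calls themselves stay unchanged.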