I am new to MPI, so I am asking for your help. I need to scatter a vector of pairs in C++ using MPI. For this I created an MPI_Datatype "mytype". I then use MPI_Scatterv, because I don't want the scatter to depend on either the size of the vector or the number of processes.
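To make clear what I mean by "not depending on the size of the vector or the number of processes", here is a minimal comparison sketch I put together that scatters a plain vector<int> with MPI_INT and per-rank counts/displacements. This is not the code I am asking about, just the call pattern I am trying to reproduce with pairs:

#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, p;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    std::vector<int> data;                 // data to scatter (meaningful on the root)
    for (int v = 1; v <= 4; ++v)
        data.push_back(v);
    int size = static_cast<int>(data.size());

    // Per-rank counts and displacements, so the call depends on neither size nor p.
    std::vector<int> elem(p), disp(p);
    int split = size / p;
    int extra = size % p;
    for (int i = 0; i < p; ++i)
        elem[i] = split + (i < extra ? 1 : 0);
    for (int i = 1; i < p; ++i)
        disp[i] = disp[i - 1] + elem[i - 1];

    std::vector<int> local(elem[rank]);
    MPI_Scatterv(&data[0], &elem[0], &disp[0], MPI_INT,
                 elem[rank] ? &local[0] : nullptr, elem[rank], MPI_INT,
                 0, MPI_COMM_WORLD);

    for (int k = 0; k < static_cast<int>(local.size()); ++k)
        std::cout << "rank " << rank << ": " << local[k] << "\n";

    MPI_Finalize();
    return 0;
}

This version behaves as I expect; the pair version below does not.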
Here is my code:
#include <mpi.h>
#include <iostream>
#include <utility>
#include <vector>

using std::cout;
using std::cerr;
using std::endl;
using std::vector;
using std::pair;

typedef vector<pair<int, int> > Vect;

// Build an MPI datatype for a pair of ints from the addresses of its two members.
void Build_mpi_type(unsigned int* i, unsigned int* j, MPI_Datatype* mytype)
{
    int array_of_blocklengths[2] = {1, 1};
    MPI_Datatype array_of_types[2] = {MPI_INT, MPI_INT};
    MPI_Aint i_addr, j_addr;
    MPI_Aint array_of_displacements[2] = {0};
    MPI_Get_address(i, &i_addr);
    MPI_Get_address(j, &j_addr);
    array_of_displacements[1] = j_addr - i_addr;
    MPI_Type_create_struct(2, array_of_blocklengths, array_of_displacements,
                           array_of_types, mytype);
    MPI_Type_commit(mytype);
}
int main(int argc, char **argv)
{
    Vect esempio;
    esempio.push_back(std::make_pair(1, 1));
    esempio.push_back(std::make_pair(2, 2));
    esempio.push_back(std::make_pair(3, 3));
    esempio.push_back(std::make_pair(4, 4));
    unsigned int size = esempio.size();

    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int p;
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    unsigned int* index_i = nullptr;
    unsigned int* index_j = nullptr;
    MPI_Datatype mytype;
    Build_mpi_type(index_i, index_j, &mytype);

    //cout << esempio.size();
    vector<int> elem(p); // number of elements per process
    vector<int> disp(p); // first index for every local vector

    // how to distribute elements
    int split = size / p;
    int extra = size % p;
    for (int i = 0; i < extra; ++i)
        elem[i] = split + 1;
    for (int i = extra; i < p; ++i)
        elem[i] = split;
    for (int i = 1; i < p; ++i)
        disp[i] = disp[i-1] + elem[i-1];

    int local_n = elem[rank];
    Vect local_v(local_n);

    MPI_Scatterv(&esempio[0], &elem[0], &disp[0], mytype,
                 &local_v[0], local_n, mytype,
                 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (Vect::const_iterator pos = local_v.begin(); pos != local_v.end(); ++pos)
            cout << pos->first << " " << pos->second << endl;
    }

    MPI_Finalize();
    return 0;
}
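To spell out the distribution arithmetic for my example (size = 4): with p = 1 I get elem = {4} and disp = {0}; with p = 2, elem = {2, 2} and disp = {0, 2}; with p = 3, elem = {2, 1, 1} and disp = {0, 2, 3}; with p = 4, elem = {1, 1, 1, 1} and disp = {0, 1, 2, 3}. So the counts and displacements themselves look right to me.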
Now the thing is, when I run the program with only one process (the root), I get this result:
1 1
2 2
0 0
0 0
When I run it with two or three processes:
1 1
0 0
And finally with four processes:
1 0
Why does this happen? I really don't understand it. With one process, at least, the result should be:
1 1
2 2
3 3
4 4
I hope someone who knows MPI better than I do can help me.