int n, j, i, i2, i3, rank, size, rowChunk, **cells, **cellChunk;
MPI_Status status;

MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);

if(!rank){
    printf("\nEnter board size:\n");
    fflush(stdout);
    scanf("%d", &n);

    printf("\nEnter the total iterations to play:\n");
    fflush(stdout);
    scanf("%d", &j);

    srand(3);
    rowChunk = n/size; //how many rows each process will get

    for(i=1; i<size; i++){
        MPI_Send(&n, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
        MPI_Send(&j, 1, MPI_INT, i, 7, MPI_COMM_WORLD);
    }

    cells = (int**) malloc(n*sizeof(int*)); //create main 2D array
    for(i=0; i<n; i++){
        cells[i] = (int*) malloc(n*sizeof(int));
    }

    for(i=0; i<n; i++){
        for(i2=0; i2<n; i2++){ //fill array with random data
            cells[i][i2] = rand() % 2;
        }
    }

    for(i=1; i<size; i++){ //send blocks of rows to each process
        for(i2=0; i2<rowChunk; i2++){ //this works for all n
            MPI_Send(cells[i2+(rowChunk*i)], n, MPI_INT, i, i2, MPI_COMM_WORLD);
        }
    }

    cellChunk = (int**) malloc(rowChunk*sizeof(int*));
    for(i=0; i<rowChunk; i++){ //declare 2D array for process zero's array chunk
        cellChunk[i] = (int*) malloc(n*sizeof(int));
    }

    for(i=0; i<rowChunk; i++){ //give process zero its proper chunk of the array
        for(i2=0; i2<n; i2++){
            cellChunk[i][i2] = cells[i][i2];
        }
    }

    for(i3=1; i3<=j; i3++){
        MPI_Send(cellChunk[0], n, MPI_INT, size-1, 1, MPI_COMM_WORLD); //Hangs here if n > 256
        MPI_Send(cellChunk[rowChunk-1], n, MPI_INT, 1, 2, MPI_COMM_WORLD); //also hangs if n > 256

        ... //Leaving out code that works
This code works perfectly if n (the board size) is less than or equal to 256. Anything larger and it hangs on the first MPI_Send in the iteration loop. Also, when sending the blocks of array rows to the other processes (the earlier MPI_Send), the other processes receive their data completely, even when n > 256. What would cause this MPI_Send to hang once the buffer size exceeds 256? The second MPI_Send also hangs if the buffer size exceeds 256.
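A likely explanation is the eager/rendezvous switch inside the MPI library: for small messages most implementations copy the data into an internal buffer and let MPI_Send return immediately, but above some threshold (often around a few KB; 256 ints is roughly 1 KB) MPI_Send blocks until the destination posts a matching receive. If every rank enters the iteration loop and sends its boundary rows before anyone posts a receive, all ranks block and the program deadlocks. Below is a minimal sketch of a deadlock-free ghost-row exchange using MPI_Sendrecv; the buffer names (chunk, topGhost, botGhost), the periodic ring, and n being divisible by size are illustrative assumptions, not taken from the original program.

/* Sketch: each rank owns rowChunk rows of an n-column board stored
 * contiguously, and swaps its first/last row with its neighbours.
 * MPI_Sendrecv pairs the send and the receive in one call, so it
 * cannot deadlock even when the message is too large for eager delivery. */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, i, n = 1024, rowChunk;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    rowChunk = n / size; /* assumes n is divisible by size */

    int *chunk    = (int*) malloc((size_t)rowChunk * n * sizeof(int));
    int *topGhost = (int*) malloc((size_t)n * sizeof(int));
    int *botGhost = (int*) malloc((size_t)n * sizeof(int));
    for(i = 0; i < rowChunk * n; i++)
        chunk[i] = rand() % 2;

    int up   = (rank - 1 + size) % size; /* rank owning the rows above */
    int down = (rank + 1) % size;        /* rank owning the rows below */

    /* send my first row up, receive my bottom ghost row from below */
    MPI_Sendrecv(&chunk[0],                n, MPI_INT, up,   1,
                 botGhost,                 n, MPI_INT, down, 1,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    /* send my last row down, receive my top ghost row from above */
    MPI_Sendrecv(&chunk[(rowChunk-1) * n], n, MPI_INT, down, 2,
                 topGhost,                 n, MPI_INT, up,   2,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    if(!rank) printf("ghost-row exchange completed for n = %d\n", n);

    free(chunk); free(topGhost); free(botGhost);
    MPI_Finalize();
    return 0;
}

Non-blocking MPI_Isend/MPI_Irecv followed by MPI_Waitall would work equally well; the key point is not to rely on a plain MPI_Send to buffer messages larger than the eager limit.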
Can you post the complete code? I don't see any problem in the snippet you posted. – powerrox 2011-07-13 22:09:49