I have a square matrix of size grid_size. I work on its inner part (grid_size-2) and Isend the next-to-outer edges to the other processes. I have defined a toroidal topology, so each sub-matrix process can easily compute its neighbors. While the rows (say [1][1] to [1][grid_size-2]) are sent correctly, the columns (say [1][1] to [grid_size-2][1]) are not. I use MPI_Type_contiguous for the rows and MPI_Type_vector for the columns. I checked with empty matrices (they are char matrices, so I initialize them to \0): while the rows are always received as 0, the columns differ at (semi-)random positions. What am I missing? MPI_Type_vector does not seem to receive/send what it should.

typedef char bool; 
typedef bool **grid_t; 

/* create a toroid topology */ 
void cart_create(MPI_Comm *new_comm, int Proc_Root) { 
    int reorder = 1; /* allows processes reordered for efficiency */ 
    int periods[2], dim_size[2]; 
    dim_size[0] = Proc_Root; /* rows */ 
    dim_size[1] = Proc_Root; /* columns */ 
    periods[0] = 1; /* row periodic (each column forms a ring) */ 
    periods[1] = 1; /* columns periodic (each row forms a ring) */ 
    int comm_size; 
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size); 
    MPI_Cart_create(MPI_COMM_WORLD, 2, dim_size, periods, reorder, new_comm); 
} 

int main(int argc, char** argv) { 

    /* ! MPI ! */ 
    MPI_Init(&argc, &argv); 
    int rank; 
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
    int Num_of_Proc; 
    MPI_Comm_size(MPI_COMM_WORLD, &Num_of_Proc); 
    int Proc_Root = sqrt(Num_of_Proc); 
    int Inner_Grid_Size = Num_of_Rows/Proc_Root; // size of the process's sub-matrix 
    int Grid_Size = Inner_Grid_Size + 2; //grid size plus the ghost shells 

    /* topology */ 
    MPI_Comm new_comm; 
    cart_create(&new_comm, Proc_Root); 

    /* allocate the grid */ 
    grid_t grid; 
    create_grid(&grid, Grid_Size); // I fill it with 0 
    grid_t grid2; 
    create_empty_grid(&grid2, Grid_Size); 
    grid_t new, old; 

    bool *north_row = malloc(Inner_Grid_Size * sizeof *north_row); 
    bool *south_row = malloc(Inner_Grid_Size * sizeof *south_row); 
    bool *west_column = malloc(Inner_Grid_Size * sizeof *west_column); 
    bool *east_column = malloc(Inner_Grid_Size * sizeof *east_column); 
    // Works ! 
    MPI_Datatype rowtype; 
    MPI_Type_contiguous(Inner_Grid_Size, MPI_CHAR, &rowtype); // MPI_C_BOOL 
    MPI_Type_commit(&rowtype); 
    // Where is the bug ? 
    MPI_Datatype columntype; 
    MPI_Type_vector(Inner_Grid_Size, 1, Grid_Size, MPI_CHAR, &columntype); 
    MPI_Type_commit(&columntype); 

    for (int k = 0; k < generations; k++) { 
     if (k % 2) { 
      old = grid2; 
      new = grid; 
     } else { 
      old = grid; 
      new = grid2; 
     } 
     MPI_Status status[16]; 
     MPI_Request reqs[16]; 
     MPI_Isend(&old[Inner_Grid_Size][1], 1, rowtype, neighboors_ranks[S], 
       S, new_comm, &reqs[S]); //send to S 
     MPI_Irecv(north_row, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[N], 
       S, new_comm, &reqs[S + EIGHT]); //receive from N 
     // above works 
     // below not 
     MPI_Isend(&old[1][1], 1, columntype, neighboors_ranks[W], W, 
       new_comm, &reqs[W]); //send to W 
     MPI_Irecv(east_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[E], 
       W, new_comm, &reqs[W + EIGHT]); //receive from E 
     MPI_Isend(&old[1][Inner_Grid_Size], 1, columntype, neighboors_ranks[E], 
       E, new_comm, &reqs[E]); //send to E 
     MPI_Irecv(west_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[W], 
       E, new_comm, &reqs[E + EIGHT]); //receive from W 

     MPI_Waitall(EIGHT, reqs + EIGHT, status + EIGHT); //Wait receives 
     if (rank == root) 
      for (int p = 0; p < Inner_Grid_Size; p++) { 
       printf("east[%d]=%d\n", p, east_column[p]); // should be 0 !? 
       // printf("north,%d\n", north_row[p]); // prints ok 
       printf("west[%d]=%d\n", p, west_column[p]); // should be 0 !? 
      } 
     //... 
    } 
} 
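
(The neighboors_ranks array and the N/S/E/W/EIGHT constants come from elsewhere in the program and are not shown; for context, on a Cartesian communicator like the one from cart_create the four face neighbors would typically be obtained with MPI_Cart_shift, roughly as in the sketch below. The enum values and the helper name are illustrative only, not the actual code.)

/* Sketch only: one way to fill the four neighbour ranks on the torus
   using MPI_Cart_shift. Names and index values are illustrative. */
enum { N = 0, S = 1, W = 2, E = 3 };

void find_neighbours(MPI_Comm cart_comm, int neighboors_ranks[4]) {
    /* dimension 0 (rows), displacement 1: source = north, dest = south */
    MPI_Cart_shift(cart_comm, 0, 1, &neighboors_ranks[N], &neighboors_ranks[S]);
    /* dimension 1 (columns), displacement 1: source = west, dest = east */
    MPI_Cart_shift(cart_comm, 1, 1, &neighboors_ranks[W], &neighboors_ranks[E]);
}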

EDIT: the allocation

void create_grid(grid_t *grid, int size) { 
    srand(time(NULL) ^get_rank() << 16); 
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return; 
    for (int i = 0; i < size; ++i) { 
     (*grid)[i] = malloc(size * (sizeof *((*grid)[i]))); 
     for (int j = 0; j < size; ++j) { 
      (*grid)[i][j] = 0; /*was random */ 
     } 
    } 
} 

/* the grid will be full of 0 */ 
void create_empty_grid(grid_t *grid, int size) { 
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return; 
    // the outer edges will be filled by the other processes 
    for (int i = 0; i < size; ++i) { 
     (*grid)[i] = malloc(size * (sizeof *((*grid)[i]))); 
     memset((*grid)[i], 0, sizeof (*(*grid)[i]) * size); 
    } 
} 

void print_grid(grid_t grid, int start, int size) { 
    for (int i = start; i < size; ++i) { 
     for (int j = start; j < size; ++j) { 
      if (grid[i][j]) { 
       printf("@"); 
      } else { 
       printf("."); 
      } 
     } 
     printf("\n"); 
    } 
    printf("\n"); 
} 
Can you show us how you allocate the grid? –

@JonathanDursi: edited - I do print it at various points and it prints as 0 ('.') - I will add the prints in a mo –

possible duplicate of [MPI data type for 2D array](http://stackoverflow.com/questions/10824506/mpi-data-type-for-2d-array) –

Answer

This comes up often here (for example, see this question/answer or this one) when dealing with "multidimensional arrays" in C with MPI. It is not really an MPI thing, it is a C thing.

The standard way of allocating arrays-of-arrays to get a multidimensional array in C does not give you a contiguous block of memory. Each row (that is, each malloc) is contiguous on its own, but the next row may be anywhere else in memory.
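
To make that concrete, here is a small standalone sketch (not from the original answer) that prints the gap between consecutive row pointers of such an array-of-arrays; the gaps are whatever the allocator chose, not the row length:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int size = 8;
    /* classic array-of-arrays allocation: one malloc per row */
    char **grid = malloc(size * sizeof *grid);
    for (int i = 0; i < size; i++)
        grid[i] = malloc(size * sizeof *grid[i]);

    /* the distance between row i and row i+1 is up to the allocator,
       so a fixed stride of `size` elements cannot be assumed */
    for (int i = 0; i < size - 1; i++)
        printf("gap between row %d and row %d: %td bytes\n",
               i, i + 1, grid[i + 1] - grid[i]);

    for (int i = 0; i < size; i++) free(grid[i]);
    free(grid);
    return 0;
}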

So the formula of skipping Grid_Size items to find the next item in a column will not work (and, depending on the grid size, will probably segfault). So, as in the answers to the questions linked above,

change the allocation to look something like this:

type *data = malloc(size*size*sizeof(type)); 
type **grid = malloc(size*sizeof(type *)); 
for (int i=0; i<size; i++) 
    grid[i] = &(data[i*size]); 

or any of a number of variations on this you will see kicking around. This gives you a single block of size*size items of your type, with the grid[] array pointing into it. Deallocation is then done with

free(grid[0]); 
free(grid); 
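
Adapted to the grid_t typedef from the question (and assuming its headers and the bool typedef), the contiguous version could look like the sketch below; the function names are illustrative, not the poster's code. With this layout the columntype stride of Grid_Size elements really does land on the next row of the same column, so the MPI_Type_vector sends behave as expected.

/* Sketch: contiguous allocation behind the same grid_t (bool **) interface. */
void create_contiguous_grid(grid_t *grid, int size) {
    bool *data = malloc(size * size * sizeof *data);   /* one block for all cells */
    *grid = malloc(size * sizeof **grid);              /* array of row pointers */
    if (data == NULL || *grid == NULL) return;
    for (int i = 0; i < size; ++i) {
        (*grid)[i] = &data[i * size];                  /* row i points into the block */
        memset((*grid)[i], 0, size * sizeof *data);
    }
}

void destroy_contiguous_grid(grid_t grid) {
    free(grid[0]);   /* frees the single data block */
    free(grid);      /* frees the row-pointer array */
}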
Thank you for reminding me to do it that way. –

Thanks for saving my sanity :) –
