
I am trying to sort different arrays with MPI. Each array is allocated locally. For example, if we have {1-7-4-12} {3-7-5-9} {12-15-2-16} {10-8-11-13}, we want to end up with {1-2-3-4} {5-6-7-8} {9-10-11-12} {13-14-15-16}, sorted in parallel with MPI.

So I used an odd-even transposition strategy. With 2 processes it works in every case, but when I try more processes I get values that were never in the input; for example I can end up with {23-2-3-4}. I think my problem is with memory allocation, but I can't see where. What am I doing wrong?

#include "mpi.h" 
#include <stdio.h> 
#include <stdlib.h> 
#include <time.h> 
#define MASTER 0 
#define MIN(a,b) ((a)<(b)?(a):(b)) 

#define BLOCK_LOW(id,p,n) ((id)*(n)/(p)) 

#define BLOCK_HIGH(id,p,n) \ 
     (BLOCK_LOW((id)+1,p,n)-1) 

#define BLOCK_SIZE(id,p,n) \ 
     (BLOCK_LOW((id)+1, p, n)-BLOCK_LOW(id, p , n)) 

#define BLOCK_OWNER(index,p,n) \ 
     (((p)*(index+1)-1)/(n)) 

int nbProcess, id, n; //n = total number of values 

void printTabByProcess(int *T){ 
    int i = 0; 
    int size = BLOCK_SIZE(id, nbProcess, n); 
    printf("Tab n°%d [ ", id, size); 
    for(i; i < size; i++){ 
     printf(" %d ", T[i]); 
    } 
    printf(" ]\n"); 
} 

void fusion(int *t,int deb1,int fin1,int fin2){ 
    int *table1; 
    int deb2=fin1+1; 
    int compt1=deb1; 
    int compt2=deb2; 
    int i; 
    table1=(int*)malloc((fin1-deb1+1)*sizeof(int)); 

    for(i=deb1;i<=fin1;i++) { 
     table1[i-deb1]=t[i]; 
    } 

    for(i=deb1;i<=fin2;i++){ 
     if(compt1==deb2) 
      break; 
     else if(compt2==(fin2+1)){ 
      t[i]=table1[compt1-deb1]; 
      compt1++; 
     } 
     else if(table1[compt1-deb1]<t[compt2]){ 
      t[i]=table1[compt1-deb1]; 
      compt1++; 
     } 
     else{ 
      t[i]=t[compt2]; 
      compt2++; 
     } 
    } 
    free(table1); 
} 


void tri_fusion(int*t,int deb,int fin){ 
    if(deb!=fin){ 
     int milieu=(fin+deb)/2; 
     tri_fusion(t,deb,milieu); 
     tri_fusion(t,milieu+1,fin); 
     fusion(t,deb,milieu,fin); 
    } 
} 

int* fusion2(int* t1, int* t2, int size1, int size2){ 
    int* buffer = malloc(sizeof(int)*(size1 + size2)); 
    int index1 = 0; 
    int index2 = 0; 
    int i = 0; 
    for(i; i < (size1 + size2) - 1; i++){ 
     if(t1[index1] < t2[index2]){ 
      buffer[i] = t1[index1]; 
      index1++; 
     }else{ 
      buffer[i] = t2[index2]; 
      index2++; 
     } 
    } 
    if(index1 == size1 - 1){ 
     buffer[size1 + size2 - 1] = t1[index1]; 
    }else{ 
     buffer[size1 + size2 - 1] = t2[index2]; 
    } 

    return buffer; 
} 
/* 
* 
* OUR FUNCTION TO PARALLEL SORT 
* 
*/ 
void TD_trier(int* T){ 
    MPI_Status status; 
    int size = BLOCK_SIZE(id, nbProcess, n); 
    int receive_size = 0; 
    int* receive; 
    int* array_tmp; 
    int i = 0; 
    tri_fusion(T, 0, size - 1); 
    MPI_Barrier(MPI_COMM_WORLD); 
    for(i; i < nbProcess; i++){ 
     if(i%2==0){ 
      if(id % 2 == 1){//send to left 
       MPI_Send(&size, 1, MPI_INT, id - 1, 1, MPI_COMM_WORLD); 
       MPI_Send(T, size, MPI_INT, id - 1, 1, MPI_COMM_WORLD); 
       MPI_Recv(T, size, MPI_INT, id - 1, 1, MPI_COMM_WORLD, &status); 
      }else { 
       MPI_Recv(&receive_size, 1, MPI_INT, id + 1, 1, MPI_COMM_WORLD, &status); 
       receive = malloc(sizeof(int) * size); 
       MPI_Recv(receive, receive_size, MPI_INT, id + 1, 1, MPI_COMM_WORLD, &status); 
       array_tmp = fusion2(T, receive, size, receive_size); 
       MPI_Send(&array_tmp[size], receive_size, MPI_INT, id + 1, 1, MPI_COMM_WORLD); 
       T = realloc(array_tmp, sizeof(int) * size); 
      } 
      if(id == 1){ 
       //~ printTabByProcess(T); 
      } 
     }else if(i%2 == 1 && id < nbProcess-1){ //send to right 
      if(id % 2 == 1){ 
       MPI_Send(&size, 1, MPI_INT, id + 1, 1, MPI_COMM_WORLD); 
       MPI_Send(T, size, MPI_INT, id + 1, 1, MPI_COMM_WORLD); 
       //printTabByProcess(T); 
       MPI_Recv(T, size, MPI_INT, id + 1, 1, MPI_COMM_WORLD, &status); 
      }else if(id != 0 && id%2 ==0) { 
       MPI_Recv(&receive_size, 1, MPI_INT, id - 1, 1, MPI_COMM_WORLD, &status); 
       //receive = malloc(sizeof(int) * size); 
       MPI_Recv(receive, receive_size, MPI_INT, id - 1, 1, MPI_COMM_WORLD, &status); 
       //printTabByProcess(receive); 
       array_tmp = fusion2(T, receive, size, receive_size); 
       MPI_Send(array_tmp, receive_size, MPI_INT, id - 1, 1, MPI_COMM_WORLD); 
       printTabByProcess(&array_tmp[2]); 
       T = array_tmp + size; 
       printTabByProcess(T); 
      } 
     } 
     MPI_Barrier(MPI_COMM_WORLD);  
    } 
    //printTabByProcess(T); 

} 

int generateRandomValue(){ 
    return rand() % 100; 
} 
//init local array with "random" values 
int* TD_init(int n){ 
     int i = 0; 
     int indiceDerniere = (id+1)*n/nbProcess -1; 
     int indicePremiere = id*n/nbProcess; 
     int* arrayLocal; 
     int localSize = indiceDerniere - indicePremiere +1; 
     arrayLocal = malloc(sizeof(int)*localSize); 

     //~ printf("id : %d - nbCase : %d (debut : %d, fin : %d)\n", 
      //~ id, localSize, indicePremiere, indiceDerniere); 

     for(i; i < localSize; i++){ 
      arrayLocal[i] = generateRandomValue() - id; 
     } 

     printTabByProcess(arrayLocal); 

     return arrayLocal; 
} 

int main (int argc, char *argv[]){ 
    //int n = 0; 
    int *dataLocal; 
    int dest; 
    int x; 
    int success; 

    MPI_Status status; 

    srand(time(NULL)); 

    /***** Initializations *****/ 
    MPI_Init(&argc, &argv); 
    MPI_Comm_size(MPI_COMM_WORLD, &nbProcess); //number of processes 
    MPI_Comm_rank(MPI_COMM_WORLD, &id); //rank of this process 
    //~ printf ("MPI task %d has started...\n", id); 
    //~ tag2 = 1; 
    //~ tag1 = 2; 

    MPI_Barrier (MPI_COMM_WORLD); 

    /***** Master task only ******/ 
    if (id == MASTER){ 

     printf("Chose a number of value :"); 
     scanf("%d",&n); 

     /* Send the number of cases */ 

     for (dest=1; dest<nbProcess; dest++) { 
      MPI_Send(&n, 1, MPI_INT, dest, 1, MPI_COMM_WORLD); //send number of value 
     } 
    } /* end of master section */ 

    /***** Non-master tasks only *****/ 
    if (id > MASTER) { 
     /* Receive the number of cases */ 
     MPI_Recv(&n, 1, MPI_INT, MASTER, 1, MPI_COMM_WORLD, &status); 
    } 
    MPI_Barrier (MPI_COMM_WORLD); 

    dataLocal = TD_init(n); 
    MPI_Barrier (MPI_COMM_WORLD); 
    if(id == 0){ 
     printf("__________________________________________\n"); 
    } 

    TD_trier(dataLocal); 

    MPI_Finalize(); 
} 

That is quite a lot of code to expect someone to debug for you. Spend some time reducing it to an SSCCE (http://meta.stackexchange.com/questions/22754/sscce-how-to-provide-examples-for-programming-questions); in doing so you will most likely find the problem yourself, and set yourself up for a bright future answering tough questions on SO. –

Answer


The fault most likely comes from the fusion2 function: index1 can run past size1. The MPI part itself works correctly; the code runs once the bounds checks are done properly. Here is a version that is not optimal, but works:

int* fusion2(int* t1, int* t2, int size1, int size2){ 
    int* buffer = malloc(sizeof(int)*(size1 + size2)); 
    int index1 = 0; 
    int index2 = 0; 
    int i = 0; 
    for(i; i < (size1 + size2); i++){ 
        if(index1 == size1){ 
            buffer[i] = t2[index2]; 
            index2++; 
        }else if(index2 == size2){ 
            buffer[i] = t1[index1]; 
            index1++; 
        }else if(t1[index1] < t2[index2]){ 
            buffer[i] = t1[index1]; 
            index1++; 
        }else{ 
            buffer[i] = t2[index2]; 
            index2++; 
        } 
    } 

    return buffer; 
} 

Also keep an eye on your memory management. For example: do you free T before doing

T = realloc(array_tmp, sizeof(int) * size); 

Do you free "receive"? Do you free "array_tmp" in the second branch? I am afraid there are memory leaks... It would probably be better to avoid the allocation inside fusion2, and even inside the loop: allocating array_tmp and receive once at the start, with "enough" space, would probably be safer (and faster?).
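For example, the merge could write into a caller-provided buffer instead of allocating one itself, along the lines of the sketch below (the name fusion2_into is made up for illustration, and this is not tested against the rest of the program):

    /* Merge t1[0..size1) and t2[0..size2) into a caller-provided buffer, 
       so nothing is allocated inside the exchange loop. */ 
    void fusion2_into(const int *t1, const int *t2, int size1, int size2, int *out){ 
        int i1 = 0, i2 = 0, i; 
        for(i = 0; i < size1 + size2; i++){ 
            if(i1 == size1)          out[i] = t2[i2++];   /* t1 exhausted */ 
            else if(i2 == size2)     out[i] = t1[i1++];   /* t2 exhausted */ 
            else if(t1[i1] < t2[i2]) out[i] = t1[i1++]; 
            else                     out[i] = t2[i2++]; 
        } 
    } 

The receive and output buffers could then be malloc'd once before the odd-even loop, sized for the largest possible block (say n/nbProcess + 1 elements), and reused every round.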

Bye,

Francis

One more thing: qsort (from <stdlib.h>) will probably be faster for the local sort.
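For reference, a minimal sketch of what that local sort could look like (compare_ints is a hypothetical helper; the call would replace tri_fusion(T, 0, size - 1)):

    #include <stdlib.h> 

    /* comparison callback for qsort: ascending order of ints */ 
    static int compare_ints(const void *a, const void *b){ 
        int x = *(const int*)a; 
        int y = *(const int*)b; 
        return (x > y) - (x < y); 
    } 

    /* in TD_trier, instead of tri_fusion(T, 0, size - 1): */ 
    qsort(T, size, sizeof(int), compare_ints); 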