0
我正在使用 Ubuntu 和 OpenMPI 1.4 進行集羣工作。我的代碼運行良好,但我想測量在根進程和從屬節點之間發送數據所花的時間(即測量 OpenMPI 中進程之間的數據傳輸時間)。
我認爲我的代碼沒有給我正確的信息。
/*
 * Inverts the colors of an image in parallel: rank 0 scatters slices of
 * the pixel array to ranks 1..size-1, each worker inverts its slice, and
 * rank 0 gathers the results back into the image.
 *
 * im       - image to process (only loaded/meaningful on rank 0; workers
 *            receive their slice over MPI)
 * size     - number of ranks in MPI_COMM_WORLD (must be >= 2)
 * rank     - this process' rank
 * sendTime - accumulator, in clock_t ticks, of the wall-clock time rank 0
 *            spends in the send and receive loops; the caller divides it
 *            by CLOCKS_PER_SEC to obtain seconds
 *
 * BUG FIX: MPI_Wtime() returns seconds as a double. The original code
 * stored it in a clock_t, which truncated the sub-second part to zero,
 * and the caller then divided the (already-in-seconds) value by
 * CLOCKS_PER_SEC on top of that. The timestamp is now kept in a double
 * and the elapsed seconds are converted to ticks before accumulating, so
 * the caller's existing division by CLOCKS_PER_SEC yields correct seconds.
 */
void invertColor_Parallel(struct image *im, int size, int rank, clock_t *sendTime)
{
    double begin = 0.0;          /* MPI_Wtime() timestamp, in seconds */
    int i, j, aux;
    int qty = 0, rest = 0;
    MPI_Status status;

    if (rank == 0)
    {
        /* Work distribution is only computed on the root: the image is
           loaded only there, so reading im->ih on a worker would read an
           indeterminate struct. Workers receive their qty via MPI_Recv. */
        int total_pixels = (*im).ih.width * (*im).ih.height;
        qty  = total_pixels / (size - 1);
        rest = total_pixels % (size - 1);

        begin = MPI_Wtime();
        for (i = 1; i < size; i++) {
            j = i*qty - qty;           /* start offset of this worker's slice */
            aux = j;
            if (rest != 0 && i == size-1) { qty = qty + rest; } /* last worker takes the remainder */
            MPI_Send(&aux, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD);
            MPI_Send(&qty, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD);
            MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
        }
        /* Convert elapsed wall-clock seconds to clock ticks before
           accumulating. NOTE: with standard-mode MPI_Send this measures
           only the time until MPI takes over the buffers, not actual
           delivery; use MPI_Ssend to force synchronous semantics. */
        *sendTime += (clock_t)((MPI_Wtime() - begin) * CLOCKS_PER_SEC);
    }
    else
    {
        /* Worker: receive slice offset, slice length, then the pixels. */
        MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD, &status);
        MPI_Recv(&qty, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD, &status);
        pixel *arreglo = calloc(qty, sizeof *arreglo);
        MPI_Recv(&arreglo[0], qty*3, MPI_BYTE, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &status);
        /* Invert each color channel of this worker's slice. */
        for (i = 0; i < qty; i++)
        {
            arreglo[i].R = 255 - arreglo[i].R;
            arreglo[i].G = 255 - arreglo[i].G;
            arreglo[i].B = 255 - arreglo[i].B;
        }
        /* Send the slice back, tagged with its original offset. */
        MPI_Send(&aux, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG+1, MPI_COMM_WORLD);
        MPI_Send(&qty, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG+2, MPI_COMM_WORLD);
        MPI_Send(&arreglo[0], qty*3, MPI_BYTE, 0, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD);
        free(arreglo);
    }

    if (rank == 0)
    {
        begin = MPI_Wtime();
        /* Gather the processed slices back into the image. NOTE: this
           interval also includes the workers' compute time, because rank 0
           blocks here until each worker finishes and replies. */
        for (i = 1; i < size; i++)
        {
            MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, SLAVE_TO_MASTER_TAG+1, MPI_COMM_WORLD, &status);
            MPI_Recv(&qty, 1, MPI_INT, status.MPI_SOURCE, SLAVE_TO_MASTER_TAG+2, MPI_COMM_WORLD, &status);
            MPI_Recv(&(*im).array[aux], qty*3, MPI_BYTE, status.MPI_SOURCE, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD, &status);
        }
        *sendTime += (clock_t)((MPI_Wtime() - begin) * CLOCKS_PER_SEC);
    }
}
/*
 * Reports timing for the parallel section: the total elapsed time, the
 * total minus the accumulated transfer time, and the transfer time alone.
 *
 * begin, end - clock() timestamps taken around the parallel section
 * sendTime   - accumulated transfer time, in clock ticks
 *
 * NOTE(review): the labels look swapped — "Tiempo de envio" (send time)
 * is printed for total-minus-transfer (i.e. compute time), while the
 * transfer time itself is labelled "Tiempo de latencia". Confirm the
 * intended wording before relying on this output.
 */
void runningTime(clock_t begin, clock_t end, clock_t sendTime)
{
    const double total = (double)(end - begin) / CLOCKS_PER_SEC;
    const double transfer = (double)sendTime / CLOCKS_PER_SEC;

    printf("Tiempo con Latencia: %f\n", total);
    printf("Tiempo de envio: %f\n", total - transfer);
    printf("Tiempo de latencia: %f\n\n", transfer);
}
/*
 * Entry point: initializes MPI, loads the input image on rank 0,
 * broadcasts the selected operation, runs it in parallel, and saves the
 * result from rank 0.
 *
 * Usage: prog <op> <input-image> <output-image>
 *
 * Fixes vs. original: validate argc before touching argv[1..3]; parse the
 * integer option with atoi instead of the roundabout (int)atof; drop the
 * unused MPI_Status local and the odd (clock_t)0.0 initializer.
 */
int main(int argc, char *argv[])
{
    clock_t begin = 0, sendTime = 0;   /* sendTime accumulates transfer ticks */
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* All three command-line arguments are required on every rank. */
    if (argc < 4)
    {
        if (rank == 0)
            fprintf(stderr, "Uso: %s <op> <imagen-entrada> <imagen-salida>\n", argv[0]);
        MPI_Finalize();
        return 1;
    }

    int op = atoi(argv[1]);   /* operation selector; an integer, not a float */
    struct image image2;

    if (rank == 0)
    {
        loadImage(&image2, argv[2]);   /* only the root holds the real image */
    }

    /* Broadcast the user's choice to all other ranks. */
    MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);

    switch (op)
    {
    case 1:
        if (rank == 0) { begin = clock(); }
        MPI_Barrier(MPI_COMM_WORLD);
        invertColor_Parallel(&image2, size, rank, &sendTime);
        MPI_Barrier(MPI_COMM_WORLD);
        if (rank == 0)
        {
            runningTime(begin, clock(), sendTime);
            printf("Se invirtieron los colores de la imagen\n\n");
        }
        break;
    default:
        /* Unknown operation: nothing to do. */
        break;
    }

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
    {
        saveImage(&image2, argv[3]);
        free(image2.array);
    }
    MPI_Finalize();
    return 0;
}
我想測量進程之間的數據傳輸時間,以及 invertColor_Parallel 函數的整體運行時間。我究竟做錯了什麼?
非常感謝。
但是我寫了 `begin = MPI_Wtime(); MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD); end = MPI_Wtime(); printf("經過的時間: %f 秒\n", end - begin);`。通過這段代碼,我得到的究竟是發送數據所需的時間,還是調用 MPI_Send 函數本身所花的時間?謝謝。—— 是後者。 –
這是否與實際發送數據所需的時間相符,取決於發送操作的模式。把 `MPI_Send` 替換為 `MPI_Ssend` 以強制使用同步模式,這可以確保在消息開始傳輸之前 MPI 調用不會返回。 –
謝謝,現在我可以得到更準確的執行時間,並可以準確計算加速和其他指標。 –