
Here I want to compute the distance between every two points and decide whether they are neighbors. This is my simple CUDA code.

__global__ void calcNeighbors(const DataPoint* points, 
    const float doubleRadius, bool* neighbors) { 

int tid = threadIdx.x + blockIdx.x * blockDim.x; 

float dis = 0.0f; 
while (tid < N) { 
    DataPoint p1 = points[tid]; 
    for (int i=0; i<N; i++) { 
     DataPoint p2 = points[i]; 
     dis = 0; 
     dis += (p1.pfDimens[0]-p2.pfDimens[0]) * (p1.pfDimens[0]-p2.pfDimens[0]) + 
      (p1.pfDimens[1]-p2.pfDimens[1]) * (p1.pfDimens[1]-p2.pfDimens[1]) + 
      (p1.pfDimens[2]-p2.pfDimens[2]) * (p1.pfDimens[2]-p2.pfDimens[2]); 
     if (dis <= doubleRadius) { 
      neighbors[tid*N+i] = true; 
     } else { 
      neighbors[tid*N+i] = false; 
     } 
    } 

    tid += blockDim.x * gridDim.x; 
} 
} 

DataPoint is a struct:

typedef struct DataPoint { 
    float pfDimens[3]; 
} DataPoint; 

So here I want to reduce the time. What should I do? I have tried memory coalescing and shared memory, but I did not get a good speedup.

=============== Using shared memory ===============

__global__ void calcNeighbors2(const DataPoint* points, 
    const float doubleRadius, bool* neighbors) { 
__shared__ DataPoint sharedpoints[threadsPerBlock]; 

int start = blockIdx.x * blockDim.x; 
int len = start+threadIdx.x; 
if (len < N) { 
    sharedpoints[threadIdx.x] = points[len]; 
} 
len = imin(N, blockDim.x + start); 
__syncthreads(); 

int tid = threadIdx.x; 
float dis; 
while (tid < N) { 
    DataPoint p1 = points[tid]; 
    for (int i=start; i<len; i++) { 
     dis = 0; 
     dis += (p1.pfDimens[0]-sharedpoints[i-start].pfDimens[0]) * (p1.pfDimens[0]-sharedpoints[i-start].pfDimens[0]) + 
      (p1.pfDimens[1]-sharedpoints[i-start].pfDimens[1]) * (p1.pfDimens[1]-sharedpoints[i-start].pfDimens[1]) + 
      (p1.pfDimens[2]-sharedpoints[i-start].pfDimens[2]) * (p1.pfDimens[2]-sharedpoints[i-start].pfDimens[2]); 
     if (dis <= doubleRadius) { 
      neighbors[i*N+tid] = true; 
     } else { 
      neighbors[i*N+tid] = false; 
     } 

    } 

    tid += blockDim.x; 
} 
} 

Here I changed neighbors[tid*N+i] to neighbors[i*N+tid], which gives me an 8x speedup on a Tesla K10.G2.8GB. But when I use shared memory to store some of the points, it is no use. Why?


Your algorithm has O(N^2) complexity, but you can eliminate half of the computations, since right now you compute each pairwise distance twice: 'dist(A,B)' and then 'dist(B,A)'. Second, try aligning the DataPoint struct to 16 bytes, or use 'float3' instead of the custom type, and try processing several points per thread (enough to saturate registers or shared memory). –
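(A minimal sketch of the 16-byte alignment idea from the comment above; the explicit padding member is an assumption added for illustration, not something from the original post:)

    // Pad DataPoint to 16 bytes so one point can be fetched as a single 128-bit load.
    typedef struct __align__(16) DataPoint {
        float pfDimens[3];
        float pad;   // unused; present only to reach a 16-byte size and alignment
    } DataPoint;

    // An equivalent alternative is to store the points as float4 and ignore the w component.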


Also, as written, this algorithm will incorrectly identify every point as its own neighbor. – talonmies


The algorithm has been tested, and eliminating half of the computations may add more instructions. And assigning the value to both neighbors[tid*N+i] (now changed to neighbors[i*N+tid] for memory coalescing) and neighbors[i*N+tid] would, I think, take a lot of time. I have tried using __align__(16) on the struct, but it was no use. – Sethbrin

Answer


There are at least 4 ideas, some of which have already been pointed out in the comments:

  1. Convert your point storage from AoS format:

    struct DataPoint { 
        float pfDimens[3]; 
    }; 
    

    to SoA format:

    struct DataPoint { 
        float pfDimens_x[NPTS]; 
        float pfDimens_y[NPTS]; 
        float pfDimens_z[NPTS]; 
    }; 
    

    This will enable fully coalesced loading of the data. In fact, to help with item 4 below, I would switch to using 3 bare arrays, rather than a structure.

  2. Reduce the computation to (slightly less than) half:

    for (int i=N-1; i>tid; i--) { 
    

    Then, either in the thread code itself or on the host, you can fill in the other "half" of the output matrix by copying the data (a short host-side sketch of this copy step is given after this list).

  3. Transpose the storage of your output matrix, so that you can write a store operation like this:

    neighbors[i*N+tid] = true; 
    

    which will coalesce nicely, as opposed to this:

    neighbors[tid*N+i] = true; 
    

    which will not.

  4. Since your input point data is read-only, mark the kernel parameters appropriately:

    const float * __restrict__ points_x, const float * __restrict__ points_y, const float * __restrict__ points_z 
    
    In some cases, and on certain GPUs, this will often result in a speedup due to use of the read-only cache. If you really want to cache aggressively, and your data arrays are small enough (4K points or less), you could put a copy of the point data in __constant__ memory as well as in global memory, and do the "uniform" load you are doing here through constant memory:

    DataPoint p2 = c_points[i]; 
    

    Thus you could do your coalesced loads through the read-only cache, your uniform loads through the constant cache, and your coalesced stores to ordinary global memory.
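As a sketch of the "fill the other half" step in item 2 (a hypothetical host-side helper, not part of the timed code below), assuming the kernel has written neighbors[i*N+tid] only for i > tid:

    // Mirror the half of the neighbor matrix the kernel computed into the missing half,
    // and set the diagonal the way the original kernel does (each point is its own neighbor).
    void fillOtherHalf(bool *neighbors, int n) {
        for (int i = 0; i < n; i++) {
            neighbors[i*n + i] = true;
            for (int j = i + 1; j < n; j++)
                neighbors[i*n + j] = neighbors[j*n + i];   // copy from the computed half
        }
    }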

On a K40c, on Linux/CUDA 7, for N = 4096, the net effect of these changes appears to be about a 3.5x speedup, at the kernel level:

$ cat t749.cu 
#include <stdio.h> 

#define N 4096 
// if N is 16K/3 or less, we can use constant 
#define USE_CONSTANT 
#define THRESH 0.2f 
#define nTPB 256 
#define nBLK (N/nTPB+1) 

#define cudaCheckErrors(msg) \ 
    do { \ 
     cudaError_t __err = cudaGetLastError(); \ 
     if (__err != cudaSuccess) { \ 
      fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ 
       msg, cudaGetErrorString(__err), \ 
       __FILE__, __LINE__); \ 
      fprintf(stderr, "*** FAILED - ABORTING\n"); \ 
      exit(1); \ 
     } \ 
    } while (0) 


#include <time.h> 
#include <sys/time.h> 
#define USECPSEC 1000000ULL 

unsigned long long dtime_usec(unsigned long long start){ 

    timeval tv; 
    gettimeofday(&tv, 0); 
    return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start; 
} 

struct DataPoint { 
    float pfDimens[3]; 
}; 


__global__ void calcNeighbors(const DataPoint* points, 
    const float doubleRadius, bool* neighbors) { 

    int tid = threadIdx.x + blockIdx.x * blockDim.x; 

    float dis = 0.0f; 
    while (tid < N) { 
    DataPoint p1 = points[tid]; 
    for (int i=0; i<N; i++) { 
     DataPoint p2 = points[i]; 
     dis = 0; 
     dis += (p1.pfDimens[0]-p2.pfDimens[0]) * (p1.pfDimens[0]-p2.pfDimens[0]) + 
      (p1.pfDimens[1]-p2.pfDimens[1]) * (p1.pfDimens[1]-p2.pfDimens[1]) + 
      (p1.pfDimens[2]-p2.pfDimens[2]) * (p1.pfDimens[2]-p2.pfDimens[2]); 
     if (dis <= doubleRadius) { 
      neighbors[tid*N+i] = true; 
     } else { 
      neighbors[tid*N+i] = false; 
     } 
    } 

    tid += blockDim.x * gridDim.x; 
    } 
} 

#ifdef USE_CONSTANT 
__constant__ float cpx[N]; 
__constant__ float cpy[N]; 
__constant__ float cpz[N]; 
#endif 

__global__ void calcNeighbors2(const float * __restrict__ pts_x, const float * __restrict__ pts_y, const float * __restrict__ pts_z, const float doubleRadius, bool * __restrict__ neighbors) { 

    int tid = threadIdx.x+blockDim.x*blockIdx.x; 

    while (tid < N) { 
    float p1x = pts_x[tid]; 
    float p1y = pts_y[tid]; 
    float p1z = pts_z[tid]; 
    for (int i = N-1; i > tid; i--){ 
     float p2x, p2y, p2z; 
#ifdef USE_CONSTANT 
     p2x = cpx[i]; 
     p2y = cpy[i]; 
     p2z = cpz[i]; 
#else 
     p2x = pts_x[i]; 
     p2y = pts_y[i]; 
     p2z = pts_z[i]; 
#endif 
     float dis = ((p1x-p2x)*(p1x-p2x)) + ((p1y-p2y)*(p1y-p2y)) + ((p1z-p2z)*(p1z-p2z)); 
     neighbors[i*N+tid] = (dis <= doubleRadius); 
     } 
    tid += blockDim.x * gridDim.x; 
    } 
} 

int main(){ 

    float *dx, *dy, *dz, *hx, *hy, *hz; 
    DataPoint *dp, *hp; 
    bool *dn, *hn1, *hn2; 
    hx =(float *)malloc(N*sizeof(float)); 
    hy =(float *)malloc(N*sizeof(float)); 
    hz =(float *)malloc(N*sizeof(float)); 
    hp =(DataPoint *)malloc(N*sizeof(DataPoint)); 
    hn1=(bool *)malloc(N*N*sizeof(bool)); 
    hn2=(bool *)malloc(N*N*sizeof(bool)); 
    cudaMalloc(&dx, N*sizeof(float)); 
    cudaMalloc(&dy, N*sizeof(float)); 
    cudaMalloc(&dz, N*sizeof(float)); 
    cudaMalloc(&dp, N*sizeof(DataPoint)); 
    cudaMalloc(&dn, N*N*sizeof(bool)); 

    for (int i =0; i < N; i++){ 
    hx[i] = rand()/(float)RAND_MAX; 
    hy[i] = rand()/(float)RAND_MAX; 
    hz[i] = rand()/(float)RAND_MAX; 
    hp[i].pfDimens[0] = hx[i]; 
    hp[i].pfDimens[1] = hy[i]; 
    hp[i].pfDimens[2] = hz[i];} 

    cudaMemcpy(dx, hx, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dy, hy, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dz, hz, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dp, hp, N*sizeof(DataPoint), cudaMemcpyHostToDevice); 
#ifdef USE_CONSTANT 
    cudaMemcpyToSymbol(cpx, hx, N*sizeof(float)); 
    cudaMemcpyToSymbol(cpy, hy, N*sizeof(float)); 
    cudaMemcpyToSymbol(cpz, hz, N*sizeof(float)); 
#endif 

    // warm-up 
    calcNeighbors<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaMemset(dn, 0, N*N*sizeof(bool)); 
    unsigned long long t1 = dtime_usec(0); 
    calcNeighbors<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaCheckErrors("kernel 1 error"); 
    t1 = dtime_usec(t1); 
    cudaMemcpy(hn1, dn, N*N*sizeof(bool), cudaMemcpyDeviceToHost); 
    // warm-up 
    calcNeighbors2<<<nBLK, nTPB>>>(dx, dy, dz, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaMemset(dn, 0, N*N*sizeof(bool)); 
    unsigned long long t2 = dtime_usec(0); 
    calcNeighbors2<<<nBLK, nTPB>>>(dx, dy, dz, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaCheckErrors("kernel 2 error"); 
    t2 = dtime_usec(t2); 
    cudaMemcpy(hn2, dn, N*N*sizeof(bool), cudaMemcpyDeviceToHost); 
    cudaCheckErrors("some error"); 
    printf("t1: %fs, t2: %fs\n", t1/(float)USECPSEC, t2/(float)USECPSEC); 
    // results validation 
    for (int i = 0; i < N; i++) 
    for (int j = i+1; j < N; j++) 
     if (hn1[i*N+j] != hn2[j*N+i]) {printf("mismatch at %d, %d, was: %d, should be: %d\n", i, j, hn2[j*N+i], hn1[i*N+j]); return 1;} 
    return 0; 
} 
$ nvcc -arch=sm_35 -o t749 t749.cu 
$ ./t749 
t1: 0.004903s, t2: 0.001395s 
$ 

In the case of the K40c, the limited number of blocks launched above (16) is a significant impediment to performance, due to latency. If we comment out the USE_CONSTANT define and change N to 16384, we observe an even higher speedup with the improved kernel:

$ ./t749 
t1: 0.267107s, t2: 0.008209s 
$ 

The resulting ~64 blocks (16384/256) are roughly enough to "fill" the K40c, which has 15 SMs.
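If you want to check this kind of arithmetic on your own GPU, one option (a small sketch, assuming the calcNeighbors2 kernel, nTPB and nBLK definitions from the code above) is the occupancy API:

    // Report how many blocks of nTPB threads the device can keep resident at once,
    // versus the number of blocks this launch actually creates.
    void reportOccupancy() {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, 0);
        int blocksPerSM = 0;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, calcNeighbors2, nTPB, 0);
        printf("SMs: %d, resident blocks/SM: %d, blocks to fill the device: %d, blocks launched: %d\n",
               prop.multiProcessorCount, blocksPerSM,
               prop.multiProcessorCount * blocksPerSM, nBLK);
    }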

EDIT: Now that you have posted your shared memory kernel, I have added it to my test case as calcNeighbors3 and compared its timing performance (as t3). It is nearly as fast as my kernel, and it appears to provide correct results (it matches your original kernel), so I'm not sure what your concern is.

Here is the updated code and test case:

$ cat t749.cu 
#include <stdio.h> 
#include <math.h> 

#define imin(X,Y) ((X)<(Y))?(X):(Y) 

#define N 32768 
// if N is 16K/3 or less, we can use constant 
// #define USE_CONSTANT 
#define THRESH 0.2f 
#define nTPB 256 
#define nBLK (N/nTPB+1) 

#define cudaCheckErrors(msg) \ 
    do { \ 
     cudaError_t __err = cudaGetLastError(); \ 
     if (__err != cudaSuccess) { \ 
      fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ 
       msg, cudaGetErrorString(__err), \ 
       __FILE__, __LINE__); \ 
      fprintf(stderr, "*** FAILED - ABORTING\n"); \ 
      exit(1); \ 
     } \ 
    } while (0) 


#include <time.h> 
#include <sys/time.h> 
#define USECPSEC 1000000ULL 

unsigned long long dtime_usec(unsigned long long start){ 

    timeval tv; 
    gettimeofday(&tv, 0); 
    return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start; 
} 

struct DataPoint { 
    float pfDimens[3]; 
}; 


__global__ void calcNeighbors(const DataPoint* points, 
    const float doubleRadius, bool* neighbors) { 

    int tid = threadIdx.x + blockIdx.x * blockDim.x; 

    float dis = 0.0f; 
    while (tid < N) { 
    DataPoint p1 = points[tid]; 
    for (int i=0; i<N; i++) { 
     DataPoint p2 = points[i]; 
     dis = 0; 
     dis += (p1.pfDimens[0]-p2.pfDimens[0]) * (p1.pfDimens[0]-p2.pfDimens[0]) + 
      (p1.pfDimens[1]-p2.pfDimens[1]) * (p1.pfDimens[1]-p2.pfDimens[1]) + 
      (p1.pfDimens[2]-p2.pfDimens[2]) * (p1.pfDimens[2]-p2.pfDimens[2]); 
     if (dis <= doubleRadius) { 
      neighbors[tid*N+i] = true; 
     } else { 
      neighbors[tid*N+i] = false; 
     } 
    } 

    tid += blockDim.x * gridDim.x; 
    } 
} 

#ifdef USE_CONSTANT 
__constant__ float cpx[N]; 
__constant__ float cpy[N]; 
__constant__ float cpz[N]; 
#endif 

__global__ void calcNeighbors2(const float * __restrict__ pts_x, const float * __restrict__ pts_y, const float * __restrict__ pts_z, const float doubleRadius, bool * __restrict__ neighbors) { 

    int tid = threadIdx.x+blockDim.x*blockIdx.x; 

    while (tid < N) { 
    float p1x = pts_x[tid]; 
    float p1y = pts_y[tid]; 
    float p1z = pts_z[tid]; 
    for (int i = N-1; i > tid; i--){ 
     float p2x, p2y, p2z; 
#ifdef USE_CONSTANT 
     p2x = cpx[i]; 
     p2y = cpy[i]; 
     p2z = cpz[i]; 
#else 
     p2x = pts_x[i]; 
     p2y = pts_y[i]; 
     p2z = pts_z[i]; 
#endif 
     float dis = ((p1x-p2x)*(p1x-p2x)) + ((p1y-p2y)*(p1y-p2y)) + ((p1z-p2z)*(p1z-p2z)); 
     neighbors[i*N+tid] = (dis <= doubleRadius); 
     } 
    tid += blockDim.x * gridDim.x; 
    } 
} 

__global__ void calcNeighbors3(const DataPoint* points, 
    const float doubleRadius, bool* neighbors) { 
__shared__ DataPoint sharedpoints[nTPB]; 

int start = blockIdx.x * blockDim.x; 
int len = start+threadIdx.x; 
if (len < N) { 
    sharedpoints[threadIdx.x] = points[len]; 
} 
len = imin(N, blockDim.x + start); 
__syncthreads(); 

int tid = threadIdx.x; 
float dis; 
while (tid < N) { 
    DataPoint p1 = points[tid]; 
    for (int i=start; i<len; i++) { 
     dis = 0; 
     dis += (p1.pfDimens[0]-sharedpoints[i-start].pfDimens[0]) * (p1.pfDimens[0]-sharedpoints[i-start].pfDimens[0]) + 
      (p1.pfDimens[1]-sharedpoints[i-start].pfDimens[1]) * (p1.pfDimens[1]-sharedpoints[i-start].pfDimens[1]) + 
      (p1.pfDimens[2]-sharedpoints[i-start].pfDimens[2]) * (p1.pfDimens[2]-sharedpoints[i-start].pfDimens[2]); 
     if (dis <= doubleRadius) { 
      neighbors[i*N+tid] = true; 
     } else { 
      neighbors[i*N+tid] = false; 
     } 

    } 

    tid += blockDim.x; 
} 
} 


int main(){ 

    float *dx, *dy, *dz, *hx, *hy, *hz; 
    DataPoint *dp, *hp; 
    bool *dn, *hn1, *hn2, *hn3; 
    hx =(float *)malloc(N*sizeof(float)); 
    hy =(float *)malloc(N*sizeof(float)); 
    hz =(float *)malloc(N*sizeof(float)); 
    hp =(DataPoint *)malloc(N*sizeof(DataPoint)); 
    hn1=(bool *)malloc(N*N*sizeof(bool)); 
    hn2=(bool *)malloc(N*N*sizeof(bool)); 
    hn3=(bool *)malloc(N*N*sizeof(bool)); 
    cudaMalloc(&dx, N*sizeof(float)); 
    cudaMalloc(&dy, N*sizeof(float)); 
    cudaMalloc(&dz, N*sizeof(float)); 
    cudaMalloc(&dp, N*sizeof(DataPoint)); 
    cudaMalloc(&dn, N*N*sizeof(bool)); 

    for (int i =0; i < N; i++){ 
    hx[i] = rand()/(float)RAND_MAX; 
    hy[i] = rand()/(float)RAND_MAX; 
    hz[i] = rand()/(float)RAND_MAX; 
    hp[i].pfDimens[0] = hx[i]; 
    hp[i].pfDimens[1] = hy[i]; 
    hp[i].pfDimens[2] = hz[i];} 

    cudaMemcpy(dx, hx, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dy, hy, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dz, hz, N*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(dp, hp, N*sizeof(DataPoint), cudaMemcpyHostToDevice); 
#ifdef USE_CONSTANT 
    cudaMemcpyToSymbol(cpx, hx, N*sizeof(float)); 
    cudaMemcpyToSymbol(cpy, hy, N*sizeof(float)); 
    cudaMemcpyToSymbol(cpz, hz, N*sizeof(float)); 
#endif 
    // warm-up 
    calcNeighbors<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaMemset(dn, 0, N*N*sizeof(bool)); 
    unsigned long long t1 = dtime_usec(0); 
    calcNeighbors<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaCheckErrors("kernel 1 error"); 
    t1 = dtime_usec(t1); 
    cudaMemcpy(hn1, dn, N*N*sizeof(bool), cudaMemcpyDeviceToHost); 
    // warm-up 
    calcNeighbors2<<<nBLK, nTPB>>>(dx, dy, dz, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaMemset(dn, 0, N*N*sizeof(bool)); 
    unsigned long long t2 = dtime_usec(0); 
    calcNeighbors2<<<nBLK, nTPB>>>(dx, dy, dz, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaCheckErrors("kernel 2 error"); 
    t2 = dtime_usec(t2); 
    cudaMemcpy(hn2, dn, N*N*sizeof(bool), cudaMemcpyDeviceToHost); 
    // warm-up 
    calcNeighbors3<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaMemset(dn, 0, N*N*sizeof(bool)); 
    unsigned long long t3 = dtime_usec(0); 
    calcNeighbors3<<<nBLK, nTPB>>>(dp, THRESH, dn); 
    cudaDeviceSynchronize(); 
    cudaCheckErrors("kernel 3 error"); 
    t3 = dtime_usec(t3); 
    cudaMemcpy(hn3, dn, N*N*sizeof(bool), cudaMemcpyDeviceToHost); 
    cudaCheckErrors("some error"); 
    printf("t1: %fs, t2: %fs, t3: %fs\n", t1/(float)USECPSEC, t2/(float)USECPSEC, t3/(float)USECPSEC); 
    // results validation 
    for (int i = 0; i < N; i++) 
    for (int j = i+1; j < N; j++) 
     if (hn1[i*N+j] != hn2[j*N+i]) {printf("1:2 mismatch at %d, %d, was: %d, should be: %d\n", i, j, hn2[j*N+i], hn1[i*N+j]); return 1;} 
    for (int i = 0; i < N*N; i++) 
    if (hn1[i] != hn3[i]) {printf("1:3 mismatch at %d, was: %d, should be: %d\n", i, hn1[i], hn3[i]); return 1;} 
    return 0; 
} 
$ nvcc -arch=sm_35 -o t749 t749.cu 
$ ./t749 
t1: 1.260010s, t2: 0.022661s, t3: 0.029632s 
$ 

For this test, I have changed the data set size to 32768, since that is closer to the range you care about. Your shared memory kernel shows a 42x speedup over your original kernel, and my kernel shows about a 55x speedup, on my K40c.


Thanks. But what if I want to use shared memory? I tried shared memory, but it was no use, and I don't know why that happens. – Sethbrin


When launching the kernel, I only use (N-threadsPerBlock+1)/threadsPerBlock blocks. I use while(tid – Sethbrin


I have updated my test code to try your shared memory kernel, now that you have posted it. It appears to work correctly and runs quite fast, much faster than your original code. I'm not sure what your concern with it is. –