Why does this CUDA kernel produce non-deterministic results?

I have constructed a minimal example of a problem I am running into in a larger code sample. In this example, I want to find the sum-of-squared error of some data ys against functions fs, but I want to do it for several functions at once, so I build fs as a matrix. The original data has length gridSize, and I want to evaluate this cost function for nGrids functions at a time, so fs has size nGrids*gridSize.
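Concretely, the cost I am after for each grid ig is the one computed by the host reference H_get_costs further down:

    costs[ig] = sum over ir = 0 .. gridSize-2 of (ys[ir] - fs[ig*gridSize + ir])^2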

I am finding that the CUDA kernel gives unreliable results in a non-deterministic way, which leads me to believe I am not handling my threads correctly (this is my first CUDA kernel!). I ran cuda-memcheck on this program and it reports no errors.

To probe the sporadic nature of these errors, I wrote a script that runs the program 100 times and counts how often the results come out wrong. I found that the chance of a wrong result grows with gridSize (a sketch of such a repeat-and-compare check follows the table):

gridSize ... Errors 
    300 ... 0/100 
    400 ... 0/100 
    450 ... 4/100 
    500 ... 5/100 
    550 ... 55/100 
    600 ... 59/100 
    650 ... 100/100 
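
A minimal sketch of such a repeat-and-compare check (illustrative only, not the exact script: it assumes the H_get_costs/D_get_costs routines and the host arrays from the full listing below, an extra #include <math.h> for fabsf, and a hypothetical tolerance tol; the variable names are likewise illustrative):

    // Re-run the device path many times and count runs where any grid's
    // cost drifts from the host reference by more than tol.
    int nRuns = 100, nBad = 0;
    float tol = 1e-3f;   // hypothetical tolerance, not from the original script
    H_get_costs(h_xs, h_ys, h_fs, h_costs);
    for (int run = 0; run < nRuns; run++) {
        D_get_costs(h_xs, h_ys, h_fs, d_costs);
        for (int ig = 0; ig < nGrids; ig++) {
            if (fabsf(d_costs[ig] - h_costs[ig]) > tol) { nBad++; break; }
        }
    }
    printf("%d/%d runs produced a mismatch\n", nBad, nRuns);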

The idea here is to have each block work on a single grid, so that scaling up the parallelism only requires launching more CUDA blocks. Hence I launch 12 blocks here, since there are 12 grids. For this code, gridSize will never exceed 1000, so I just leave Nthreads at 1024 (since my NVIDIA GTX 770 allows 1024 threads per block).

Here is the code:

#include <stdio.h> 

#define nGrids 12 
#define gridSize 700 

void H_get_costs(float* h_xs, float* h_ys, float* h_fs, float* h_costs); 
void D_get_costs(float* h_xs, float* h_ys, float* h_fs, float* d_costs); 

/**************\ 
* cuda Costs * 
\**************/ 
__global__ void cuCosts(float* d_xs, float* d_ys, float* d_fs, float* d_costs) { 
    int ir = threadIdx.x; 
    int ig = blockIdx.x; 

    __shared__ float diff[1024]; 

    diff[ir] = 0.0; 
    __syncthreads(); 

    if(ir < gridSize-1 && ig < nGrids) { 
     diff[ir] = (d_ys[ir] - d_fs[ig*gridSize + ir])*(d_ys[ir] - d_fs[ig*gridSize + ir]); 
     __syncthreads(); 
     // reduction 
     for(int s=1; s < blockDim.x; s*=2) { 
      if(ir%(2*s) == 0 && ir+s < gridSize){ 
       diff[ir] += diff[ir+s]; 
      } 
     } 
     __syncthreads(); 
     d_costs[ig] = diff[0]; 
    } 
    __syncthreads(); 
} 


/****************\ 
* Main routine * 
\****************/ 
int main(int argc, char** argv) { 

    float h_xs[gridSize]; 
    float h_ys[gridSize]; 
    float h_fs[gridSize*nGrids]; 

    for(int ir = 0; ir < gridSize; ir++) { 
     h_xs[ir] = (float)ir/10.0; 
     h_ys[ir] = (float)ir/10.0; 
    } 

    for(int ir = 0; ir < gridSize; ir++) { 
     for(int jgrid = 0; jgrid < nGrids; jgrid++) { 
      float trand = 2.0*((float)rand()/(float)RAND_MAX) - 1.0; 
      h_fs[jgrid*gridSize + ir] = h_ys[ir] + trand; 
     } 
    } 

    float h_costs[nGrids]; 
    float d_costs[nGrids]; 

    // get all of the costs (on the host) 
    H_get_costs(h_xs, h_ys, h_fs, h_costs); 

    // get all of the costs (on the device) 
    D_get_costs(h_xs, h_ys, h_fs, d_costs); 

    // Print the grids 
    /* 
    for(int ir = 0; ir < gridSize; ir++) { 
     printf("%10.5e %15.5e", h_xs[ir], h_ys[ir]); 
     for(int jg = 0; jg < nGrids; jg++) { 
      printf("%15.5e", h_fs[jg*gridSize + ir]); 
     } 
     printf("\n"); 
    } 
    */ 

    // print the results 
    printf("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"); 
    printf("%-25s ", "Host ... "); 
    for(int ig = 0; ig < nGrids; ig++) { 
     printf("%15.5e", h_costs[ig]); 
    } 
    printf("\n"); 
    printf("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"); 
    printf("%-25s ", "Device ... "); 
    for(int ig = 0; ig < nGrids; ig++) { 
     printf("%15.5e", d_costs[ig]); 
    } 
    printf("\n"); 
    printf("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"); 
    printf("%-25s ", "Difference ... "); 
    for(int ig = 0; ig < nGrids; ig++) { 
     printf("%15.5e", d_costs[ig]-h_costs[ig]); 
    } 
    printf("\n"); 

    return 0; 
} 

/*******************************\ 
* get the costs (on the host) * 
\*******************************/ 
void H_get_costs(float* h_xs, float* h_ys, float* h_fs, float* h_costs) { 
    for(int ig = 0; ig < nGrids; ig++) { h_costs[ig] = 0.0; } 
    for(int ir = 0; ir < gridSize-1; ir++) { 
     for(int ig = 0; ig < nGrids; ig++) { 
      h_costs[ig] += (h_ys[ir] - h_fs[ig*gridSize + ir])*(h_ys[ir] - h_fs[ig*gridSize + ir]); 
     } 
    } 
} 

/**************************\ 
* wrapper for cuda costs * 
\**************************/ 
void D_get_costs(float* h_xs_p, float* h_ys_p, float* h_fs_p, float* r_costs) { 
    float* d_xs; 
    float* d_ys; 
    float* d_fs; 

    float* d_costs; // device costs 
    float* t_costs; // temporary costs 

    cudaMalloc((void**)&d_xs, gridSize*sizeof(float)); 
    cudaMalloc((void**)&d_ys, gridSize*sizeof(float)); 
    cudaMalloc((void**)&d_fs, nGrids*gridSize*sizeof(float)); 
    cudaMalloc((void**)&d_costs, nGrids*sizeof(float)); 

    t_costs = (float*)malloc(nGrids*sizeof(float)); 

    cudaMemcpy(d_xs, h_xs_p, gridSize*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(d_ys, h_ys_p, gridSize*sizeof(float), cudaMemcpyHostToDevice); 
    cudaMemcpy(d_fs, h_fs_p, nGrids*gridSize*sizeof(float), cudaMemcpyHostToDevice); 

    int Nthreads = 1024; 
    int Nblocks = nGrids; 

    cuCosts<<<Nblocks, Nthreads>>>(d_xs, d_ys, d_fs, d_costs); 

    cudaMemcpy(t_costs, d_costs, nGrids*sizeof(float), cudaMemcpyDeviceToHost); 

    for(int ig = 0; ig < nGrids; ig++) { 
     r_costs[ig] = t_costs[ig]; 
    } 

    cudaFree(d_xs); 
    cudaFree(d_ys); 
    cudaFree(d_fs); 
} 

In case it matters, here are my hardware specs:

CUDA Device Query (Runtime API) version (CUDART static linking) 

Detected 1 CUDA Capable device(s) 

Device 0: "GeForce GTX 770" 
    CUDA Driver Version/Runtime Version   6.0/5.5 
    CUDA Capability Major/Minor version number: 3.0 
    Total amount of global memory:     2047 MBytes (2146762752 bytes) 
    (8) Multiprocessors, (192) CUDA Cores/MP:  1536 CUDA Cores 
    GPU Clock rate:        1084 MHz (1.08 GHz) 
    Memory Clock rate:        3505 Mhz 
    Memory Bus Width:        256-bit 
    L2 Cache Size:         524288 bytes 
    Maximum Texture Dimension Size (x,y,z)   1D=(65536), 2D=(65536, 65536), 3D=(4096, 4096, 4096) 
    Maximum Layered 1D Texture Size, (num) layers 1D=(16384), 2048 layers 
    Maximum Layered 2D Texture Size, (num) layers 2D=(16384, 16384), 2048 layers 
    Total amount of constant memory:    65536 bytes 
    Total amount of shared memory per block:  49152 bytes 
    Total number of registers available per block: 65536 
    Warp size:          32 
    Maximum number of threads per multiprocessor: 2048 
    Maximum number of threads per block:   1024 
    Max dimension size of a thread block (x,y,z): (1024, 1024, 64) 
    Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535) 
    Maximum memory pitch:       2147483647 bytes 
    Texture alignment:        512 bytes 
    Concurrent copy and kernel execution:   Yes with 1 copy engine(s) 
    Run time limit on kernels:      Yes 
    Integrated GPU sharing Host Memory:   No 
    Support host page-locked memory mapping:  Yes 
    Alignment requirement for Surfaces:   Yes 
    Device has ECC support:      Disabled 
    Device supports Unified Addressing (UVA):  Yes 
    Device PCI Bus ID/PCI location ID:   1/0 
    Compute Mode: 
    < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) > 

deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 6.0, CUDA Runtime Version = 5.5, NumDevs = 1, Device0 = GeForce GTX 770 
Result = PASS 

__syncthreads() should be called by all threads, otherwise you will get UB. – KiaMorot

Answer


Your kernel code has multiple synchronisation problems which are causing the trouble. Firstly, you have branching around a __syncthreads() call, which is undefined behaviour in CUDA. Secondly, you are missing a synchronisation point inside the reduction loop, which means the warp-to-warp accumulation is not correct. Something like this:

__global__ void cuCosts(float* d_xs, float* d_ys, 
         float* d_fs, float* d_costs) 
{ 
    int ir = threadIdx.x; 
    int ig = blockIdx.x; 

    __shared__ float diff[1024]; 

    diff[ir] = 0.0; 
    __syncthreads(); 

    if(ir < gridSize-1 && ig < nGrids) { 
     diff[ir] = (d_ys[ir] - d_fs[ig*gridSize + ir])*(d_ys[ir] - d_fs[ig*gridSize + ir]); 
    } 
    __syncthreads(); 

    // reduction 
    for(int s=1; s < blockDim.x; s*=2) { 
     if(ir%(2*s) == 0 && ir+s < gridSize){ 
      diff[ir] += diff[ir+s]; 
     } 
     __syncthreads(); 
    } 
    d_costs[ig] = diff[0]; 
} 

should probably work correctly [disclaimer: written in the browser, not tested, use at your own risk]
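
A more conventional sequential-addressing form of the same reduction avoids the modulo test, so whole warps retire together instead of diverging on ir % (2*s). A minimal sketch, assuming blockDim.x is a power of two (it is 1024 here) and diff[] is zero-initialised as above; also untested:

    __global__ void cuCosts(float* d_xs, float* d_ys,
                            float* d_fs, float* d_costs)
    {
        int ir = threadIdx.x;
        int ig = blockIdx.x;

        __shared__ float diff[1024];

        // zero padding so out-of-range entries contribute nothing to the sum
        diff[ir] = 0.0f;
        __syncthreads();

        if (ir < gridSize-1 && ig < nGrids) {
            float d = d_ys[ir] - d_fs[ig*gridSize + ir];
            diff[ir] = d*d;
        }
        __syncthreads();

        // sequential addressing: halve the active range each step,
        // every thread in the block reaches the barrier
        for (int s = blockDim.x/2; s > 0; s >>= 1) {
            if (ir < s) {
                diff[ir] += diff[ir+s];
            }
            __syncthreads();
        }

        if (ir == 0) {
            d_costs[ig] = diff[0];
        }
    }

Only thread 0 writes the final result, and because of the zero padding the lanes beyond gridSize-1 add nothing to the total.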
