2014-06-22

I want to compute out = alpha * px + beta * py, where px and py are arrays (CUDA loop unrolling).

I have a simple kernel:

__global__ 
void saxpyGPU2(float *out, const float *px, const float *py, size_t N, float alpha,float beta) 
{ 
    size_t i = blockDim.x*blockIdx.x + threadIdx.x; 
    while (i < N) 
    { 
     out[i] = alpha * px[i] + beta * py[i]; 
     i += blockDim.x*gridDim.x; 
    } 
} 

It works, so I wanted to unroll the loop.

The code in the CUDA Handbook is:

template<const int n> 
__device__ 
void saxpy_unrolled(float *out, const float *px, const float *py, size_t N, float alpha,float beta) 
{ 
    float x[n], y[n]; 
    size_t i; 
    for (i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x) { 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      x[j] = px[index]; 
      y[j] = py[index]; 
     } 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      out[index] = alpha*x[j]+beta* y[j]; 
     } 
    } 
    // to avoid the (index<N) conditional in the inner loop, 
    // we left off some work at the end 
    for (int j = 0; j < n; j++) { 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      if (index<N) { 
       x[j] = px[index]; 
       y[j] = py[index]; 
      } 
     } 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      if (index<N) out[index] = alpha*x[j]+beta* y[j]; 
     } 
    } 
} 

__global__ 
void saxpyGPU(float *out, const float *px, const float *py, size_t N, float alpha,float beta) 
{ 
    saxpy_unrolled<4>(out, px, py, N, alpha ,beta); 
} 

What I don't understand is the second branch, which runs once i >= N - n*blockDim.x*gridDim.x. Why use the outer loop

for (int j = 0; j < n; j++) { for (int j = 0; j < n; j++)....}

I tested both kernels. The first one works, but the second one, which I copied from the book, gives incorrect results.

I initialized the two arrays with while (i < 1024) { a[i] = i; b[i] = 10*i; i++; } and used the two kernels above to compute c = alpha * a + beta * b, but with the loop-unrolled kernel the result is about 4.3e8 for every element of c.

Here is my test code:

#include <iostream>
#include <cuda_runtime.h>

using namespace std;

int main() {
    int arraySize = 1024;
    float* a = new float[arraySize];
    float* b = new float[arraySize];
    float* c = new float[arraySize];
    for (int i = 0; i < arraySize; i++)
    {
        a[i] = 1.0 * i;
        b[i] = 10.0 * i;
        c[i] = 0.0;
    }
    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void**)&d_a, sizeof(float) * arraySize);
    cudaMemcpy(d_a, a, sizeof(float) * arraySize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_b, sizeof(float) * arraySize);
    cudaMemcpy(d_b, b, sizeof(float) * arraySize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_c, sizeof(float) * arraySize);
    // reference result on the host (alpha = beta = 1, so c = a + b)
    for (int i = 0; i < arraySize; i++)
    {
        c[i] = a[i] + b[i];
    }
    dim3 block_size(256, 1, 1);
    dim3 grid_size((arraySize - 1) / block_size.x + 1, 1, 1);
    float alpha = 1.0;
    float beta = 1.0;
    bool flag = true;
    if (flag)
    {
        saxpyGPU<<<grid_size, block_size>>>(d_c, d_a, d_b, arraySize, alpha, beta);
        float* temp = new float[arraySize];
        cudaMemcpy(temp, d_c, arraySize * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < arraySize; i++)
        {
            cout << (temp[i] - c[i]) << ",";
        }
    }
    else
    {
        saxpyGPU2<<<grid_size, block_size>>>(d_c, d_a, d_b, arraySize, alpha, beta);
        float* temp = new float[arraySize];
        cudaMemcpy(temp, d_c, arraySize * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < arraySize; i++)
        {
            cout << (temp[i] - c[i]) << ",";
        }
    }
    return 0;
}
The two kernels produce different results.

What is your question? – talonmies

@talonmies, I have edited the question – Zziggurats

The test code you have added is incomplete and will not compile. There is nothing wrong with the code from the CUDA Handbook; it works correctly. – talonmies

Answer


The kernel code you have posted is perfectly correct and produces the expected results; this can be demonstrated with the following code:

#include <thrust/random.h> 
#include <thrust/device_vector.h> 
#include <thrust/transform.h> 
#include <thrust/copy.h> 
#include <thrust/iterator/counting_iterator.h> 
#include <iostream> 
#include <vector> 
#include <algorithm> 
#include <cmath> 

template<const int n> 
__device__ 
void saxpy_unrolled(float *out, const float *px, const float *py, 
     size_t N, float alpha,float beta) { 
    float x[n], y[n]; 
    size_t i; 
    for (i = n*blockIdx.x*blockDim.x+threadIdx.x; 
     i < N-n*blockDim.x*gridDim.x; 
     i += n*blockDim.x*gridDim.x) { 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      x[j] = px[index]; 
      y[j] = py[index]; 
     } 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      out[index] = alpha*x[j]+beta* y[j]; 
     } 
    } 
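    // tail: handle the remaining elements, where every access must be
    // guarded by an (index < N) check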
    for (int j = 0; j < n; j++) { 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      if (index<N) { 
       x[j] = px[index]; 
       y[j] = py[index]; 
      } 
     } 
     for (int j = 0; j < n; j++) { 
      size_t index = i+j*blockDim.x; 
      if (index<N) { 
       out[index] = alpha*x[j] + beta*y[j]; 
      } 
     } 
    } 
} 

__global__ 
void saxpyGPU(float *out, const float *px, const float *py, 
     size_t N, float alpha,float beta) { 
    saxpy_unrolled<4>(out, px, py, N, alpha ,beta); 
} 

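// functor that generates reproducible pseudo-random floats in [a, b);
// each element is seeded from its counting-iterator index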
struct prg { 
    float a, b; 
    __host__ __device__ 
    prg(float _a=0.f, float _b=1.f) : a(_a), b(_b) {}; 

    __host__ __device__ 
    float operator()(const unsigned int n) const { 
     thrust::default_random_engine rng; 
     thrust::uniform_real_distribution<float> dist(a, b); 
     rng.discard(n); 
     return dist(rng); 
    } 
}; 

int main(void) { 
    const int N = 100000; 
    const float alpha = 0.12345f, beta = 0.9876f; 

    prg gen(1.f, 2.f); 
    thrust::device_vector<float> x(N), y(N), z(N); 
    thrust::counting_iterator<unsigned int> iseqx(0); 
    thrust::counting_iterator<unsigned int> iseqy(N); 
    thrust::transform(iseqx, iseqx + N, x.begin(), gen); 
    thrust::transform(iseqy, iseqy + N, y.begin(), gen); 

    float *xp = thrust::raw_pointer_cast(&x[0]); 
    float *yp = thrust::raw_pointer_cast(&y[0]); 
    float *zp = thrust::raw_pointer_cast(&z[0]); 
    dim3 blockdim(128); 
    dim3 griddim(16); 
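    // launch a fixed-size grid; the unrolled grid-stride loop inside the
    // kernel then covers all N elements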
    saxpyGPU<<<griddim, blockdim>>>(zp, xp, yp, N, alpha, beta); 
    cudaDeviceSynchronize(); 

    std::vector<float> xh(N), yh(N), zh(N); 
    thrust::copy(x.begin(), x.end(), xh.begin()); 
    thrust::copy(y.begin(), y.end(), yh.begin()); 
    thrust::copy(z.begin(), z.end(), zh.begin()); 

    float maxabserr = -1.f, maxrelerr = -1.f; 
    for(int i=0; i<N; i++) { 
     float saxpyval = alpha * xh[i] + beta * yh[i]; 
     float abserr = fabs(zh[i]-saxpyval); 
     float relerr = abserr/fmaxf(fabs(zh[i]), fabs(saxpyval)); 
     maxabserr = fmaxf(abserr, maxabserr); 
     maxrelerr = fmaxf(relerr, maxrelerr); 
    } 
    std::cout.precision(10); 
    std::cout << "Maximum absolute error = " << maxabserr << std::endl; 
    std::cout << "Maximum relative error = " << maxrelerr << std::endl; 

    return 0; 
} 

which gives me the following:

$ nvcc -arch=sm_30 -o unrolled_saxpy unrolled_saxpy.cu 
$ ./unrolled_saxpy 
Maximum absolute error = 2.384185791e-07 
Maximum relative error = 1.1920676e-07 

If you (still) do not understand why the kernel is written the way it is, follow what I showed you in your previous question and manually unroll the saxpy function by hand. Start with n = 1 and confirm that it is functionally identical to the unrolled equivalent, then try n = 2, n = 4, etc., to see how the unrolled loop works in action.
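For illustration only, here is a rough hand-unrolled sketch for n = 2 (my own code, not from the book, and it assumes N >= 2*blockDim.x*gridDim.x, just as the templated kernel implicitly does). The grid-stride main loop and the bounds-checked tail mirror the structure of saxpy_unrolled:

__global__
void saxpyGPU_unroll2(float *out, const float *px, const float *py,
     size_t N, float alpha, float beta)
{
    size_t i;
    // main loop: each iteration handles two elements per thread,
    // so no (index < N) test is needed inside it
    for (i = 2*blockIdx.x*blockDim.x + threadIdx.x;
         i < N - 2*blockDim.x*gridDim.x;
         i += 2*blockDim.x*gridDim.x) {
        float x0 = px[i],            y0 = py[i];
        float x1 = px[i+blockDim.x], y1 = py[i+blockDim.x];
        out[i]            = alpha*x0 + beta*y0;
        out[i+blockDim.x] = alpha*x1 + beta*y1;
    }
    // tail: i is now past the last full stride, so every remaining
    // access has to be checked against N
    for (int j = 0; j < 2; j++) {
        size_t index = i + j*blockDim.x;
        if (index < N) out[index] = alpha*px[index] + beta*py[index];
    }
}

Comparing this hand-written version against the n = 2 instantiation of saxpy_unrolled should make it clear what each part of the templated kernel is doing.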