
cublasSaxpy computes y' = a * x + y, where x and y are vectors and a is a scalar. Is there any way to do "saypx" in cuBLAS?

It turns out I need to compute y' = a * y + x, and I don't see how to bend the cuBLAS library into doing that. (Of course, I could compute y' = a * y and then y' = y' + x, but in that case y' gets read too often. And I could write my own CUDA code to do it, but then it's likely not to be anywhere near as fast as the cuBLAS code. I'm just surprised there's no apparent way to perform "saypx" directly.)
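
For reference, the two-pass fallback mentioned above could look something like the following sketch (the helper name and error handling are mine; note that y makes two full round trips through memory):

#include "cublas_v2.h"

// Two-pass workaround: y = a*y via Sscal, then y = 1*x + y via Saxpy.
// Error checking omitted for brevity.
void saypx_two_pass(cublasHandle_t handle, int n, float a,
        const float *x_d, float *y_d)
{
    const float one = 1.0f;
    cublasSscal(handle, n, &a, y_d, 1);           // y_d = a * y_d
    cublasSaxpy(handle, n, &one, x_d, 1, y_d, 1); // y_d = 1 * x_d + y_d
}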

[Added] There is a function "saxpby" in the Intel version of cblas that would do what I need. But oddly enough, it's not in cuBLAS.
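
(For comparison, MKL's extension computes y = alpha*x + beta*y, so alpha = 1 and beta = a gives the operation above. A hedged host-side sketch, assuming Intel's cblas interface:)

#include <mkl_cblas.h>

// MKL extension: y = alpha*x + beta*y. With alpha = 1, beta = a this
// is exactly y' = a*y + x. Signature taken from Intel's cblas docs.
void saypx_mkl(int n, float a, const float *x, float *y)
{
    cblas_saxpby(n, 1.0f, x, 1, a, y, 1);
}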

[Added #2] It looks like I could use the cudnnAddTensor function, with some aliasing of descriptors (I have a FilterDescriptor pointing at the tensor, which AddTensor won't accept, but I should be able to alias a TensorDescriptor to the same memory and shape).
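
A sketch of how that aliasing might look, assuming the cuDNN v5-style signature where cudnnAddTensor computes C = alpha*A + beta*C; the helper name and the 1 x N x 1 x 1 shape are illustrative, not from the question:

#include <cudnn.h>

// Describe both vectors as 1xNx1x1 tensors aliasing the existing
// device memory, then compute y = 1*x + a*y via cudnnAddTensor.
void saypx_cudnn(cudnnHandle_t h, int n, float a,
        const float *x_d, float *y_d)
{
    cudnnTensorDescriptor_t desc;
    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                               1, n, 1, 1);
    const float one = 1.0f;
    cudnnAddTensor(h, &one, desc, x_d, &a, desc, y_d);
    cudnnDestroyTensorDescriptor(desc);
}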

Answer


There isn't a way I am aware of to do what you are asking in CUBLAS, or in standard BLAS for that matter. What you have found in MKL is an extension added by Intel, but I don't recall seeing something similar in other host or accelerator BLAS implementations.

The good news is that your assertion that "I could write my own CUDA code to do it, but then it's likely not to be anywhere near as fast as the cuBLAS code" is untrue, at least for an operation as trivial as saxpy. Even a naïve implementation of saxpy will get very close to CUBLAS, because there really isn't much to it beyond reading two arrays, performing an FMAD, and writing the result back. As long as you get the memory coalescing right, it is pretty simple to write performant code. For example:

#include <vector> 
#include <algorithm> 
#include <cassert> 
#include <iostream> 
#include <cmath> 

#include "cublas_v2.h" 

// Tag used to select the operation at compile time 
typedef enum 
{ 
    AXPY = 0, 
    AXPBY = 1 
} saxpy_op_t; 

// y' = a*y + x  (the "saypx" operation asked about) 
__device__ __host__ __inline__ 
float axpby_op(float y, float x, float a) 
{ 
    return a * y + x; 
} 

// y' = y + a*x  (standard BLAS saxpy) 
__device__ __host__ __inline__ 
float axpy_op(float y, float x, float a) 
{ 
    return y + a * x; 
} 

// Wraps a raw pointer so the kernel honours BLAS-style increments 
template<typename T> 
class pitched_accessor 
{ 
    T * p; 
    size_t pitch; 

    public: 
    __host__ __device__ 
    pitched_accessor(T *p_, size_t pitch_) : p(p_), pitch(pitch_) {}; 

    __host__ __device__ 
    T& operator[](size_t idx) { return p[pitch*idx]; }; 

    __host__ __device__ 
    const T& operator[](size_t idx) const { return p[pitch*idx]; }; 
}; 


// Grid-stride loop; the switch on the template parameter is resolved 
// at compile time, so each instantiation contains only one operation 
template<saxpy_op_t op> 
__global__ 
void saxpy_kernel(pitched_accessor<float> y, pitched_accessor<float> x, 
        const float a, const unsigned int N1) 
{ 
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; 
    unsigned int stride = gridDim.x * blockDim.x; 

    #pragma unroll 8 
    for(; idx < N1; idx += stride) { 
     switch (op) { 
      case AXPY: 
       y[idx] = axpy_op(y[idx], x[idx], a); 
       break; 
      case AXPBY: 
       y[idx] = axpby_op(y[idx], x[idx], a); 
       break; 
     } 
    } 
} 

__host__ void saxby(const unsigned int N, const float a, 
        float *x, int xinc, float *y, int yinc) 
{ 
    int gridsize, blocksize; 
    // Let the runtime pick a launch configuration for full occupancy 
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPBY>); 
    saxpy_kernel<AXPBY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc), 
               pitched_accessor<float>(x, xinc), a, N); 
} 

__host__ void saxpy(const unsigned int N, const float a, 
        float *x, int xinc, float *y, int yinc) 
{ 
    int gridsize, blocksize; 
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPY>); 
    saxpy_kernel<AXPY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc), 
               pitched_accessor<float>(x, xinc), a, N); 
} 

void check_result(std::vector<float> &yhat, float result, float tolerance=1e-5f) 
{ 
    auto it = yhat.begin(); 
    for(; it != yhat.end(); ++it) { 
     float err = std::fabs(*it - result); 
     assert(err < tolerance); 
    } 
} 

int main() 
{ 

    const int N = 1<<22; 

    std::vector<float> x_h(N); 
    std::vector<float> y_h(N); 

    const float a = 2.f, y0 = 1234.f, x0 = 532.f; 
    std::fill(y_h.begin(), y_h.end(), y0); 
    std::fill(x_h.begin(), x_h.end(), x0); 

    float *x_d, *y_d; 
    size_t sz = sizeof(float) * size_t(N); 
    cudaMalloc((void **)&x_d, sz); 
    cudaMalloc((void **)&y_d, sz); 

    cudaMemcpy(x_d, &x_h[0], sz, cudaMemcpyHostToDevice); 

    // Test the custom AXPBY kernel: expect y' = a*y0 + x0 
    { 
     cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice); 
     saxby(N, a, x_d, 1, y_d, 1); 
     std::vector<float> yhat(N); 
     cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost); 
     check_result(yhat, axpby_op(y0, x0, a)); 
    } 

    // Test the custom AXPY kernel: expect y' = y0 + a*x0 
    { 
     cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice); 
     saxpy(N, a, x_d, 1, y_d, 1); 
     std::vector<float> yhat(N); 
     cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost); 
     check_result(yhat, axpy_op(y0, x0, a)); 
    } 

    // Reference: CUBLAS saxpy, same expected result as the AXPY kernel 
    { 
     cublasHandle_t handle; 
     cublasCreate(&handle); 
     cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice); 
     cublasSaxpy(handle, N, &a, x_d, 1, y_d, 1); 
     std::vector<float> yhat(N); 
     cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost); 
     check_result(yhat, axpy_op(y0, x0, a)); 
     cublasDestroy(handle); 
    } 

    return int(cudaDeviceReset()); 
} 

This demonstrates that a very simple axpy kernel can easily be adapted to perform both the standard operation and the version you want, and runs within 10% of the runtime of CUBLAS on the compute 5.2 device I tested it on:

$ nvcc -std=c++11 -arch=sm_52 -Xptxas="-v" -o saxby saxby.cu -lcublas 
ptxas info : 0 bytes gmem 
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj' for 'sm_52' 
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj 
    0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads 
ptxas info : Used 17 registers, 360 bytes cmem[0] 
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj' for 'sm_52' 
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj 
    0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads 
ptxas info : Used 17 registers, 360 bytes cmem[0] 

$ nvprof ./saxby 
==26806== NVPROF is profiling process 26806, command: ./saxby 
==26806== Profiling application: ./saxby 
==26806== Profiling result: 
Time(%)  Time  Calls  Avg  Min  Max Name 
54.06% 11.190ms   5 2.2381ms  960ns 2.9094ms [CUDA memcpy HtoD] 
40.89% 8.4641ms   3 2.8214ms 2.8039ms 2.8310ms [CUDA memcpy DtoH] 
    1.73% 357.59us   1 357.59us 357.59us 357.59us void saxpy_kernel<saxpy_op_t=1>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int) 
    1.72% 355.15us   1 355.15us 355.15us 355.15us void saxpy_kernel<saxpy_op_t=0>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int) 
    1.60% 332.21us   1 332.21us 332.21us 332.21us void axpy_kernel_val<float, int=0>(cublasAxpyParamsVal<float>) 

OK. And thanks for the excellent short tutorial on how to write CUDA code and profile it under Linux! (The Windows setup is considerably more convoluted.) I'm wondering whether using fmad() would take care of the last 10%.
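
(An aside on that last point: the op could be written with an explicit fused multiply-add, as in the sketch below, though nvcc normally contracts a * y + x into a single FFMA instruction anyway, so this probably isn't where the remaining 10% lives.)

// Variant of axpby_op with an explicit single-rounding multiply-add 
__device__ __host__ __inline__ 
float axpby_op_fma(float y, float x, float a) 
{ 
    return fmaf(a, y, x); 
} 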
