
I want to parallelize, in CUDA C, a function that counts all vectors whose elements sum to a given value and whose elements are each no greater than k. For example, if the number of vector elements n is 5, sum = 10, and k = 3, then the number of vectors satisfying this condition is 101. I have already written this function in CUDA C, but the problem appears when the numbers of blocks and threads are both greater than 1. I know the problem is in the for loop and that I should change it, but I don't know where to start. When I launch the function with blocks and threads both equal to 1, it works serially in the traditional way and everything is fine, but then the function is not parallelized at all.
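
(Sanity check on that count: by inclusion-exclusion, the number of length-$n$ vectors with entries in $\{0,\dots,k\}$ summing to $s$ is $\sum_{j \ge 0} (-1)^j \binom{n}{j} \binom{s - j(k+1) + n - 1}{n-1}$; for $n = 5$, $k = 3$, $s = 10$ this gives $\binom{14}{4} - 5\binom{10}{4} + 10\binom{6}{4} = 1001 - 1050 + 150 = 101$.)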

The source code of the program is:

//function that counts the number of vectors
__device__ void count(int *vector, int *total, int n, int s)
{
    int i, sum=0;
    for(i=blockIdx.x*blockDim.x+threadIdx.x; i<n; i+=blockDim.x*gridDim.x)
    {
        sum+=vector[i];
        __syncthreads();
    }
    if(sum==s)
    {
        total[0]=total[0]+1;
    }
}

//main kernel
__global__ void computeVectors(int *vector, int n, int kk, int s, int *total)
{
    int k=0;
    int j, i, next;

    while(1)
    {
        //this is the problem, in the for loop
        for(j=blockIdx.x*blockDim.x+threadIdx.x; j<=kk; j+=blockDim.x*gridDim.x)
        {
            vector[k]=j;
            count(vector, total, n, s);
            __syncthreads();
        }
        for(i=blockIdx.x*blockDim.x+threadIdx.x; i<n; i+=blockDim.x*gridDim.x)
        {
            if(vector[i]<kk)
                break;
        }
        next=i;
        vector[next]++;
        for(i=blockIdx.x*blockDim.x+threadIdx.x; i<next; i+=blockDim.x*gridDim.x)
        {
            vector[i]=0;
            __syncthreads();
        }
        k=0;
        if(next>=n)
            break;
    }
}

int main() 
{ 
    cudaError_t err = cudaSuccess; 

    int n,k,sum; 
    int counter=0; 

    printf("Enter the length of vector n="); 
    scanf("%d",&n); 
    printf("Enter the max value of vector elements k="); 
    scanf("%d",&k); 
    printf("Enter the sum of vector elements sum="); 
    scanf("%d",&sum); 

    //initial vector of length n
    int *vec_h, *vec_d; 
    size_t sizevec=n*sizeof(int); 
    vec_h=(int *)malloc(sizevec); 
    cudaMalloc((void **) &vec_d, sizevec); 

    for(counter=0; counter<n; counter++)
    {
        vec_h[counter]=0;
    }
    cudaMemcpy(vec_d, vec_h, sizevec, cudaMemcpyHostToDevice); 

    int *total_h, *total_d; 
    size_t size=1*sizeof(int); 
    total_h=(int *)malloc(size); 
    cudaMalloc((void **) &total_d, size); 
    total_h[0]=0; 
    cudaMemcpy(total_d, total_h, size, cudaMemcpyHostToDevice); 

    //launching the kernel
    computeVectors<<<1, 1>>>(vec_d, n, k, sum, total_d); 

    cudaThreadSynchronize(); 

    err = cudaGetLastError(); 
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error: %s!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(total_h, total_d, size, cudaMemcpyDeviceToHost); 
    printf("Number of vectors that satisfy condition is %d\n", total_h[0]); 

    free(vec_h); 
    cudaFree(vec_d); 

    free(total_h); 
    cudaFree(total_d); 

    return 0; 
} 

I assume the vector elements can range from 0 to k, and k is expected to be a positive number? Since you are launching the kernel with 1 threadblock of 1 thread, you are not really parallelizing anything at all; you are just running serial code. Have you tried anything in parallel? Do you even have a strategy? For example, if you had multiple threads, what would each thread do? (Perhaps test a separate pattern?) And what might each block be responsible for? (Perhaps a portion of the overall space to be tested?) –


Yes, k is a positive number, and I know the code runs serially when the numbers of blocks and threads are 1. How can I use threads to do the pattern testing? – Dragon


The space to be tested is all vectors of length n, where each vector element has k+1 possible values. So with a naive brute-force approach there are (k+1)^n possible vectors to test. One approach would be to have each thread do the full work (vector generation, sum computation, sum test) of testing a single vector, and have the grid as a whole step through the entire space of (k+1)^n vectors. Each thread that finds a correctly matching vector can then use an atomic operation at the end to update the 'count' value. This will work for any (k+1)^n that fits in an unsigned long. –

Answers


The problem is __syncthreads(). For __syncthreads() to work correctly, all threads within a block must be able to reach it; otherwise some threads will wait forever and the program will never exit. In your program, execution of __syncthreads() is conditional in several places. That is why your program does not work with more than one thread per block.
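
A minimal sketch of the hazard (a hypothetical kernel, assumed to be launched with 64-thread blocks; not the code above):

    __global__ void barrier_demo(unsigned *out)
    {
        __shared__ unsigned buf[64];
        buf[threadIdx.x] = threadIdx.x;

        // BROKEN: a conditional barrier hangs the block, because threads for
        // which the condition is false never arrive at it:
        //     if (threadIdx.x < 16) __syncthreads();

        // OK: every thread in the block executes the same barrier
        __syncthreads();

        out[threadIdx.x] = buf[63 - threadIdx.x];
    }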


I removed the __syncthreads(), but it is the same: it does not work with more than one thread. – Dragon


Yes, I read all of it. The problem runs deeper than that. – Farzad


As Robert said in the comments, if you want to generate all (k+1)^n permutations on the GPU and test them, you could think of a GPU kernel something like this:

__device__ int count; //global device counter; must be zeroed before the kernel call
__global__ void perm_generator(int k, int n, int sum) {
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    int id = tid;
    int mysum = 0;
    for (int i = n; i > 1; i--) { //peel off the low n-1 base-(k+1) digits
        mysum += (id % (k+1));
        id /= (k+1);
    }
    mysum += id; //last (most significant) element
    if (mysum == sum) atomicAdd(&count, 1);
}

The kernel should be launched with exactly (k+1)^n threads. If you happen to launch it with more threads (simply because, as a rule of thumb, the block dimension should be a multiple of 32), you need to check the value of tid inside the kernel first. Also, cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize() instead.
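
A minimal host-side launch sketch under those assumptions (the rounding-up and the counter copies are additions here; total must fit in an int, and the kernel is assumed to mask extra threads with a tid < total check):

    int total = 1;
    for (int i = 0; i < n; i++) total *= (k + 1);    // (k+1)^n vectors to test

    int zero = 0;
    cudaMemcpyToSymbol(count, &zero, sizeof(int));   // zero the device counter

    int threads = 256;                               // a multiple of 32
    int blocks = (total + threads - 1) / threads;    // round up
    perm_generator<<<blocks, threads>>>(k, n, sum);
    cudaDeviceSynchronize();                         // not cudaThreadSynchronize()

    int h_count;
    cudaMemcpyFromSymbol(&h_count, count, sizeof(int));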


This method works too; I tested it and compared it against my approach. It was quite a bit slower than my code (about 8x slower on my "large" test problem), probably because generating each vector requires n-1 modulo operations and n-1 division operations. On average, my code needs about one addition operation to generate each vector, regardless of n. –


Yes, you have designed your algorithm wisely. – Farzad


Here is a sample brute-force program that enumerates all possible vectors and then tests each vector's sum to see whether it matches the desired sum.

  • Assume n = the length of the vector, in "digits"
  • Assume each vector "digit" is represented by an unsigned quantity
  • Assume k = the maximum "digit" value + 1
  • The size of the vector space is then given by: k^n
  • Divide that space into contiguous groups of vectors, one group to be handled by each thread: (k^n)/grid_size
  • Generate the starting vector for each thread (i.e., the first vector in each group)
  • Each thread then loops, testing the vector sum, incrementing the count if needed, and then "incrementing" the vector, until it has processed its assigned contiguous group of vectors

The program:

#include <stdio.h> 
#include <thrust/host_vector.h> 
#include <sys/time.h> 
#include <time.h> 

#define MAX_N 12 
#define nTPB 256 
#define GRIDSIZE (32*nTPB) 


#define cudaCheckErrors(msg) \ 
    do { \ 
     cudaError_t __err = cudaGetLastError(); \ 
     if (__err != cudaSuccess) { \ 
      fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ 
       msg, cudaGetErrorString(__err), \ 
       __FILE__, __LINE__); \ 
      fprintf(stderr, "*** FAILED - ABORTING\n"); \ 
      exit(1); \ 
     } \ 
    } while (0) 


// thrust code is to quickly prototype a CPU based 
// method for verification 
int increment(thrust::host_vector<unsigned> &data, unsigned max){ 
    int pos = 0; 
    int done = 0; 
    int finished = 0; 

    while(!done){
        data[pos]++;
        if (data[pos] >= max) {
            data[pos] = 0;
            pos++;
            if (pos >= data.size()){
                done = 1;
                finished = 1;
            }
        }
        else done = 1;
    }
    return finished; 
} 

__constant__ unsigned long powers[MAX_N]; 

__device__ unsigned vec_sum(unsigned *vector, int size){ 
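    // sum one thread's vector; its elements sit nTPB apart in shared memory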
    unsigned sum = 0; 
    for (int i=0; i<size; i++) sum += vector[(i*nTPB)]; 
    return sum; 
} 

__device__ void create_vector(unsigned long index, unsigned *vector, int size){ 
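    // decompose a linear index into its digits in base k, most significant
    // first; this thread's digit i lives at vector[i*nTPB] (interleaved layout)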
    unsigned long residual = index;
    unsigned pos = size;
    while ((residual > 0) && (pos > 0)){
        unsigned long temp = residual/powers[pos-1];
        vector[(pos-1)*nTPB] = temp;
        residual -= temp*powers[pos-1];
        pos--;
    }
    while (pos > 0) {
        vector[(pos-1)*nTPB] = 0;
        pos--;
    }
} 
__device__ void increment_vector(unsigned *vector, int size, int k){ 
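    // "odometer" increment: add 1 to the lowest digit and carry in base k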
    int pos = 0;
    int done = 0;

    while(!done){
        vector[(pos*nTPB)]++;
        if (vector[pos*nTPB] >= k) {
            vector[pos*nTPB] = 0;
            pos++;
            if (pos >= size){
                done = 1;
            }
        }
        else done = 1;
    }
} 

__global__ void find_vector_match(unsigned long long int *count, int k, int n, unsigned sum){ 
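    // each thread keeps its own n-digit vector in shared memory, interleaved
    // with stride nTPB: thread t owns vecs[t], vecs[t+nTPB], vecs[t+2*nTPB], ...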
    __shared__ unsigned vecs[MAX_N * nTPB];
    unsigned *vec = &(vecs[threadIdx.x]);
    unsigned long idx = threadIdx.x+blockDim.x*blockIdx.x;
    if (idx < (k*powers[n-1])){
        unsigned long vec_count = 0;
        unsigned long vecs_per_thread = (k*powers[n-1])/(gridDim.x*blockDim.x);
        vecs_per_thread++;
        unsigned long vec_num = idx*vecs_per_thread;
        create_vector((vec_num), vec, n);
        while ((vec_count < vecs_per_thread) && (vec_num < (k*powers[n-1]))){
            if (vec_sum(vec, n) == sum) atomicAdd(count, 1ULL);
            increment_vector(vec, n, k);
            vec_count++;
            vec_num++;
        }
    }
} 

int main(){ 

// calculate on CPU first for verification 
    struct timeval t1, t2, t3; 
    int n, k, sum; 
    printf("Enter the length of vector (maximum: %d) n=", MAX_N); 
    scanf("%d",&n); 
    printf("Enter the max value of vector elements k="); 
    scanf("%d",&k); 
    printf("Enter the sum of vector elements sum="); 
    scanf("%d",&sum); 
    int count = 0; 
    gettimeofday(&t1, NULL); 
    k++;  // k becomes the number of possible digit values (max value + 1)

    thrust::host_vector<unsigned> test(n); 
    thrust::fill(test.begin(), test.end(), 0); 
    int finished = 0; 
    do {
        if (thrust::reduce(test.begin(), test.end()) == sum) count++;
        finished = increment(test, k);
    } while (!finished);
    gettimeofday(&t2, NULL); 
    printf("CPU count = %d, in %d seconds\n", count, t2.tv_sec - t1.tv_sec); 
    unsigned long h_powers[MAX_N]; 
    h_powers[0] = 1; 
    if (n < MAX_N) 
    for (int i = 1; i<n; i++) h_powers[i] = h_powers[i-1]*k; 
    cudaMemcpyToSymbol(powers, h_powers, MAX_N*sizeof(unsigned long)); 
    cudaCheckErrors("cudaMemcpyToSymbolfail"); 
    unsigned long long int *h_count, *d_count; 
    h_count = (unsigned long long int *)malloc(sizeof(unsigned long long int)); 
    cudaMalloc((void **)&d_count, sizeof(unsigned long long int)); 
    cudaCheckErrors("cudaMalloc fail"); 
    *h_count = 0; 
    cudaMemcpy(d_count, h_count, sizeof(unsigned long long int), cudaMemcpyHostToDevice); 
    cudaCheckErrors("cudaMemcpy H2D fail"); 
    find_vector_match<<<(GRIDSIZE + nTPB -1)/nTPB, nTPB>>>(d_count, k, n, sum); 
    cudaMemcpy(h_count, d_count, sizeof(unsigned long long int), cudaMemcpyDeviceToHost); 
    cudaCheckErrors("cudaMemcpy D2H fail"); 
    gettimeofday(&t3, NULL); 
    printf("GPU count = %d, in %d seconds\n", *h_count, t3.tv_sec - t2.tv_sec); 

    return 0; 
} 

Compile with:

$ nvcc -O3 -arch=sm_20 -o t260 t260.cu 

Sample output:

$ ./t260 
Enter the length of vector (maximum: 12) n=2 
Enter the max value of vector elements k=3 
Enter the sum of vector elements sum=4 
CPU count = 3, in 0 seconds 
GPU count = 3, in 0 seconds 
$ ./t260 
Enter the length of vector (maximum: 12) n=5 
Enter the max value of vector elements k=3 
Enter the sum of vector elements sum=10 
CPU count = 101, in 0 seconds 
GPU count = 101, in 0 seconds 
$ ./t260 
Enter the length of vector (maximum: 12) n=9 
Enter the max value of vector elements k=9 
Enter the sum of vector elements sum=20 
CPU count = 2714319, in 12 seconds 
GPU count = 2714319, in 1 seconds 
$ ./t260 
Enter the length of vector (maximum: 12) n=10 
Enter the max value of vector elements k=9 
Enter the sum of vector elements sum=20 
CPU count = 9091270, in 123 seconds 
GPU count = 9091270, in 4 seconds 

So for larger problem sizes, the naive brute-force GPU code appears to be roughly 30x faster than the naive brute-force single-threaded CPU code. (...on my particular machine setup: CPU = Xeon X5560, GPU = Quadro5000, CentOS 5.5, CUDA 5.0)