使用 CUDA 進行矩陣乘法時存在問題。我必須計算 A * A * A * A 並將結果保存在 hB 中。用 cuBLAS 可以做到,但我用純 CUDA 做不到。矩陣尺寸可能很大,例如 2000。這是我的代碼(CUDA 矩陣乘法):
// Computes B = A * A for a square, row-major N x N matrix.
// Expected launch: a 2D grid of 2D blocks covering at least N x N threads
// (one thread per output element); extra threads exit via the bounds guard.
// Precondition: A and B are device pointers to at least N*N floats.
__global__ void CudaMM(float *A, float *B, int N)
{
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
// Bounds guard: the grid rarely divides the matrix exactly, so threads
// past the edge must not read or write — without this check they access
// memory outside the N*N allocation (the original crash/garbage source).
if (row >= N || col >= N)
    return;
float sum = 0.f;
// Dot product of row `row` of A with column `col` of A.
for (int n = 0; n < N; ++n)
    sum += A[row*N+n]*A[n*N+col];
B[row*N+col] = sum;
}
// Host wrapper: copies hMatrice (dimension x dimension, row-major) to the
// device, launches CudaMM to compute d_B = d_A * d_A, and copies the result
// back into hB. d_A and d_B must already be allocated with at least
// dimension*dimension floats each.
//
// Bug fix: the grid was hard-coded to K = 100 and the kernel was passed
// N = 100*BLOCK_SIZE instead of `dimension`. The kernel therefore indexed
// the matrix with the wrong row stride and read/wrote far out of bounds
// whenever dimension != 100*BLOCK_SIZE. The launch configuration is now
// derived from `dimension`, and `dimension` is passed as the matrix size.
void CudaMult(int dimension,float *hMatrice,float *hB,float *d_A,float *d_B){
size_t bytes = (size_t)dimension * dimension * sizeof(float);
// Ceil-divide so the grid covers the whole matrix for ANY dimension,
// including ones that are not a multiple of BLOCK_SIZE.
int K = (dimension + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(K,K);
cudaMemcpy(d_A,hMatrice,bytes,cudaMemcpyHostToDevice);
CudaMM<<<grid,threadBlock>>>(d_A,d_B,dimension);
// The blocking device-to-host copy below also synchronizes with the
// kernel, so hB is complete when this function returns.
// NOTE(review): consider checking cudaGetLastError() here — a bad launch
// config (e.g. BLOCK_SIZE*BLOCK_SIZE > 1024 threads) fails silently.
cudaMemcpy(hB,d_B,bytes,cudaMemcpyDeviceToHost);
}
// Repeatedly squares `mat` on the GPU: each CudaMult call computes
// B = mat * mat, then B is fed back in as the next input.
// After w iterations the result is mat^(2^w), so 5 iterations yield mat^32.
// NOTE(review): the stated goal is A*A*A*A = A^4 — if so, TWO iterations
// suffice, since each pass squares the matrix. Loop count left unchanged
// here; confirm the intended power before reducing it.
//
// Bug fix: the loop indices m and n were used without being declared,
// which does not compile. Device buffers are allocated once and reused
// across iterations (per-iteration cudaMalloc/cudaFree would be slow).
void CublasFindConnect(int dimension,float* mat,float* B){
float *d_A,*d_B;
size_t bytes = (size_t)dimension * dimension * sizeof(float);
cudaMalloc(&d_A,bytes);
cudaMalloc(&d_B,bytes);
// NOTE(review): cudaMalloc return codes are unchecked; a failed
// allocation (e.g. dimension too large) will surface later as a
// mysterious copy/launch error.
for (int w = 0; w < 5; ++w) {
    CudaMult(dimension,mat,B,d_A,d_B);
    // Copy the computed product B back into mat as the next input,
    // and zero B for the next round.
    for (int m = 0; m < dimension; m++) {
        for (int n = 0; n < dimension; n++) {
            mat[m*dimension+n] = B[m*dimension+n];
            B[m*dimension+n] = 0;
        }
    }
}
cudaFree(d_A);
cudaFree(d_B);
}
我最後安裝了 CUDA 6;由於它支持統一記憶體(主機與設備共享同一塊記憶體),不再需要 cudaMemcpy。
您是否[爲您的CUDA程序進行錯誤檢查](http://stackoverflow.com/q/14038589/2386951)? – Farzad