
CUDA matrix multiplication locks up and displays a zero matrix

I am trying to write a simple matrix multiplication program that continually adds the product of two matrices into a third result matrix (essentially, I am giving the GPU a workout while I measure its power consumption with a separate device).

My problem appears when I specify a large number of iterations. I have tried this with several combinations of BLOCK_SIZE and matrix dimension values, and I have noticed that the iteration count can be increased for smaller matrix dimensions, but BLOCK_SIZE must equal the square root of the matrix dimension (square matrices).

The resulting failure in this case is a 39-second freeze (regardless of the iteration value, so long as it is 'too many') followed by an all-zero matrix output. Interestingly, I once ran it with 20000 iterations and it worked fine. I ran it again and got the freeze error.

Any ideas? Thanks in advance!

Kernel:

//******************************************************************** 
// matrixMultiplication_kernel.cu 
// 
// Kernel for a basic CUDA matrix multiplication program. 
//******************************************************************** 

#ifndef MATRIXMULTIPLICATION_KERNEL 
#define MATRIXMULTIPLICATION_KERNEL 

#define BLOCK_SIZE 16 // Set thread block size 
#define colsA 256  // Set matrix A column dimension 
#define rowsA 256  // Set matrix A row dimension 
#define colsB 256  // Set matrix B column dimension 
#define rowsB colsA // Set matrix B row dimension 
#define colsC colsB // Set matrix C column dimension 
#define rowsC rowsA // Set matrix C row dimension 

//-------------------------------------------------------------------- 
// matrixMultiplication() - Multiplies matrixA and matrixB, storing 
//       the result in device memory for matrixC. 
// 
// PRE: matrixA, matrixB, and matrixC are float pointers; numColsA 
//  and numColsB are integers. 
// POST: The result of multiplying matrixA and matrixB is stored in 
//  matrixC. 
//-------------------------------------------------------------------- 
__global__ void matrixMultiplication(float * matrixA, float * matrixB, 
        float * matrixC, int numColsA, 
        int numColsB) { 

    /* Matrix-multiplication accumulator, initialized outside the loops */ 
    float val = 0.0f; 

    /* Set block and thread index positions */ 
    int blockX = blockIdx.x; 
    int blockY = blockIdx.y; 
    int threadX = threadIdx.x; 
    int threadY = threadIdx.y; 

    /* 
    Set starting and ending indices of the first sub-matrix of A 
    and sub-matrix size for matrix A 
    */ 
    int startA = numColsA * BLOCK_SIZE * blockY; 
    int endA = startA + numColsA - 1; 
    int subSizeA = BLOCK_SIZE; 

    /* 
    Set starting index of the first sub-matrix of B and sub-matrix 
    size for matrix B 
    */ 
    int startB = BLOCK_SIZE * blockX; 
    int subSizeB = BLOCK_SIZE * colsB; 

    /* Perform matrix multiplication 20000 times */ 
    for (int iteration = 0; iteration < 20000; iteration++) { 

        /* Loop through matrix A and matrix B's sub-matrices */ 
        for (int i = startA, j = startB; i <= endA; i += subSizeA, 
             j += subSizeB) { 

            /* 
             Declare shared memory arrays for matrix A and B 
             sub-matrices 
            */ 
            __shared__ float subA[BLOCK_SIZE][BLOCK_SIZE]; 
            __shared__ float subB[BLOCK_SIZE][BLOCK_SIZE]; 

            /* Fill sub-matrices */ 
            subA[threadY][threadX] = 
                matrixA[i + colsA * threadY + threadX]; 
            subB[threadY][threadX] = 
                matrixB[j + colsB * threadY + threadX]; 

            /* Ensure that the sub-matrices are fully loaded */ 
            __syncthreads(); 

            /* Loop through the block */ 
            for (int k = 0; k < BLOCK_SIZE; ++k) { 

                /* Accumulate the product of two matrix elements */ 
                val += subA[threadY][k] * subB[k][threadX]; 
            } 

            /* 
             Ensure completion before the next set of sub-matrices 
             begins computation 
            */ 
            __syncthreads(); 
        } 

        /* Write the accumulated value for this thread's element of C */ 
        int position = colsB * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX; 
        matrixC[position + colsB * threadY + threadX] = val; 
    } 
} 

#endif 

Host:

//******************************************************************** 
// matrixMultiplication.cu 
// 
// A basic CUDA matrix multiplication program. 
//******************************************************************** 

/* Include necessary libraries and kernel */ 
#include <stdlib.h> 
#include <stdio.h> 
#include <math.h> 
#include <matrixMultiplication_kernel.cu> 

/* Function declarations */ 
void fillMatrix(float * matrix, int numIndices); 

//************* 
// Main Program 
//************* 
int main(int argc, char** argv) { 

    /* Declare device memory */ 
    float * deviceA; 
    float * deviceB; 
    float * deviceC; 

    srand(2013); // Set random seed 

    /* Determine total number of indices in each matrix */ 
    unsigned int numIndicesA = colsA * rowsA; 
    unsigned int numIndicesB = colsB * rowsB; 
    unsigned int numIndicesC = colsC * rowsC; 

    /* Determine memory size of each matrix */ 
    unsigned int memoryA = sizeof(float) * numIndicesA; 
    unsigned int memoryB = sizeof(float) * numIndicesB; 
    unsigned int memoryC = sizeof(float) * numIndicesC; 

    /* Allocate memory for each matrix */ 
    float * matrixA = (float *) malloc(memoryA); 
    float * matrixB = (float *) malloc(memoryB); 
    float * matrixC = (float *) malloc(memoryC); 

    /* Set contents of matrices A and B (matrix C is all zeros) */ 
    fillMatrix(matrixA, numIndicesA); 
    fillMatrix(matrixB, numIndicesB); 

    /* Allocate device memory for each matrix */ 
    cudaMalloc((void **) &deviceA, memoryA); 
    cudaMalloc((void **) &deviceB, memoryB); 
    cudaMalloc((void **) &deviceC, memoryC); 

    /* Copy host memory to device memory for matrices A and B */ 
    cudaMemcpy(deviceA, matrixA, memoryA, cudaMemcpyHostToDevice); 
    cudaMemcpy(deviceB, matrixB, memoryB, cudaMemcpyHostToDevice); 

    /* Set thread count to BLOCK_SIZE x BLOCK_SIZE */ 
    dim3 tCount(BLOCK_SIZE, BLOCK_SIZE); 

    /* Set thread block count */ 
    dim3 tbCount((colsC/tCount.x), (rowsC/tCount.y)); 

    /* Run kernel */ 
    matrixMultiplication <<< tbCount, tCount >>> (deviceA, deviceB, 
          deviceC, colsA, 
          colsB); 

    /* Copy device memory to host memory for matrix C */ 
    cudaMemcpy(matrixC, deviceC, memoryC, cudaMemcpyDeviceToHost); 

    for(int i = 0; i < 256; i++) { 
     printf("%f ", matrixC[i]); 
    } 
    printf("\n"); 

    /* Free up host and device memory for each matrix */ 
    free(matrixA); 
    free(matrixB); 
    free(matrixC); 
    cudaFree(deviceA); 
    cudaFree(deviceB); 
    cudaFree(deviceC); 
} 

//-------------------------------------------------------------------- 
// fillMatrix - Assigns a random float value to each index of the 
//    matrix. 
// 
// PRE: matrix is a pointer to a block of bytes in memory; numIndices 
//  is the number of indices in the matrix being instantiated. 
// POST: Each index of the matrix has been filled with random float 
//  values. 
//-------------------------------------------------------------------- 
void fillMatrix(float * matrix, int numIndices) { 

    /* Loop through each index of the matrix */ 
    for (int i = 0; i < numIndices; ++i) { 

    /* 
     Assign a random float between 0 and 1 for this index of 
     the matrix 
    */ 
    matrix[i] = rand()/(float)RAND_MAX; 
    } 
} 
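
Note that the host code never checks any CUDA return codes, so a failed or aborted launch falls straight through to the cudaMemcpy and prints whatever ends up in matrixC. A minimal error-checking sketch (the CUDA_CHECK macro is my own name, not part of the CUDA API):

/* Wrap CUDA runtime calls and abort with a readable message on failure */
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err = (call);                                     \
        if (err != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error at %s:%d: %s\n",              \
                    __FILE__, __LINE__, cudaGetErrorString(err));     \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

/* After the launch: check the launch itself, then wait for completion */
matrixMultiplication <<< tbCount, tCount >>> (deviceA, deviceB,
                                              deviceC, colsA, colsB);
CUDA_CHECK(cudaGetLastError());      /* configuration/launch errors */
CUDA_CHECK(cudaDeviceSynchronize()); /* execution errors, e.g. a watchdog kill */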

Makefile:

GCC = nvcc 
CUDA_INSTALL_PATH := /usr/local/cuda 
INCLUDES := -I. -I$(CUDA_INSTALL_PATH)/include 
CUDA_LIBS := -L$(CUDA_INSTALL_PATH)/lib -lcudart 

matrixMultiplication.o:  matrixMultiplication.cu 
	$(GCC) $(INCLUDES) -c matrixMultiplication.cu -o $@ 

matrixMultiplication:  matrixMultiplication.o 
	$(GCC) -o $@ matrixMultiplication.o $(CUDA_LIBS) 

clean: 
     $(RM) *.o *~ 

Insufficient cooling? Since you already have a power meter hooked up, you could easily verify whether the square-matrix case is also the one with the highest power draw. – tera 2013-03-26 01:09:45


I am actually comparing CUDA and OpenCL; matrix multiplication is just the common program between the two. – 2013-03-26 01:13:08


Are you doing the computation on a dedicated GPU? If not, it could be a system timeout, because the display driver becomes unresponsive. (On Windows the timeout is only 2 seconds.) – 2013-03-26 01:23:01
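
One way to check whether this watchdog applies to a given card is to query the device's kernelExecTimeoutEnabled property; a minimal sketch, assuming device 0:

#include <stdio.h>

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0); /* query properties of device 0 */

    /* Non-zero means the OS watchdog can kill long-running kernels */
    printf("kernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
    return 0;
}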

Answer


Problem solved! It was a system timeout issue caused by the long kernel duration. I was able to get around it by switching to a terminal (console-only) session.

Thanks for all the help, guys!
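
For anyone else who hits this: an alternative to leaving X is to keep each launch short enough for the watchdog by moving the 20000-iteration loop from the kernel to the host. A sketch against the code above (matrixMultiplicationPass is a hypothetical variant of the kernel, not code from the question):

/* Kernel variant: the original kernel body with the outer 20000-iteration
   loop removed, and the result accumulated into matrixC across launches */
__global__ void matrixMultiplicationPass(float * matrixA, float * matrixB,
                                         float * matrixC, int numColsA,
                                         int numColsB) {
    float val = 0.0f;
    /* ... same sub-matrix loop as in the original kernel ... */
    int position = colsB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
    matrixC[position + colsB * threadIdx.y + threadIdx.x] += val;
}

/* Host side: zero the accumulator once, then issue many short launches */
cudaMemset(deviceC, 0, memoryC);
for (int iteration = 0; iteration < 20000; iteration++) {
    matrixMultiplicationPass <<< tbCount, tCount >>> (deviceA, deviceB,
                                                      deviceC, colsA, colsB);
}
cudaDeviceSynchronize(); /* wait for all passes before copying C back */

Kernel launches are asynchronous, so queuing 20000 of them is cheap; the driver runs them back to back, and no single kernel runs long enough to trip the timeout.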