
Cuda - copying from device global memory to texture memory

I am trying to run two tasks (split into 2 kernels) on the GPU using CUDA and C++. As input I need an NxM matrix (stored as a float array in host memory). One kernel then performs some operations on this matrix, turning it into an NxMxD matrix. A second kernel then performs some operations on that 3D matrix (I only read values from it, I never write to it).

Operating in texture memory appears to be much faster for my task, so my question is: after kernel 1, is it possible to copy the data from global memory on the device directly into texture memory for kernel 2, without bringing it back to the host?

UPDATE

I have added some code below to better illustrate my problem.

Here are the two kernels. The first one is just a placeholder for now and copies the 2D matrix into a 3D one.

__global__ void computeFeatureVector(float* imData3D_dev, int imX, int imY, int imZ) { 

    // calculate each thread's global index 
    int xindex = blockIdx.x*blockDim.x + threadIdx.x; 
    int yindex = blockIdx.y*blockDim.y + threadIdx.y; 

    // placeholder: replicate the 2D input texture across every slice of the 3D output 
    #pragma unroll 
    for (int z=0; z<imZ; z++) { 
        imData3D_dev[xindex + yindex*imX + z*imX*imY] = tex2D(texImIp, xindex, yindex); 
    } 
} 

The second kernel takes this 3D matrix, now represented as a texture, and performs some operations on it. It is just a stub for now.

__global__ void kernel2(float* resData_dev, int imX) { 
    // calculate each thread's global index 
    int xindex = blockIdx.x*blockDim.x + threadIdx.x; 
    int yindex = blockIdx.y*blockDim.y + threadIdx.y; 

    // stub: read the first slice of the 3D texture straight back out 
    resData_dev[xindex + yindex*imX] = tex3D(texImIp3D, xindex, yindex, 0); 
} 

The body of the code then looks like this:

// declare textures 
texture<float,2,cudaReadModeElementType> texImIp; 
texture<float,3,cudaReadModeElementType> texImIp3D; 

void main_fun() { 

// constants 
int imX = 1024; 
int imY = 768; 
int imZ = 16; 

// input data 
float* imData2D = new float[imX*imY];   // new[] takes an element count, not a byte count 
for(int x=0; x<imX*imY; x++) 
    imData2D[x] = (float) rand()/RAND_MAX; 

//create channel to describe data type 
cudaArray* carrayImIp; 
cudaChannelFormatDesc channel; 
channel=cudaCreateChannelDesc<float>(); 

//allocate device memory for cuda array 
cudaMallocArray(&carrayImIp,&channel,imX,imY); 

//copy matrix from host to device memory 
cudaMemcpyToArray(carrayImIp,0,0,imData2D,sizeof(float)*imX*imY,cudaMemcpyHostToDevice); 

// Set texture properties 
texImIp.filterMode=cudaFilterModePoint; 
texImIp.addressMode[0]=cudaAddressModeClamp; 
texImIp.addressMode[1]=cudaAddressModeClamp; 

// bind texture reference with cuda array 
cudaBindTextureToArray(texImIp,carrayImIp); 

// kernel params 
dim3 blocknum; 
dim3 blocksize; 
blocksize.x=16; blocksize.y=16; blocksize.z=1; 
blocknum.x=(int)ceil((float)imX/16); 
blocknum.y=(int)ceil((float)imY/16);  

// store output here 
float* imData3D_dev;   
cudaMalloc((void**)&imData3D_dev,sizeof(float)*imX*imY*imZ); 

// execute kernel 
computeFeatureVector<<<blocknum,blocksize>>>(imData3D_dev, imX, imY, imZ); 

//unbind texture reference to free resource 
cudaUnbindTexture(texImIp); 

// check copied ok 
float* imData3D = new float[imX*imY*imZ]; 
cudaMemcpy(imData3D,imData3D_dev,sizeof(float)*imX*imY*imZ,cudaMemcpyDeviceToHost);  
cout << " kernel 1" << endl; 
for (int x=0; x<10;x++) 
    cout << imData3D[x] << " "; 
cout << endl; 
delete [] imData3D; 


// 
// kernel 2 
// 


// copy data on device to 3d array 
cudaArray* carrayImIp3D; 
cudaExtent volumesize; 
volumesize = make_cudaExtent(imX, imY, imZ); 
cudaMalloc3DArray(&carrayImIp3D,&channel,volumesize); 
// NOTE: this is the copy that seems to fail - cudaMemcpyToArray() targets 1D/2D arrays (see the answers below) 
cudaMemcpyToArray(carrayImIp3D,0,0,imData3D_dev,sizeof(float)*imX*imY*imZ,cudaMemcpyDeviceToDevice); 

// texture params and bind 
texImIp3D.filterMode=cudaFilterModePoint; 
texImIp3D.addressMode[0]=cudaAddressModeClamp; 
texImIp3D.addressMode[1]=cudaAddressModeClamp; 
texImIp3D.addressMode[2]=cudaAddressModeClamp; 
cudaBindTextureToArray(texImIp3D,carrayImIp3D,channel); 

// store output here 
float* resData_dev; 
cudaMalloc((void**)&resData_dev,sizeof(float)*imX*imY); 

// kernel 2 
kernel2<<<blocknum,blocksize>>>(resData_dev, imX); 
cudaUnbindTexture(texImIp3D); 

//copy result matrix from device to host memory 
float* resData = new float[imX*imY]; 
cudaMemcpy(resData,resData_dev,sizeof(float)*imX*imY,cudaMemcpyDeviceToHost); 

// check copied ok 
cout << " kernel 2" << endl; 
for (int x=0; x<10;x++) 
    cout << resData[x] << " "; 
cout << endl; 


delete [] imData2D; 
delete [] resData; 
cudaFree(imData3D_dev); 
cudaFree(resData_dev); 
cudaFreeArray(carrayImIp); 
cudaFreeArray(carrayImIp3D); 

} 

The good news is that the first kernel works fine, but the 3D matrix imData3D_dev does not seem to get bound to the texture texImIp3D correctly.
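For what it's worth, checking the return codes would probably point at the culprit. A minimal sketch around the suspect copy (reusing the variables above), where I would expect the call to fail, most likely with cudaErrorInvalidValue, because the destination is a 3D array:

cudaError_t err = cudaMemcpyToArray(carrayImIp3D, 0, 0, imData3D_dev, 
                                    sizeof(float)*imX*imY*imZ, 
                                    cudaMemcpyDeviceToDevice); 
if (err != cudaSuccess) 
    cout << "copy failed: " << cudaGetErrorString(err) << endl; 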

ANSWER

I solved my problem using cudaMemcpy3D. Here is the revised code for the second part of the main function. imData3D_dev holds the 3D matrix in global memory produced by the first kernel.

cudaArray* carrayImIp3D; 
cudaExtent volumesize; 
volumesize = make_cudaExtent(imX, imY, imZ); 
cudaMalloc3DArray(&carrayImIp3D,&channel,volumesize); 
cudaMemcpy3DParms copyparms = {0}; 

copyparms.extent = volumesize; 
copyparms.dstArray = carrayImIp3D; 
copyparms.kind = cudaMemcpyDeviceToDevice; 
// describe the linear source memory as a pitched pointer: 
// the pitch is in bytes, width and height are in elements 
copyparms.srcPtr = make_cudaPitchedPtr((void*)imData3D_dev, sizeof(float)*imX, imX, imY); 
cudaMemcpy3D(&copyparms); 

// texture params and bind 
texImIp3D.filterMode=cudaFilterModePoint; 
texImIp3D.addressMode[0]=cudaAddressModeClamp; 
texImIp3D.addressMode[1]=cudaAddressModeClamp; 
texImIp3D.addressMode[2]=cudaAddressModeClamp; 

cudaBindTextureToArray(texImIp3D,carrayImIp3D,channel); 

// store output here 
float* resData_dev; 
cudaMalloc((void**)&resData_dev,sizeof(float)*imX*imY); 

kernel2<<<blocknum,blocksize>>>(resData_dev, imX); 

    // ... clean up 

ANSWERS


The naming of the various cudaMemcpy routines is unfortunately somewhat confusing. To operate on a 3D array you need cudaMemcpy3D(), which (among other things) can copy 3D data from linear memory into a 3D array.
cudaMemcpyToArray() is for copying linear data into a 2D array.

If you are using a device of compute capability 2.0 or higher, however, you don't want to use any of the cudaMemcpy*() functions at all. Instead, use a surface, which lets you write directly to the texture without any data copying between the kernels. (You still need to separate reading and writing into two different kernels, because the texture cache is not coherent with surface writes and is only invalidated at kernel launch.)
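If it helps, here is a minimal sketch of that approach, using the legacy surface/texture reference API (current at the time of writing, removed in CUDA 12). The names surfImIp3D, kernel1_surf and carray3D are placeholders, and the channel descriptor and dimensions are reused from the question's code; the essential parts are the cudaArraySurfaceLoadStore flag and surf3Dwrite(), whose x offset is given in bytes:

// surface and texture references bound to the same 3D cudaArray 
surface<void, cudaSurfaceType3D> surfImIp3D; 
texture<float,3,cudaReadModeElementType> texImIp3D; 

__global__ void kernel1_surf(int imX, int imY, int imZ) { 
    int x = blockIdx.x*blockDim.x + threadIdx.x; 
    int y = blockIdx.y*blockDim.y + threadIdx.y; 
    if (x >= imX || y >= imY) return; 
    for (int z=0; z<imZ; z++) { 
        float v = 0.0f; // placeholder for the computed feature value 
        surf3Dwrite(v, surfImIp3D, x*sizeof(float), y, z); // x offset is in bytes 
    } 
} 

// host side: the array must be created with cudaArraySurfaceLoadStore 
cudaArray* carray3D; 
cudaExtent vol = make_cudaExtent(imX, imY, imZ); 
cudaMalloc3DArray(&carray3D, &channel, vol, cudaArraySurfaceLoadStore); 
cudaBindSurfaceToArray(surfImIp3D, carray3D); 
cudaBindTextureToArray(texImIp3D, carray3D, channel); 
// launch kernel1_surf to fill the array, then kernel2 to read it via tex3D() - no cudaMemcpy in between 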


Solved my problem. – themush


cudaMemcpyToArray() accepts a cudaMemcpyDeviceToDevice parameter, so it should be possible.
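For a 2D array the device-to-device path does work. A minimal sketch, assuming d_src is a float* holding imX*imY elements already in device global memory and reusing the channel descriptor and texture reference from the question:

cudaArray* carray2D; 
cudaMallocArray(&carray2D, &channel, imX, imY); 
// copy linear device memory straight into the 2D array, no host round-trip 
cudaMemcpyToArray(carray2D, 0, 0, d_src, sizeof(float)*imX*imY, cudaMemcpyDeviceToDevice); 
cudaBindTextureToArray(texImIp, carray2D); 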


Thanks for the reply. I had tried using cudaMemcpyToArray(), but it did not seem to copy anything for me. I have posted my code above. – themush