
Ray tracing a triangle mesh object

I am trying to write a ray tracer for any object formed from a triangle mesh. I am using an external library to load a cube from the .ply format and then trace it. So far I have implemented most of the tracer, and now I am trying to test it with a cube, but for some reason all I get on the screen is a red line. I have tried several ways to fix it, but I simply cannot figure it out anymore. For this initial test I only create primary rays, and if they hit my cube I color that pixel with the cube's diffuse color and return. To check for ray-object intersections I go through all the triangles that form the object and return the distance to the closest one. It would be great if you could look at the code and tell me what might be wrong and where. I would greatly appreciate it.

Ray-triangle intersection:

bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction, const Vec3D& v0, const Vec3D& v1, const Vec3D& v2, double &t, double &u, double &v) const 
    { 

     Vec3D edge1 = v1 - v0; 
     Vec3D edge2 = v2 - v0; 
     Vec3D pvec = ray_direction.cross(edge2); 
     double det = edge1.dot(pvec); 
     if (det > - THRESHOLD && det < THRESHOLD) 
      return false; 
     double invDet = 1/det; 
     Vec3D tvec = ray_origin - v0; 
     u = tvec.dot(pvec)*invDet; 
     if (u < 0 || u > 1) 
      return false; 
     Vec3D qvec = tvec.cross(edge1); 
     v = ray_direction.dot(qvec)*invDet; 
     if (v < 0 || u + v > 1) 
      return false; 
     t = edge2.dot(qvec)*invDet; 
     if (t < 0) 
      return false; 
     return true; 
    } 
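
As a sanity check, the routine above (it is the Möller–Trumbore test) can be run against a triangle whose intersection is easy to verify by hand. This is only a sketch, assuming Vec3D has an (x, y, z) constructor (it is used that way in main below) and that intersectTri is reachable from the calling scope:

    // A ray shot from the origin straight down -z against a triangle
    // lying in the z = -5 plane; expected result: t = 5, u = 0.25, v = 0.5.
    Vec3D orig(0, 0, 0);
    Vec3D dir(0, 0, -1);                     // already unit length
    Vec3D a(-1, -1, -5), b(1, -1, -5), c(0, 1, -5);
    double t, u, v;
    bool hit = intersectTri(orig, dir, a, b, c, t, u, v);
    // hit should be true, with t == 5, u == 0.25, v == 0.5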

//Object intersection 
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const 
    { 

     double tClosest; 
     if (enforce_max) 
     { 
      tClosest = idata.t; 
     } 
     else 
     { 
      tClosest = TMAX; 
     } 

     for (int i = 0 ; i < indices.size() ; i++) 
     { 
      const Vec3D v0 = vertices[indices[i][0]]; 
      const Vec3D v1 = vertices[indices[i][1]]; 
      const Vec3D v2 = vertices[indices[i][2]]; 
      double t, u, v; 
      if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v)) 
      { 
       if (t < tClosest) 
       { 
        idata.t = t; 
        tClosest = t;     
        idata.u = u; 
        idata.v = v; 
        idata.index = i; 
       } 
      } 
     } 
     return (tClosest < TMAX && tClosest > 0) ? true : false; 
    } 

Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction) 
{ 

Vec3D objColor = world.background_color; 
IntersectionData idata; 
double coeff = 1.0; 
int depth = 0; 

double tClosest = TMAX; 
Object *hitObject = NULL; 
for (unsigned int i = 0 ; i < world.objs.size() ; i++) 
{  
    IntersectionData idata_curr; 
    if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false)) 
    { 
     if (idata_curr.t < tClosest && idata_curr.t > 0) 
     { 
      idata.t = idata_curr.t; 
      idata.u = idata_curr.u; 
      idata.v = idata_curr.v; 
      idata.index = idata_curr.index; 
      tClosest = idata_curr.t;    
      hitObject = &(world.objs[i]); 
     } 
    } 
} 
if (hitObject == NULL) 
{ 
    return world.background_color; 
} 
else 
{ 
    return hitObject->getDiffuse(); 
} 
} 

int main(int argc, char** argv) 
{ 

parse("cube.ply"); 
Vec3D diffusion1(1, 0, 0); 
Vec3D specular1(1, 1, 1); 
Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0); 
World wrld; 
// Add objects to the world 
wrld.objs.push_back(cube1); 
Vec3D background(0, 0, 0); 
wrld.background_color = background; 
// Set light color 
Vec3D light_clr(1, 1, 1); 
wrld.light_colors.push_back(light_clr); 
// Set light position 
Vec3D light(0, 64, -10); 
wrld.light_positions.push_back(light); 

int width = 128; 
int height = 128; 
Vec3D *image = new Vec3D[width*height]; 
Vec3D *pixel = image; 

// Trace rays 
for (int y = -height/2 ; y < height/2 ; ++y) 
{ 
    for (int x = -width/2 ; x < width/2 ; ++x, ++pixel) 
    { 
     Vec3D ray_dir(x+0.5, y+0.5, -1.0); 
     ray_dir.normalize(); 
     Vec3D ray_orig(0.5*width, 0.5*height, 0.0); 
     *pixel = trace(wrld, ray_orig, ray_dir);   
    } 
} 

savePPM("./test.ppm", image, width, height); 
return 0; 
} 

I just ran the test case and this is what I get:

It is a unit cube centered at (0, 0, -1.5), scaled by 100 on the X and Y axes. It looks like something is wrong with the projection, but I cannot tell exactly from the result. Also, shouldn't the final object appear in the middle of the picture in this case (the cube being centered at (0, 0))? FIX: I solved the centering problem by doing ray_dir = ray_dir - ray_orig before normalizing and calling the trace function. The view still seems wrong, though.
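
Concretely, the FIX described above corresponds to a change like the following inside the pixel loop of main (just a sketch of that adjustment; the rest of the loop is unchanged):

    for (int y = -height/2 ; y < height/2 ; ++y)
    {
        for (int x = -width/2 ; x < width/2 ; ++x, ++pixel)
        {
            Vec3D ray_orig(0.5*width, 0.5*height, 0.0);
            Vec3D ray_dir(x+0.5, y+0.5, -1.0);
            ray_dir = ray_dir - ray_orig;    // aim from the eye toward the pixel
            ray_dir.normalize();
            *pixel = trace(wrld, ray_orig, ray_dir);
        }
    }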


Why does your ray originate at (width/2, height/2) when your ray directions go from -(width/2, height/2) to (width/2, height/2)? You need to either move your ray origin to the origin, or offset your ray directions so that the lowest-valued corner is (0, 0, -1). I think that explains the resulting image, since it looks like a very wide field of view. – MikeMx7f


Thank you very much! I moved the ray origin to (0, 0) and now I get this: http://s10.postimage.org/i33s9eih5/test.png. Should I implement a rotation for the cube to get a 3D look? Because right now only the front face is visible. I also tried translating the cube in (x, y), but all I get is its front face. – franciscb

Answer


I kept working on it, and I have now started implementing the diffuse reflection according to Phong.

Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction)
{
Vec3D objColor = Vec3D(0); 
IntersectionData idata; 
double coeff = 1.0; 
int depth = 0; 
do 
{ 
    double tClosest = TMAX; 
    Object *hitObject = NULL; 
    for (unsigned int i = 0 ; i < world.objs.size() ; i++) 
    {  
     IntersectionData idata_curr; 
     if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false)) 
     { 
      if (idata_curr.t < tClosest && idata_curr.t > 0) 
      { 
       idata.t = idata_curr.t; 
       idata.u = idata_curr.u; 
       idata.v = idata_curr.v; 
       idata.index = idata_curr.index; 
       tClosest = idata_curr.t;    
       hitObject = &(world.objs[i]); 
      } 
     } 
    } 
    if (hitObject == NULL) 
    { 
     return world.background_color; 
    } 

    Vec3D newStart = ray_origin + ray_direction*idata.t; 

    // Compute normal at intersection by interpolating vertex normals (PHONG Idea) 
    Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]]; 
    Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]]; 
    Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]]; 

    Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]]; 
    Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]]; 
    Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]]; 

// Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v; 
    Vec3D N = v0.computeFaceNrm(v1, v2); 
    if (ray_direction.dot(N) > 0) 
    { 
     N = N*(-1); 
    } 
    N.normalize(); 

    Vec3D lightray_origin = newStart; 

    for (unsigned int itr = 0 ; itr < world.light_positions.size() ; itr++) 
    { 

     Vec3D lightray_dir = world.light_positions[0] - newStart; 
     lightray_dir.normalize(); 

     double cos_theta = max(N.dot(lightray_dir), 0.0); 
     objColor.setX(objColor.getX() + hitObject->getDiffuse().getX()*hitObject->getDiffuseReflection()*cos_theta); 
     objColor.setY(objColor.getY() + hitObject->getDiffuse().getY()*hitObject->getDiffuseReflection()*cos_theta); 
     objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ()*hitObject->getDiffuseReflection()*cos_theta); 
     return objColor; 
    } 

    depth++; 

} while(coeff > 0 && depth < MAX_RAY_DEPTH); 
return objColor; 

}

When I hit the object with a primary ray, I send another ray toward the light source located at (0, 0, 0) and return the color for the diffuse reflection according to the Phong lighting model, but the result is really not what I expected: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0, 0, 0), then translated by (1.5, -1.5, -1.5). From my point of view, the left side of the cube should receive more light, and it actually does. What do you think?
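
For reference, the per-light diffuse contribution computed in the loop above is the Lambert term of the Phong model, color += diffuse_color * k_d * max(0, N·L). A quick check with made-up numbers: if N = (0, 0, 1) and the normalized light direction is L ≈ (0.707, 0, 0.707), then cos_theta ≈ 0.707, so a diffuse color of (1, 0, 0) with a diffuse reflection coefficient of 1.0 adds roughly (0.707, 0, 0) for that light.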