2014-01-29 124 views
2

我的問題是:我想只用 1 個 WebGLRenderer,但有許多相機視圖,可以放置到許多各自獨立的畫布上。在下面的例子中,我有 9 個視圖,每個視圖都有獨立的相機、場景和網格,然後使用 ctx.drawImage 方法把渲染結果繪製到它們各自的畫布上。這種方法可行,但 drawImage 太慢,甚至無法達到 10 fps,更不用說 60+ fps 了。(原標題:three.js 的 drawImage 到多個畫布)

有沒有解決這個問題的任何方式,不涉及使用緩慢的drawImage方法或者是有什麼辦法可以加快整個過程?

感謝您的幫助和示例代碼如下放置。

http://jsfiddle.net/QD8M2/

<!doctype html> 

<html lang="en"> 
<head> 
    <meta charset="utf-8"> 

    <title>Three.js Test</title> 

    <style> 
     body { 
      margin: 0; 
      padding: 0; 
      overflow: hidden; 
     } 
    </style> 

    <!--[if lt IE 9]> 
    <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> 
    <![endif]--> 
</head> 

<body> 
    <script src="./three.min.js"></script> 
    <script> 
     var renderer; 
     var windowWidth, windowHeight; 
     var numberMeshes = 1; 
     var dpr = window.devicePixelRatio || 1; 

     var viewDemensions = {x: 3, y: 3}; 

     var views = []; 

     init(); 
     animate(); 

     function init() { 
      for (var i = 0; i < viewDemensions.x; i++) { 
       for (var j = 0; j < viewDemensions.y; j++) { 
        var obj = {}; 

        obj.left = i/viewDemensions.x; 
        obj.bottom = j/viewDemensions.y; 
        obj.width = 1/viewDemensions.x; 
        obj.height = 1/viewDemensions.y; 

        obj.canvas = document.createElement('canvas'); 
        obj.context = obj.canvas.getContext('2d'); 

        document.body.appendChild(obj.canvas); 

        var camera = new THREE.PerspectiveCamera(75, 100/100, 1, 10000); 
        camera.position.z = 1000; 
        obj.camera = camera; 

        var scene = new THREE.Scene(); 

        var geometry = new THREE.SphereGeometry(100, 10, 10); 

        obj.meshes = []; 

        for (var k = 0; k < numberMeshes; k++) { 
         var material = new THREE.MeshBasicMaterial({ color: 0xffffff*Math.random(), wireframe: true }); 
         var mesh = new THREE.Mesh(geometry, material); 
         var scale = 2*Math.random(); 
         mesh.scale.set(scale, scale, scale); 
         scene.add(mesh); 
         obj.meshes.push(mesh); 
        } 

        obj.scene = scene; 

        views.push(obj); 
       } 
      } 

      renderer = new THREE.WebGLRenderer({ 
       preserveDrawingBuffer: true 
      }); 

      // document.body.appendChild(renderer.domElement); 
     } 

     function updateSize() { 
      if (windowWidth != window.innerWidth || windowHeight != window.innerHeight) { 
       windowWidth = window.innerWidth; 
       windowHeight = window.innerHeight; 
       renderer.setSize (windowWidth, windowHeight); 
      } 
     } 

     function animate() { 

      updateSize(); 

      for (var i = 0; i < views.length; i++) { 
       var view = views[i]; 
       var left = Math.floor(view.left*windowWidth) * dpr; 
       var bottom = Math.floor(view.bottom*windowHeight) * dpr; 
       var width = Math.floor(view.width*windowWidth) * dpr; 
       var height = Math.floor(view.height*windowHeight) * dpr; 
       view.canvas.width = width; 
       view.canvas.height = height; 
       view.canvas.style.width = Math.floor(view.width*windowWidth) + 'px'; 
       view.canvas.style.height = Math.floor(view.height*windowHeight) + 'px'; 
       view.context.scale(dpr, dpr); 
       view.camera.aspect = width/height; 
       view.camera.updateProjectionMatrix(); 
       renderer.setViewport(left, bottom, width, height); 
       renderer.setScissor(left, bottom, width, height); 
       renderer.enableScissorTest (true); 
       renderer.setClearColor(new THREE.Color().setRGB(0.5, 0.5, 0.5)); 
       for (var j = 0; j < numberMeshes; j++) { 
        view.meshes[j].rotation.x += 0.03*Math.random(); 
        view.meshes[j].rotation.y += 0.05*Math.random(); 
       } 
       renderer.render(view.scene, view.camera); 
       view.context.drawImage(renderer.domElement,left,bottom,width,height,0,0,view.width*windowWidth,view.height*windowHeight); 
      } 

      requestAnimationFrame(animate); 
     } 
    </script> 
</body> 
</html> 
+0

爲什麼需要維護單個WebGLRenderer? – prabindh

+0

,因爲webgl有一個硬限制,它限制了可能的併發webgl渲染器的數量 –

+0

我不知道WebGL規範對此有限制,但可能存在瀏覽器特定的限制,例如內存。如果只有一個畫布並將這些對象放置得恰到好處,它會更有效率嗎? – prabindh

回答

1

經過一些討論之後:使用 FBO 渲染不同的視圖,然後把這些紋理作為輸入用在不同的視圖位置上,可能適合你的情況。請查看下面的做法。請注意,這並「不」涉及先繪製到緩衝區、再讀取像素、然後把它應用到畫布的流程。

EDIT1:添加了使用 three.js 的僞代碼

Create offscreen target

rtTexture = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, ...);

Create screen, material, and mesh

mtlScreen = new THREE.ShaderMaterial({ uniforms: { tDiffuse: { type: "t", value: rtTexture } }, ... });

mtl = new THREE.MeshBasicMaterial({ map: rtTexture });

mesh = new THREE.Mesh(plane, mtlScreen);

scene.add(mesh);

Now render to offscreen first, then to display

renderer.render(sceneRTT,cameraRTT,rtTexture,..);

renderer.render(scene, camera);

參考標準的三例得到完整的代碼 - https://github.com/prabindh/three.js/blob/master/examples/webgl_rtt.html,和我貼在這很短的幻燈片在http://www.slideshare.net/prabindh/render-to-texture-with-threejs

Approach With GLES2:

快速設置爲FBO與GLES2(瑣碎變化的WebGL):

  • glGenFramebuffers(NUM_FBO, fboId); 
    glGenTextures(NUM_FBO, fboTextureId); 
    glGenTextures(1, &regularTextureId); 
    

然後設置繪製到屏幕外的緩衝區:

  •  GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0)); 
         GL_CHECK(glBindTexture(GL_TEXTURE_2D, fboTextureId[i])); 
         GL_CHECK(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, globals->inTextureWidth, globals->inTextureHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL)); 
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)); 
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)); 
    
         GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, fboId[i])); 
         GL_CHECK(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fboTextureId[i], 0)); 
    

再畫到屏幕外的緩衝區:

  •  //Bind regular texture 
         GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0)); 
         GL_CHECK(glBindTexture(GL_TEXTURE_2D, regularTextureId)); 
         add_texture(globals->inTextureWidth, globals->inTextureHeight, globals->textureData, globals->inPixelFormat); 
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)); 
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)); 
         //Draw with regular draw calls to FBO 
         GL_CHECK(_test17(globals, numObjectsPerSide, 1)); 
    

現在用這個作爲紋理輸入,並畫到正常顯示:

  •  GL_CHECK(glBindTexture(GL_TEXTURE_2D, fboTextureId[i])); 
    
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)); 
         GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)); 
         //draw to display buffer 
    
         //Now get back display framebuffer and unbind the FBO 
         GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0)); 
    

https://github.com/prabindh/sgxperf/blob/master/sgxperf_test17.cpp

+0

這樣做更有意義,但我確實不太清楚如何與three.js一起執行此操作。這略高於我對webgl的認識,因爲我從來沒有真正使用過FBO,但我會一直盯着這個試圖讓事情有效的東西。 –

+0

http://mrdoob.github.io/three.js/examples/webgl_rtt.html – prabindh

+0

我發佈了我的「解決方案」,如果它完全從您發佈的內容出現錯誤的方向,請您幫助我解決問題正確的方向? –

0

這裏是我想出了一個解決方案。不像prabindh的回答那樣贊成,但這是我能想出如何解決我的問題的唯一方法。

基本上我使用WebGLRenderTarget將每個不同的視圖/場景渲染到渲染器上,然後使用readPixels將像素複製到畫布上。我試圖讓網絡工作者努力提高性能,但卻無法讓他們在寫圖片時表現出色。我的代碼如下。

如果你的意思是完全不同的prabindh,我會喜歡一些關於如何使我的當前用例更快和/或更好的幫助。

// Single shared WebGL renderer (WebGL contexts are a scarce resource).
var renderer;
// Raw WebGLRenderingContext, used directly for readPixels().
var gl;
// NOTE(review): `times` is never used below — looks like profiling leftover.
var times = [];
// Cached window size, refreshed by updateSize().
var windowWidth, windowHeight;
// Number of meshes created per view.
var numberMeshes = 10;
// Device pixel ratio, used to keep the per-view canvases crisp on HiDPI.
var dpr = window.devicePixelRatio || 1;

// Grid of views (2x2). NOTE(review): "Demensions" is a typo for "Dimensions";
// kept as-is because init()/animate() reference this name.
var viewDemensions = {x: 2, y: 2};

// One entry per grid cell: canvas, context, camera, scene, meshes, target.
var views = [];

init();
animate();

function init() {
    // One shared renderer for every view; preserveDrawingBuffer keeps the
    // pixels available after rendering, and autoClear is disabled because
    // each view clears its own render target.
    renderer = new THREE.WebGLRenderer({preserveDrawingBuffer: true});
    renderer.autoClear = false;

    gl = renderer.getContext();

    for (var col = 0; col < viewDemensions.x; col++) {
        for (var row = 0; row < viewDemensions.y; row++) {
            // Normalized placement of this view inside the window.
            var view = {
                left: col / viewDemensions.x,
                bottom: row / viewDemensions.y,
                width: 1 / viewDemensions.x,
                height: 1 / viewDemensions.y
            };

            view.canvas = document.createElement('canvas');
            view.context = view.canvas.getContext('2d');
            document.body.appendChild(view.canvas);

            // Aspect is a placeholder (1.0); animate() recomputes it.
            view.camera = new THREE.PerspectiveCamera(75, 100/100, 1, 10000);
            view.camera.position.z = 1000;

            view.scene = new THREE.Scene();

            var sphereGeometry = new THREE.SphereGeometry(100, 10, 10);

            view.meshes = [];
            for (var n = 0; n < numberMeshes; n++) {
                var material = new THREE.MeshBasicMaterial({ color: 0xffffff*Math.random(), wireframe: true });
                var mesh = new THREE.Mesh(sphereGeometry, material);
                var s = 2*Math.random();
                mesh.scale.set(s, s, s);
                view.scene.add(mesh);
                view.meshes.push(mesh);
            }

            // Placeholder pixel size; animate() replaces these (and the
            // buffers below) once the real view size is known.
            view.widthVal = 100;
            view.heightVal = 100;

            view.imageData = view.context.getImageData(0, 0, view.widthVal, view.heightVal);
            view.pixels = new Uint8Array(view.imageData.data.length);

            // Offscreen render target that readPixels copies out of.
            view.target = new THREE.WebGLRenderTarget(100, 100, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat });

            views.push(view);
        }
    }
}

function updateSize() {
    // Refresh the cached window dimensions only when they actually changed.
    if (window.innerWidth !== windowWidth || window.innerHeight !== windowHeight) {
        windowWidth = window.innerWidth;
        windowHeight = window.innerHeight;
    }
}

// Per-frame loop: for each view, animate its meshes, render the scene into
// that view's offscreen render target, read the pixels back with
// gl.readPixels, and blit them into the view's own 2D canvas.
function animate() {

    updateSize();

    var i, j;
    var view, width, height;
    var sWidth, sHeight;
    var mesh;

    for (i = 0; i < views.length; i++) {
     view = views[i];
     // if (!view.lock) {
     // Spin every mesh in this view by a fixed step each frame.
     for (j = 0; j < view.meshes.length; j++) {
      mesh = view.meshes[j];
      mesh.rotation.x += 0.03;
      mesh.rotation.y += 0.05;
     }

     // CSS-pixel size of this view's grid cell.
     sWidth = Math.floor(view.width*windowWidth);
     sHeight = Math.floor(view.height*windowHeight);

     // Device-pixel (backing-store) size.
     width = sWidth * dpr;
     height = sHeight * dpr;

     var same = true;

     // Track size changes so the ImageData/pixel buffers are only
     // reallocated when the view actually resized.
     if (view.widthVal != width || view.heightVal != height) {
      same = false;
      view.widthVal = width;
      view.heightVal = height;
     }

     // Assigning canvas.width/height clears the canvas and resets the 2D
     // transform, so the dpr scale must be re-applied every frame.
     view.canvas.width = width;
     view.canvas.height = height;
     view.canvas.style.width = sWidth + 'px';
     view.canvas.style.height = sHeight + 'px';
     view.context.scale(dpr, dpr);

     view.camera.aspect = width/height;
     view.camera.updateProjectionMatrix();
     // NOTE(review): the renderer is sized in CSS pixels but the target in
     // device pixels; when dpr > 1 these disagree — confirm this is intended.
     renderer.setSize(sWidth, sHeight);
     view.target.width = width;
     view.target.height = height;
     // Old three.js 4-arg form: render into view.target, clearing it first.
     renderer.render(view.scene, view.camera, view.target, true);

     if (!same) {
      view.imageData = view.context.createImageData(width,height);
      view.pixels = new Uint8Array(view.imageData.data.length);
     }

     // Copy the rendered pixels back to the CPU. NOTE(review): presumably
     // this reads from view.target's framebuffer, which the render call
     // above leaves bound — verify against the three.js version in use.
     // Also, readPixels returns rows bottom-up while putImageData expects
     // top-down, so the blitted image is vertically flipped — confirm.
     gl.readPixels(0,0,width,height,gl.RGBA,gl.UNSIGNED_BYTE,view.pixels);

     // view.ww.postMessage({imageData: imageData, pixels: pixels, i:i});

     view.imageData.data.set(view.pixels);

     view.context.putImageData(view.imageData,0,0);
    }

    requestAnimationFrame(animate);

}
+0

看起來,當我在init時間之後添加更多視圖時,webgl上下文將會丟失,並且所有內容都會崩潰。我開始認爲這個問題可能不是真的可以解決的。真的太糟糕了,考慮到webgl中的上下文數量的限制與普通的opengl相比非常低。 –

+0

這絕對不是我的意思。 readPixels是要避免的。我編輯了我的前一篇文章,用Three.js引用來展示我的真正含義。 – prabindh

+0

我明白你在做什麼,但我不明白這是如何幫助我輸出不同的視圖來分離HTML中的畫布。如何將「mtlScreen」放到html的畫布上是我不理解的主要想法。 (抱歉,由於沒有太好的理解,這個主題似乎很少記錄,我也沒有任何WebGL主控。) –