
Three.js pixelated render target?

I'm trying to get a basic render target working: render one scene into a render target, then use that as the texture on a quad. I have a basic demo, but when I run it the result is all pixelated, as if the scene had been rendered to a small screen and then stretched across the quad.

Here is my code:

<!DOCTYPE html> 
<html> 
    <head> 
     <meta charset="utf-8"> 
     <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0"> 

     <title></title> 

     <link rel="stylesheet" type="text/css" href="style.css"> 
     <script src="three.min.js" type="text/javascript"></script> 

     <style> 
      body { 
       color: #ffffff; 
       font-family:Monospace; 
       font-size:13px; 
       text-align:center; 
       font-weight: bold; 
       background-color: #000000; 
       margin: 0px; 
       overflow: hidden; 
      } 

      #info { 
       position: absolute; 
       top: 0px; width: 100%; 
       padding: 5px; 
      } 

      a { 
       color: #ffffff; 
      } 

     </style> 
    </head> 
    <body> 
     <div id="container"></div> 

     <script id="vertex-shader-test" type="x-shader/x-vertex"> 
      void main() { 
       gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.0); 
      } 
     </script> 

     <script id="fragment-shader-test" type="x-shader/x-vertex"> 
      void main() { 
       gl_FragColor = vec4(1,1,1,1); 
      } 
     </script> 

     <script id="vertex-shader-screen" type="x-shader/x-vertex"> 
      varying vec2 vertexUV; 

      void main() { 
       vertexUV = uv; 
       gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); 
      } 
     </script> 

     <script id="fragment-shader-screen" type="x-shader/x-fragment"> 
      precision highp float; 
      varying vec2 vertexUV; 
      uniform sampler2D rtTexture; 

      void main() { 
       gl_FragColor = texture2D(rtTexture, vertexUV); 
      } 
     </script> 

     <script> 

      var container = null, 
       scene = null, 
       rtScene = null, 
       camera = null, 
       rtCamera = null, 
       renderer = null, 
       plane = null; 

      var rtTexture; 

      init(); 
      render(); 

      function init() 
      { 
       container = document.getElementById('container'); 

       scene = new THREE.Scene(); 
       rtScene = new THREE.Scene(); 

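       // Full-window render target; its texture is sampled by the screen quad's shader 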
       rtTexture = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat }); 

       camera = new THREE.PerspectiveCamera(20, window.innerWidth/window.innerHeight, 1, 1000); 
       camera.position.z = 1; 

       rtCamera = new THREE.PerspectiveCamera(45, window.innerWidth/window.innerHeight, 1, 1000); 
       rtCamera.position.z = 100; 
       rtCamera.position.y = 10; 

       var material2 = new THREE.ShaderMaterial({ 
        vertexShader: document.getElementById('vertex-shader-test').textContent, 
        fragmentShader: document.getElementById('fragment-shader-test').textContent, 
       }); 

       var geometry = new THREE.PlaneBufferGeometry(100, 100, 200, 200); 

       plane = new THREE.Mesh(geometry, material2); 
       plane.rotateX(-Math.PI/2); 
       rtScene.add(plane); 

       var geometry = new THREE.PlaneBufferGeometry(1, 1); 

       var material = new THREE.ShaderMaterial({ 
        uniforms: { 
         rtTexture: { type: "t", value: rtTexture } 
        }, 
        vertexShader: document.getElementById('vertex-shader-screen').textContent, 
        fragmentShader: document.getElementById('fragment-shader-screen').textContent, 
       }); 

       var mesh = new THREE.Mesh(geometry, material); 

       scene.add(mesh); 

       renderer = new THREE.WebGLRenderer(); 
       renderer.setPixelRatio(window.devicePixelRatio); 
       renderer.setSize(window.innerWidth, window.innerHeight); 
       renderer.autoClear = false; 

       container.appendChild(renderer.domElement); 
      } 

      function render() { 
       requestAnimationFrame(render); 

       camera.lookAt(new THREE.Vector3(0,0,0)); 
       rtCamera.lookAt(new THREE.Vector3(0,0,0)); 

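       // First pass renders rtScene into rtTexture; second pass draws the textured quad to the screen 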
       renderer.render(rtScene, rtCamera, rtTexture, true); 
       renderer.render(scene, camera); 
      } 
     </script> 
    </body> 
</html> 

Any ideas?

Answer


You are using a perspective camera to render the plane that the render target is mapped onto. This makes the rendered output depend on the field of view. For example, if you reduce the camera's field of view from 20 to 10, the camera zooms in further and you get more pronounced stepping (because the image is being magnified more). Likewise, increasing the field of view zooms out and reduces the stepping.
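As a rough sanity check using the numbers from the code above (fov = 20, camera.position.z = 1, a 1×1 quad at the origin): the frustum is only 2 · tan(10°) ≈ 0.35 units tall at the quad's distance, so the 1-unit quad overshoots the view by roughly 2.8×. Only about a third of the render-target texture ever reaches the screen, stretched across the whole window, and with magFilter: THREE.NearestFilter each magnified texel shows up as a hard-edged block.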

However, for what you are doing in this example, I would use an orthographic camera for the main scene that displays the render target output, as sketched below.
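A minimal sketch of that output pass, in the same pre-r102 render-call style as the code above (the outCamera name is illustrative; scene, rtScene, rtCamera and rtTexture are the ones already created in init()):

// Orthographic camera whose frustum is exactly 1 unit wide and tall,
// so the existing 1x1 screen quad fills the viewport with no perspective scaling.
var outCamera = new THREE.OrthographicCamera(-0.5, 0.5, 0.5, -0.5, -1, 1);

function render() {
    requestAnimationFrame(render);

    rtCamera.lookAt(new THREE.Vector3(0, 0, 0));

    // First pass: render the RTT scene into the render target.
    renderer.render(rtScene, rtCamera, rtTexture, true);

    // Second pass: draw the screen quad; the full render-target texture now
    // covers the viewport instead of only its centre being magnified.
    renderer.render(scene, outCamera);
}

With this setup the window-sized render target maps onto the window once over, so there is no magnification left to expose the texels.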