2015-08-14 78 views
0

基於this另一OpenGL ES的圖像過濾器工作:GPUImage定製的OpenGL ES着色器產生黑色圖像

uniform sampler2D texture; 
uniform float amount; 
uniform vec2 texSize; 
varying vec2 texCoord; 
void main() { 
    vec4 color = texture2D(texture, texCoord); 
    vec4 orig = color; 

    /* High pass filter */ 
    vec4 highpass = color * 5.0; 

    float dx = 1.0/texSize.x; 
    float dy = 1.0/texSize.y; 
    highpass += texture2D(texture, texCoord + vec2(-dx, -dy)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(dx, -dy)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(dx, dy)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(-dx, dy)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, dy * 2.0)) * -0.625; 
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, dy * 2.0)) * -0.625; 
    highpass.a = 1.0; 

    /* Overlay blend */ 
    vec3 overlay = vec3(1.0); 
    if (highpass.r <= 0.5) { 
     overlay.r = 2.0 * color.r * highpass.r; 
    } else { 
     overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r); 
    } 
    if (highpass.g <= 0.5) { 
     overlay.g = 2.0 * color.g * highpass.g; 
    } else { 
     overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g); 
    } 
    if (highpass.b <= 0.5) { 
     overlay.b = 2.0 * color.b * highpass.b; 
    } else { 
     overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b); 
    } 
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2); 

    /* Desaturated hard light */ 
    vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0); 
    if (desaturated.r <= 0.5) { 
     color.rgb = 2.0 * color.rgb * desaturated; 
    } else { 
     color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated); 
    } 
    color = (orig * 0.6) + (color * 0.4); 

    /* Add back some color */ 
    float average = (color.r + color.g + color.b)/3.0; 
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45)); 

    gl_FragColor = (color * amount) + (orig * (1.0 - amount)); 
} 

按我question yesterday,我知道分配精度每個浮點和VEC。這一次它編譯得很好,但是當我在GPUImage中應用過濾器時(例如,通過將clarity的值設置爲0.8),圖像變黑。我的直覺告訴我這與紋理大小有關,但不知道GPUImage如何處理,我有點卡住了。

這是我在Objective-C實現:我想這樣做的

.H

#import <GPUImage/GPUImage.h> 

@interface GPUImageClarityFilter : GPUImageFilter 
{ 
    GLint clarityUniform; 
} 

// Gives the image a gritty, surreal contrasty effect 
// Value 0 to 1 
@property (readwrite, nonatomic) GLfloat clarity; 

@end 

.M

#import "GPUImageClarityFilter.h" 

#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING 
(
uniform sampler2D inputImageTexture; 
uniform lowp float clarity; 
uniform highp vec2 textureSize; 
varying highp vec2 textureCoordinate; 
void main() { 
    highp vec4 color = texture2D(inputImageTexture, textureCoordinate); 
    highp vec4 orig = color; 

    /* High pass filter */ 
    highp vec4 highpass = color * 5.0; 

    highp float dx = 1.0/textureSize.x; 
    highp float dy = 1.0/textureSize.y; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625; 
    highpass.a = 1.0; 

    /* Overlay blend */ 
    highp vec3 overlay = vec3(1.0); 
    if (highpass.r <= 0.5) { 
     overlay.r = 2.0 * color.r * highpass.r; 
    } else { 
     overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r); 
    } 
    if (highpass.g <= 0.5) { 
     overlay.g = 2.0 * color.g * highpass.g; 
    } else { 
     overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g); 
    } 
    if (highpass.b <= 0.5) { 
     overlay.b = 2.0 * color.b * highpass.b; 
    } else { 
     overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b); 
    } 
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2); 

    /* Desaturated hard light */ 
    highp vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0); 
    if (desaturated.r <= 0.5) { 
     color.rgb = 2.0 * color.rgb * desaturated; 
    } else { 
     color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated); 
    } 
    color = (orig * 0.6) + (color * 0.4); 

    /* Add back some color */ 
    highp float average = (color.r + color.g + color.b)/3.0; 
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45)); 

    gl_FragColor = (color * clarity) + (orig * (1.0 - clarity)); 
} 
); 
#else 
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING 
(
uniform sampler2D inputImageTexture; 
uniform float clarity; 
uniform vec2 textureSize; 
varying vec2 textureCoordinate; 
void main() { 
    vec4 color = texture2D(inputImageTexture, textureCoordinate); 
    vec4 orig = color; 

    /* High pass filter */ 
    vec4 highpass = color * 5.0; 

    float dx = 1.0/textureSize.x; 
    float dy = 1.0/textureSize.y; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625; 
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625; 
    highpass.a = 1.0; 

    /* Overlay blend */ 
    vec3 overlay = vec3(1.0); 
    if (highpass.r <= 0.5) { 
     overlay.r = 2.0 * color.r * highpass.r; 
    } else { 
     overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r); 
    } 
    if (highpass.g <= 0.5) { 
     overlay.g = 2.0 * color.g * highpass.g; 
    } else { 
     overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g); 
    } 
    if (highpass.b <= 0.5) { 
     overlay.b = 2.0 * color.b * highpass.b; 
    } else { 
     overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b); 
    } 
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2); 

    /* Desaturated hard light */ 
    vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0); 
    if (desaturated.r <= 0.5) { 
     color.rgb = 2.0 * color.rgb * desaturated; 
    } else { 
     color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated); 
    } 
    color = (orig * 0.6) + (color * 0.4); 

    /* Add back some color */ 
    float average = (color.r + color.g + color.b)/3.0; 
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45)); 

    gl_FragColor = (color * clarity) + (orig * (1.0 - clarity)); 
} 
); 
#endif 

@implementation GPUImageClarityFilter 

@synthesize clarity = _clarity; 

#pragma mark - 
#pragma mark Initialization and teardown 

- (id)init; 
{ 
    if (!(self = [super initWithFragmentShaderFromString:kGPUImageClarityFragmentShaderString])) 
    { 
     return nil; 
    } 

    clarityUniform = [filterProgram uniformIndex:@"clarity"]; 
    self.clarity = 0.0; 

    return self; 
} 

#pragma mark - 
#pragma mark Accessors 

- (void)setClarity:(GLfloat)clarity; 
{ 
    _clarity = clarity; 

    [self setFloat:_clarity forUniform:clarityUniform program:filterProgram]; 
} 

@end 

的另一件事是運用GPUImage的內置在低通和高通濾波器中,但我感覺這會導致相當笨重的s olution。

回答

1

這可能是由於textureSize不是作爲GPUImageFilter的一部分提供給您的標準制服。 inputImageTexturetextureCoordinate是由其中一個過濾器提供的標準制服,看起來您正在提供clarity制服。

因爲沒有設置textureSize,所以它將默認爲0.0。然後您的1.0/textureSize.x計算將被零除,這往往會導致iOS片段着色器中的黑幀。

您可以計算並提供該制服,或者改爲使用GPUImage3x3TextureSamplingFilter替代您的自定義過濾器。該過濾器基類將1.0/textureSize.x的結果作爲texelWidth統一(以及對於垂直分量的匹配texelHeight)。你不必計算這個。實際上,它也會計算周圍8個像素的紋理座標,因此您可以切除上述四個計算並將其轉換爲非依賴性紋理讀取。您只需計算基於2 * texelWidth2 * texelHeight的四個紋理讀取即可完成剩餘的四次讀取。

實際上,您可以將此操作分解爲多個通道,以節省計算量,進行小方框模糊處理,然後進行疊加混合處理,然後執行此過濾器的最後一個階段。這可以進一步加快這一進程。

+0

謝謝布拉德我會給它一個鏡頭。 – brandonscript

+0

花了一些時間擺弄它,但遺憾的是我不太熟悉編寫着色器來弄清楚如何集成3x3TextureSamplingFilter。相反,我只是玩了一些textureSize的硬編碼值,並在'320.0'上着陸,給了我一個漂亮的效果。 – brandonscript

+0

該死的,當然只有當輸入圖像具有特定尺寸時纔有效。 – brandonscript

0

所以,你可以重寫

(void)setupFilterForSize:(CGSize)filterFrameSize 

方法設置寬度&高度因子樣GPUImageSharpenFilter