screen-space reflections

hey, it’s been a while since i asked here some question about implementation of something… and this is the case, cause i’m puzzled. before trying to implement i thought it was simple: just take view direction and eye-space normal from gbuffer, calculate reflection vector and ray-march along it until depth-hit. well… that doesn’t work(and i can’t wrap my head around the reason why and how it should work). and i started to investigate implementations i could find on internet. the problem is there are no papers on this topic, only a couple of really dirty and confusing examples.

first of all, all of them seem to transform reflection vector to clip space, but i don’t quite get why. this example has comment saying it’s for sampling from “default hardware depthbuffer”. i have no idea, what it means. other examples don’t say that and do sample in regular way, but still use clip space for some reason. he also uses some weird sampling method with gradients and some paraboloid parameter. i’ve dropped that example at this point, cause i have no idea what’s he doing and i don’t wanna that in my code.

here’s another example: http://pastie.org/private/zofibyyegp2hlfeloxjg
i’ve tried to implement it in my code, with glsl… and here’s what ive got:



#version 330
uniform sampler2D colorMap;  //color
uniform sampler2D normalMap; //eye space normal & linear depth
uniform sampler2D positionMap; //eye space positions

uniform vec4 ratio; //ratio.x - inverse resolution

//matrices from main view
uniform mat4 inverseProjectionMatrix;
uniform mat4 projectionMatrix;

layout(location = 0) out vec4 outColor;
in vec2 texCoord0;

const int numSamples = 128;

void main(void)
{
    vec4 color = vec4(0.0);
    vec4 surfaceData = texture(normalMap, texCoord0);
    float depth = surfaceData.w;

    vec3 eyeSpaceNormal = normalize(surfaceData.xyz);
    vec3 eyeSpacePosition = normalize(texture(positionMap, texCoord0).xyz);

    vec4 viewDir = inverseProjectionMatrix * vec4(0.0, 0.0, 1.0, 1.0);
    viewDir /= viewDir.w;

    vec4 reflectionVector = normalize(projectionMatrix * vec4(reflect(viewDir.xyz, eyeSpaceNormal), 1.0));
    float refLength = length(reflectionVector.xy);
    reflectionVector = normalize(reflectionVector)/refLength * ratio.x;
    reflectionVector.z *= depth;

    vec3 currentPosition = vec3(reflectionVector.x, reflectionVector.y, depth + reflectionVector.z);

    int currentSamples = 0;
    while(currentSamples < numSamples) {
        float sampledDepth = texture(normalMap, texCoord0 + currentPosition.xy).w;
        float difference = currentPosition.z - sampledDepth;
        if(difference > 0.0025) {
            color = texture(colorMap, texCoord0 + currentPosition.xy);
            break;
        }
        currentPosition += reflectionVector.xyz;
        currentSamples++;
    }
    outColor = vec4(color.rgb, 1.0);
}  

at least it looks like raymarching occurs in somewhat correct direction. but i get something resembling correct reflections only under certain angles. and it also reacts to camera rotation a lot;

Here is a screen-space reflection shader I implemented:


uniform sampler2D gColor;
uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D gEffect;
uniform vec2 gTexSizeInv;


// Consts should help improve performance
const float rayStep = 0.25;
const float minRayStep = 0.1;
const float maxSteps = 20;
const float searchDist = 5;
const float searchDistInv = 0.2;
const int numBinarySearchSteps = 5;
const float maxDDepth = 1.0;
const float maxDDepthInv = 1.0;


const float reflectionSpecularFalloffExponent = 3.0;


uniform mat4 projection;


vec3 BinarySearch(vec3 dir, inout vec3 hitCoord, out float dDepth)
{
    float depth;


    for(int i = 0; i < numBinarySearchSteps; i++)
    {
        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;


        depth = texture2D(gPosition, projectedCoord.xy).z;


        dDepth = hitCoord.z - depth;


        if(dDepth > 0.0)
            hitCoord += dir;


        dir *= 0.5;
        hitCoord -= dir;    
    }


    vec4 projectedCoord = projection * vec4(hitCoord, 1.0); 
    projectedCoord.xy /= projectedCoord.w;
    projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;


    return vec3(projectedCoord.xy, depth);
}


vec4 RayCast(vec3 dir, inout vec3 hitCoord, out float dDepth)
{
    dir *= rayStep;


    float depth;


    for(int i = 0; i < maxSteps; i++)
    {
        hitCoord += dir;


        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;


        depth = texture2D(gPosition, projectedCoord.xy).z;


        dDepth = hitCoord.z - depth;


        if(dDepth < 0.0)
            return vec4(BinarySearch(dir, hitCoord, dDepth), 1.0);
    }


    return vec4(0.0, 0.0, 0.0, 0.0);
}


void main()
{
    vec2 gTexCoord = gl_FragCoord.xy * gTexSizeInv;


    // Samples
    float specular = texture2D(gColor, gTexCoord).a;


    if(specular == 0.0)
    {
        gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0);
        return;
    }


    vec3 viewNormal = texture2D(gNormal, gTexCoord).xyz;
    vec3 viewPos = texture2D(gPosition, gTexCoord).xyz;


    // Reflection vector
    vec3 reflected = normalize(reflect(normalize(viewPos), normalize(viewNormal)));


    // Ray cast
    vec3 hitPos = viewPos;
    float dDepth;


    vec4 coords = RayCast(reflected * max(minRayStep, -viewPos.z), hitPos, dDepth);


    vec2 dCoords = abs(vec2(0.5, 0.5) - coords.xy);


    float screenEdgefactor = clamp(1.0 - (dCoords.x + dCoords.y), 0.0, 1.0);


    // Get color
    gl_FragColor = vec4(texture2D(gEffect, coords.xy).rgb,
        pow(specular, reflectionSpecularFalloffExponent) *
        screenEdgefactor * clamp(-reflected.z, 0.0, 1.0) *
        clamp((searchDist - length(viewPos - hitPos)) * searchDistInv, 0.0, 1.0) * coords.w);
}

I hope it helps!

hey, thanks for sharing your code. i’ve already implemented SSR at this moment. it has some problems. but the one that bugs me the most is because of non-linear depth and because i do it in low-res(1/4), it’s a bit too noizy. and i think i already have given it too much resources, for such a minor effect.