aejtgl

11-08-2017, 03:02 PM

First of all, sorry for making another one of these threads (I know there are tons of them already), but I haven't been able to find any which could help me figure out what's wrong with my code. I've been trying to get the method shown here (https://www.khronos.org/opengl/wiki/Compute_eye_space_from_window_space#Optimized_meth od_from_XYZ_of_gl_FragCoord) to work for so long that I've run out of things to try.

First I tried to just implement that into my existing shader code, but as I couldn't get that to work after many hours, I tried using the barebones example (which had a few minor details I had to change to make it compile) and I'm still getting the exact same result.

Vertex shader:

#version 330 core

//Vertex shader

//Half the size of the near plane {tan(fovy/2.0) * aspect, tan(fovy/2.0) }

uniform vec2 halfSizeNearPlane;

layout (location = 0) in vec3 clipPos;

//UV for the depth buffer/screen access.

//(0,0) in bottom left corner (1, 1) in top right corner

layout (location = 1) in vec2 texCoord;

out vec3 eyeDirection;

out vec2 uv;

void main()

{

uv = texCoord;

eyeDirection = vec3((2.0 * halfSizeNearPlane * texCoord) - halfSizeNearPlane , -1.0);

gl_Position = vec4(clipPos.xy, 0, 1);

}

Fragment shader:

#version 330 core

//Fragment shader

layout (location = 0) out vec3 fragColor;

in vec3 eyeDirection;

in vec2 uv;

uniform mat4 persMatrix;

uniform vec2 depthrange;

uniform sampler2D depth;

vec4 CalcEyeFromWindow(in float windowZ, in vec3 eyeDirection, in vec2 depthrange)

{

float ndcZ = (2.0 * windowZ - depthrange.x - depthrange.y) / (depthrange.y - depthrange.x);

float eyeZ = persMatrix[3][2] / ((persMatrix[2][3] * ndcZ) - persMatrix[2][2]);

return vec4(eyeDirection * eyeZ, 1);

}

void main()

{

vec4 eyeSpace = CalcEyeFromWindow(texture(depth, uv).x, eyeDirection, depthrange);

fragColor = eyeSpace.rbg;

}

Where my camera settings are:

float fov = glm::radians(60.0f);

float aspect = 800.0f / 600.0f;

And my uniforms equal:

uniform mat4 persMatrix = glm::perspective(fov, aspect, 0.1f, 100.0f)

uniform vec2 halfSizeNearPlane = glm::vec2(glm::tan(fov/2.0) * aspect, glm::tan(fov/2.0))

uniform vec2 depthrange = glm::vec2(0.0f, 1.0f)

uniform sampler2D depth is a GL_DEPTH24_STENCIL8 texture which has depth values from an earlier pass (if I linearize it and set fragColor = vec3(linearizedZ), it shows up like it should, so nothing seems wrong there).

The resulting image I'm getting doesn't at all look like it does when I save position in the G-buffer or when I use the inverse matrix transforms to reconstruct position:

https://i.imgur.com/s3dzzOU.png

(rendering a single 15x15x15 cube)

Is there any obvious mistake in my code? To me everything looks sound, but it feels like there has to be some small obvious mistake somewhere.

Thanks!

First I tried to just implement that into my existing shader code, but as I couldn't get that to work after many hours, I tried using the barebones example (which had a few minor details I had to change to make it compile) and I'm still getting the exact same result.

Vertex shader:

#version 330 core

//Vertex shader

//Half the size of the near plane {tan(fovy/2.0) * aspect, tan(fovy/2.0) }

uniform vec2 halfSizeNearPlane;

layout (location = 0) in vec3 clipPos;

//UV for the depth buffer/screen access.

//(0,0) in bottom left corner (1, 1) in top right corner

layout (location = 1) in vec2 texCoord;

out vec3 eyeDirection;

out vec2 uv;

void main()

{

uv = texCoord;

eyeDirection = vec3((2.0 * halfSizeNearPlane * texCoord) - halfSizeNearPlane , -1.0);

gl_Position = vec4(clipPos.xy, 0, 1);

}

Fragment shader:

#version 330 core

//Fragment shader

layout (location = 0) out vec3 fragColor;

in vec3 eyeDirection;

in vec2 uv;

uniform mat4 persMatrix;

uniform vec2 depthrange;

uniform sampler2D depth;

vec4 CalcEyeFromWindow(in float windowZ, in vec3 eyeDirection, in vec2 depthrange)

{

float ndcZ = (2.0 * windowZ - depthrange.x - depthrange.y) / (depthrange.y - depthrange.x);

float eyeZ = persMatrix[3][2] / ((persMatrix[2][3] * ndcZ) - persMatrix[2][2]);

return vec4(eyeDirection * eyeZ, 1);

}

void main()

{

vec4 eyeSpace = CalcEyeFromWindow(texture(depth, uv).x, eyeDirection, depthrange);

fragColor = eyeSpace.rbg;

}

Where my camera settings are:

float fov = glm::radians(60.0f);

float aspect = 800.0f / 600.0f;

And my uniforms equal:

uniform mat4 persMatrix = glm::perspective(fov, aspect, 0.1f, 100.0f)

uniform vec2 halfSizeNearPlane = glm::vec2(glm::tan(fov/2.0) * aspect, glm::tan(fov/2.0))

uniform vec2 depthrange = glm::vec2(0.0f, 1.0f)

uniform sampler2D depth is a GL_DEPTH24_STENCIL8 texture which has depth values from an earlier pass (if I linearize it and set fragColor = vec3(linearizedZ), it shows up like it should, so nothing seems wrong there).

The resulting image I'm getting doesn't at all look like it does when I save position in the G-buffer or when I use the inverse matrix transforms to reconstruct position:

https://i.imgur.com/s3dzzOU.png

(rendering a single 15x15x15 cube)

Is there any obvious mistake in my code? To me everything looks sound, but it feels like there has to be some small obvious mistake somewhere.

Thanks!