Bram Ridder

12-12-2017, 09:53 AM

Hi all,

I am working on my own 3D engine and I recently ran into an issue when trying to combine different light types using a single shader. Multiple lights of a single type work fine, but when I combine a Point light (with cube map shadows), Directional light (with 2D shadows), and Spot lights (also with 2D shadows) things started to break. I found a solution to this problem, but I wonder if there is a better way of doing it. Let me first summarise my initial solution that failed and then talk about the solution I found.

I pass an array of lights to the shader that is used to render a mesh. This array is defined as follows in my shader:

#version 420

// Fixed-size light array; unused slots are flagged via `enabled`.
const int nr_lights = 5;

const int DIRECTIONAL_LIGHT = 0;

const int SPOT_LIGHT = 1;

const int POINT_LIGHT = 2;

// Per-light parameters, including both shadow samplers.
// NOTE(review): opaque sampler types inside a uniform struct array must be
// indexed with a dynamically uniform expression, and every slot needs a
// complete texture bound — even disabled slots (hence the 1x1 dummies).
struct light {

int type; // one of DIRECTIONAL_LIGHT / SPOT_LIGHT / POINT_LIGHT

bool enabled; // false marks an unused slot

vec4 position; // world-space light position

vec4 diffuse;

vec4 ambient;

vec4 specular;

mat4 shadow_matrix; // presumably world -> light clip space for the 2D shadow lookup — confirm against vertex shader

float constant_attenuation;

float linear_attenuation;

float quadratic_attenuation;

vec3 direction; // spot/directional light direction

float light_angle; // spot cone half-angle, in degrees (converted to radians in the loop)

samplerCube cube_depth_texture; // point-light shadows: linearised distance stored in .r

sampler2DShadow depth_texture; // spot/directional shadows (projective compare)

};

uniform light lights[nr_lights];

For spot lights and directional lights I use a 2D shadow sampler to project the depth values. For point lights I created a cube texture which contains the linearised depth values. The beef of the lighting calculations is in the fragment shader and reads as follows:

// Accumulate the contribution of each light. In this first version the
// shadow-map lookups sit inside divergent branches — the non-uniform
// control flow the post is about.
for (int i = 0; i < lights.length(); ++i)

{

if (!lights[i].enabled)

{

continue; // skip unused light slots

}

vec4 halfVector = normalize(H[i]);

vec4 lightVector = normalize(L[i]);

// Lambertian term; back-facing fragments get ambient/emissive only.
float dotValue = max(dot(normalVector, lightVector), 0.0);

if (dotValue > 0.0)

{

float distance = length(lights[i].position - worldPos);

float intensity = 1.0;

// Directional lights have no distance falloff.
if (lights[i].type != DIRECTIONAL_LIGHT)

intensity = 1.0 / (lights[i].constant_attenuation + lights[i].linear_attenuation * distance + lights[i].quadratic_attenuation * distance * distance);

vec4 ambient = material_ambient * lights[i].ambient;

bool inLight = true;

// Spot cone test: fragments outside the cone receive no diffuse/specular.
if (lights[i].type == SPOT_LIGHT)

{

vec3 nLightToVertex = vec3(normalize(worldPos - lights[i].position));

float angleLightToFrag = dot(nLightToVertex, normalize(lights[i].direction));

float radLightAngle = lights[i].light_angle * 3.141592 / 180.0; // degrees -> radians

if (angleLightToFrag < cos(radLightAngle))

inLight = false;

}

if (inLight)

{

float shadowf = 1;

// NOTE(review): these lookups execute in non-uniform control flow, so the
// implicit-LOD / projective samples have undefined results — the failure
// mode described in the post.
if (lights[i].type == SPOT_LIGHT || lights[i].type == DIRECTIONAL_LIGHT)

{

shadowf = textureProj(lights[i].depth_texture, shadow_coord[i]);

}

else if(lights[i].type == POINT_LIGHT)

{

// Compare fragment-to-light distance with the stored linearised distance.
float sampled_distance = texture(lights[i].cube_depth_texture, direction[i].xyz).r;

float distance = length(direction[i]); // shadows the outer `distance`

if (distance > sampled_distance + 0.1)

shadowf = 0.0; // 0.1 bias guards against shadow acne

}

vec4 diffuse = dotValue * lights[i].diffuse * material_diffuse;

vec4 specular = pow(max(dot(normalVector, halfVector), 0.0), 10.0) * material_specular * lights[i].specular;

outColor += intensity * shadowf * (diffuse + specular * 100);

}

outColor += intensity * ambient; // ambient applies even when fully shadowed

}

}

outColor += material_emissive;

This clearly does not work due to non-uniform control flow (a term I only learned about yesterday :)).

So, what I have done is to move all the texture lookups out of the non-uniform control flow. However, this means that I need to provide depth textures for all lights (even if they are not used for rendering) and sample both the cube and 2dShadow textures. Let me show you the updated fragment shader bit:

// Per-light shading, version 2: every shadow-map lookup is hoisted to the
// top of the loop body so the samples execute in uniform control flow.
// Trade-off: all nr_lights slots need complete textures bound (1x1 dummies
// for unused slots), and both samplers are read for every light.
for (int i = 0; i < lights.length(); ++i)
{
    // Sampled unconditionally -> dynamically uniform control flow, so the
    // implicit-LOD / projective lookups are well defined.
    float spot_shadowf = textureProj(lights[i].depth_texture, shadow_coord[i]);
    float sampled_distance = texture(lights[i].cube_depth_texture, direction[i].xyz).r;

    if (!lights[i].enabled)
    {
        continue; // unused slot (dummy textures were still sampled above)
    }

    vec4 halfVector = normalize(H[i]);
    vec4 lightVector = normalize(L[i]);

    // Lambertian term; back-facing fragments get ambient/emissive only.
    float dotValue = max(dot(normalVector, lightVector), 0.0);
    if (dotValue > 0.0)
    {
        float distance = length(lights[i].position - worldPos);
        float intensity = 1.0;

        // Directional lights have no distance falloff.
        if (lights[i].type != DIRECTIONAL_LIGHT)
            intensity = 1.0 / (lights[i].constant_attenuation + lights[i].linear_attenuation * distance + lights[i].quadratic_attenuation * distance * distance);

        vec4 ambient = material_ambient * lights[i].ambient;

        // Spot cone test: fragments outside the cone receive no diffuse/specular.
        bool inLight = true;
        if (lights[i].type == SPOT_LIGHT)
        {
            vec3 nLightToVertex = vec3(normalize(worldPos - lights[i].position));
            float angleLightToFrag = dot(nLightToVertex, normalize(lights[i].direction));
            float radLightAngle = lights[i].light_angle * 3.141592 / 180.0; // degrees -> radians

            if (angleLightToFrag < cos(radLightAngle))
            {
                inLight = false;
            }
        }

        if (inLight)
        {
            float shadowf = 1.0;

            // FIX: this version originally tested only SPOT_LIGHT here, which
            // silently dropped shadows for directional lights — version 1
            // shadowed both types via textureProj. Both types use the
            // pre-sampled 2D shadow factor.
            if (lights[i].type == SPOT_LIGHT || lights[i].type == DIRECTIONAL_LIGHT)
            {
                shadowf = spot_shadowf;
            }
            else if (lights[i].type == POINT_LIGHT)
            {
                // Compare fragment-to-light distance with the stored
                // linearised distance; 0.1 bias guards against shadow acne.
                // (Renamed from `distance` to avoid shadowing the outer local.)
                float point_distance = length(direction[i]);
                if (point_distance > sampled_distance + 0.1)
                    shadowf = 0.0;
            }

            vec4 diffuse = dotValue * lights[i].diffuse * material_diffuse;
            vec4 specular = pow(max(dot(normalVector, halfVector), 0.0), 10.0) * material_specular * lights[i].specular;

            outColor += intensity * shadowf * (diffuse + specular * 100);
        }

        outColor += intensity * ambient; // ambient applies even when fully shadowed
    }
}

outColor += material_emissive;

This works! In my engine I create two dummy shadow textures of size 1x1: one is a GL_TEXTURE_2D stored as a GL_DEPTH_COMPONENT, the other is a GL_TEXTURE_CUBE_MAP that only stores GL_RED values. When fewer than 5 lights are needed to render a mesh I pass these values to the cube_depth_texture and depth_texture values of the respective light and set the enabled flag to false.

While this does work, it creates a lot of overhead. In the worst case, when no lights are being used, it will still sample 10 textures!

Is there a better way around this issue? My engine currently does forward rendering, it is not clear to me whether using a G-Buffer provides a cleaner solution. If I can I would like to stick to forward rendering, so any solution and comments you have are greatly appreciated.

Many thanks!

Bram

P.S. For those interested, my 3D engine Dreaded Portal Engine can be found here: http://bramridder.com/index.php/personal/personal_projects/dreaded-portal-engine

I am working on my own 3D engine and I recently ran into an issue when trying to combine different light types using a single shader. Multiple lights of a single type work fine, but when I combine a Point light (with cube map shadows), Directional light (with 2D shadows), and Spot lights (also with 2D shadows) things started to break. I found a solution to this problem, but I wonder if there is a better way of doing it. Let me first summarise my initial solution that failed and then talk about the solution I found.

I pass an array of lights to the shader that is used to render a mesh. This array is defined as follows in my shader:

#version 420

// Fixed-size light array; unused slots are flagged via `enabled`.
const int nr_lights = 5;

const int DIRECTIONAL_LIGHT = 0;

const int SPOT_LIGHT = 1;

const int POINT_LIGHT = 2;

// Per-light parameters, including both shadow samplers.
// NOTE(review): opaque sampler types inside a uniform struct array must be
// indexed with a dynamically uniform expression, and every slot needs a
// complete texture bound — even disabled slots (hence the 1x1 dummies).
struct light {

int type; // one of DIRECTIONAL_LIGHT / SPOT_LIGHT / POINT_LIGHT

bool enabled; // false marks an unused slot

vec4 position; // world-space light position

vec4 diffuse;

vec4 ambient;

vec4 specular;

mat4 shadow_matrix; // presumably world -> light clip space for the 2D shadow lookup — confirm against vertex shader

float constant_attenuation;

float linear_attenuation;

float quadratic_attenuation;

vec3 direction; // spot/directional light direction

float light_angle; // spot cone half-angle, in degrees (converted to radians in the loop)

samplerCube cube_depth_texture; // point-light shadows: linearised distance stored in .r

sampler2DShadow depth_texture; // spot/directional shadows (projective compare)

};

uniform light lights[nr_lights];

For spot lights and directional lights I use a 2D shadow sampler to project the depth values. For point lights I created a cube texture which contains the linearised depth values. The beef of the lighting calculations is in the fragment shader and reads as follows:

// Accumulate the contribution of each light. In this first version the
// shadow-map lookups sit inside divergent branches — the non-uniform
// control flow the post is about.
for (int i = 0; i < lights.length(); ++i)

{

if (!lights[i].enabled)

{

continue; // skip unused light slots

}

vec4 halfVector = normalize(H[i]);

vec4 lightVector = normalize(L[i]);

// Lambertian term; back-facing fragments get ambient/emissive only.
float dotValue = max(dot(normalVector, lightVector), 0.0);

if (dotValue > 0.0)

{

float distance = length(lights[i].position - worldPos);

float intensity = 1.0;

// Directional lights have no distance falloff.
if (lights[i].type != DIRECTIONAL_LIGHT)

intensity = 1.0 / (lights[i].constant_attenuation + lights[i].linear_attenuation * distance + lights[i].quadratic_attenuation * distance * distance);

vec4 ambient = material_ambient * lights[i].ambient;

bool inLight = true;

// Spot cone test: fragments outside the cone receive no diffuse/specular.
if (lights[i].type == SPOT_LIGHT)

{

vec3 nLightToVertex = vec3(normalize(worldPos - lights[i].position));

float angleLightToFrag = dot(nLightToVertex, normalize(lights[i].direction));

float radLightAngle = lights[i].light_angle * 3.141592 / 180.0; // degrees -> radians

if (angleLightToFrag < cos(radLightAngle))

inLight = false;

}

if (inLight)

{

float shadowf = 1;

// NOTE(review): these lookups execute in non-uniform control flow, so the
// implicit-LOD / projective samples have undefined results — the failure
// mode described in the post.
if (lights[i].type == SPOT_LIGHT || lights[i].type == DIRECTIONAL_LIGHT)

{

shadowf = textureProj(lights[i].depth_texture, shadow_coord[i]);

}

else if(lights[i].type == POINT_LIGHT)

{

// Compare fragment-to-light distance with the stored linearised distance.
float sampled_distance = texture(lights[i].cube_depth_texture, direction[i].xyz).r;

float distance = length(direction[i]); // shadows the outer `distance`

if (distance > sampled_distance + 0.1)

shadowf = 0.0; // 0.1 bias guards against shadow acne

}

vec4 diffuse = dotValue * lights[i].diffuse * material_diffuse;

vec4 specular = pow(max(dot(normalVector, halfVector), 0.0), 10.0) * material_specular * lights[i].specular;

outColor += intensity * shadowf * (diffuse + specular * 100);

}

outColor += intensity * ambient; // ambient applies even when fully shadowed

}

}

outColor += material_emissive;

This clearly does not work due to non-uniform control flow (a term I only learned about yesterday :)).

So, what I have done is to move all the texture lookups out of the non-uniform control flow. However, this means that I need to provide depth textures for all lights (even if they are not used for rendering) and sample both the cube and 2dShadow textures. Let me show you the updated fragment shader bit:

// Per-light shading, version 2: every shadow-map lookup is hoisted to the
// top of the loop body so the samples execute in uniform control flow.
// Trade-off: all nr_lights slots need complete textures bound (1x1 dummies
// for unused slots), and both samplers are read for every light.
for (int i = 0; i < lights.length(); ++i)
{
    // Sampled unconditionally -> dynamically uniform control flow, so the
    // implicit-LOD / projective lookups are well defined.
    float spot_shadowf = textureProj(lights[i].depth_texture, shadow_coord[i]);
    float sampled_distance = texture(lights[i].cube_depth_texture, direction[i].xyz).r;

    if (!lights[i].enabled)
    {
        continue; // unused slot (dummy textures were still sampled above)
    }

    vec4 halfVector = normalize(H[i]);
    vec4 lightVector = normalize(L[i]);

    // Lambertian term; back-facing fragments get ambient/emissive only.
    float dotValue = max(dot(normalVector, lightVector), 0.0);
    if (dotValue > 0.0)
    {
        float distance = length(lights[i].position - worldPos);
        float intensity = 1.0;

        // Directional lights have no distance falloff.
        if (lights[i].type != DIRECTIONAL_LIGHT)
            intensity = 1.0 / (lights[i].constant_attenuation + lights[i].linear_attenuation * distance + lights[i].quadratic_attenuation * distance * distance);

        vec4 ambient = material_ambient * lights[i].ambient;

        // Spot cone test: fragments outside the cone receive no diffuse/specular.
        bool inLight = true;
        if (lights[i].type == SPOT_LIGHT)
        {
            vec3 nLightToVertex = vec3(normalize(worldPos - lights[i].position));
            float angleLightToFrag = dot(nLightToVertex, normalize(lights[i].direction));
            float radLightAngle = lights[i].light_angle * 3.141592 / 180.0; // degrees -> radians

            if (angleLightToFrag < cos(radLightAngle))
            {
                inLight = false;
            }
        }

        if (inLight)
        {
            float shadowf = 1.0;

            // FIX: this version originally tested only SPOT_LIGHT here, which
            // silently dropped shadows for directional lights — version 1
            // shadowed both types via textureProj. Both types use the
            // pre-sampled 2D shadow factor.
            if (lights[i].type == SPOT_LIGHT || lights[i].type == DIRECTIONAL_LIGHT)
            {
                shadowf = spot_shadowf;
            }
            else if (lights[i].type == POINT_LIGHT)
            {
                // Compare fragment-to-light distance with the stored
                // linearised distance; 0.1 bias guards against shadow acne.
                // (Renamed from `distance` to avoid shadowing the outer local.)
                float point_distance = length(direction[i]);
                if (point_distance > sampled_distance + 0.1)
                    shadowf = 0.0;
            }

            vec4 diffuse = dotValue * lights[i].diffuse * material_diffuse;
            vec4 specular = pow(max(dot(normalVector, halfVector), 0.0), 10.0) * material_specular * lights[i].specular;

            outColor += intensity * shadowf * (diffuse + specular * 100);
        }

        outColor += intensity * ambient; // ambient applies even when fully shadowed
    }
}

outColor += material_emissive;

This works! In my engine I create two dummy shadow textures of size 1x1: one is a GL_TEXTURE_2D stored as a GL_DEPTH_COMPONENT, the other is a GL_TEXTURE_CUBE_MAP that only stores GL_RED values. When fewer than 5 lights are needed to render a mesh I pass these values to the cube_depth_texture and depth_texture values of the respective light and set the enabled flag to false.

While this does work, it creates a lot of overhead. In the worst case, when no lights are being used, it will still sample 10 textures!

Is there a better way around this issue? My engine currently does forward rendering, it is not clear to me whether using a G-Buffer provides a cleaner solution. If I can I would like to stick to forward rendering, so any solution and comments you have are greatly appreciated.

Many thanks!

Bram

P.S. For those interested, my 3D engine Dreaded Portal Engine can be found here: http://bramridder.com/index.php/personal/personal_projects/dreaded-portal-engine