Selectively applying postprocessing on certain objects?

Greetings!

I have a simple 3D scene with: 2 crates, a plane/floor and an MD2 animated model.

What I want to do is:

  • render the crates and plane normally, without any effects (a simple pass-through shader that just samples a color texture; no lighting etc.)
  • render the model to an off-screen render texture, apply postprocessing effects to that texture, and then draw it to the screen as a fullscreen quad

The problem that I think I’m having is:

  • After rendering the crates and plane normally, and then the postprocessed model on top, the model is overwriting the pixels of the crates and plane

Please see this video demonstrating the problem:

I’m really not sure what the cause of this is. Is it really because the model is overwriting the other pixels, or is it something else? If I move the camera close to the crates/plane I do see them. If the model were really overwriting them, why am I seeing the crates/plane at all? I shouldn’t see them, even when I move close.

The first thing I suspected was a clipping issue, so I tried decreasing my camera’s near clipping plane and increasing the far one. That didn’t help.

I then tried clearing the background to yellow in my postprocessor and discarding any pure yellow pixels in the postprocessing shader:
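
(Roughly like this; a minimal sketch of the idea rather than my exact shader, with Texture and UV as placeholder names:)

// In the postprocessing fragment shader: drop any pixel that still
// holds the yellow clear colour.
vec3 Color = texture(Texture, UV).rgb;
if (Color == vec3(1.0, 1.0, 0.0))
    discard;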

It ‘sort of’ works, but it’s ugly. You can still see yellow pixels around the model’s edges, and there are also some depth issues between the crates and the model, as you can see in the video.

Here’s my postprocessor code:



void PP_SetupShader(post_processor *P, u32 Shader)
{
    P->Shader = Shader;
    ShaderUse(Shader);

    ShaderSetInt(Shader, "Texture", 0);

    // 3x3 sample offsets around the current texel for the convolution kernels
    r32 Offset = 0.025f;
    r32 Offsets[9 * 2] =
        {
            -Offset, -Offset,
            0,       -Offset,
            +Offset, -Offset,
            -Offset, 0,
            0,       0,
            +Offset, 0,
            -Offset, +Offset,
            0,       +Offset,
            +Offset, +Offset,
        };
    ShaderSetV2s(Shader, "Offsets", 9 * 2, (v2*)Offsets);

    // Gaussian blur kernel; divided by 16 below so the weights sum to 1
    GLfloat BlurKernel[] =
        {
            1.0f, 2.0f, 1.0f,
            2.0f, 4.0f, 2.0f,
            1.0f, 2.0f, 1.0f
        };
    ForCount(9) BlurKernel[i] /= 16;
    ShaderSetFloats(Shader, "BlurKernel", 9, BlurKernel);

    // Laplacian edge-detection kernel
    r32 EdgeKernel[] =
        {
            -1, -1, -1,
            -1, +8, -1,
            -1, -1, -1
        };
    ShaderSetFloats(Shader, "EdgeKernel", 9, EdgeKernel);
}

void PP_Initialize(post_processor *P, u32 Shader, u32 Width, u32 Height, r32 (*GetTime)(void), b32 GenDepthBuf)
{
    P->GetTime = GetTime;
    
    // Setup VBO and VAO
    {
        r32 Quad[] =
            {
                -1.0f, +1.0f,  0.0f, 1.0f,
                -1.0f, -1.0f,  0.0f, 0.0f,
                +1.0f, -1.0f,  1.0f, 0.0f,
                -1.0f, +1.0f,  0.0f, 1.0f,
                +1.0f, -1.0f,  1.0f, 0.0f,
                +1.0f, +1.0f,  1.0f, 1.0f
            };

        GLArrayBuffer(sizeof(Quad), &Quad, GL_STATIC_DRAW);

        P->VertexArray = GLBeginVertexArray();
        {
            GLAttribute(0, 2, GL_FLOAT, 4 * sizeof(r32), 0);
            GLAttribute(1, 2, GL_FLOAT, 4 * sizeof(r32), 2 * sizeof(r32));
        }
        GLEndVertexArray();
    }
    
    // Setup framebuffer, render texture attachment and depth/stencil render buffer
    {
        glGenFramebuffers(1, &P->Framebuffer);
        glBindFramebuffer(GL_FRAMEBUFFER, P->Framebuffer);

        glGenTextures(1, &P->RenderTexture);
        glBindTexture(GL_TEXTURE_2D, P->RenderTexture);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Width, Height, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glBindTexture(GL_TEXTURE_2D, 0);
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, P->RenderTexture, 0);
        
        if (GenDepthBuf)
        {
            u32 DepthRenderBuffer;
            glGenRenderbuffers(1, &DepthRenderBuffer);
            glBindRenderbuffer(GL_RENDERBUFFER, DepthRenderBuffer);
            glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, Width, Height);
            glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, DepthRenderBuffer);
        }

        if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
            Log("Error creating framebuffer in post processor!
");
        
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
    }
    
    PP_SetupShader(P, Shader);
}

u32 PP_BeginUsing(post_processor *P)
{
    glBindFramebuffer(GL_FRAMEBUFFER, P->Framebuffer);
    glClearColor(1, 1, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_DEPTH_TEST);
    return 1;
}

u32 PP_EndUsing(post_processor *P)
{
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
        
    ShaderUse(P->Shader);
    ShaderSetInt(P->Shader, "EffectsMask", P->EffectsMask);
    ShaderSetFloat(P->Shader, "Time", P->GetTime());
    
    GLBeginRenderGroup(P->VertexArray, P->RenderTexture);
        glDrawArrays(GL_TRIANGLES, 0, 6);
    GLEndRenderGroup();
    
    return 0;
}


Here’s the rendering code:



    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_DEPTH_TEST);
    
    m4 ModelToWorld;
    m4 WorldToView;
    m4 ViewToProjection;
    CameraUpdate(&Data->Camera, &Platform->Input, DeltaTime, &WorldToView, &ViewToProjection);

    u32 Shader = Data->Shaders.Main;
    ShaderUse(Shader);
    ShaderSetM4(Shader, "view", &WorldToView);
    ShaderSetM4(Shader, "projection", &ViewToProjection);

    GLBeginRenderGroup(Data->Models.Floor.VAO, Data->Models.Floor.Textures.Color);
    {
        ModelToWorld = m4_Identity;
        ShaderSetM4(Shader, "model", &ModelToWorld);
        glDrawArrays(GL_TRIANGLES, 0, 6);
    }
    GLEndRenderGroup();
    
    GLBeginRenderGroup(Data->Models.Crate.VAO, Data->Models.Crate.Textures.Color);
    {
        ModelToWorld = m4_Translate(&m4_Identity, V3(-3.0f, 0.0f, -1.0f));
        ShaderSetM4(Shader, "model", &ModelToWorld);
        glDrawArrays(GL_TRIANGLES, 0, 36);
        
        ModelToWorld = m4_Translate(&m4_Identity, V3(3.0f, 0.0f, 0.0f));
        ShaderSetM4(Shader, "model", &ModelToWorld);
        glDrawArrays(GL_TRIANGLES, 0, 36);
    }
    GLEndRenderGroup();
 
    PP_BeginUsing(&Data->PostProcessor);
    {    
        MD2UpdateAndRender(&Data->DrFreak, DeltaTime, Shader, V3(0, 1.95f, 0), 0.1f);
    }
    PP_EndUsing(&Data->PostProcessor);


Note that I’m really not doing anything special inside MD2UpdateAndRender besides a call to glDrawArrays. I can verify that the problem isn’t related to the MD2 model: I tried replacing the model render call with one of the crate draw calls and the issue is the same. But if anybody’s curious about how I’m rendering the model, let me know and I’ll add the code; I just didn’t want to paste a wall of it.

Here are some of the helpers I’ve been using:



// note: internal is just 'static'

internal inline void
GLBeginRenderGroup(u32 VAO, u32 Texture)
{
    glBindVertexArray(VAO);
    glBindTexture(GL_TEXTURE_2D, Texture);
}

internal inline void
GLEndRenderGroup()
{
    glBindVertexArray(0);
}

internal inline void
GLSetArrayBuffer(u32 ArrayBuffer, u32 Size, void *Data)
{
    glBindBuffer(GL_ARRAY_BUFFER, ArrayBuffer);
    glBufferSubData(GL_ARRAY_BUFFER, 0, Size, Data);
}

internal u32
GLArrayBuffer(u32 SizeInBytes, void *Data, u32 Mode)
{
    u32 ArrayBuffer;
    glGenBuffers(1, &ArrayBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, ArrayBuffer);
    glBufferData(GL_ARRAY_BUFFER, SizeInBytes, Data, Mode);
    return (ArrayBuffer);
}

internal inline void
GLAttribute(u32 Index, u32 Count, u32 Type = GL_FLOAT, u32 Stride = 0, u32 Offset = 0)
{
    glEnableVertexAttribArray(Index);
    glVertexAttribPointer(Index, Count, Type, GL_FALSE, Stride, (char *)0 + Offset);
}

internal inline u32
GLBeginVertexArray()
{
    u32 VertexArray;
    glGenVertexArrays(1, &VertexArray);
    glBindVertexArray(VertexArray);
    return (VertexArray);
}

internal inline void
GLEndVertexArray()
{
    glBindVertexArray(0);
}

How can I achieve my rendering setup successfully? I’m pretty sure I’m missing something obvious. Note that I know I could apply an effect shader to each object I want to pixelate etc., but it’s more efficient to render them all to one texture and apply the effects only once to that texture.

Any help/advice/pointers is appreciated!

Thanks!
-Keithster

How are you implementing occlusion? If you render everything to the same buffer with depth testing, the depth test will ensure that closer objects occlude farther ones. If you render some parts to an off-screen buffer, you’ll need to use some other approach, e.g.

  • Ensuring that the off-screen buffer’s depth values are used when compositing it with the default framebuffer, by copying the depth values to gl_FragDepth (see the sketch after this list).

  • Using the same depth buffer for both. This precludes rendering objects to the default framebuffer, as you can’t attach a renderbuffer or texture to the default framebuffer, and you can’t attach the default framebuffer’s depth buffer to a FBO. Instead, you’d need to use two FBOs and composite them to the default framebuffer. The two sets of objects will need to be rendered in disjoint phases so that overdraw is handled correctly.

  • Also rendering the objects to be post-processed to the default framebuffer with stencil writes enabled, then using stencil tests when compositing.
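
For the first option, the composite shader that draws the FBO’s image onto the default framebuffer would look roughly like this (a minimal sketch, untested; ColorTex, DepthTex and UV are placeholder names):

#version 330 core
in vec2 UV;
out vec4 FragColor;
uniform sampler2D ColorTex;  // the FBO's colour attachment
uniform sampler2D DepthTex;  // the FBO's depth attachment, bound as a texture

void main()
{
    FragColor = texture(ColorTex, UV);
    // Carry the off-screen depth along with the colour, so the default
    // framebuffer's depth test compares model depth against crate depth.
    gl_FragDepth = texture(DepthTex, UV).r;
}

Note that this requires the FBO’s depth attachment to be a depth texture; a renderbuffer (which your code currently creates) can’t be sampled in a shader.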

Thanks for the reply. I’m relatively new here. I’m not doing anything special occlusion-wise other than what I showed in the video. I do a glEnable(GL_DEPTH_TEST) before rendering to the default framebuffer and another before rendering to the off-screen FBO.

“by copying the depth values to gl_FragDepth”
Could you elaborate on that? Which pass should I render first, default or off-screen? How do I do the copy? Do I write out a texture with its fragments set to gl_FragDepth?

“you’d need to use two FBOs and composite them to the default framebuffer. The two sets of objects will need to be rendered in disjoint phases so that overdraw is handled correctly.”
So: two FBOs, each with its own depth and texture attachments. I render the crates/plane to the first one and the model to the second. I now have two off-screen render textures; I need to composite them onto the final fullscreen quad and draw that? How do I composite them?

[QUOTE=Keithster;1280861]Thanks for the reply. I’m relatively new here. I’m not doing anything special occlusion-wise other than what I showed in the video. I do a glEnable(GL_DEPTH_TEST) before rendering to the default framebuffer and another before rendering to the off-screen FBO.
[/QUOTE]
That isn’t sufficient if you have two framebuffers, each with a separate depth buffer. Objects rendered to the default framebuffer will only be occluded by other objects rendered to the default framebuffer; objects rendered to the FBO will only be occluded by other objects rendered to the FBO.

When rendering the FBO onto the default framebuffer, enable depth tests, and set gl_FragDepth to the value from the FBO’s depth buffer (which will need to have been bound as a source texture). Thus, pixels from the model will only be copied if they are closer than those from the crates. If you plan on rendering anything else to the default framebuffer afterwards, also enable depth writes so that the depth values are copied whenever pixels are copied. With this approach, it doesn’t matter which pass is rendered first; they can even be interleaved.

Both FBOs would use the same texture for their depth attachment so that it contains the nearest depth overall. Closer objects occlude farther objects regardless of whether they are rendered to the same framebuffer or to different framebuffers.
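
A minimal sketch of that sharing, assuming a depth texture rather than a renderbuffer (CrateFBO and ModelFBO are placeholder names):

// One depth texture...
u32 DepthTex;
glGenTextures(1, &DepthTex);
glBindTexture(GL_TEXTURE_2D, DepthTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, Width, Height, 0,
             GL_DEPTH_COMPONENT, GL_FLOAT, 0);

// ...attached as the depth buffer of both FBOs.
glBindFramebuffer(GL_FRAMEBUFFER, CrateFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, DepthTex, 0);
glBindFramebuffer(GL_FRAMEBUFFER, ModelFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, DepthTex, 0);

Since the depth buffer is shared, clear GL_DEPTH_BUFFER_BIT once per frame, not once per FBO.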

[QUOTE=Keithster;1280861]
I render the crates/plane to the first one and the model to the second. I now have two off-screen render textures; I need to composite them onto the final fullscreen quad and draw that? How do I composite them?[/QUOTE]
The colour buffer for the model (or whatever is rendered last) will need an alpha channel which is initially cleared to zero. Render the crates to one FBO, copy the result to the default framebuffer. Render the model to the other FBO (using the same depth buffer), post-process the colour buffer, render the result onto the default framebuffer with either blending or alpha tests enabled (this ensures that only pixels which were actually rendered will be updated). Or you can use stencilling instead of alpha. What matters is that you have some record of which pixels were rendered so that you don’t overwrite pixels rendered in the first pass with the background colour from the second pass.
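
In code, the second composite might look like this (a sketch; note the colour attachment must then be created with GL_RGBA rather than GL_RGB so that there is an alpha channel to blend against):

// Before rendering the model: clear the model FBO's colour to alpha == 0.
glBindFramebuffer(GL_FRAMEBUFFER, ModelFBO);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);

// ... render the model, run the post-processing ...

// Composite onto the default framebuffer. Pixels the model never touched
// still have alpha == 0 and leave the crates from the first pass intact.
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// ... draw the fullscreen quad ...
glDisable(GL_BLEND);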

This all assumes that the post-processing only affects the pixels which were rendered. If it’s an effect that extends outside of the rendered geometry, then you need to figure out what depth those extra pixels have.

Thanks for the explanations, I’ll give it a try and post back if I get any results or need more help.

But could you explain why I got the results I did, rendering the way I did? That is, why does the post-processed model appear correctly, while the other geometry appears only if I move the camera very close? If pixels from the second pass were overwriting pixels from the first pass, I shouldn’t be seeing anything even when the camera is close, right?

I seem to be lacking some fundamentals here. I’m trying to understand this:

“When rendering the FBO onto the default framebuffer, enable depth tests, and set gl_FragDepth to the value from the FBO’s depth buffer (which will need to have been bound as a source texture)”

“When rendering the FBO onto the default framebuffer”: by that, do you mean we render the default framebuffer first, then the off-screen FBO?
“enable depth tests”: glEnable(GL_DEPTH_TEST)?
“set gl_FragDepth to the value from the FBO’s depth buffer”: do you mean the gl_FragDepth of objects rendered to the default framebuffer is set to the depth values that the FBO outputs to a texture? But doesn’t that conflict with the first statement? We said we render the FBO ‘onto’ the default framebuffer (which I understood as: render the default framebuffer first, then the FBO), but now it sounds like the opposite: render the FBO first, then the default framebuffer.

I’m also not sure how to carry the depth buffer over from one framebuffer to the other… I looked online and saw this:


glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, 
                  GL_DEPTH_BUFFER_BIT, GL_NEAREST);

I’m not sure if they’re relevant to my problem. Could you give me some pointers to the right pieces of code/functions to use?

Yes.

Yes.

When rendering objects, you don’t set gl_FragDepth; you just allow the depth to be computed automatically.

When copying the post-processed image from FBO onto the default framebuffer, you read each pixel’s depth value from the texture which was used as the FBO’s depth buffer and store it in gl_FragDepth. So the depth tests which weren’t performed while rendering into the FBO (comparing the depth of the model to the depth of the crates) get performed during the copy instead.

You render the model onto the FBO, then you render the FBO’s image onto the default framebuffer.
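
Putting that together, the CPU side of the composite pass is roughly this (a sketch; DepthTex is the FBO’s depth texture, and the shader is the gl_FragDepth one sketched after the list of options, with its DepthTex sampler set to unit 1):

glBindFramebuffer(GL_FRAMEBUFFER, 0);
glEnable(GL_DEPTH_TEST);  // compare copied model depths against crate depths
glDepthMask(GL_TRUE);     // keep the copied depths for anything drawn later

glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, P->RenderTexture);  // colour
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, DepthTex);          // depth, read in the shader

glBindVertexArray(P->VertexArray);
glDrawArrays(GL_TRIANGLES, 0, 6);  // the shader writes gl_FragDepth per pixel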

[QUOTE=Keithster;1280870]
I’m also not sure how to carry the depth buffer over from one framebuffer to the other… I looked online and saw this:


glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, 
                  GL_DEPTH_BUFFER_BIT, GL_NEAREST);

I’m not sure if they’re relevant to my problem. Could you give me some pointers to the right pieces of code/functions to use?[/QUOTE]
Copying the default framebuffer’s depth buffer to the FBO’s depth buffer is another option, in addition to the three I listed. If you do that, you don’t need to bother setting gl_FragDepth when copying the FBO onto the default framebuffer, as rendering the model to the FBO will depth-test it against the crates.
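
Concretely, that’s your snippet with the direction reversed (a sketch; it assumes the default framebuffer and the FBO have matching depth formats, which glBlitFramebuffer requires for depth blits):

// After rendering the crates/plane to the default framebuffer, copy its
// depth into the FBO so the model gets depth-tested against the crates.
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height,
                  GL_DEPTH_BUFFER_BIT, GL_NEAREST);

// Then render the model into the FBO, and don't clear GL_DEPTH_BUFFER_BIT
// there afterwards, or the copied depths are lost.
glBindFramebuffer(GL_FRAMEBUFFER, fbo);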

[QUOTE=Keithster;1280869]
But could you explain to me why I got the results I got rendering the way I did? i.e. Why is it that the post-processed model appear correctly, and the other geometry appear only if I move the camera too close? I mean if pixels from the second pass are overwriting pixels from the first pass then I shouldn’t be seeing anything even if I move the camera close, right?[/QUOTE]
When you render the FBO’s image to the default framebuffer as a fullscreen quad, that quad has a depth. If depth tests are enabled at that stage, any parts of the crates which are closer than the quad will cause the depth test to fail for that pixel, meaning that you see the crate rather than the model.
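
A quick way to confirm this: disable the depth test around the quad draw in PP_EndUsing (a diagnostic rather than a fix: the model will then always overwrite the crates, which is your original problem, but it shows that the quad’s own depth is what is being tested):

glDisable(GL_DEPTH_TEST);  // the quad is a screen-space copy, not scene geometry
glDrawArrays(GL_TRIANGLES, 0, 6);
glEnable(GL_DEPTH_TEST);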