I am implementing motion blur based on the approach outlined in "Stupid OpenGL tricks".I also got the demo from NVidia OpenGL SDK.It is implemented using Cg while I am using GLSL.Needless to say it doesn't work for me after conversion,otherwise I wouldn't ask here for help.Also I am not sure I completely understand the logic of their implementation.

First here is Cg vertex shader:

Code :

// Motion blur vertex shader // sgg 10/2002 struct a2v { float4 coord : POSITION; // float4 prevCoord; float3 normal : NORMAL; float2 texture : TEXCOORD0; }; struct v2f { float4 hpos : POSITION; float3 velocity : TEXCOORD0; float3 col0 : COLOR0; }; v2f main(a2v In, uniform float4x4 modelView, uniform float4x4 prevModelView, uniform float4x4 modelViewProj, uniform float4x4 prevModelViewProj, uniform float3 halfWindowSize, uniform float blurScale = 1.0 ) { v2f Out; // transform previous and current position to eye space float4 P = mul(modelView, In.coord); float4 Pprev = mul(prevModelView, In.coord); // float4 Pprev = mul(prevModelView, In.prevCoord); // transform normal to eye space float3 N = mul((float3x3) modelView, In.normal); // calculate eye space motion vector float3 motionVector = P.xyz - Pprev.xyz; // calculate window space motion vector P = mul(modelViewProj, In.coord); // Pprev = mul(prevModelViewProj, In.prevCoord); Pprev = mul(prevModelViewProj, In.coord); Pprev = lerp(P, Pprev, blurScale); // choose previous or current position based on dot product between motion vector and normal float flag = dot(motionVector, N) > 0; float4 Pstretch = flag ? P : Pprev; Out.hpos = Pstretch; // do divide by W -> NDC coordinates P.xyz = P.xyz / P.w; Pprev.xyz = Pprev.xyz / Pprev.w; Pstretch.xyz = Pstretch.xyz / Pstretch.w; // calculate window space velocity float3 dP = (P.xyz - Pprev.xyz) * halfWindowSize.xyz; Out.velocity = dP; Out.col0.xy = 0.5 + (dP.xy * 0.005); return Out; }

And here is my GLSL conversion:

Code :

//========= INS ==================// layout(location = 0) in vec4 position; layout(location = 1) in vec2 uvs; layout(location = 2) in vec3 normal; //========= UNIFORMS ============// uniform mat4 MODEL_VIEW_MATRIX; uniform mat4 prevModelView; uniform mat4 PROJ; uniform float blurScale; uniform vec3 halfWindowSize; //======= OUTS ===================// out vec3 out_velocity; out vec4 outCol; out gl_PerVertex { vec4 gl_Position; }; void main(void){ // transform previous and current position to eye space: vec4 P = MODEL_VIEW_MATRIX * position;// vec4 Pprev = prevModelView * position; // transform normal to eye space: vec3 N = mat3(MODEL_VIEW_MATRIX) * normal; // calculate eye space motion vector vec3 motionVector = P.xyz - Pprev.xyz; // calculate window space motion vector P = PROJ * MODEL_VIEW_MATRIX * position;// Pprev = PROJ * prevModelView * position; Pprev = mix( Pprev,P, blurScale); // choose previous or current position based on dot product between motion vector and normal bool flag = dot(motionVector, N) > 0; vec4 Pstretch = flag ? P : Pprev; gl_Position = position; outCol = Pstretch; // do divide by W -> NDC coordinates P.xyz = P.xyz / P.w; Pprev.xyz = Pprev.xyz / Pprev.w; Pstretch.xyz = Pstretch.xyz / Pstretch.w; // calculate window space velocity vec3 dP = (P.xyz - Pprev.xyz) * halfWindowSize.xyz ; out_velocity = dP;

I have 2 problems with the vertex shader.

1) In the source code of the original example both modelView matrix and modelViewProjection matrix uniforms are identity matrices.I don't get it why? To me it seem that in such a case,because the matrices of the

previous frame are no identity,the velocity vector build would be wrong...

2) In CG vertex shader they fill varying "Out.hpos" with Pstretch which is used again after several lines to get transformed into 3D space...Why?

Now for the fragment shader:

Here is the original Cg fragment:

Code :

struct v2f { float4 wpos : WPOS; half3 velocity : TEXCOORD0; }; half4 main(v2f In, uniform samplerRECT sceneTex, uniform samplerRECT velTex, uniform half blurScale ) : COLOR { const float samples = 16; half2 wpos = In.wpos.xy; half2 velocity = In.velocity.xy * blurScale; // read velocity from texture coordinate // half2 velocity = texRECT(velTex, wpos) * blurScale; // read velocity from texture // sample into scene texture along motion vector const fixed w = 1.0 / samples; // weight fixed4 a = 0; for(float i=0; i<samples; i+=1) { half t = i / (samples-1); a = a + texRECT(sceneTex, wpos + velocity*t) * w; } return a; }

And this is my conversion :

Code :

layout(binding=0) uniform sampler2D COLOR_MAP_0; uniform float samples; uniform float blurScale; uniform vec3 halfWindowSize; // ========== INS ============// in vec3 out_velocity; out vec4 colorOut; in vec4 outCol; void main(void){ float w = 1.0 / samples; vec4 a=vec4(0.); vec2 velocity = out_velocity.xy * blurScale; ivec2 tsize = textureSize(COLOR_MAP_0, 0); vec2 screntc = gl_FragCoord.xy * (1.0 / vec2(tsize)); for(int i= 0; i<samples; i++) { float t = i / (samples-1); a = a + texture(COLOR_MAP_0, outCol.xy + velocity * t ) * w; } colorOut = a; }

What I fail to figure out here is how to convert

this method:

" a = a + texRECT(sceneTex, wpos + velocity*t) * w; "

to work with GLSL texture.I am trying like this :

"a = a + texture(COLOR_MAP_0, outCol.xy + velocity * t ) * w;"

But texRECT uses al 4 components of wpos while GLSL texture() has no such an option.

So how do I go about it?

Currently this motion blur technique is the only one which is relatively clear to me and which operates both on camera and objects but I will also be happy to get reference to other (may be better) approaches.

Thanks.