Trouble Implementing Dynamic Environment Mapping

I’m trying to render a scene that includes a reflective sphere. As I understand it, the technique is to render everything except the sphere, capture six images of the scene from the sphere’s center, store them as a cube map texture, and then sample that texture when drawing the sphere. In theory my code does this; in practice it doesn’t work.

I think the issue is in the variables I pass to the shaders. I’ve only recently started using multiple shaders, and I’m not clear on whether I should be setting uniforms on my main shader or on my environment-map shader here.

This is my function for generating the texture:

void sphere()
{
	glUseProgram(programs[2]); //this is the enviro mapping shader
	vec4 oldEye = eye; //save the current camera coords
	vec4 oldAt = at;
	vec4 oldUp = v;

	mat4 oldProj = projMatrix;

	eye = vec4(-1, .4, 0, 1); //center of sphere (w = 1 since this is a point)
	projMatrix = Perspective(90, 1, 0.1, 200); //get a 90 degree shot of the scene
	glUniformMatrix4fv(sphereProj, 1, GL_TRUE, projMatrix); //send proj to shader
	mat4 modelmatrix = mat4(1);

	glViewport(0, 0, 512, 512);

	//texture init was here; it's now in initTex(), which makes more sense

	glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
	glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);

	glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 512, 512);

	for (int i = 0; i < 6; i++) //this renders the 6 sides of the texmap
	{
		glBindTexture(GL_TEXTURE_CUBE_MAP, textures[4]); 

		switch (i)
		{
			case 0: //right
				at = vec4(1, 0, 0, 1);
				v = vec4(0, -1, 0, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X, textures[4], 0);
				break;
			case 1: //left
				at = vec4(-1, 0, 0, 1);
				v = vec4(0, -1, 0, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_NEGATIVE_X, textures[4], 0);
				break;
			case 2: //top
				at = vec4(0, 1, 0, 1);
				v = vec4(0, 0, 1, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_Y, textures[4], 0);
				break;
			case 3: //bottom
				at = vec4(0, -1, 0, 1);
				v = vec4(0, 0, -1, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, textures[4], 0);
				break;
			case 4: //front
				at = vec4(0, 0, 1, 1);
				v = vec4(0, -1, 0, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_Z, textures[4], 0);
				break;
			case 5: //back
				at = vec4(0, 0, -1, 1);
				v = vec4(0, -1, 0, 1);
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, textures[4], 0);
				break;
			default:
				break;
		}

		if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
			std::cout << "error\n";

		glUseProgram(programs[0]);

		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
		mat4 sphereViewMat = LookAt(eye, at, v); //set the camera for this face
		glUniformMatrix4fv(sphereView, 1, GL_TRUE, sphereViewMat); //not sure this is right - it doesn't seem to make sense to use the main shader's uniform here
		drawObj(); //draw cubes, ground, skybox
	}
	glBindFramebuffer(GL_FRAMEBUFFER, 0);

	eye = oldEye;
	v = oldUp;
	at = oldAt;
	viewMatrix = LookAt(eye, at, v);
	projMatrix = oldProj;

	glViewport(0, 0, width, height);

}

Here is the vertex shader for environment mapping:

#version 150

out vec3 R;
in vec4 vPosition;
in vec4 vNormal;

uniform mat4 modelview;
uniform mat4 projection;

void main() 
{ 
	gl_Position = projection*modelview*vPosition;

	vec4 eyePos = modelview*vPosition;

	vec4 NN = modelview*vNormal;
	vec3 N = NN.xyz;
	R = reflect(eyePos.xyz, N);
}

Here is the fragment shader:

#version 150
#extension GL_NV_shadow_samplers_cube : enable

in vec3 R;
uniform samplerCube texMap;
out vec4 fColor;

void main() 
{ 
	vec4 texColor = textureCube(texMap, R);
	fColor = texColor;
} 

I’d suggest either rendering the cube-map faces on screen or dumping them out to image files, so that you can see whether the problem lies in generating the cube map texture or in using it.
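
For example, a quick-and-dirty way to dump one face to a PPM file looks roughly like this (a sketch, untested; dumpCubeFace() is just a name I made up, and it assumes the 512x512 faces from your code):

//rough sketch, untested: read one cube-map face back and write it as a PPM
//(needs <vector> and <fstream>)
void dumpCubeFace(GLuint tex, GLenum face, const char* filename)
{
	const int size = 512; //matches the 512x512 faces above
	std::vector<unsigned char> pixels(size * size * 3);

	glBindTexture(GL_TEXTURE_CUBE_MAP, tex);
	glGetTexImage(face, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels.data());

	std::ofstream out(filename, std::ios::binary);
	out << "P6\n" << size << " " << size << "\n255\n";
	out.write(reinterpret_cast<const char*>(pixels.data()), pixels.size());
}

//e.g. dumpCubeFace(textures[4], GL_TEXTURE_CUBE_MAP_POSITIVE_X, "posx.ppm");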

From a quick glance at the code:

- You probably shouldn’t be binding textures[4] as a source texture while rendering into its faces.
- You need to call glFramebufferRenderbuffer() to attach the depth buffer to the framebuffer (glCheckFramebufferStatus() won’t report this, as a depth buffer is optional); see the sketch after this list.
- You need to call glViewport() when switching between a window and an FBO (unless they both have identical dimensions).
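
For the depth attachment, the one-time setup (wherever you create the FBO, presumably in initTex()) would look roughly like this. I’m guessing at the surrounding code; the glFramebufferRenderbuffer() call is the part that’s missing:

	//one-time FBO setup (sketch; assumes the 512x512 faces used above)
	glGenFramebuffers(1, &framebuffer);
	glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);

	glGenRenderbuffers(1, &depthrenderbuffer);
	glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
	glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 512, 512);

	//the missing call: attach the depth renderbuffer to the FBO
	glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthrenderbuffer);

	glBindFramebuffer(GL_FRAMEBUFFER, 0);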

You probably shouldn’t be binding textures[4] as a source texture while rendering into its faces.

Where should I put it, then?

You need to call glFramebufferRenderbuffer() to attach the depth buffer to the framebuffer (glCheckFramebufferStatus() won’t report this, as a depth buffer is optional).

I added glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthrenderbuffer) after glRenderbufferStorage(…).

In case it wasn’t clear: the glFramebufferTexture2D() calls are fine; it’s the glBindTexture() call that shouldn’t be there.

The bindings of textures to texture units should be the same as when you’re rendering the scene normally.
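
In other words, the render-to-cubemap loop should bind the same textures that drawObj() normally samples from, and the cube map only gets bound when you draw the reflective sphere afterwards. Roughly (a sketch; sceneTexture and drawSphere() are placeholders for whatever you actually use):

	for (int i = 0; i < 6; i++)
	{
		//attach the face being rendered into (it's a render target here, not a source)
		glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
			GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, textures[4], 0);

		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

		//bind whatever the scene normally samples from, same as in the main pass
		glBindTexture(GL_TEXTURE_2D, sceneTexture);

		//... set the view/projection uniforms for this face ...
		drawObj();
	}
	glBindFramebuffer(GL_FRAMEBUFFER, 0);

	//only when drawing the reflective sphere itself is the cube map bound as a source
	glUseProgram(programs[2]);
	glBindTexture(GL_TEXTURE_CUBE_MAP, textures[4]);
	drawSphere(); //placeholder for however you draw the sphere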