Problem with HDR rendering

Hello,

I would like to render an HDR image using OpenGL. To do so, I use a texture of type GLfloat with values in the range [0, 1], and I specify the internal format GL_RGBA32F when calling glTexImage2D:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, textureWidth, textureHeight, 0, GL_RGBA, GL_FLOAT, texture);
I read back the rendered image with:
glReadPixels( 0, 0, w, h, GL_RGBA, GL_FLOAT, renderedImage );

My problem is that the rendered image only has 1-byte precision, i.e. all values in the rendered image are multiples of 1/255 (0.003921).

During my tests I used a uniform texture equal to 1/sqrt(2) (0.707107) in each channel and obtained 0.705882 in the rendered image, which is exactly round(0.707107 * 255) / 255 = 180/255.
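Steps of 1/255 point to an 8-bit quantization somewhere in the pipeline. As a small diagnostic (a sketch that is not part of the program below), the per-channel depth of the framebuffer that glReadPixels reads from can be queried like this; on a typical desktop visual it reports 8 bits per channel:

GLint redBits = 0, greenBits = 0, blueBits = 0;
glGetIntegerv(GL_RED_BITS,   &redBits);   // bit depth of the currently bound color buffer
glGetIntegerv(GL_GREEN_BITS, &greenBits);
glGetIntegerv(GL_BLUE_BITS,  &blueBits);
std::cout << "framebuffer bits per channel: "
          << redBits << "/" << greenBits << "/" << blueBits << std::endl;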

I read that if the OpenGL implementation does not support the particular format and precision I choose, the driver will internally convert it into something it does support (see Common Mistakes - OpenGL Wiki), but when I query the internal format with glGetTexLevelParameteriv I get back GL_RGBA32F, as expected.

I used OpenGL 2.1 and tested with the following GPUs:
ATI Radeon HD 3470
NVIDIA GTS 250 (which is Shader Model 4.0 Compliant)
NVIDIA GeForce 310
All of them gave the same results.

Does anyone have an idea how to keep the precision?

Here is the code I used:

#include "GL/glut.h"
#include <math.h>
#include <cstdlib>   // for malloc/free
#include <iostream>

#define textureWidth 64
#define textureHeight 64
// My texture stores one GLfloat (4 bytes) per texel per channel
static GLfloat texture[textureHeight][textureWidth][4];

static GLuint texId;

void maketexture(void){

// My texture is a uniform color image
int i, j;
for(i = 0; i < textureHeight; ++i){
	for(j = 0; j < textureWidth; ++j){
		texture[i][j][0] = (GLfloat) 1/sqrt(2.0f);
		texture[i][j][1] = (GLfloat) 1/sqrt(2.0f);
		texture[i][j][2] = (GLfloat) 1/sqrt(2.0f);
		texture[i][j][3] = (GLfloat) 1.0f;
	}
}

}

void init(void){
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_FLAT);
glEnable(GL_DEPTH_TEST);
maketexture();
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);

glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

// Here I use the internal format GL_RGBA32F
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, textureWidth, textureHeight, 0, GL_RGBA, GL_FLOAT, texture);

}

void display(void){

int w = 100, h = 100;

glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat) w/(GLfloat) h, 1.0, 30.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, -2.0);

glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
int value;
glBindTexture(GL_TEXTURE_2D, texId);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &value);
std::cout << "decimal value of the internal format used : " << value << std::endl;
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 0.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 0.0);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0.0);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 0.0);
glEnd();
glFlush();
// My output image stores one GLfloat (4 bytes) per pixel per channel
GLfloat *renderedImage = (GLfloat*)malloc(w*h*4*sizeof(GLfloat));
glReadPixels( 0, 0, w, h, GL_RGBA, GL_FLOAT, renderedImage );
// I get the color value of a pixel in the textured area
std::cout << "rendered image at pixel (18,7) : " << std::endl;
std::cout << "R : " << renderedImage[(18+7*100)*4+0] << std::endl;
std::cout << "G : " << renderedImage[(18+7*100)*4+1] << std::endl;
std::cout << "B : " << renderedImage[(18+7*100)*4+2] << std::endl;
free(renderedImage); renderedImage = 0;
glDisable(GL_TEXTURE_2D);

}

int main(int argc, char *argv[]){

glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(100, 100);
glutInitWindowPosition(100, 100);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutMainLoop();

return 0;

}

Please use [ code]/[ /code] (without the space after '[') around source code to make it easier to read.

I would like to render an HDR image using OpenGL. To do so, I use a texture of type GLfloat with values in the range [0, 1], and I specify the internal format GL_RGBA32F when calling glTexImage2D:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, textureWidth, textureHeight, 0, GL_RGBA, GL_FLOAT, texture);
I read back the rendered image with:
glReadPixels( 0, 0, w, h, GL_RGBA, GL_FLOAT, renderedImage );

You are copying the contents of the application's default framebuffer (which only stores one unsigned byte per channel), so that won't give you more precision. To keep it, you'll have to render directly to your floating-point texture using a Framebuffer Object (FBO); see Framebuffer Object - OpenGL Wiki.
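Roughly, the idea is to create a second GL_RGBA32F texture, attach it to an FBO, draw the quad while that FBO is bound, and only then call glReadPixels. Here is a minimal sketch, assuming the EXT_framebuffer_object entry points are available on your driver (loaded through GLEW or a similar extension loader); fboId and fboTex are just placeholder names, not from your program:

GLuint fboId, fboTex;

// Color attachment: a float texture that will receive the rendering
glGenTextures(1, &fboTex);
glBindTexture(GL_TEXTURE_2D, fboTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, w, h, 0, GL_RGBA, GL_FLOAT, NULL);

// FBO with the float texture as color attachment 0
glGenFramebuffersEXT(1, &fboId);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
                          GL_TEXTURE_2D, fboTex, 0);
if(glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT) != GL_FRAMEBUFFER_COMPLETE_EXT)
    std::cerr << "FBO incomplete" << std::endl;

// ... draw the textured quad exactly as in display() ...

// glReadPixels now reads from the float attachment, so the values keep
// their 32-bit float precision
glReadPixels(0, 0, w, h, GL_RGBA, GL_FLOAT, renderedImage);

// Back to the window framebuffer
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);

Since your code enables GL_DEPTH_TEST, you will probably also want to attach a depth renderbuffer to the FBO (or disable depth testing while rendering into it).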

OK, I will try this. Sorry for the plain text, and thanks for your answer.