Apply my own colormap in the fragment shader

I have an image img (a 16-bit grayscale image whose values range from 0 to 65535) and a colormap colormap_file, which is an 8-bit RGB 256×256-pixel image.

Thanks to such a colormap I can assign an (R, G, B) triplet to every pixel of img: a 16-bit value v maps to row v / 256 and column v % 256 of the colormap (for example, v = 300 maps to row 1, column 44).

Currently, it is done in pure Python through this function:

import numpy as np
from PIL import Image


def transcodeThroughColormap(img, colormap_file):
    # load the 256x256x3 colormap and flatten it to a 1-D array of bytes
    colormap = np.array(Image.open(colormap_file))
    flat_colormap = colormap.flatten()

    # split each 16-bit value into a row (i) and a column (j) of the colormap
    idx = img
    i = np.floor(idx / 256)
    j = idx - (i * 256)

    # fetch the R, G and B components from the flattened colormap
    R = flat_colormap[(3 * (i * 256 + j)).astype('int')]
    G = flat_colormap[(3 * (i * 256 + j) + 1).astype('int')]
    B = flat_colormap[(3 * (i * 256 + j) + 2).astype('int')]

    rgbArray = np.zeros(shape=(img.shape[0], img.shape[1], 3))
    rgbArray[..., 0] = R
    rgbArray[..., 1] = G
    rgbArray[..., 2] = B

    return rgbArray
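As an aside, since i * 256 + j equals the original 16-bit value, the same lookup can be written much more compactly with NumPy fancy indexing. A minimal sketch (the function name is just illustrative), assuming the colormap loads as a 256×256×3 uint8 array:

import numpy as np
from PIL import Image

def transcode_vectorized(img, colormap_file):
    # reshape the 256x256x3 colormap into a 65536x3 lookup table
    lut = np.array(Image.open(colormap_file)).reshape(-1, 3)
    # each 16-bit pixel value indexes the table directly
    return lut[img]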

However, I would like to apply such a colormap in my fragment shader instead.

I suppose I will have to use two sampler2D uniforms in my fragment shader, but I do not know how to achieve the same result as in Python.

[QUOTE=neon29;1281556]
However, I would like to apply such a colormap in my fragment shader instead.

I suppose I will have to use two sampler2D uniforms in my fragment shader, but I do not know how to achieve the same result as in Python.[/QUOTE]

One option:


uniform usampler2D image_tex;
uniform sampler2D color_map;

in vec2 texcoord;

void main()
{
    uint value = texture(image_tex, texcoord).x;
    uvec2 uv = uvec2(value / 0x100, value % 0x100);
    vec4 color = texelFetch(color_map, uv, 0);
    gl_FragColor = color;
}

Note that the image texture needs to be of unsigned integer type, i.e. GL_R16UI. Magnification and minification filters need to be GL_NEAREST (linear interpolation isn’t meaningful for integer values).
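In PyOpenGL, uploading the image texture might look roughly like this (a sketch, assuming img is a 2-D uint16 NumPy array and tex is an already-generated texture name):

import numpy as np
from OpenGL.GL import *

glBindTexture(GL_TEXTURE_2D, tex)
# integer textures require nearest filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
img_data = np.ascontiguousarray(img, dtype=np.uint16)
# sized unsigned-integer internal format; data is single-channel unsigned shorts
glTexImage2D(GL_TEXTURE_2D, 0, GL_R16UI, img.shape[1], img.shape[0], 0,
             GL_RED_INTEGER, GL_UNSIGNED_SHORT, img_data)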

If the values in the image texture represent a continuous range and the colour map is continuous, then you could use a normalised format (i.e. GL_R16) for the image texture and use a 1-D texture for the colour map (however a size of 65536 isn’t guaranteed to be supported, so you may need to down-sample the colour map). E.g.


uniform sampler2D image_tex;
uniform sampler1D color_map;

in vec2 texcoord;

void main()
{
    float value = texture(image_tex, texcoord).x;
    vec4 color = texture(color_map, value);
    gl_FragColor = color;
}
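The down-sampling could be done on the CPU before upload. A rough NumPy/PyOpenGL sketch, assuming colormap is the 256×256×3 uint8 array and cmap_tex is a texture name (both names are illustrative); the 65536-entry table is reduced to 256 entries by keeping every 256th one:

import numpy as np
from OpenGL.GL import *

table = colormap.reshape(-1, 3)                 # 65536 x 3, row-major
small = np.ascontiguousarray(table[::256])      # keep every 256th entry -> 256 x 3

glBindTexture(GL_TEXTURE_1D, cmap_tex)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB8, 256, 0, GL_RGB, GL_UNSIGNED_BYTE, small)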

Or you could use a buffer texture for the colour map (a size of 65536 is guaranteed to be supported), which avoids the need to split the value into row and column. E.g.


uniform sampler2D image_tex;
uniform samplerBuffer color_map;

in vec2 texcoord;

void main()
{
    float value = texture(image_tex, texcoord).x;
    int idx = int(round(value * 0xFFFF));
    vec4 color = texelFetch(color_map, idx, 0);
    gl_FragColor = color;
}
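Setting it up from PyOpenGL could look something like the following sketch (names are illustrative). One caveat: three-component formats such as GL_RGB8 aren’t on the list of required buffer-texture formats, so the colour map is padded to RGBA here:

import numpy as np
from OpenGL.GL import *

# pad the 65536 x 3 colour map to RGBA; GL_RGBA8 is a required buffer-texture format
rgba = np.empty((65536, 4), dtype=np.uint8)
rgba[:, :3] = colormap.reshape(-1, 3)
rgba[:, 3] = 255

tbo = glGenBuffers(1)
glBindBuffer(GL_TEXTURE_BUFFER, tbo)
glBufferData(GL_TEXTURE_BUFFER, rgba.nbytes, rgba, GL_STATIC_DRAW)

cmap_tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_BUFFER, cmap_tex)
glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA8, tbo)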

Thanks for your answer.
However, it seems that texelFetch does not accept a uvec2, so I use an ivec2 instead.
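For example (a small GLSL sketch; depending on how the colour map is laid out, the two components may need to be swapped so that x is the column and y is the row):

uint value = texture(image_tex, texcoord).x;
ivec2 uv = ivec2(value / 256u, value % 256u);
vec4 color = texelFetch(color_map, uv, 0);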

I fill buffers like this:

img_data = np.ascontiguousarray(img.flatten(), dtype=np.uint16)
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, img.shape[0], img.shape[1], 0, GL_ALPHA, GL_R16UI, img_data)

and for the colormap one:

colormap_data = np.ascontiguousarray(colormap.flatten(), dtype=np.uint16)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, colormap.shape[0], colormap.shape[1], 0, GL_RGB, GL_UNSIGNED_BYTE, colormap_data)

But nothing appears on screen…

Yes, you need to convert from uint to int (or uvec to ivec) for texelFetch.

  1. Why 16-bit?
  2. GL_ALPHA is deprecated. Use GL_RED or GL_RED_INTEGER (or a sized version) for a single-channel texture.
  3. GL_R16UI isn’t valid for the type parameter.

It should be:


glTexImage2D(GL_TEXTURE_2D, 0, GL_R16UI, img.shape[0], img.shape[1], 0, GL_RED_INTEGER, GL_UNSIGNED_SHORT, img_data)

Here, you’re converting the data to np.uint16 but telling OpenGL that it’s GL_UNSIGNED_BYTE.
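For the colour map itself, the matching call would presumably be along these lines (a sketch, assuming an 8-bit RGB colormap image):

colormap_data = np.ascontiguousarray(colormap.flatten(), dtype=np.uint8)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, colormap.shape[0], colormap.shape[1], 0, GL_RGB,
             GL_UNSIGNED_BYTE, colormap_data)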

[QUOTE=GClements;1281604]Yes, you need to convert from uint to int (or uvec to ivec) for texelFetch.

  1. Why 16-bit? Because my values range from 0 to 65535, no?
  2. GL_ALPHA is deprecated. Use GL_RED or GL_RED_INTEGER (or a sized version) for a single-channel texture. I replaced it with GL_RED.
  3. GL_R16UI isn’t valid for the type parameter.

It should be:


glTexImage2D(GL_TEXTURE_2D, 0, GL_R16UI, img.shape[0], img.shape[1], 0, GL_RED_INTEGER, GL_UNSIGNED_SHORT, img_data)

I do not know why, but I managed to solve one issue by using texture() instead of texture2D() in my fragment shader, even though I thought they should yield the same result.

But I still get nothing on screen.

Here, you’re converting the data to np.uint16 but telling OpenGL that it’s GL_UNSIGNED_BYTE.[/QUOTE]

That was a copy/paste mistake; I actually wrote dtype=np.uint8 in my code.

Thus, I fill my two textures like this:


glBindTexture(GL_TEXTURE_2D, tileObj.TBO)

glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)

img_data = np.ascontiguousarray(tile.flatten(), dtype=np.uint16)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R16UI, tile.shape[0], tile.shape[1], 0, GL_RED_INTEGER,
             GL_UNSIGNED_SHORT, img_data)

# fill transcod buffer with the colormap
colormap = misc.imread('shadow_tile.png')
glBindTexture(GL_TEXTURE_2D, self.colormapBuffer)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)

colormap_data = np.ascontiguousarray(colormap.flatten(), dtype=np.uint8)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, colormap.shape[0], colormap.shape[1], 0, GL_RGB,
             GL_UNSIGNED_BYTE, colormap_data)


And I render using:

loc_pos = glGetAttribLocation(programID, "position")
glEnableVertexAttribArray(loc_pos)
glBindBuffer(GL_ARRAY_BUFFER, tile.VBO[0])
glVertexAttribPointer(loc_pos, 2, GL_DOUBLE, False, 0, ctypes.c_void_p(0))

loc_uv = glGetAttribLocation(programID, "texcoord")
glEnableVertexAttribArray(loc_uv)
glBindBuffer(GL_ARRAY_BUFFER, tile.VBO[1])
glVertexAttribPointer(loc_uv, 2, GL_DOUBLE, False, 0, ctypes.c_void_p(0))

glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, tile.TBO)
glUniform1i(glGetUniformLocation(1, 'texSonar'), 0)

glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, self.colormapBuffer)
glUniform1i(glGetUniformLocation(1, 'transcodMap'), 1)

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, tile.EBO)
glDrawElements(GL_TRIANGLES, tile.indices.size, GL_UNSIGNED_INT, ctypes.c_void_p(0))

The only thing that stands out is the hard-coded 1 rather than programID in the glGetUniformLocation() calls.
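i.e. presumably those two calls should read:

glUniform1i(glGetUniformLocation(programID, 'texSonar'), 0)
glUniform1i(glGetUniformLocation(programID, 'transcodMap'), 1)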

I solved it by using texture() instead of texture2D() in my fragment shader. I do not know why that works, however.

texture2D() is just an obsolete alias for a particular overload of texture().

It’s possible that using it forces the use of the compatibility profile, which may affect other things.
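If you want to stay on the core profile, the first shader above could be written along these lines (a sketch; the uniform and varying names match the earlier example):

#version 330 core

uniform usampler2D image_tex;
uniform sampler2D color_map;

in vec2 texcoord;
out vec4 fragColor;

void main()
{
    uint value = texture(image_tex, texcoord).x;
    ivec2 uv = ivec2(value / 256u, value % 256u);
    fragColor = texelFetch(color_map, uv, 0);
}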