
I am working on a 2D texture based volume rendering project and have problems when I try to apply alpha testing and blending to my program. The program reads 2D frames from a single file and sets up one texture per frame. This is my program:

//
//  VolumeRendering.cpp
//  Volume_Rendering
//
//  Created by HOBBY on 4/5/14.
//  Copyright (c) 2014 Yihao Jiang. All rights reserved.
//
#include <GLTools.h>
#include <GL/glew.h>
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#include <fstream>
#include "VolumeRendering.h"
#include <GLMatrixStack.h>
#include <GLFrustum.h>
#include <GLGeometryTransform.h>

int m_uImageCount;
int m_uImageWidth;
int m_uImageHeight;
GLuint* m_puTextureIDs;

GLMatrixStack modelViewMatrix;
GLMatrixStack projectionMatrix;
GLFrame       cameraFrame;
GLFrame       objectFrame;
GLFrustum     viewFrustum;
GLBatch       myBatch;
GLGeometryTransform    transformPipeline;
GLShaderManager        shaderManager;

void ChangeSize(int w, int h)
{
    glViewport(0, 0, w, h);
    //viewFrustum.SetPerspective(35.0f, float(w) / float(h), 1.0f, 500.0f);
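    // Orthographic view volume: x and y in [-1, 1], z in [-2, 2]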
    viewFrustum.SetOrthographic(-1.0f, 1.0f, -1.0f, 1.0f, -2.0f, 2.0f);
    projectionMatrix.LoadMatrix(viewFrustum.GetProjectionMatrix());
    transformPipeline.SetMatrixStacks(modelViewMatrix, projectionMatrix);

}

void SetupRC()
{
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    shaderManager.InitializeStockShaders();

    glEnable(GL_DEPTH_TEST);

    glEnable(GL_ALPHA_TEST);
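    // GL_GREATER: keep only fragments whose alpha is strictly greater than the reference value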
    glAlphaFunc(GL_GREATER, 0.5f);

    const char* filePath = "/Users/WensarHobby/Documents/Codes/workspace/Volume_Rendering/Volume_Rendering/head256x256x109";
    if(!InitTextures2D(filePath))
    {
        printf("InitTexture error");
    }
}

bool InitTextures2D(const char* filePath)
{
    std::fstream myFile;
    myFile.open(filePath, std::ios::in | std::ios::binary);
    if(!myFile.is_open())
    {
        return false;
    }

    m_uImageCount = 109;
    m_uImageWidth = 256;
    m_uImageHeight = 256;

    // Holds the texture IDs
    m_puTextureIDs = new GLuint[m_uImageCount];

    // Holds the luminance buffer; unsigned char so that values above 127
    // do not read back as negative in the threshold comparison below
    unsigned char* chBuffer = new unsigned char[m_uImageWidth * m_uImageHeight];
    unsigned char* chRGBABuffer = new unsigned char[m_uImageWidth * m_uImageHeight * 4];
    glGenTextures(m_uImageCount, m_puTextureIDs);

    // Read each frame and construct a texture from it
    for( int nIndx = 0; nIndx < m_uImageCount; ++nIndx )
    {
        // Read the frame
        myFile.read(reinterpret_cast<char*>(chBuffer), m_uImageWidth*m_uImageHeight);

        // Convert the luminance data to RGBA data.
        // Here we simply put the same value into the R, G and B channels.
        // Usually for raw data, the alpha value is constructed from a
        // threshold value given by the user.

        for( int nPixel = 0; nPixel < m_uImageWidth*m_uImageHeight; ++nPixel )
        {
            chRGBABuffer[nPixel*4]   = chBuffer[nPixel];
            chRGBABuffer[nPixel*4+1] = chBuffer[nPixel];
            chRGBABuffer[nPixel*4+2] = chBuffer[nPixel];
            // Voxels darker than the threshold become fully transparent
            if( chBuffer[nPixel] < 20 )
            {
                chRGBABuffer[nPixel*4+3] = 0;
            }
            else
            {
                chRGBABuffer[nPixel*4+3] = 255;
            }
        }


        // Set the properties of the texture.
        glBindTexture( GL_TEXTURE_2D, m_puTextureIDs[nIndx] );
        glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_uImageWidth, m_uImageHeight , 0,
                 GL_RGBA, GL_UNSIGNED_BYTE,(GLvoid *) chRGBABuffer);
        glBindTexture( GL_TEXTURE_2D, 0 );
    }

    delete[] chBuffer;
    delete[] chRGBABuffer;
    return true;
}

void SpecialKeys(int key, int x, int y)
{
    glutPostRedisplay();
}

void RenderScene(void)
{
    static GLfloat vLightPos [] = { 1.0f, 1.0f, 0.0f };
    static GLfloat vWhite [] = { 1.0f, 1.0f, 1.0f, 1.0f };

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);


    modelViewMatrix.PushMatrix();
    M3DMatrix44f mCamera;
    cameraFrame.GetCameraMatrix(mCamera);
    modelViewMatrix.MultMatrix(mCamera);

//    M3DMatrix44f mObjectFrame;
//    objectFrame.GetMatrix(mObjectFrame);
//    modelViewMatrix.MultMatrix(mObjectFrame);

    // Draw the slices in reverse index order (intended back-to-front for blending)
    for(int nIndx = m_uImageCount - 1; nIndx >= 0; nIndx--)
    {
        glBindTexture(GL_TEXTURE_2D, m_puTextureIDs[nIndx]);
        MakeQuads(nIndx);
        shaderManager.UseStockShader(GLT_SHADER_TEXTURE_POINT_LIGHT_DIFF,
                                     transformPipeline.GetModelViewMatrix(),
                                     transformPipeline.GetProjectionMatrix(),
                                     vLightPos, vWhite, 0);
        myBatch.Draw();
        myBatch.Reset();
    }

    modelViewMatrix.PopMatrix();

    glutSwapBuffers();

}

void MakeQuads(int quads_index)
{
    // Cast before dividing: with integer division, quads_index/m_uImageCount
    // is always 0 and every slice would land on the same z = 1.0 plane
    GLfloat z = 1.0f - 2.0f * (GLfloat)quads_index / (GLfloat)m_uImageCount;

    myBatch.Begin(GL_QUADS, 4, 1);

    myBatch.Normal3f(0.0f, 0.0f, -1.0f);
    myBatch.MultiTexCoord2f(0, 0.0f, 0.0f);
    myBatch.Vertex3f(-1.0f, -1.0f, z);

    myBatch.Normal3f(0.0f, 0.0f, -1.0f);
    myBatch.MultiTexCoord2f(0, 1.0f, 0.0f);
    myBatch.Vertex3f(1.0f, -1.0f, z);

    myBatch.Normal3f(0.0f, 0.0f, -1.0f);
    myBatch.MultiTexCoord2f(0, 1.0f, 1.0f);
    myBatch.Vertex3f(1.0f, 1.0f, z);

    myBatch.Normal3f(0.0f, 0.0f, -1.0f);
    myBatch.MultiTexCoord2f(0, 0.0f, 1.0f);
    myBatch.Vertex3f(-1.0f, 1.0f, z);

    myBatch.End();
}

void ShutdownRC(void)
{
    glDeleteTextures(m_uImageCount, m_puTextureIDs);
    delete[] m_puTextureIDs;
}


int main(int argc, char* argv[])
{
    gltSetWorkingDirectory(argv[0]);
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_STENCIL);
    glutInitWindowSize(400, 400);
    glutCreateWindow("Volume_Rendering");
    glutReshapeFunc(ChangeSize);
    glutSpecialFunc(SpecialKeys);
    glutDisplayFunc(RenderScene);

    GLenum err = glewInit();
    if (GLEW_OK != err) {
        fprintf(stderr, "GLEW Error: %s\n", glewGetErrorString(err));
        return 1;
    }

    SetupRC();

    glutMainLoop();

    ShutdownRC();
    return 0;

}

As you can see, in the InitTextures2D function I convert the raw data to RGBA data. If the luminance value of a pixel is lower than 20, I assume it is black and set its alpha value to 0 (transparent). For all other pixels the alpha value is set to 255 (opaque). So in my opinion, since the alpha value of every pixel is either 0 or 255, the final result should look the same no matter what value I pass as the second parameter of glAlphaFunc(GL_GREATER, reference_value). I did some tests, but unfortunately the results are totally different.

[Screenshots: the results for glAlphaFunc(GL_GREATER, 0.1f), glAlphaFunc(GL_GREATER, 0.5f) and glAlphaFunc(GL_GREATER, 0.99f) all look different.]

Can anyone tell me the reason?


1 Answer

So in my opinion, since the alpha value of every pixel is either 0 or 255

That is not exactly true. What is true is that the alpha value of your texels will be either 0 or 255. However, you are using the GL_LINEAR texture filter, and you do not map your texels 1:1 to the output pixels, so you actually sample in between texels and can get any value between 0 and 255 when sampling across a "transparent"/"opaque" boundary.
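For example, a bilinear sample taken halfway between a transparent texel (alpha 0) and an opaque one (alpha 255) comes out around 127, i.e. roughly 0.5 after normalization: it passes glAlphaFunc(GL_GREATER, 0.1f) but fails glAlphaFunc(GL_GREATER, 0.99f), which matches the differences you observed. If you want the sampled alpha to stay strictly binary, a minimal sketch of the workaround is to use nearest-neighbour filtering in your texture setup (at the cost of blockier edges):

    // Sketch: GL_NEAREST instead of GL_LINEAR, so no interpolation happens
    // between texels and the sampled alpha is always exactly 0 or 255
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);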

derhass
  • Thanks for your explanation, it makes sense. However, I still have a problem with blending: when I apply it to this program, the result doesn't seem to change at all. Could you please tell me the reason (a sketch pulling this thread's suggestions together follows at the end)? I have already been working on this project for more than 3 days. Thank you very much – NJUHOBBY Apr 16 '14 at 21:51
  • By saying "apply blend" I mean adding glEnable(GL_BLEND) and glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) in function SetupRC() right after the alpha statements. – NJUHOBBY Apr 16 '14 at 21:55
  • I'm not sure. One thing I notice is that you seem to draw your "slices" front to back (as your loop is counting backwards, and you map the highest nIndx to -1), so you are not blending against the images which lie "behind", but against the black background. – derhass Apr 16 '14 at 22:06
  • I don't understand... why am I not blending against the images which lie "behind", but against the black background? What do you mean by "behind"? – NJUHOBBY Apr 16 '14 at 22:49
  • @NJUHOBBY: GL is not going to re-order your triangles for you based on their depth. Whatever order they arrive in, they will be blended with whatever was in the framebuffer immediately before. This means in the absolute worst case, if you draw the front-most polygon first, it is going to blend itself with the background, write its depth to the depth buffer and prevent anything else behind it from being drawn (because everything else will fail a depth test). For correct results, you need to draw in reverse order (objects with the greatest depth first). This sort of blending is order-dependent. – Andon M. Coleman Apr 16 '14 at 23:52
  • @NJUHOBBY: This behavior is only somewhat hidden from you by the fact that you use an alpha test here. In that worst-case example, if part of the polygon fails an alpha test then that part will not write its depth to the depth buffer, giving layers behind it a chance to be seen. In any event, the only solution to this is to properly order the layers in your volume rendering from back-to-front; disabling the depth test/writes is not going to do what you want unless you use additive blending. – Andon M. Coleman Apr 16 '14 at 23:58
  • @AndonM.Coleman Thanks for your advice, man. I totally understand what you are saying but in fact, as you can see in my code, I do draw my slices back to front. Just as you suggest, I draw the textures in reverse order. That's why I wrote "for(int nIndx=m_uImageCount - 1; nIndx >= 0;nIndx--)" instead of "for(int nIndx=0; nIndx < m_uImageCount;nIndx++)" in the RenderScene(void) function. – NJUHOBBY Apr 17 '14 at 00:37
  • @NJUHOBBY: I suspect the part in `MakeQuads (...)` that does `1.0 - 2.0 * ...` throws your intentions on their head. That appears to be inverting the output depth. You might consider `-1.0 + 2.0 * ...` instead to preserve the direction. – Andon M. Coleman Apr 17 '14 at 00:42
  • @AndonM.Coleman I don't think the 1.0 - 2.0 * ... inverts the output depth. In fact, "slices" with a bigger index should be placed behind slices with a smaller index, which means a bigger-index slice should have a smaller z value than a smaller-index one. – NJUHOBBY Apr 17 '14 at 00:56
  • @NJUHOBBY: NDC ranges from **-1** (near) to **1** (far). What you have right now puts the last slice in front of everything else (`1.0 - 2.0 * (X/X)` = `-1` = **near**). Ah, I see your projection matrix is not NDC, but very similar. Instead your viewing volume is slightly different (**-2** = near, **2** = far), but the fact remains that **-1** is in front of **1** ;) – Andon M. Coleman Apr 17 '14 at 01:00
  • @AndonM.Coleman what??? But according to the SuperBible 5, -1 is far and 1 is near! Now I am really confused! – NJUHOBBY Apr 17 '14 at 01:05
  • @NJUHOBBY: I have no idea where the book got that from, that is absolutely backwards. The only time positive Z points behind you is in view (camera/eye) space. NDC is Normalized Device Coordinates, it's a left-handed coordinate system where +Z points forward and the entire viewing volume is contained within [-1,1]. – Andon M. Coleman Apr 17 '14 at 01:08
  • Not the main problem here, but I noticed something else while looking at your code: You choose a shader that (based on the name) appears to do lighting. Doing lighting on the rendered polygons does not really make sense for volume rendering. If you do want explicit lighting for volume rendering, you need to do the lighting calculations based on volume gradients. You can get something that almost looks like it's lighted, without really doing explicit lighting, by applying a transfer function that changes color along with the transparency. – Reto Koradi Apr 17 '14 at 03:56
  • @RetoKoradi So what shader should I use here? I tried GLT_SHADER_SHADED by changing shaderManager.UseStockShader(GLT_SHADER_TEXTURE_POINT_LIGHT_DIFF, transformPipeline.GetModelViewMatrix(), transformPipeline.GetProjectionMatrix(), vLightPos, vWhite, 0); to shaderManager.UseStockShader(GLT_SHADER_SHADED, transformPipeline.GetModelViewProjectionMatrix()); However, I then can't see anything in the final result. – NJUHOBBY Apr 17 '14 at 06:01
  • I'm not familiar with the toolkit you're using. But from a quick search that showed some code, GLT_SHADER_TEXTURE_REPLACE might serve the purpose. If you want to go significantly farther with volume rendering, there won't be a way around writing your own shaders. – Reto Koradi Apr 17 '14 at 06:32
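Putting the suggestions from this thread together, here is a minimal sketch of the blending path (assuming the variable names from the question and the GLT_SHADER_TEXTURE_REPLACE stock shader mentioned above; whichever index-to-z mapping you settle on, the slice farthest from the camera must be drawn first):

// In SetupRC(), after the alpha-test setup:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

// In RenderScene(): draw the slices strictly back to front with depth
// writes off, so nearer slices blend over farther ones instead of
// masking them out via the depth buffer.
glDepthMask(GL_FALSE);
for(int nIndx = m_uImageCount - 1; nIndx >= 0; nIndx--)  // assumes highest index = farthest slice
{
    glBindTexture(GL_TEXTURE_2D, m_puTextureIDs[nIndx]);
    MakeQuads(nIndx);
    // Plain texturing, no lighting
    shaderManager.UseStockShader(GLT_SHADER_TEXTURE_REPLACE,
                                 transformPipeline.GetModelViewProjectionMatrix(),
                                 0);
    myBatch.Draw();
    myBatch.Reset();
}
glDepthMask(GL_TRUE);

glDepthMask(GL_FALSE) leaves the depth test enabled but stops the slices from writing depth, so a slice drawn early cannot occlude the slices drawn after it.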