Implementing volume rendering using splatting

In this recipe, we will implement splatting on the GPU. The splatting algorithm converts the voxel representation into splats by convolving each voxel with a Gaussian kernel. The Gaussian kernel attenuates high frequencies and smooths out edges, giving a softer rendered output.

Getting ready

The code for this recipe is in the Chapter7/Splatting directory.

How to do it…

Let us start this recipe by following these simple steps:

  1. Load the 3D volume data and store it into an array.
    std::ifstream infile(filename.c_str(), std::ios_base::binary);
    if(infile.good()) {
      pVolume = new GLubyte[XDIM*YDIM*ZDIM];
      infile.read(reinterpret_cast<char*>(pVolume), XDIM*YDIM*ZDIM*sizeof(GLubyte));
      infile.close();
      return true;
    } else {
      return false;
    }
  2. Depending on the sampling box size, run three loops to iterate through the entire volume voxel by voxel.
    vertices.clear();
    int dx = XDIM/X_SAMPLING_DIST;
    int dy = YDIM/Y_SAMPLING_DIST;
    int dz = ZDIM/Z_SAMPLING_DIST;
    scale = glm::vec3(dx,dy,dz);
    for(int z=0;z<ZDIM;z+=dz) {
      for(int y=0;y<YDIM;y+=dy) {
        for(int x=0;x<XDIM;x+=dx) {
          SampleVoxel(x,y,z);
        }
      }
    }

    The SampleVoxel function is defined in the VolumeSplatter class as follows:

    void VolumeSplatter::SampleVoxel(const int x, const int y,  
                                     const int z) {
      GLubyte data = SampleVolume(x, y, z);
      if(data>isoValue) {
        Vertex v; 
        v.pos.x = x;
        v.pos.y = y;
        v.pos.z = z;
        v.normal = GetNormal(x, y, z);
        v.pos *= invDim; 
        vertices.push_back(v);
      } 
    }
  3. In each sampling step, estimate the volume density value at the current voxel. If the value is greater than the given isovalue, store the voxel position and normal in a vertex array.
    GLubyte data = SampleVolume(x, y, z);
    if(data>isoValue) {
      Vertex v;
      v.pos.x = x;
      v.pos.y = y;
      v.pos.z = z;
      v.normal = GetNormal(x, y, z);
      v.pos *= invDim;
      vertices.push_back(v);
    }

    The SampleVolume function takes the given sampling point and returns the nearest voxel density. It is defined in the VolumeSplatter class as follows:

    GLubyte VolumeSplatter::SampleVolume(const int x, const int y, const int z) {
      int index = (x+(y*XDIM)) + z*(XDIM*YDIM);
      if(index<0)
        index = 0;
      if(index >= XDIM*YDIM*ZDIM)
        index = (XDIM*YDIM*ZDIM)-1;
      return pVolume[index];
    }
  4. After the sampling step, upload the generated vertices to a vertex buffer object (VBO) bound to a vertex array object (VAO).
    glGenVertexArrays(1, &volumeSplatterVAO);
    glGenBuffers(1, &volumeSplatterVBO);
    glBindVertexArray(volumeSplatterVAO);
    glBindBuffer(GL_ARRAY_BUFFER, volumeSplatterVBO);
    glBufferData(GL_ARRAY_BUFFER, splatter->GetTotalVertices()*sizeof(Vertex), splatter->GetVertexPointer(), GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)offsetof(Vertex, normal));
  5. Set up two FBOs for offscreen rendering. The first FBO (filterFBOID) is used for Gaussian smoothing.
    glGenFramebuffers(1, &filterFBOID);
    glBindFramebuffer(GL_FRAMEBUFFER, filterFBOID);
    glGenTextures(2, blurTexID);
    for(int i=0;i<2;i++) {
      glActiveTexture(GL_TEXTURE1+i);
      glBindTexture(GL_TEXTURE_2D, blurTexID[i]);
      //set texture parameters
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, IMAGE_WIDTH, IMAGE_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
      glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+i, GL_TEXTURE_2D, blurTexID[i], 0);
    }
    GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if(status == GL_FRAMEBUFFER_COMPLETE) {
      cout<<"Filtering FBO setup successful."<<endl;
    } else {
      cout<<"Problem in Filtering FBO setup."<<endl;
    }
  6. The second FBO (fboID) is used to render the scene so that the smoothing operation can be applied on the rendered output from the first pass. Add a render buffer object to this FBO to enable depth testing.
    glGenFramebuffers(1, &fboID);
    glGenRenderbuffers(1, &rboID);
    glGenTextures(1, &texID);
    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
    glBindRenderbuffer(GL_RENDERBUFFER, rboID);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texID);
    //set texture parameters
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, IMAGE_WIDTH, IMAGE_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texID, 0);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rboID);
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT32, IMAGE_WIDTH, IMAGE_HEIGHT);
    status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if(status == GL_FRAMEBUFFER_COMPLETE) {
      cout<<"Offscreen rendering FBO setup successful."<<endl;
    } else {
      cout<<"Problem in offscreen rendering FBO setup."<<endl;
    }
  7. In the render function, first render the point splats to a texture using the offscreen rendering FBO (fboID).
    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
    glViewport(0, 0, IMAGE_WIDTH, IMAGE_HEIGHT);
    glDrawBuffer(GL_COLOR_ATTACHMENT0);
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    glm::mat4 T = glm::translate(glm::mat4(1), glm::vec3(-0.5,-0.5,-0.5));
    glBindVertexArray(volumeSplatterVAO);
    shader.Use();
    glUniformMatrix4fv(shader("MV"), 1, GL_FALSE, glm::value_ptr(MV*T));
    glUniformMatrix3fv(shader("N"), 1, GL_FALSE, glm::value_ptr(glm::inverseTranspose(glm::mat3(MV*T))));
    glUniformMatrix4fv(shader("P"), 1, GL_FALSE, glm::value_ptr(P));
    glDrawArrays(GL_POINTS, 0, splatter->GetTotalVertices());
    shader.UnUse();

    The splatting vertex shader (Chapter7/Splatting/shaders/splatShader.vert) is defined as follows. It transforms the normal to eye space and calculates the splat's point size from the splatSize uniform (derived from the volume dimension and the sampling voxel size) and the eye-space depth; the result is written to the gl_PointSize variable.

    #version 330 core
    layout(location = 0) in vec3 vVertex; 
    layout(location = 1) in vec3 vNormal; 
    uniform mat4 MV; 
    uniform mat3 N;
    uniform mat4 P;          
    smooth out vec3 outNormal; 
    uniform float splatSize;
    void main() {    
       vec4 eyeSpaceVertex = MV*vec4(vVertex,1);
       gl_PointSize = 2*splatSize/-eyeSpaceVertex.z; 
       gl_Position = P * eyeSpaceVertex; 
       outNormal = N*vNormal;
    }

    The splatting fragment shader (Chapter7/Splatting/shaders/splatShader.frag) is defined as follows:

    #version 330 core
    layout(location = 0) out vec4 vFragColor;
    smooth in vec3 outNormal;
    const vec3 L = vec3(0,0,1);
    const vec3 V = L;
    const vec4 diffuse_color = vec4(0.75,0.5,0.5,1);
    const vec4 specular_color = vec4(1);
    void main() {
      vec3 N;
      N = normalize(outNormal);
      vec2 P = gl_PointCoord*2.0 - vec2(1.0);
      float mag = dot(P.xy,P.xy);
      if (mag > 1)
        discard;

      float diffuse = max(0, dot(N,L));
      vec3 halfVec = normalize(L+V);
      float specular = pow(max(0, dot(halfVec,N)),400);
      vFragColor = (specular*specular_color) + (diffuse*diffuse_color);
    }
  8. Next, bind the filtering FBO and apply first the vertical and then the horizontal Gaussian smoothing pass by drawing a full-screen quad, as was done in the Variance shadow mapping recipe in Chapter 4, Lights and Shadows.
    glBindVertexArray(quadVAOID);
    glBindFramebuffer(GL_FRAMEBUFFER, filterFBOID);
    glDrawBuffer(GL_COLOR_ATTACHMENT0);
    gaussianV_shader.Use();
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
    glDrawBuffer(GL_COLOR_ATTACHMENT1);
    gaussianH_shader.Use();
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
  9. Unbind the filtering FBO, restore the default draw buffer and render the filtered output on the screen.
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glDrawBuffer(GL_BACK_LEFT);
    glViewport(0, 0, WIDTH, HEIGHT);
    quadShader.Use();
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
    quadShader.UnUse();
    glBindVertexArray(0);

How it works…

The splatting algorithm works by rendering the voxels of the volume data as Gaussian blobs and projecting them onto the screen. To achieve this, we first find the candidate voxels by traversing the entire volume dataset voxel by voxel for the given isovalue. If a voxel qualifies, we store its position and normal in a vertex array. For convenience, we wrap all of this functionality in the VolumeSplatter class.
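
The per-vertex normal comes from the GetNormal function, whose listing is omitted in this recipe. A common choice is the negated central-difference gradient of the density field; the following is only a minimal sketch under that assumption and may differ from the accompanying source code:

glm::vec3 VolumeSplatter::GetNormal(const int x, const int y, const int z) {
  //central-difference gradient of the density field
  glm::vec3 grad;
  grad.x = (float)(SampleVolume(x+1, y, z) - SampleVolume(x-1, y, z));
  grad.y = (float)(SampleVolume(x, y+1, z) - SampleVolume(x, y-1, z));
  grad.z = (float)(SampleVolume(x, y, z+1) - SampleVolume(x, y, z-1));
  //guard against zero-length gradients in homogeneous regions
  if(glm::dot(grad, grad) > 0)
    return -glm::normalize(grad);
  return glm::vec3(0,0,1);
}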

We first create a new instance of the VolumeSplatter class, set the volume dimensions, and then load the volume data. Next, we specify the target isovalue and the number of sampling voxels to use. Finally, we call the VolumeSplatter::SplatVolume function, which traverses the whole volume voxel by voxel.

splatter = new VolumeSplatter();
splatter->SetVolumeDimensions(256,256,256);
splatter->LoadVolume(volume_file);
splatter->SetIsosurfaceValue(40);
splatter->SetNumSamplingVoxels(64,64,64);
std::cout<<"Generating point splats ...";
splatter->SplatVolume();
std::cout<<"Done."<<std::endl;

The splatter stores the vertices and normals in a vertex array. We then generate the vertex buffer object from this array. In the rendering function, we first draw the entire splat dataset in a single pass into an offscreen render target so that we can filter it using separable Gaussian convolution filters. Finally, the filtered output is displayed on a full-screen quad.
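
The gaussianV_shader and gaussianH_shader fragment shaders are not listed here; they are the same separable Gaussian filters used in the Variance shadow mapping recipe in Chapter 4. As a rough sketch of the vertical pass only, assuming a vUV texture coordinate from the full-screen quad, a textureMap sampler bound to the previous pass's output, and an illustrative set of weights (the horizontal pass simply offsets along x instead of y):

#version 330 core
layout(location = 0) out vec4 vFragColor;
smooth in vec2 vUV;            //texture coordinates from the quad vertex shader
uniform sampler2D textureMap;  //output of the previous pass
//illustrative 9-tap Gaussian weights (centre tap + 4 symmetric taps)
const float weights[5] = float[](0.2270, 0.1945, 0.1216, 0.0540, 0.0162);
void main() {
  vec2 delta = 1.0/vec2(textureSize(textureMap, 0));
  vec4 color = texture(textureMap, vUV)*weights[0];
  for(int i=1;i<5;i++) {
    //sample symmetrically above and below the current texel
    color += texture(textureMap, vUV + vec2(0, i*delta.y))*weights[i];
    color += texture(textureMap, vUV - vec2(0, i*delta.y))*weights[i];
  }
  vFragColor = color;
}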

The splatting vertex shader (Chapter7/Splatting/shaders/splatShader.vert) calculates the point size on screen based on the depth of the splat. In order to write to gl_PointSize from the vertex shader, we have to enable the GL_VERTEX_PROGRAM_POINT_SIZE state, that is, call glEnable(GL_VERTEX_PROGRAM_POINT_SIZE). The vertex shader also outputs the splat normals in eye space.

vec4 eyeSpaceVertex = MV*vec4(vVertex,1);
gl_PointSize = 2*splatSize/-eyeSpaceVertex.z; 
gl_Position = P * eyeSpaceVertex; 
outNormal = N*vNormal;
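
On the application side, this means enabling the point-size state during initialization and uploading the splatSize uniform while the splat shader is bound. The snippet below is only a sketch; the value assigned to splatSize is a placeholder, whereas the demo derives it from the volume dimension and the sampling voxel size:

//allow the vertex shader to write gl_PointSize
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);

shader.Use();
//placeholder value; tune for the dataset and viewport
glUniform1f(shader("splatSize"), 256.0f);
shader.UnUse();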

Since the default point sprite is rendered as a screen-aligned quad, in the fragment shader (Chapter7/Splatting/shaders/splatShader.frag) we discard all fragments that fall outside the circular splat radius, which gives each splat a round footprint.

vec3 N;
N = normalize(outNormal);
vec2 P = gl_PointCoord*2.0 - vec2(1.0);  
float mag = dot(P.xy,P.xy); 
if (mag > 1) discard;   

Finally, we estimate the diffuse and specular components and output the current fragment color using the eye space normal of the splat.

float diffuse = max(0, dot(N,L));
vec3 halfVec = normalize(L+V);
float specular = pow(max(0, dot(halfVec,N)),400);
vFragColor =  (specular*specular_color) + (diffuse*diffuse_color);

There's more…

The demo application implementing this recipe renders the engine dataset as in the previous recipes, as shown in the following screenshot. Note that the output appears blurred due to the Gaussian smoothing of the splats.

This recipe gave us an overview of the splatting algorithm. Our brute-force approach in this recipe was to iterate through all of the voxels. For large datasets, we would have to employ an acceleration structure, such as an octree, to quickly identify the voxels whose densities exceed the isovalue and to cull the unnecessary ones.
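
As a rough illustration of the idea (this is not part of the accompanying code), an octree node could cache the density range of its subvolume so that whole branches are skipped when no voxel in them can exceed the isovalue. The sketch below assumes the Vertex structure and GLubyte type from this recipe:

//hypothetical octree node caching the density range of a subvolume
struct OctreeNode {
  GLubyte minDensity, maxDensity;  //density range of this subvolume
  int x, y, z, size;               //origin and edge length in voxels
  OctreeNode* children[8];         //all NULL for leaf nodes
};

//collect splats only from nodes that may contain voxels above the isovalue
void CollectSplats(const OctreeNode* node, GLubyte isoValue,
                   std::vector<Vertex>& vertices) {
  if(node == NULL || node->maxDensity <= isoValue)
    return;  //no voxel in this subvolume exceeds the isovalue
  if(node->children[0] == NULL) {
    //leaf node: sample its voxels as in VolumeSplatter::SampleVoxel
    return;
  }
  for(int i=0;i<8;i++)
    CollectSplats(node->children[i], isoValue, vertices);
}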
