|
/**
 * @author Almar Klein / http://almarklein.org
 *
 * Shaders to render 3D volumes using raycasting.
 * The applied techniques are based on similar implementations in the Visvis and Vispy projects.
 * This is not the only possible approach; hence the "1" suffix in the shader's name.
 */
| 8 | + |
// Shader definition for raycast volume rendering. Intended for use with a
// THREE.ShaderMaterial; the GLSL bodies are stored as arrays of string
// literals joined with newlines.
// NOTE(review): the fragment shader uses `sampler3D` and `texture()`
// (GLSL ES 3.00) but also `varying`/`texture2D` (ES 1.00 style) —
// presumably it relies on three.js's WebGL2 shader preprocessing; confirm
// against the renderer version this is used with.
THREE.VolumeRenderShader1 = {
	uniforms: {
		// Volume dimensions (voxels) along x/y/z.
		"u_size": { value: [1, 1, 1] },
		// Render style selector: 0 = MIP (maximum intensity projection), 1 = ISO surface.
		"u_renderstyle": { value: 0 },
		// Iso-surface threshold (used only when u_renderstyle == 1).
		"u_renderthreshold": { value: 0.5 },
		// Contrast limits (lower, upper) applied before the colormap lookup.
		"u_clim": { value: [0.0, 1.0] },
		// The 3D data texture holding the volume intensities.
		"u_data": { value: null },
		// 1D colormap stored as a 2D texture; sampled at v = 0.5.
		"u_cmdata": { value: null }
	},
	vertexShader: [
		'varying vec4 v_nearpos;',
		'varying vec4 v_farpos;',
		'varying vec3 v_position;',

		// 4x4 matrix inverse via cofactor expansion (classic adjugate/determinant form).
		'mat4 inversemat(mat4 m) {',
		// Taken from https://github.com/stackgl/glsl-inverse/blob/master/index.glsl
		// This function is licenced by the MIT license to Mikola Lysenko
		'float',
		'a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],',
		'a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],',
		'a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],',
		'a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],',

		// 2x2 sub-determinants reused in the cofactor terms below.
		'b00 = a00 * a11 - a01 * a10,',
		'b01 = a00 * a12 - a02 * a10,',
		'b02 = a00 * a13 - a03 * a10,',
		'b03 = a01 * a12 - a02 * a11,',
		'b04 = a01 * a13 - a03 * a11,',
		'b05 = a02 * a13 - a03 * a12,',
		'b06 = a20 * a31 - a21 * a30,',
		'b07 = a20 * a32 - a22 * a30,',
		'b08 = a20 * a33 - a23 * a30,',
		'b09 = a21 * a32 - a22 * a31,',
		'b10 = a21 * a33 - a23 * a31,',
		'b11 = a22 * a33 - a23 * a32,',

		'det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;',

		'return mat4(',
		'a11 * b11 - a12 * b10 + a13 * b09,',
		'a02 * b10 - a01 * b11 - a03 * b09,',
		'a31 * b05 - a32 * b04 + a33 * b03,',
		'a22 * b04 - a21 * b05 - a23 * b03,',
		'a12 * b08 - a10 * b11 - a13 * b07,',
		'a00 * b11 - a02 * b08 + a03 * b07,',
		'a32 * b02 - a30 * b05 - a33 * b01,',
		'a20 * b05 - a22 * b02 + a23 * b01,',
		'a10 * b10 - a11 * b08 + a13 * b06,',
		'a01 * b08 - a00 * b10 - a03 * b06,',
		'a30 * b04 - a31 * b02 + a33 * b00,',
		'a21 * b02 - a20 * b04 - a23 * b00,',
		'a11 * b07 - a10 * b09 - a12 * b06,',
		'a00 * b09 - a01 * b07 + a02 * b06,',
		'a31 * b01 - a30 * b03 - a32 * b00,',
		'a20 * b03 - a21 * b01 + a22 * b00) / det;',
		'}',


		'void main() {',
		// Prepare transforms to map to "camera view". See also:
		// https://threejs.org/docs/#api/renderers/webgl/WebGLProgram
		'mat4 viewtransformf = viewMatrix;',
		'mat4 viewtransformi = inversemat(viewMatrix);',

		// Project local vertex coordinate to camera position. Then do a step
		// backward (in cam coords) to the near clipping plane, and project back. Do
		// the same for the far clipping plane. This gives us all the information we
		// need to calculate the ray and truncate it to the viewing cone.
		'vec4 position4 = vec4(position, 1.0);',
		'vec4 pos_in_cam = viewtransformf * position4;',

		// Intersection of ray and near clipping plane (z = -1 in clip coords)
		'pos_in_cam.z = -pos_in_cam.w;',
		'v_nearpos = viewtransformi * pos_in_cam;',

		// Intersection of ray and far clipping plane (z = +1 in clip coords)
		'pos_in_cam.z = pos_in_cam.w;',
		'v_farpos = viewtransformi * pos_in_cam;',

		// Set varyings and output pos
		// NOTE(review): v_position is the raw local-space vertex position; the
		// fragment shader treats it as the back face of the volume cuboid.
		'v_position = position;',
		'gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;',
		'}',
	].join( '\n' ),
	fragmentShader: [
		'precision highp float;',
		'precision mediump sampler3D;',

		'uniform vec3 u_size;',
		'uniform int u_renderstyle;',
		'uniform float u_renderthreshold;',
		'uniform vec2 u_clim;',

		'uniform sampler3D u_data;',
		'uniform sampler2D u_cmdata;',

		'varying vec3 v_position;',
		'varying vec4 v_nearpos;',
		'varying vec4 v_farpos;',

		// The maximum distance through our rendering volume is sqrt(3).
		'const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3',
		'const int REFINEMENT_STEPS = 4;',
		'const float relative_step_size = 1.0;',
		// NOTE(review): these const colors are shadowed by zero-initialized locals
		// in add_lighting() below — see the note there; confirm which set is
		// intended to drive the lighting.
		'const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);',
		'const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);',
		'const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);',
		'const float shininess = 40.0;',

		// Forward declarations of the two render-style entry points.
		'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
		'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',

		// Forward declarations of the sampling / shading helpers.
		'float sample1(vec3 texcoords);',
		'vec4 apply_colormap(float val);',
		'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);',


		'void main() {',
		// Normalize clipping plane info
		'vec3 farpos = v_farpos.xyz / v_farpos.w;',
		'vec3 nearpos = v_nearpos.xyz / v_nearpos.w;',

		// Calculate unit vector pointing in the view direction through this fragment.
		'vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);',

		// Compute the (negative) distance to the front surface or near clipping plane.
		// v_position is the back face of the cuboid, so the initial distance calculated in the dot
		// product below is the distance from near clip plane to the back of the cuboid
		// The three max/min pairs clamp the ray to the volume's bounding box
		// ([-0.5, u_size - 0.5] per axis) via slab intersection on each axis.
		'float distance = dot(nearpos - v_position, view_ray);',
		'distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,',
		'(u_size.x - 0.5 - v_position.x) / view_ray.x));',
		'distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,',
		'(u_size.y - 0.5 - v_position.y) / view_ray.y));',
		'distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,',
		'(u_size.z - 0.5 - v_position.z) / view_ray.z));',

		// Now we have the starting position on the front surface
		'vec3 front = v_position + view_ray * distance;',

		// Decide how many steps to take
		// (distance is negative here, hence the sign flip; +0.5 rounds to nearest)
		'int nsteps = int(-distance / relative_step_size + 0.5);',
		'if ( nsteps < 1 )',
		'discard;',

		// Get starting location and step vector in texture coordinates
		// (dividing by u_size maps voxel coordinates into [0, 1] texture space)
		'vec3 step = ((v_position - front) / u_size) / float(nsteps);',
		'vec3 start_loc = front / u_size;',

		// For testing: show the number of steps. This helps to establish
		// whether the rays are correctly oriented
		//'gl_FragColor = vec4(0.0, float(nsteps) / 1.0 / u_size.x, 1.0, 1.0);',
		//'return;',

		'if (u_renderstyle == 0)',
		'cast_mip(start_loc, step, nsteps, view_ray);',
		'else if (u_renderstyle == 1)',
		'cast_iso(start_loc, step, nsteps, view_ray);',

		// Discard nearly-transparent fragments so the background shows through.
		'if (gl_FragColor.a < 0.05)',
		'discard;',
		'}',


		'float sample1(vec3 texcoords) {',
		'/* Sample float value from a 3D texture. Assumes intensity data. */',
		'return texture(u_data, texcoords.xyz).r;',
		'}',


		// Map an intensity through the contrast limits, then look it up in the colormap.
		'vec4 apply_colormap(float val) {',
		'val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);',
		'return texture2D(u_cmdata, vec2(val, 0.5));',
		'}',


		// Maximum intensity projection: march the ray, keep the largest sample,
		// then refine around it with sub-steps for a crisper result.
		'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',

		'float max_val = -1e6;',
		// NOTE(review): 100 looks like an arbitrary fallback index; if no sample
		// ever exceeds -1e6 the refinement below starts 100 steps in — confirm
		// whether 0 was intended.
		'int max_i = 100;',
		'vec3 loc = start_loc;',

		// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
		// non-constant expression. So we use a hard-coded max, and an additional condition
		// inside the loop.
		'for (int iter=0; iter<MAX_STEPS; iter++) {',
		'if (iter >= nsteps)',
		'break;',
		// Sample from the 3D texture
		'float val = sample1(loc);',
		// Apply MIP operation
		'if (val > max_val) {',
		'max_val = val;',
		'max_i = iter;',
		'}',
		// Advance location deeper into the volume
		'loc += step;',
		'}',

		// Refine location, gives crispier images
		// (re-sample REFINEMENT_STEPS sub-steps centered on the coarse maximum)
		'vec3 iloc = start_loc + step * (float(max_i) - 0.5);',
		'vec3 istep = step / float(REFINEMENT_STEPS);',
		'for (int i=0; i<REFINEMENT_STEPS; i++) {',
		'max_val = max(max_val, sample1(iloc));',
		'iloc += istep;',
		'}',

		// Resolve final color
		'gl_FragColor = apply_colormap(max_val);',
		'}',


		// Iso-surface rendering: march until a sample crosses the threshold,
		// refine the crossing with sub-steps, then shade that point.
		'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',

		'gl_FragColor = vec4(0.0); // init transparent',
		// NOTE(review): color3 is declared but never used in this function.
		'vec4 color3 = vec4(0.0); // final color',
		'vec3 dstep = 1.5 / u_size; // step to sample derivative',
		'vec3 loc = start_loc;',

		// Slightly below the render threshold; triggers the refinement pass early
		// so the sub-stepping can find the exact crossing.
		'float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);',

		// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
		// non-constant expression. So we use a hard-coded max, and an additional condition
		// inside the loop.
		'for (int iter=0; iter<MAX_STEPS; iter++) {',
		'if (iter >= nsteps)',
		'break;',

		// Sample from the 3D texture
		'float val = sample1(loc);',

		'if (val > low_threshold) {',
		// Take the last interval in smaller steps
		'vec3 iloc = loc - 0.5 * step;',
		'vec3 istep = step / float(REFINEMENT_STEPS);',
		'for (int i=0; i<REFINEMENT_STEPS; i++) {',
		'val = sample1(iloc);',
		'if (val > u_renderthreshold) {',
		'gl_FragColor = add_lighting(val, iloc, dstep, view_ray);',
		'return;',
		'}',
		'iloc += istep;',
		'}',
		'}',

		// Advance location deeper into the volume
		'loc += step;',
		'}',
		'}',


		// Blinn-Phong style shading at an iso-surface hit point. The surface
		// normal is estimated from central differences of the volume data.
		'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)',
		'{',
		// Calculate color by incorporating lighting

		// View direction
		'vec3 V = normalize(view_ray);',

		// calculate normal vector from gradient
		'vec3 N;',
		'float val1, val2;',
		'val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));',
		'val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));',
		'N[0] = val1 - val2;',
		'val = max(max(val1, val2), val);',
		'val1 = sample1(loc + vec3(0.0, -step[1], 0.0));',
		'val2 = sample1(loc + vec3(0.0, +step[1], 0.0));',
		'N[1] = val1 - val2;',
		'val = max(max(val1, val2), val);',
		'val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));',
		'val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));',
		'N[2] = val1 - val2;',
		'val = max(max(val1, val2), val);',

		// NOTE(review): gm is computed but never used below.
		'float gm = length(N); // gradient magnitude',
		'N = normalize(N);',

		// Flip normal so it points towards viewer
		'float Nselect = float(dot(N, V) > 0.0);',
		'N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;',

		// Init colors
		// NOTE(review): these locals shadow the file-scope const colors above.
		// Because they start at zero, the ambient and specular accumulations in
		// the loop below (x += mask * x forms) remain zero, so only the diffuse
		// (lambert) term contributes. This matches upstream three.js behavior,
		// but confirm it is intended before relying on the const colors.
		'vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);',
		'vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);',
		'vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);',

		// note: could allow multiple lights
		'for (int i=0; i<1; i++)',
		'{',
		// Get light direction (make sure to prevent zero devision)
		'vec3 L = normalize(view_ray); //lightDirs[i];',
		'float lightEnabled = float( length(L) > 0.0 );',
		'L = normalize(L + (1.0 - lightEnabled));',

		// Calculate lighting properties
		'float lambertTerm = clamp(dot(N, L), 0.0, 1.0);',
		'vec3 H = normalize(L+V); // Halfway vector',
		'float specularTerm = pow(max(dot(H, N), 0.0), shininess);',

		// Calculate mask
		'float mask1 = lightEnabled;',

		// Calculate colors
		'ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;',
		'diffuse_color += mask1 * lambertTerm;',
		'specular_color += mask1 * specularTerm * specular_color;',
		'}',

		// Calculate final color by componing different components
		'vec4 final_color;',
		'vec4 color = apply_colormap(val);',
		'final_color = color * (ambient_color + diffuse_color) + specular_color;',
		'final_color.a = color.a;',
		'return final_color;',
		'}',
	].join( '\n' )
};