HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_VoxelArray.C
Go to the documentation of this file.
/*
 * PROPRIETARY INFORMATION.  This software is proprietary to
 * Side Effects Software Inc., and is not to be reproduced,
 * transmitted, or disclosed in any way without written permission.
 *
 * NAME:	UT_VoxelArray.C ( UT Library, C++)
 *
 * COMMENTS:
 *	Tiled Voxel Array Implementation.
 */
11 
12 #include "UT_VoxelArray.h"
13 #include "UT_BoundingBox.h"
14 #include "UT_Filter.h"
15 #include "UT_IStream.h"
16 #include "UT_JSONParser.h"
17 #include "UT_JSONWriter.h"
18 #include "UT_ParallelUtil.h"
19 #include "UT_SharedMemoryManager.h"
20 #include "UT_StackBuffer.h"
21 #include "UT_VoxelArrayJSON.h"
22 #include "UT_Debug.h"
23 #include <SYS/SYS_Align.h>
24 #include <SYS/SYS_CallIf.h>
25 #include <SYS/SYS_Math.h>
26 
27 ///
28 /// fpreal16 conversion functions
29 ///
30 inline fpreal16
32 inline fpreal16
34 inline fpreal16
36 inline fpreal16
37 UTvoxelConvertFP16(int8 a) { return a; }
38 inline fpreal16
40 inline fpreal16
42 inline fpreal16
44 inline fpreal16
45 UTvoxelConvertFP16(UT_Vector2 a) { return a.x(); }
46 inline fpreal16
47 UTvoxelConvertFP16(UT_Vector3 a) { return a.x(); }
48 inline fpreal16
49 UTvoxelConvertFP16(UT_Vector4 a) { return a.x(); }
50 
51 template <typename T>
53 {
54  return T(v);
55 }
56 
57 template <>
59 {
60  return UT_Vector3F(v, v, v);
61 }
62 template <>
64 {
65  return UT_Vector3D(v, v, v);
66 }
67 template <>
69 {
70  return UT_Vector4F(v, v, v, v);
71 }
72 template <>
74 {
75  return UT_Vector4D(v, v, v, v);
76 }
77 
78 ///
79 /// VoxelTileCompress definitions
80 ///
81 template <typename T>
82 void
84  T &min, T &max) const
85 {
86  int x, y, z;
87 
88  min = getValue(tile, 0, 0, 0);
89  max = min;
90 
91  // Generic approach is to just use the getValue() ability.
92  for (z = 0; z < tile.zres(); z++)
93  {
94  for (y = 0; y < tile.yres(); y++)
95  {
96  for (x = 0; x < tile.xres(); x++)
97  {
98  tile.expandMinMax(getValue(tile, x, y, z), min, max);
99  }
100  }
101  }
102  return;
103 }
104 
105 //
106 // VoxelTile definitions.
107 //
108 
109 template <typename T>
111 {
112  myRes[0] = 0;
113  myRes[1] = 0;
114  myRes[2] = 0;
115 
116  myCompressionType = COMPRESS_CONSTANT;
117  myForeignData = false;
118 
119  if (sizeof(T) <= sizeof(T*))
120  {
121  myData = 0;
122  }
123  else
124  {
125  myData = UT_VOXEL_ALLOC(sizeof(T));
126 
127  // It is not accidental that this is not a static_cast!
128  // There isn't a UT_Vector3(fpreal) constructor (as we have a foolish
129  // UT_Vector3(fpreal *) constructor that would cause massive problems)
130  // but there is a UT_Vector3 operator=(fpreal)!
131  ((T *)myData)[0] = 0;
132  }
133 }
134 
135 template <typename T>
137 {
138  freeData();
139 }
140 
141 template <typename T>
143 {
144  myData = 0;
145 
146  // Use assignment operator.
147  *this = src;
148 }
149 
150 template <typename T>
151 const UT_VoxelTile<T> &
153 {
154  if (&src == this)
155  return *this;
156 
157  freeData();
158 
159  myRes[0] = src.myRes[0];
160  myRes[1] = src.myRes[1];
161  myRes[2] = src.myRes[2];
162 
163  myCompressionType = src.myCompressionType;
164  switch (myCompressionType)
165  {
166  case COMPRESS_RAW:
167  myData = UT_VOXEL_ALLOC(
168  sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
169  memcpy(myData, src.myData, sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
170  break;
171  case COMPRESS_CONSTANT:
172  if (inlineConstant())
173  {
174  myData = src.myData;
175  }
176  else
177  {
178  myData = UT_VOXEL_ALLOC(sizeof(T));
179  memcpy(myData, src.myData, sizeof(T));
180  }
181  break;
182  case COMPRESS_RAWFULL:
183  myData = UT_VOXEL_ALLOC(
184  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
185  memcpy(myData, src.myData,
186  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
187  break;
188  case COMPRESS_FPREAL16:
189  myData = UT_VOXEL_ALLOC(
190  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2]);
191  memcpy(myData, src.myData,
192  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2]);
193  break;
194  default:
195  {
196  UT_VoxelTileCompress<T> *engine;
197 
198  engine = getCompressionEngine(myCompressionType);
199  myData = UT_VOXEL_ALLOC(engine->getDataLength(src));
200  memcpy(myData, src.myData, engine->getDataLength(src));
201  break;
202  }
203  }
204 
205  return *this;
206 }
207 
208 template <typename T>
209 bool
211 {
212  switch (myCompressionType)
213  {
214  case COMPRESS_RAW:
215  // Do the assignment.
216  ((T *)myData)[ ((z * myRes[1]) + y) * myRes[0] + x ] = t;
217  return true;
218 
219  case COMPRESS_CONSTANT:
220  if (rawConstVal() == t)
221  {
222  // Trivially true.
223  return true;
224  }
225  return false;
226 
227  case COMPRESS_RAWFULL:
228  ((T *)myData)[ ((z * TILESIZE) + y) * TILESIZE + x ] = t;
229  return true;
230 
231  case COMPRESS_FPREAL16:
232  return false;
233  }
234 
235  // Use the compression engine.
236  UT_VoxelTileCompress<T> *engine;
237 
238  engine = getCompressionEngine(myCompressionType);
239  return engine->writeThrough(*this, x, y, z, t);
240 }
241 
242 template <typename T>
243 T
244 UT_VoxelTile<T>::lerp(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
245 {
246  T vx, vx1, vy, vy1, vz;
247 
248  switch (myCompressionType)
249  {
250  case COMPRESS_RAW:
251  {
252  T *data = (T *) myData;
253  int offset = (z * myRes[1] + y) * myRes[0] + x;
254  int yinc = myRes[0];
255  int zinc = myRes[0] * myRes[1];
256 
257  // Lerp x:x+1, y, z
258  vx = lerpValues(data[offset], data[offset+1], fx);
259  // Lerp x:x+1, y+1, z
260  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
261 
262  // Lerp x:x+1, y:y+1, z
263  vy = lerpValues(vx, vx1, fy);
264 
265  // Lerp x:x+1, y, z+1
266  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
267  // Lerp x:x+1, y+1, z+1
268  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
269 
270  // Lerp x:x+1, y:y+1, z+1
271  vy1 = lerpValues(vx, vx1, fy);
272 
273  // Lerp x:x+1, y:y+1, z:z+1
274  vz = lerpValues(vy, vy1, fz);
275  break;
276  }
277  case COMPRESS_RAWFULL:
278  {
279  T *data = (T *) myData;
280  int offset = (z * TILESIZE + y) * TILESIZE + x;
281  int yinc = TILESIZE;
282  int zinc = TILESIZE * TILESIZE;
283 
284  // Lerp x:x+1, y, z
285  vx = lerpValues(data[offset], data[offset+1], fx);
286  // Lerp x:x+1, y+1, z
287  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
288 
289  // Lerp x:x+1, y:y+1, z
290  vy = lerpValues(vx, vx1, fy);
291 
292  // Lerp x:x+1, y, z+1
293  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
294  // Lerp x:x+1, y+1, z+1
295  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
296 
297  // Lerp x:x+1, y:y+1, z+1
298  vy1 = lerpValues(vx, vx1, fy);
299 
300  // Lerp x:x+1, y:y+1, z:z+1
301  vz = lerpValues(vy, vy1, fz);
302  break;
303  }
304  case COMPRESS_FPREAL16:
305  {
306  fpreal16 *data = (fpreal16 *) myData;
307  int offset = (z * myRes[1] + y) * myRes[0] + x;
308  int yinc = myRes[0];
309  int zinc = myRes[0] * myRes[1];
310  fpreal16 vx, vx1, vy, vy1, vz;
311 
312  // Lerp x:x+1, y, z
313  vx = SYSlerp(data[offset], data[offset+1], fx);
314  // Lerp x:x+1, y+1, z
315  vx1 = SYSlerp(data[offset+yinc], data[offset+yinc+1], fx);
316 
317  // Lerp x:x+1, y:y+1, z
318  vy = SYSlerp(vx, vx1, fy);
319 
320  // Lerp x:x+1, y, z+1
321  vx = SYSlerp(data[offset+zinc], data[offset+zinc+1], fx);
322  // Lerp x:x+1, y+1, z+1
323  vx1 = SYSlerp(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
324 
325  // Lerp x:x+1, y:y+1, z+1
326  vy1 = SYSlerp(vx, vx1, fy);
327 
328  // Lerp x:x+1, y:y+1, z:z+1
329  vz = SYSlerp(vy, vy1, fz);
330 
331  return UTvoxelConvertFromFP16<T>(vz);
332  }
333  case COMPRESS_CONSTANT:
334  {
335  // This is quite trivial to do a trilerp on.
336  vz = rawConstVal();
337  break;
338  }
339 
340  default:
341  {
342  UT_VoxelTileCompress<T> *engine;
343 
344  engine = getCompressionEngine(myCompressionType);
345  // Lerp x:x+1, y, z
346  vx = lerpValues(engine->getValue(*this, x, y, z),
347  engine->getValue(*this, x+1, y, z),
348  fx);
349  // Lerp x:x+1, y+1, z
350  vx1 = lerpValues(engine->getValue(*this, x, y+1, z),
351  engine->getValue(*this, x+1, y+1, z),
352  fx);
353 
354  // Lerp x:x+1, y:y+1, z
355  vy = lerpValues(vx, vx1, fy);
356 
357  // Lerp x:x+1, y, z+1
358  vx = lerpValues(engine->getValue(*this, x, y, z+1),
359  engine->getValue(*this, x+1, y, z+1),
360  fx);
361  // Lerp x:x+1, y+1, z+1
362  vx1 = lerpValues(engine->getValue(*this, x, y+1, z+1),
363  engine->getValue(*this, x+1, y+1, z+1),
364  fx);
365 
366  // Lerp x:x+1, y:y+1, z+1
367  vy1 = lerpValues(vx, vx1, fy);
368 
369  // Lerp x:x+1, y:y+1, z:z+1
370  vz = lerpValues(vy, vy1, fz);
371  break;
372  }
373  }
374 
375  return vz;
376 }
377 
378 template <typename T>
379 template <int AXIS2D>
380 T
381 UT_VoxelTile<T>::lerpAxis(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
382 {
383  T vx, vx1, vy, vy1, vz;
384 
385  switch (myCompressionType)
386  {
387  case COMPRESS_RAW:
388  {
389  T *data = (T *) myData;
390  int offset = (z * myRes[1] + y) * myRes[0] + x;
391  int yinc = myRes[0];
392  int zinc = myRes[0] * myRes[1];
393 
394  // Lerp x:x+1, y, z
395  if (AXIS2D != 0)
396  vx = lerpValues(data[offset],
397  data[offset+1],
398  fx);
399  else
400  vx = data[offset];
401 
402  if (AXIS2D != 1)
403  {
404  // Lerp x:x+1, y+1, z
405  if (AXIS2D != 0)
406  vx1= lerpValues(data[offset+yinc],
407  data[offset+yinc+1],
408  fx);
409  else
410  vx1 = data[offset+yinc];
411  // Lerp x:x+1, y:y+1, z
412  vy = lerpValues(vx, vx1, fy);
413  }
414  else
415  vy = vx;
416 
417  if (AXIS2D != 2)
418  {
419  // Lerp x:x+1, y, z+1
420  if (AXIS2D != 0)
421  vx = lerpValues(data[offset+zinc],
422  data[offset+zinc+1],
423  fx);
424  else
425  vx = data[offset+zinc];
426 
427  if (AXIS2D != 1)
428  {
429  // Lerp x:x+1, y+1, z+1
430  if (AXIS2D != 0)
431  vx1= lerpValues(data[offset+zinc+yinc],
432  data[offset+zinc+yinc+1],
433  fx);
434  else
435  vx1 = data[offset+zinc+yinc];
436  // Lerp x:x+1, y:y+1, z+1
437  vy1 = lerpValues(vx, vx1, fy);
438  }
439  else
440  vy1 = vx;
441 
442  // Lerp x:x+1, y:y+1, z:z+1
443  vz = lerpValues(vy, vy1, fz);
444  }
445  else
446  vz = vy;
447  break;
448  }
449  case COMPRESS_RAWFULL:
450  {
451  T *data = (T *) myData;
452  int offset = (z * TILESIZE + y) * TILESIZE + x;
453  int yinc = TILESIZE;
454  int zinc = TILESIZE * TILESIZE;
455 
456  // Lerp x:x+1, y, z
457  if (AXIS2D != 0)
458  vx = lerpValues(data[offset],
459  data[offset+1],
460  fx);
461  else
462  vx = data[offset];
463 
464  if (AXIS2D != 1)
465  {
466  // Lerp x:x+1, y+1, z
467  if (AXIS2D != 0)
468  vx1= lerpValues(data[offset+yinc],
469  data[offset+yinc+1],
470  fx);
471  else
472  vx1 = data[offset+yinc];
473  // Lerp x:x+1, y:y+1, z
474  vy = lerpValues(vx, vx1, fy);
475  }
476  else
477  vy = vx;
478 
479  if (AXIS2D != 2)
480  {
481  // Lerp x:x+1, y, z+1
482  if (AXIS2D != 0)
483  vx = lerpValues(data[offset+zinc],
484  data[offset+zinc+1],
485  fx);
486  else
487  vx = data[offset+zinc];
488 
489  if (AXIS2D != 1)
490  {
491  // Lerp x:x+1, y+1, z+1
492  if (AXIS2D != 0)
493  vx1= lerpValues(data[offset+zinc+yinc],
494  data[offset+zinc+yinc+1],
495  fx);
496  else
497  vx1 = data[offset+zinc+yinc];
498  // Lerp x:x+1, y:y+1, z+1
499  vy1 = lerpValues(vx, vx1, fy);
500  }
501  else
502  vy1 = vx;
503 
504  // Lerp x:x+1, y:y+1, z:z+1
505  vz = lerpValues(vy, vy1, fz);
506  }
507  else
508  vz = vy;
509  break;
510  }
511  case COMPRESS_FPREAL16:
512  {
513  fpreal16 *data = (fpreal16 *) myData;
514  int offset = (z * myRes[1] + y) * myRes[0] + x;
515  int yinc = myRes[0];
516  int zinc = myRes[0] * myRes[1];
517  fpreal16 vx, vx1, vy, vy1, vz;
518 
519  // Lerp x:x+1, y, z
520  if (AXIS2D != 0)
521  vx = SYSlerp(data[offset],
522  data[offset+1],
523  fx);
524  else
525  vx = data[offset];
526 
527  if (AXIS2D != 1)
528  {
529  // Lerp x:x+1, y+1, z
530  if (AXIS2D != 0)
531  vx1= SYSlerp(data[offset+yinc],
532  data[offset+yinc+1],
533  fx);
534  else
535  vx1 = data[offset+yinc];
536  // Lerp x:x+1, y:y+1, z
537  vy = SYSlerp(vx, vx1, fy);
538  }
539  else
540  vy = vx;
541 
542  if (AXIS2D != 2)
543  {
544  // Lerp x:x+1, y, z+1
545  if (AXIS2D != 0)
546  vx = SYSlerp(data[offset+zinc],
547  data[offset+zinc+1],
548  fx);
549  else
550  vx = data[offset+zinc];
551 
552  if (AXIS2D != 1)
553  {
554  // Lerp x:x+1, y+1, z+1
555  if (AXIS2D != 0)
556  vx1= SYSlerp(data[offset+zinc+yinc],
557  data[offset+zinc+yinc+1],
558  fx);
559  else
560  vx1 = data[offset+zinc+yinc];
561  // Lerp x:x+1, y:y+1, z+1
562  vy1 = SYSlerp(vx, vx1, fy);
563  }
564  else
565  vy1 = vx;
566 
567  // Lerp x:x+1, y:y+1, z:z+1
568  vz = SYSlerp(vy, vy1, fz);
569  }
570  else
571  vz = vy;
572 
573  return UTvoxelConvertFromFP16<T>(vz);
574  }
575  case COMPRESS_CONSTANT:
576  {
577  // This is quite trivial to do a bilerp on.
578  vz = rawConstVal();
579  break;
580  }
581 
582  default:
583  {
584  UT_VoxelTileCompress<T> *engine;
585 
586  engine = getCompressionEngine(myCompressionType);
587  // Lerp x:x+1, y, z
588  if (AXIS2D != 0)
589  vx = lerpValues(engine->getValue(*this, x, y, z),
590  engine->getValue(*this, x+1, y, z),
591  fx);
592  else
593  vx = engine->getValue(*this, x, y, z);
594 
595  if (AXIS2D != 1)
596  {
597  // Lerp x:x+1, y+1, z
598  if (AXIS2D != 0)
599  vx1= lerpValues(engine->getValue(*this, x, y+1, z),
600  engine->getValue(*this, x+1, y+1, z),
601  fx);
602  else
603  vx1 = engine->getValue(*this, x, y+1, z);
604  // Lerp x:x+1, y:y+1, z
605  vy = lerpValues(vx, vx1, fy);
606  }
607  else
608  vy = vx;
609 
610  if (AXIS2D != 2)
611  {
612  // Lerp x:x+1, y, z+1
613  if (AXIS2D != 0)
614  vx = lerpValues(engine->getValue(*this, x, y, z+1),
615  engine->getValue(*this, x+1, y, z+1),
616  fx);
617  else
618  vx = engine->getValue(*this, x, y, z+1);
619 
620  if (AXIS2D != 1)
621  {
622  // Lerp x:x+1, y+1, z+1
623  if (AXIS2D != 0)
624  vx1= lerpValues(engine->getValue(*this, x, y+1, z+1),
625  engine->getValue(*this, x+1, y+1, z+1),
626  fx);
627  else
628  vx1 = engine->getValue(*this, x, y+1, z+1);
629  // Lerp x:x+1, y:y+1, z+1
630  vy1 = lerpValues(vx, vx1, fy);
631  }
632  else
633  vy1 = vx;
634 
635  // Lerp x:x+1, y:y+1, z:z+1
636  vz = lerpValues(vy, vy1, fz);
637  }
638  else
639  vz = vy;
640  break;
641  }
642  }
643 
644  return vz;
645 }
646 
647 template <typename T>
648 bool
649 UT_VoxelTile<T>::extractSample(int x, int y, int z, T *sample) const
650 {
651  switch (myCompressionType)
652  {
653  case COMPRESS_RAW:
654  {
655  T *data = (T *) myData;
656  int offset = (z * myRes[1] + y) * myRes[0] + x;
657  int yinc = myRes[0];
658  int zinc = myRes[0] * myRes[1];
659 
660  sample[0] = data[offset];
661  sample[1] = data[offset+1];
662  sample[2+0] = data[offset+yinc];
663  sample[2+1] = data[offset+yinc+1];
664  sample[4+0] = data[offset+zinc];
665  sample[4+1] = data[offset+zinc+1];
666  sample[4+2+0] = data[offset+zinc+yinc];
667  sample[4+2+1] = data[offset+zinc+yinc+1];
668  break;
669  }
670  case COMPRESS_RAWFULL:
671  {
672  T *data = (T *) myData;
673  int offset = (z * TILESIZE + y) * TILESIZE + x;
674  int yinc = TILESIZE;
675  int zinc = TILESIZE * TILESIZE;
676 
677  sample[0] = data[offset];
678  sample[1] = data[offset+1];
679  sample[2+0] = data[offset+yinc];
680  sample[2+1] = data[offset+yinc+1];
681  sample[4+0] = data[offset+zinc];
682  sample[4+1] = data[offset+zinc+1];
683  sample[4+2+0] = data[offset+zinc+yinc];
684  sample[4+2+1] = data[offset+zinc+yinc+1];
685  break;
686  }
687  case COMPRESS_FPREAL16:
688  {
689  fpreal16 *data = (fpreal16 *) myData;
690  int offset = (z * myRes[1] + y) * myRes[0] + x;
691  int yinc = myRes[0];
692  int zinc = myRes[0] * myRes[1];
693 
694  sample[0] = data[offset];
695  sample[1] = data[offset+1];
696  sample[2+0] = data[offset+yinc];
697  sample[2+1] = data[offset+yinc+1];
698  sample[4+0] = data[offset+zinc];
699  sample[4+1] = data[offset+zinc+1];
700  sample[4+2+0] = data[offset+zinc+yinc];
701  sample[4+2+1] = data[offset+zinc+yinc+1];
702  break;
703  }
704  case COMPRESS_CONSTANT:
705  {
706  sample[0] = rawConstVal();
707  return true;
708  }
709 
710  default:
711  {
712  UT_VoxelTileCompress<T> *engine;
713 
714  engine = getCompressionEngine(myCompressionType);
715  // Lerp x:x+1, y, z
716  sample[0] = engine->getValue(*this, x, y, z);
717  sample[1] = engine->getValue(*this, x+1, y, z);
718  sample[2+0] = engine->getValue(*this, x, y+1, z);
719  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
720  sample[4+0] = engine->getValue(*this, x, y, z+1);
721  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
722  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
723  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
724  break;
725  }
726  }
727  return false;
728 }
729 
730 template <typename T>
731 bool
732 UT_VoxelTile<T>::extractSamplePlus(int x, int y, int z, T *sample) const
733 {
734  switch (myCompressionType)
735  {
736  case COMPRESS_RAW:
737  {
738  T *data = (T *) myData;
739  int offset = (z * myRes[1] + y) * myRes[0] + x;
740  int yinc = myRes[0];
741  int zinc = myRes[0] * myRes[1];
742 
743  sample[0] = data[offset-1];
744  sample[1] = data[offset+1];
745  sample[2+0] = data[offset-yinc];
746  sample[2+1] = data[offset+yinc];
747  sample[4+0] = data[offset-zinc];
748  sample[4+1] = data[offset+zinc];
749  sample[6] = data[offset];
750  break;
751  }
752  case COMPRESS_RAWFULL:
753  {
754  T *data = (T *) myData;
755  int offset = (z * TILESIZE + y) * TILESIZE + x;
756  int yinc = TILESIZE;
757  int zinc = TILESIZE * TILESIZE;
758 
759  sample[0] = data[offset-1];
760  sample[1] = data[offset+1];
761  sample[2+0] = data[offset-yinc];
762  sample[2+1] = data[offset+yinc];
763  sample[4+0] = data[offset-zinc];
764  sample[4+1] = data[offset+zinc];
765  sample[6] = data[offset];
766  break;
767  }
768  case COMPRESS_FPREAL16:
769  {
770  fpreal16 *data = (fpreal16 *) myData;
771  int offset = (z * myRes[1] + y) * myRes[0] + x;
772  int yinc = myRes[0];
773  int zinc = myRes[0] * myRes[1];
774 
775  sample[0] = data[offset-1];
776  sample[1] = data[offset+1];
777  sample[2+0] = data[offset-yinc];
778  sample[2+1] = data[offset+yinc];
779  sample[4+0] = data[offset-zinc];
780  sample[4+1] = data[offset+zinc];
781  sample[6] = data[offset];
782  break;
783  }
784  case COMPRESS_CONSTANT:
785  {
786  sample[0] = rawConstVal();
787  return true;
788  }
789 
790  default:
791  {
792  UT_VoxelTileCompress<T> *engine;
793 
794  engine = getCompressionEngine(myCompressionType);
795  // Lerp x:x+1, y, z
796  sample[0] = engine->getValue(*this, x-1, y, z);
797  sample[1] = engine->getValue(*this, x+1, y, z);
798  sample[2+0] = engine->getValue(*this, x, y-1, z);
799  sample[2+1] = engine->getValue(*this, x, y+1, z);
800  sample[4+0] = engine->getValue(*this, x, y, z+1);
801  sample[4+1] = engine->getValue(*this, x, y, z-1);
802  sample[6] = engine->getValue(*this, x, y, z);
803  break;
804  }
805  }
806  return false;
807 }
808 
809 template <typename T>
810 bool
811 UT_VoxelTile<T>::extractSampleCube(int x, int y, int z, T *sample) const
812 {
813  switch (myCompressionType)
814  {
815  case COMPRESS_RAW:
816  {
817  T *data = (T *) myData;
818  int offset = (z * myRes[1] + y) * myRes[0] + x;
819  int yinc = myRes[0];
820  int zinc = myRes[0] * myRes[1];
821  int sampidx = 0;
822 
823  offset -= zinc;
824  for (int dz = -1; dz <= 1; dz++)
825  {
826  offset -= yinc;
827  for (int dy = -1; dy <= 1; dy++)
828  {
829  sample[sampidx] = data[offset-1];
830  sample[sampidx+1] = data[offset];
831  sample[sampidx+2] = data[offset+1];
832  sampidx += 3;
833  offset += yinc;
834  }
835  offset -= yinc * 3;
836  offset += zinc;
837  }
838  break;
839  }
840  case COMPRESS_RAWFULL:
841  {
842  T *data = (T *) myData;
843  int offset = (z * TILESIZE + y) * TILESIZE + x;
844  int yinc = TILESIZE;
845  int zinc = TILESIZE * TILESIZE;
846  int sampidx = 0;
847 
848  offset -= zinc;
849  for (int dz = -1; dz <= 1; dz++)
850  {
851  offset -= yinc;
852  for (int dy = -1; dy <= 1; dy++)
853  {
854  sample[sampidx] = data[offset-1];
855  sample[sampidx+1] = data[offset];
856  sample[sampidx+2] = data[offset+1];
857  sampidx += 3;
858  offset += yinc;
859  }
860  offset -= yinc * 3;
861  offset += zinc;
862  }
863  break;
864  }
865  case COMPRESS_FPREAL16:
866  {
867  fpreal16 *data = (fpreal16 *) myData;
868  int offset = (z * myRes[1] + y) * myRes[0] + x;
869  int yinc = myRes[0];
870  int zinc = myRes[0] * myRes[1];
871  int sampidx = 0;
872 
873  offset -= zinc;
874  for (int dz = -1; dz <= 1; dz++)
875  {
876  offset -= yinc;
877  for (int dy = -1; dy <= 1; dy++)
878  {
879  sample[sampidx] = data[offset-1];
880  sample[sampidx+1] = data[offset];
881  sample[sampidx+2] = data[offset+1];
882  sampidx += 3;
883  offset += yinc;
884  }
885  offset -= yinc * 3;
886  offset += zinc;
887  }
888  break;
889  }
890  case COMPRESS_CONSTANT:
891  {
892  sample[0] = rawConstVal();
893  return true;
894  }
895 
896  default:
897  {
898  UT_VoxelTileCompress<T> *engine;
899 
900  engine = getCompressionEngine(myCompressionType);
901  int sampidx = 0;
902  // Lerp x:x+1, y, z
903  for (int dz = -1; dz <= 1; dz++)
904  {
905  for (int dy = -1; dy <= 1; dy++)
906  {
907  for (int dx = -1; dx <= 1; dx++)
908  {
909  sample[sampidx++] = engine->getValue(*this, x+dx, y+dy, z+dz);
910  }
911  }
912  }
913  break;
914  }
915  }
916  return false;
917 }
918 
919 template <typename T>
920 template <int AXIS2D>
921 bool
922 UT_VoxelTile<T>::extractSampleAxis(int x, int y, int z, T *sample) const
923 {
924  switch (myCompressionType)
925  {
926  case COMPRESS_RAW:
927  {
928  T *data = (T *) myData;
929  int offset = (z * myRes[1] + y) * myRes[0] + x;
930  int yinc = myRes[0];
931  int zinc = myRes[0] * myRes[1];
932 
933  sample[0] = data[offset];
934  if (AXIS2D != 0)
935  sample[1] = data[offset+1];
936  if (AXIS2D != 1)
937  {
938  sample[2+0] = data[offset+yinc];
939  if (AXIS2D != 0)
940  sample[2+1] = data[offset+yinc+1];
941  }
942  if (AXIS2D != 2)
943  {
944  sample[4+0] = data[offset+zinc];
945  if (AXIS2D != 0)
946  sample[4+1] = data[offset+zinc+1];
947  if (AXIS2D != 1)
948  {
949  sample[4+2+0] = data[offset+zinc+yinc];
950  if (AXIS2D != 0)
951  sample[4+2+1] = data[offset+zinc+yinc+1];
952  }
953  }
954  break;
955  }
956  case COMPRESS_RAWFULL:
957  {
958  T *data = (T *) myData;
959  int offset = (z * TILESIZE + y) * TILESIZE + x;
960  int yinc = TILESIZE;
961  int zinc = TILESIZE * TILESIZE;
962 
963  sample[0] = data[offset];
964  if (AXIS2D != 0)
965  sample[1] = data[offset+1];
966  if (AXIS2D != 1)
967  {
968  sample[2+0] = data[offset+yinc];
969  if (AXIS2D != 0)
970  sample[2+1] = data[offset+yinc+1];
971  }
972  if (AXIS2D != 2)
973  {
974  sample[4+0] = data[offset+zinc];
975  if (AXIS2D != 0)
976  sample[4+1] = data[offset+zinc+1];
977  if (AXIS2D != 1)
978  {
979  sample[4+2+0] = data[offset+zinc+yinc];
980  if (AXIS2D != 0)
981  sample[4+2+1] = data[offset+zinc+yinc+1];
982  }
983  }
984  break;
985  }
986  case COMPRESS_FPREAL16:
987  {
988  fpreal16 *data = (fpreal16 *) myData;
989  int offset = (z * myRes[1] + y) * myRes[0] + x;
990  int yinc = myRes[0];
991  int zinc = myRes[0] * myRes[1];
992 
993  sample[0] = data[offset];
994  if (AXIS2D != 0)
995  sample[1] = data[offset+1];
996  if (AXIS2D != 1)
997  {
998  sample[2+0] = data[offset+yinc];
999  if (AXIS2D != 0)
1000  sample[2+1] = data[offset+yinc+1];
1001  }
1002  if (AXIS2D != 2)
1003  {
1004  sample[4+0] = data[offset+zinc];
1005  if (AXIS2D != 0)
1006  sample[4+1] = data[offset+zinc+1];
1007  if (AXIS2D != 1)
1008  {
1009  sample[4+2+0] = data[offset+zinc+yinc];
1010  if (AXIS2D != 0)
1011  sample[4+2+1] = data[offset+zinc+yinc+1];
1012  }
1013  }
1014  break;
1015  }
1016  case COMPRESS_CONSTANT:
1017  {
1018  sample[0] = rawConstVal();
1019  return true;
1020  }
1021 
1022  default:
1023  {
1024  UT_VoxelTileCompress<T> *engine;
1025 
1026  engine = getCompressionEngine(myCompressionType);
1027  // Lerp x:x+1, y, z
1028  sample[0] = engine->getValue(*this, x, y, z);
1029  if (AXIS2D != 0)
1030  sample[1] = engine->getValue(*this, x+1, y, z);
1031  if (AXIS2D != 1)
1032  {
1033  sample[2+0] = engine->getValue(*this, x, y+1, z);
1034  if (AXIS2D != 0)
1035  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
1036  }
1037  if (AXIS2D != 2)
1038  {
1039  sample[4+0] = engine->getValue(*this, x, y, z+1);
1040  if (AXIS2D != 0)
1041  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
1042  if (AXIS2D != 1)
1043  {
1044  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
1045  if (AXIS2D != 0)
1046  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
1047  }
1048  }
1049  break;
1050  }
1051  }
1052  return false;
1053 }
1054 
#if 0
/// Disabled SIMD variant of lerp(): gathers the eight cell corners into
/// two v4uf vectors and reduces with three lerp (madd) stages, using
/// swizzles to pair up partial results.
template <typename T>
T
UT_VoxelTile<T>::lerp(v4uf frac, int x, int y, int z) const
{
    v4uf	a, b;

    switch (myCompressionType)
    {
	case COMPRESS_RAW:
	case COMPRESS_RAWFULL:
	{
	    T		*data = (T *) myData;
	    int		 offset = (z * myRes[1] + y) * myRes[0] + x;
	    int		 yinc = myRes[0];
	    int		 zinc = myRes[0] * myRes[1];

	    a = v4uf( data[offset],
		      data[offset+zinc],
		      data[offset+yinc],
		      data[offset+yinc+zinc] );
	    b = v4uf( data[offset+1],
		      data[offset+zinc+1],
		      data[offset+yinc+1],
		      data[offset+yinc+zinc+1] );
	    break;
	}

	case COMPRESS_CONSTANT:
	{
	    // This is quite trivial to do a trilerp on.
	    return rawConstVal();
	}

	default:
	{
	    UT_VoxelTileCompress<T>	*engine;

	    engine = getCompressionEngine(myCompressionType);
	    a = v4uf( engine->getValue(*this, x, y, z),
		      engine->getValue(*this, x, y, z+1),
		      engine->getValue(*this, x, y+1, z),
		      engine->getValue(*this, x, y+1, z+1) );
	    b = v4uf( engine->getValue(*this, x+1, y, z),
		      engine->getValue(*this, x+1, y, z+1),
		      engine->getValue(*this, x+1, y+1, z),
		      engine->getValue(*this, x+1, y+1, z+1) );
	    break;
	}
    }

    v4uf	fx, fy, fz;

    fx = frac.swizzle<0, 0, 0, 0>();
    fy = frac.swizzle<1, 1, 1, 1>();
    fz = frac.swizzle<2, 2, 2, 2>();

    // a = lerp(a, b, fx) along x...
    b -= a;
    a = madd(b, fx, a);

    // ...then reduce pairs along y...
    b = a.swizzle<2, 3, 0, 1>();
    b -= a;
    a = madd(b, fy, a);

    // ...and finally along z; lane 0 holds the result.
    b = a.swizzle<1, 2, 3, 0>();
    b -= a;
    a = madd(b, fz, a);

    return a[0];
}
#endif
1127 
1128 template <typename T>
1129 T *
1130 UT_VoxelTile<T>::fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
1131 {
1132  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1133  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1134 
1135  T *src;
1136  int i, xres = myRes[0];
1137 
1138  // All the directly handled types exit directly from this switch.
1139  switch (myCompressionType)
1140  {
1141  case COMPRESS_RAW:
1142  stride = 1;
1143  src = (T *)myData + (z * myRes[1] + y) * xres;
1144  if (!forcecopy)
1145  return &src[x];
1146 
1147  for (i = 0; i < xres; i++)
1148  cacheline[i] = src[i];
1149 
1150  return &cacheline[x];
1151 
1152  case COMPRESS_FPREAL16:
1153  {
1154  fpreal16 *src;
1155 
1156  stride = 1;
1157  src = (fpreal16 *)myData + (z * myRes[1] + y) * xres;
1158 
1159  for (i = 0; i < xres; i++)
1160  cacheline[i] = src[i];
1161 
1162  return &cacheline[x];
1163  }
1164 
1165 
1166  case COMPRESS_CONSTANT:
1167  src = rawConstData();
1168  if (!forcecopy && !strideofone)
1169  {
1170  stride = 0;
1171  return src;
1172  }
1173  stride = 1;
1174  // Fill out a constant value
1175  for (i = 0; i < xres; i++)
1176  cacheline[i] = *src;
1177 
1178  return &cacheline[x];
1179 
1180 
1181  case COMPRESS_RAWFULL:
1182  stride = 1;
1183  src = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1184  if (!forcecopy)
1185  return &src[x];
1186 
1187  for (i = 0; i < xres; i++)
1188  cacheline[i] = src[i];
1189 
1190  return &cacheline[x];
1191  }
1192 
1193  // By default use the compression engine.
1194  UT_VoxelTileCompress<T> *engine;
1195 
1196  engine = getCompressionEngine(myCompressionType);
1197 
1198  // We could add support for a direct cacheline fill to accelerate
1199  // this case as well.
1200  stride = 1;
1201  for (i = 0; i < xres; i++)
1202  cacheline[i] = engine->getValue(*this, i, y, z);
1203 
1204  return &cacheline[x];
1205 }
1206 
1207 template <typename T>
1208 void
1209 UT_VoxelTile<T>::writeCacheLine(T *cacheline, int y, int z)
1210 {
1211  UT_ASSERT_P(y >= 0 && z >= 0);
1212  UT_ASSERT_P(y < myRes[1] && z < myRes[2]);
1213 
1214  T *dst, value;
1215  int i, xres = myRes[0];
1216 
1217  // All the directly handled types exit directly from this switch.
1218  switch (myCompressionType)
1219  {
1220  case COMPRESS_RAW:
1221  dst = (T *)myData + (z * myRes[1] + y) * xres;
1222  for (i = 0; i < xres; i++)
1223  *dst++ = *cacheline++;
1224  return;
1225 
1226  case COMPRESS_CONSTANT:
1227  value = rawConstVal();
1228  for (i = 0; i < xres; i++)
1229  if (cacheline[i] != value)
1230  break;
1231  // If everything was equal, our write is trivial.
1232  if (i == xres)
1233  return;
1234 
1235  break;
1236 
1237  case COMPRESS_RAWFULL:
1238  dst = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1239  for (i = 0; i < TILESIZE; i++)
1240  *dst++ = *cacheline++;
1241 
1242  return;
1243  }
1244 
1245  // Switch back to invoking writeThrough. Ideally we'd have
1246  // a version that can handle a whole cache line at once
1247  for (i = 0; i < xres; i++)
1248  if (!writeThrough(i, y, z, cacheline[i]))
1249  break;
1250 
1251  // Determine if we failed to write everything through
1252  if (i != xres)
1253  {
1254  // Uncompress and reinvoke ourselves.
1255  uncompress();
1256  writeCacheLine(cacheline, y, z);
1257  }
1258 }
1259 
1260 template <typename T>
1261 void
1262 UT_VoxelTile<T>::copyFragment(int dstx, int dsty, int dstz,
1263  const UT_VoxelTile<T> &srctile,
1264  int srcx, int srcy, int srcz)
1265 {
1266  int w = SYSmin(xres() - dstx, srctile.xres() - srcx);
1267  int h = SYSmin(yres() - dsty, srctile.yres() - srcy);
1268  int d = SYSmin(zres() - dstz, srctile.zres() - srcz);
1269 
1270 #if 1
1271  if (srctile.isSimpleCompression())
1272  {
1273  T *dst;
1274  const T *src;
1275  int srcinc;
1276 
1277  src = srctile.rawData();
1278  srcinc = srctile.isConstant() ? 0 : 1;
1279 
1280  // Ensure we are easy to write to.
1281  uncompress();
1282 
1283  dst = rawData();
1284  dst += dstx + (dsty + dstz*yres())*xres();
1285 
1286  if (srcinc)
1287  src += srcx + (srcy + srcz*srctile.yres())*srctile.xres();
1288 
1289  for (int z = 0; z < d; z++)
1290  {
1291  for (int y = 0; y < h; y++)
1292  {
1293  if (srcinc)
1294  {
1295  for (int x = 0; x < w; x++)
1296  dst[x] = src[x];
1297  }
1298  else
1299  {
1300  for (int x = 0; x < w; x++)
1301  dst[x] = *src;
1302  }
1303  dst += xres();
1304  if (srcinc)
1305  src += srctile.xres();
1306  }
1307  dst += (yres()-h) * xres();
1308  if (srcinc)
1309  src += (srctile.yres() - h) * srctile.xres();
1310  }
1311 
1312  return;
1313  }
1314 #endif
1315 
1316  // Fail safe approach.
1317  for (int z = 0; z < d; z++)
1318  for (int y = 0; y < h; y++)
1319  for (int x = 0; x < w; x++)
1320  {
1321  setValue(dstx+x, dsty+y, dstz+z,
1322  srctile(srcx+x, srcy+y, srcz+z));
1323  }
1324 }
1325 
1326 template <typename T>
1327 template <typename S>
1328 void
1330 {
1331  int w = xres();
1332  int h = yres();
1333  int d = zres();
1334 
1335  if (isSimpleCompression())
1336  {
1337  const T *src;
1338  int srcinc;
1339 
1340  src = rawData();
1341  srcinc = isConstant() ? 0 : 1;
1342 
1343  if (stride == 1)
1344  {
1345  if (srcinc == 1)
1346  {
1347  // Super trivial!
1348  for (int i = 0; i < w * h * d; i++)
1349  {
1350  *dst++ = T(*src++);
1351  }
1352  }
1353  else
1354  {
1355  // Constant, also trivial!
1356  T cval = T(*src);
1357 
1358  for (int i = 0; i < w * h * d; i++)
1359  {
1360  *dst++ = cval;
1361  }
1362  }
1363  }
1364  else
1365  {
1366  for (int z = 0; z < d; z++)
1367  {
1368  for (int y = 0; y < h; y++)
1369  {
1370  if (srcinc)
1371  {
1372  for (int x = 0; x < w; x++)
1373  {
1374  *dst = S(src[x]);
1375  dst += stride;
1376  }
1377  }
1378  else
1379  {
1380  for (int x = 0; x < w; x++)
1381  {
1382  *dst = S(*src);
1383  dst += stride;
1384  }
1385  }
1386  if (srcinc)
1387  src += w;
1388  }
1389  }
1390  }
1391 
1392  return;
1393  }
1394 
1395  // Fail safe approach.
1396  for (int z = 0; z < d; z++)
1397  for (int y = 0; y < h; y++)
1398  for (int x = 0; x < w; x++)
1399  {
1400  *dst = S((*this)(x, y, z));
1401  dst += stride;
1402  }
1403 }
1404 
1405 template <typename T>
1406 template <typename S>
1407 void
1408 UT_VoxelTile<T>::writeData(const S *srcdata, int srcstride)
1409 {
1410  int w = xres();
1411  int h = yres();
1412  int d = zres();
1413  int i, n = w * h * d;
1414 
1415  // Check if src is constant
1416  S compare = srcdata[0];
1417  int srcoff = srcstride;
1418 
1419  if (srcstride == 0)
1420  {
1421  // Trivially constant
1422  makeConstant(T(compare));
1423  return;
1424  }
1425 
1426  for (i = 1; i < n; i++)
1427  {
1428  if (srcdata[srcoff] != compare)
1429  break;
1430  srcoff += srcstride;
1431  }
1432 
1433  if (i == n)
1434  {
1435  // Constant source!
1436  makeConstant(compare);
1437  return;
1438  }
1439 
1440  // Create a raw tile, expanding constants
1441  uncompress();
1442 
1443  if (srcstride == 1)
1444  {
1445  T *dst = rawData();
1446  for (i = 0; i < n; i++)
1447  {
1448  *dst++ = T(*srcdata++);
1449  }
1450  }
1451  else
1452  {
1453  T *dst = rawData();
1454 
1455  srcoff = 0;
1456  for (i = 0; i < n; i++)
1457  {
1458  dst[i] = T(srcdata[srcoff]);
1459  srcoff += srcstride;
1460  }
1461  }
1462 }
1463 
1464 template <typename T>
1465 void
1466 UT_VoxelTile<T>::setValue(int x, int y, int z, T t)
1467 {
1468  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1469  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1470 
1471  // Determine if assignment is compatible with current
1472  // compression technique.
1473  if (writeThrough(x, y, z, t))
1474  {
1475  return;
1476  }
1477  // Can't write to our current type of tile with the
1478  // given value, so abandon.
1479  uncompress();
1480 
1481  // Attempt to write through again. This must succeed
1482  // as we should now be uncompressed.
1483  UT_VERIFY_P(writeThrough(x, y, z, t));
1484 }
1485 
1486 template <typename T>
1487 void
1489 {
1490  switch (myCompressionType)
1491  {
1492  case COMPRESS_RAW:
1493  // Trivial.
1494  return;
1495 
1496  case COMPRESS_CONSTANT:
1497  {
1498  // Must expand the tile!
1499  T cval;
1500  int i, n;
1501 
1502  cval = rawConstVal();
1503  freeData();
1504 
1505  myCompressionType = COMPRESS_RAW;
1506 
1507  if (myRes[0] == TILESIZE &&
1508  myRes[1] == TILESIZE)
1509  {
1510  // Flag that we can do fast lookups on this tile.
1511  myCompressionType = COMPRESS_RAWFULL;
1512  }
1513 
1514  n = myRes[0] * myRes[1] * myRes[2];
1515  myData = UT_VOXEL_ALLOC(sizeof(T) * n);
1516 
1517  for (i = 0; i < n; i++)
1518  {
1519  ((T *)myData)[i] = cval;
1520  }
1521  return;
1522  }
1523  case COMPRESS_RAWFULL:
1524  {
1525  T *raw;
1526  int x, y, z, i, n;
1527 
1528  if (myRes[0] == TILESIZE &&
1529  myRes[1] == TILESIZE)
1530  {
1531  // Trivial
1532  return;
1533  }
1534 
1535  // Need to contract to the actual compact size.
1536  n = myRes[0] * myRes[1] * myRes[2];
1537  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1538  i = 0;
1539  for (z = 0; z < myRes[2]; z++)
1540  {
1541  for (y = 0; y < myRes[1]; y++)
1542  {
1543  for (x = 0; x < myRes[0]; x++)
1544  {
1545  raw[i++] = ((T *)myData)[x+(y+z*TILESIZE)*TILESIZE];
1546  }
1547  }
1548  }
1549 
1550  freeData();
1551  myCompressionType = COMPRESS_RAW;
1552  myData = raw;
1553 
1554  return;
1555  }
1556 
1557  case COMPRESS_FPREAL16:
1558  {
1559  T *raw;
1560  int x, y, z, i, n;
1561  fpreal16 *src = (fpreal16 *) myData;
1562 
1563  n = myRes[0] * myRes[1] * myRes[2];
1564  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1565  i = 0;
1566  for (z = 0; z < myRes[2]; z++)
1567  {
1568  for (y = 0; y < myRes[1]; y++)
1569  {
1570  for (x = 0; x < myRes[0]; x++)
1571  {
1572  raw[i] = src[i];
1573  i++;
1574  }
1575  }
1576  }
1577  freeData();
1578  myCompressionType = COMPRESS_RAW;
1579  myData = raw;
1580  return;
1581  }
1582  }
1583 
1584  // Use the compression engine.
1585  UT_VoxelTileCompress<T> *engine;
1586 
1587  engine = getCompressionEngine(myCompressionType);
1588 
1589  // We use the read ability to set our values.
1590  int x, y, z, i;
1591  T *raw;
1592 
1593  raw = (T *) UT_VOXEL_ALLOC(sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
1594  i = 0;
1595  for (z = 0; z < myRes[2]; z++)
1596  {
1597  for (y = 0; y < myRes[1]; y++)
1598  {
1599  for (x = 0; x < myRes[0]; x++)
1600  {
1601  raw[i++] = engine->getValue(*this, x, y, z);
1602  }
1603  }
1604  }
1605 
1606  freeData();
1607 
1608  // Now copy in the result
1609  myCompressionType = COMPRESS_RAW;
1610  if (myRes[0] == TILESIZE &&
1611  myRes[1] == TILESIZE)
1612  {
1613  // Flag that we can do fast lookups on this tile.
1614  myCompressionType = COMPRESS_RAWFULL;
1615  }
1616 
1617  myData = raw;
1618 }
1619 
1620 template <typename T>
1621 void
1623 {
1624  T *raw;
1625  int x, y, z, i;
1626 
1627  if (myCompressionType == COMPRESS_RAWFULL)
1628  return;
1629 
1630  uncompress();
1631 
1632  UT_ASSERT(myCompressionType == COMPRESS_RAW);
1633 
1634  // The RAWFULL format only differs from RAW when the tile dimensions
1635  // are smaller than the maximum tile size.
1636  if (myRes[0] < TILESIZE || myRes[1] < TILESIZE)
1637  {
1638  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
1639  i = 0;
1640  for (z = 0; z < myRes[2]; z++)
1641  {
1642  for (y = 0; y < myRes[1]; y++)
1643  {
1644  for (x = 0; x < myRes[0]; x++)
1645  {
1646  raw[x+(y+z*TILESIZE)*TILESIZE] = ((T *)myData)[i++];
1647  }
1648  }
1649  }
1650  freeData();
1651  myData = raw;
1652  }
1653  myCompressionType = COMPRESS_RAWFULL;
1654 }
1655 
1656 template <typename T>
1657 void
1659 {
1660  if (isRaw() || isRawFull())
1661  return;
1662 
1663  freeData();
1664 
1665  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
1666  myCompressionType = COMPRESS_RAWFULL;
1667  else
1668  myCompressionType = COMPRESS_RAW;
1669 
1670  myData = UT_VOXEL_ALLOC(sizeof(T) * numVoxels());
1671 }
1672 
1673 template <typename T>
1674 void
1676 {
1677  float irx, iry, irz;
1678 
1679  irx = 1.0 / myRes[0];
1680  iry = 1.0 / myRes[1];
1681  irz = 1.0 / myRes[2];
1682  switch (myCompressionType)
1683  {
1684  case COMPRESS_RAW:
1685  {
1686  int i;
1687  const T *data = (const T*) myData;
1688 
1689  i = 0;
1690  T zavg;
1691  zavg = 0;
1692  for (int z = 0; z < myRes[2]; z++)
1693  {
1694  T yavg;
1695  yavg = 0;
1696  for (int y = 0; y < myRes[1]; y++)
1697  {
1698  T xavg;
1699  xavg = 0;
1700  for (int x = 0; x < myRes[0]; x++)
1701  {
1702  xavg += data[i++];
1703  }
1704  xavg *= irx;
1705  yavg += xavg;
1706  }
1707  yavg *= iry;
1708  zavg += yavg;
1709  }
1710  zavg *= irz;
1711 
1712  avg = zavg;
1713  return;
1714  }
1715 
1716  case COMPRESS_FPREAL16:
1717  {
1718  int i;
1719  const fpreal16 *data = (fpreal16 *) myData;
1720 
1721  i = 0;
1722  fpreal16 zavg = 0;
1723  for (int z = 0; z < myRes[2]; z++)
1724  {
1725  fpreal16 yavg = 0;
1726  for (int y = 0; y < myRes[1]; y++)
1727  {
1728  fpreal16 xavg = 0;
1729  for (int x = 0; x < myRes[0]; x++)
1730  {
1731  xavg += data[i++];
1732  }
1733  xavg *= irx;
1734  yavg += xavg;
1735  }
1736  yavg *= iry;
1737  zavg += yavg;
1738  }
1739  zavg *= irz;
1740 
1741  avg = zavg;
1742  return;
1743  }
1744 
1745  case COMPRESS_CONSTANT:
1746  avg = rawConstVal();
1747  return;
1748 
1749  case COMPRESS_RAWFULL:
1750  {
1751  int offset;
1752  const T *data = (const T*) myData;
1753 
1754  offset = 0;
1755  T zavg;
1756  zavg = 0;
1757  for (int z = 0; z < myRes[2]; z++)
1758  {
1759  T yavg;
1760  yavg = 0;
1761  for (int y = 0; y < myRes[1]; y++)
1762  {
1763  T xavg;
1764  xavg = 0;
1765  for (int x = 0; x < myRes[0]; x++)
1766  {
1767  xavg += data[x+offset];
1768  }
1769  xavg *= irx;
1770  yavg += xavg;
1771  offset += TILESIZE;
1772  }
1773  yavg *= iry;
1774  zavg += yavg;
1775  }
1776  zavg *= irz;
1777 
1778  avg = zavg;
1779  return;
1780  }
1781 
1782  default:
1783  {
1784  T zavg;
1785  zavg = 0;
1786  for (int z = 0; z < myRes[2]; z++)
1787  {
1788  T yavg;
1789  yavg = 0;
1790  for (int y = 0; y < myRes[1]; y++)
1791  {
1792  T xavg;
1793  xavg = 0;
1794  for (int x = 0; x < myRes[0]; x++)
1795  {
1796  xavg += (*this)(x, y, z);
1797  }
1798  xavg *= irx;
1799  yavg += xavg;
1800  }
1801  yavg *= iry;
1802  zavg += yavg;
1803  }
1804  zavg *= irz;
1805 
1806  avg = zavg;
1807  return;
1808  }
1809  }
1810 }
1811 
1812 template <typename T>
1813 void
1815 {
1816  switch (myCompressionType)
1817  {
1818  case COMPRESS_RAW:
1819  {
1820  int n = myRes[0] * myRes[1] * myRes[2];
1821  int i;
1822 
1823  min = max = *(T*)myData;
1824  for (i = 1; i < n; i++)
1825  {
1826  expandMinMax( ((T*)myData)[i], min, max );
1827  }
1828  return;
1829  }
1830 
1831  case COMPRESS_FPREAL16:
1832  {
1833  int n = myRes[0] * myRes[1] * myRes[2];
1834  int i;
1835  fpreal16 *src = (fpreal16 *)myData;
1836 
1837  min = max = *src;
1838  for (i = 1; i < n; i++)
1839  {
1840  T val;
1841  val = src[i];
1842  expandMinMax( val, min, max );
1843  }
1844  return;
1845  }
1846 
1847  case COMPRESS_CONSTANT:
1848  min = max = rawConstVal();
1849  return;
1850 
1851  case COMPRESS_RAWFULL:
1852  {
1853  int x, y, z, offset;
1854 
1855  min = max = *(T*)myData;
1856  offset = 0;
1857  for (z = 0; z < myRes[2]; z++)
1858  {
1859  for (y = 0; y < myRes[1]; y++)
1860  {
1861  for (x = 0; x < myRes[0]; x++)
1862  {
1863  expandMinMax(
1864  ((T*)myData)[x+offset],
1865  min, max );
1866  }
1867  offset += TILESIZE;
1868  }
1869  }
1870  return;
1871  }
1872 
1873  default:
1874  {
1875  // Use the compression engine.
1876  UT_VoxelTileCompress<T> *engine;
1877 
1878  engine = getCompressionEngine(myCompressionType);
1879 
1880  engine->findMinMax(*this, min, max);
1881  return;
1882  }
1883  }
1884 }
1885 
1886 template <typename T>
1887 bool
1889 {
1890  switch (myCompressionType)
1891  {
1892  case COMPRESS_RAW:
1893  case COMPRESS_RAWFULL:
1894  {
1895  int n = myRes[0] * myRes[1] * myRes[2];
1896  int i;
1897 
1898  for (i = 0; i < n; i++)
1899  {
1900  if (SYSisNan( ((T*)myData)[i] ))
1901  return true;
1902  }
1903  return false;
1904  }
1905 
1906  case COMPRESS_FPREAL16:
1907  {
1908  return false;
1909  }
1910 
1911  case COMPRESS_CONSTANT:
1912  if (SYSisNan(rawConstVal()))
1913  return true;
1914  return false;
1915 
1916  default:
1917  {
1918  // Use the compression engine.
1919  UT_VoxelTileCompress<T> *engine;
1920  int x, y, z;
1921 
1922  engine = getCompressionEngine(myCompressionType);
1923 
1924  for (z = 0; z < myRes[2]; z++)
1925  for (y = 0; y < myRes[1]; y++)
1926  for (x = 0; x < myRes[0]; x++)
1927  if (SYSisNan(engine->getValue(*this, x, y, z)))
1928  {
1929  return true;
1930  }
1931 
1932  return false;
1933  }
1934  }
1935 }
1936 
1937 template <typename T>
1938 bool
1940 {
1941  T min, max;
1942  bool losslessonly = (options.myQuantizeTol == 0.0);
1943  int i;
1944  UT_VoxelTileCompress<T> *engine;
1945 
1946  // This is as easy as it gets.
1947  if (myCompressionType == COMPRESS_CONSTANT)
1948  return false;
1949 
1950  findMinMax(min, max);
1951 
1952  // See if we can be made into a constant tile.
1953  if (dist(min, max) <= options.myConstantTol)
1954  {
1955  // Ignore if we are already constant.
1956  if (myCompressionType == COMPRESS_CONSTANT)
1957  return false;
1958 
1959  // We are fully constant!
1960  if (min != max)
1961  {
1962  T avg;
1963  findAverage(avg);
1964  makeConstant(avg);
1965  }
1966  else
1967  {
1968  makeConstant(min);
1969  }
1970  return true;
1971  }
1972 
1973  bool result = false;
1974 
1975  if (options.myAllowFP16)
1976  {
1977  if (myCompressionType == COMPRESS_RAW ||
1978  myCompressionType == COMPRESS_RAWFULL)
1979  {
1980  makeFpreal16();
1981  result = true;
1982  }
1983  }
1984 
1985  for (i = 0; i < getCompressionEngines().entries(); i++)
1986  {
1987  engine = getCompressionEngines()(i);
1988 
1989  // Ignore possibly expensive lossy compressions.
1990  if (losslessonly && !engine->isLossless())
1991  continue;
1992 
1993  if (engine->tryCompress(*this, options, min, max))
1994  {
1995  myCompressionType = i + COMPRESS_ENGINE;
1996  result = true;
1997  // We keep testing in case another compression engine
1998  // can get a better number...
1999  }
2000  }
2001 
2002  // If we are RAW compress, check to see if we could become
2003  // RAWFULL for faster access.
2004  if (myCompressionType == COMPRESS_RAW)
2005  {
2006  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
2007  {
2008  myCompressionType = COMPRESS_RAWFULL;
2009  }
2010  }
2011 
2012  // No suitable compression found.
2013  return result;
2014 }
2015 
2016 template <typename T>
2017 void
2019 {
2020  if (!isConstant())
2021  {
2022  freeData();
2023 
2024  if (!inlineConstant())
2025  myData = UT_VOXEL_ALLOC(sizeof(T));
2026  }
2027 
2028  myCompressionType = COMPRESS_CONSTANT;
2029  *rawConstData() = t;
2030 }
2031 
2032 template <typename T>
2033 void
2035 {
2036  if (isConstant())
2037  {
2038  return;
2039  }
2040 
2041  if (myCompressionType == COMPRESS_FPREAL16)
2042  return;
2043 
2044  // Get our new data.
2045  int len = myRes[2] * myRes[1] * myRes[0];
2046  fpreal16 *data = (fpreal16 *)UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2047 
2048  if (myCompressionType == COMPRESS_RAW ||
2049  myCompressionType == COMPRESS_RAWFULL)
2050  {
2051  for (int i = 0; i < len; i++)
2052  {
2053  data[i] = UTvoxelConvertFP16( ((T*)myData)[i] );
2054  }
2055  }
2056  else
2057  {
2058  // Apply any converters.
2059  int i = 0;
2060 
2061  for (int z = 0; z < myRes[2]; z++)
2062  {
2063  for (int y = 0; y < myRes[1]; y++)
2064  {
2065  for (int x = 0; x < myRes[0]; x++)
2066  {
2067  data[i++] = UTvoxelConvertFP16( (*this)(x, y, z));
2068  }
2069  }
2070  }
2071  }
2072 
2073  freeData();
2074  myData = data;
2075  myCompressionType = COMPRESS_FPREAL16;
2076 }
2077 
2078 template <typename T>
2079 int64
2080 UT_VoxelTile<T>::getMemoryUsage(bool inclusive) const
2081 {
2082  int64 mem = inclusive ? sizeof(*this) : 0;
2083  mem += getDataLength();
2084  return mem;
2085 }
2086 
2087 template <typename T>
2088 int64
2090 {
2091  exint usage;
2092 
2093  switch (myCompressionType)
2094  {
2095  case COMPRESS_RAW:
2096  usage = sizeof(T) * xres() * yres() * zres();
2097  break;
2098  case COMPRESS_FPREAL16:
2099  usage = sizeof(fpreal16) * xres() * yres() * zres();
2100  break;
2101  case COMPRESS_CONSTANT:
2102  if (inlineConstant())
2103  usage = 0;
2104  else
2105  usage = sizeof(T);
2106  break;
2107  case COMPRESS_RAWFULL:
2108  usage = sizeof(T) * TILESIZE * TILESIZE * zres();
2109  break;
2110  default:
2111  {
2112  // Use the compression engine.
2113  UT_VoxelTileCompress<T> *engine;
2114  engine = getCompressionEngine(myCompressionType);
2115  usage = engine->getDataLength(*this);
2116  break;
2117  }
2118  }
2119  return usage;
2120 }
2121 
template <typename T>
void
UT_VoxelTile<T>::weightedSum(int pstart[3], int pend[3],
                             const float *weights[3], int start[3],
                             T &result)
{
    // Accumulate into 'result' the separable weighted sum of the voxels in
    // the box [pstart, pend) (voxel-array coordinates).  weights[axis] is
    // indexed relative to start[axis]; tile-local coordinates are obtained
    // by masking with TILEMASK.
    int ix, iy, iz, i;
    int px, py, pz;                   // tile-local coordinates
    int ixstart, ixend;               // weight-index window along x
    int tstart[3];                    // box origin within this tile
    fpreal w, pw;
    T psumx, psumy;                   // per-row and per-slab partial sums

    switch (myCompressionType)
    {
        case COMPRESS_CONSTANT:
        {
            // Constant tile: the sum factors into (product of the three
            // axis weight sums) * constant.
            w = 1;
            for (i = 0; i < 3; i++)
            {
                pw = 0;
                for (ix = 0; ix < pend[i]-pstart[i]; ix++)
                    pw += weights[i][ix+pstart[i]-start[i]];
                w *= pw;
            }
            result += w * rawConstVal();
            break;
        }

        case COMPRESS_RAW:
        {
            // Packed layout: row stride is myRes[0], slab stride myRes[1].
            tstart[0] = pstart[0] & TILEMASK;
            tstart[1] = pstart[1] & TILEMASK;
            tstart[2] = pstart[2] & TILEMASK;
            ixstart = pstart[0]-start[0];
            ixend = pend[0]-start[0];
            pz = tstart[2];
            UT_ASSERT(pz < myRes[2]);
            UT_ASSERT(ixend - ixstart <= myRes[0]);
            for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
            {
                psumy = 0;
                py = tstart[1];
                UT_ASSERT(py < myRes[1]);
                for (iy = pstart[1]; iy < pend[1]; iy++, py++)
                {
                    psumx = 0;
                    px = ((pz * myRes[1]) + py) * myRes[0] + tstart[0];
                    for (ix = ixstart; ix < ixend; ix++, px++)
                    {
                        psumx += weights[0][ix]* ((T*)myData)[px];
                    }
                    psumy += weights[1][iy-start[1]] * psumx;
                }
                result += weights[2][iz-start[2]] * psumy;
            }
            break;
        }

        case COMPRESS_FPREAL16:
        {
            // Same walk as COMPRESS_RAW, widening each fp16 value to T
            // before weighting.
            fpreal16 *src = (fpreal16 *) myData;

            tstart[0] = pstart[0] & TILEMASK;
            tstart[1] = pstart[1] & TILEMASK;
            tstart[2] = pstart[2] & TILEMASK;
            ixstart = pstart[0]-start[0];
            ixend = pend[0]-start[0];
            pz = tstart[2];
            UT_ASSERT(pz < myRes[2]);
            UT_ASSERT(ixend - ixstart <= myRes[0]);
            for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
            {
                psumy = 0;
                py = tstart[1];
                UT_ASSERT(py < myRes[1]);
                for (iy = pstart[1]; iy < pend[1]; iy++, py++)
                {
                    psumx = 0;
                    px = ((pz * myRes[1]) + py) * myRes[0] + tstart[0];
                    for (ix = ixstart; ix < ixend; ix++, px++)
                    {
                        T val;
                        val = src[px];
                        psumx += weights[0][ix]* val;
                    }
                    psumy += weights[1][iy-start[1]] * psumx;
                }
                result += weights[2][iz-start[2]] * psumy;
            }
            break;
        }
        case COMPRESS_RAWFULL:
        {
            // Padded layout: fixed TILESIZE strides in x and y.
            tstart[0] = pstart[0] & TILEMASK;
            tstart[1] = pstart[1] & TILEMASK;
            tstart[2] = pstart[2] & TILEMASK;
            ixstart = pstart[0]-start[0];
            ixend = pend[0]-start[0];
            pz = tstart[2];
            for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
            {
                psumy = 0;
                py = tstart[1];
                for (iy = pstart[1]; iy < pend[1]; iy++, py++)
                {
                    psumx = 0;
                    px = ((pz * TILESIZE) + py) * TILESIZE + tstart[0];
                    for (ix = ixstart; ix < ixend; ix++, px++)
                    {
                        psumx += weights[0][ix]* ((T*)myData)[px];
                    }
                    psumy += weights[1][iy-start[1]] * psumx;
                }
                result += weights[2][iz-start[2]] * psumy;
            }
            break;
        }

        default:
        {
            // For all other compression types, we use our
            // getValue() accessor rather than trying to
            // do anything fancy.
            tstart[0] = pstart[0] & TILEMASK;
            tstart[1] = pstart[1] & TILEMASK;
            tstart[2] = pstart[2] & TILEMASK;
            ixstart = pstart[0]-start[0];
            ixend = pend[0]-start[0];
            pz = tstart[2];
            for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
            {
                psumy = 0;
                py = tstart[1];
                for (iy = pstart[1]; iy < pend[1]; iy++, py++)
                {
                    psumx = 0;
                    px = tstart[0];
                    for (ix = ixstart; ix < ixend; ix++, px++)
                    {
                        psumx += weights[0][ix] *
                                 (*this)(px, py, pz);
                    }
                    psumy += weights[1][iy-start[1]] * psumx;
                }
                result += weights[2][iz-start[2]] * psumy;
            }
            break;
        }
    }
}
2273 
2274 template <typename T>
2275 void
2277 {
2278  int i;
2279 
2280  // Repalce old copy, if any.
2281  for (i = 0; i < getCompressionEngines().entries(); i++)
2282  {
2283  if (!strcmp(engine->getName(), getCompressionEngines()(i)->getName()))
2284  {
2285  getCompressionEngines()(i) = engine;
2286  return;
2287  }
2288  }
2289 
2290  getCompressionEngines().append(engine);
2291 }
2292 
2293 template <typename T>
2294 int
2296 {
2297  int i;
2298 
2299  if (!name)
2300  return -1;
2301 
2302  for (i = 0; i < getCompressionEngines().entries(); i++)
2303  {
2304  if (!strcmp(name, getCompressionEngines()(i)->getName()))
2305  {
2306  return i + COMPRESS_ENGINE;
2307  }
2308  }
2309 
2310  return -1;
2311 }
2312 
2313 template <typename T>
2316 {
2317  index -= COMPRESS_ENGINE;
2318 
2319  return getCompressionEngines()(index);
2320 }
2321 
template <typename T>
void
UT_VoxelTile<T>::save(std::ostream &os) const
{
    // Serialize this tile: a one-byte compression-type tag followed by the
    // payload in that type's native layout.
    // Always save in our native format...
    if (myCompressionType >= COMPRESS_ENGINE)
    {
        UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);

        if (engine->canSave())
        {
            char type = myCompressionType;

            UTwrite(os, &type, 1);
            engine->save(os, *this);
            return;
        }

        // Can't save, must save as raw: decode voxel by voxel.
        char type = COMPRESS_RAW;
        T value;

        UTwrite(os, &type, 1);

        for (int z = 0; z < zres(); z++)
            for (int y = 0; y < yres(); y++)
                for (int x = 0; x < xres(); x++)
                {
                    value = (*this)(x, y, z);
                    UTwrite<T>(os, &value, 1);
                }
        return;
    }

    int len;
    char type = myCompressionType;

    if (type == COMPRESS_RAWFULL &&
        myRes[0] == TILESIZE &&
        myRes[1] == TILESIZE &&
        myRes[2] != TILESIZE)
    {
        // Forbid saving this as a raw full as that will confuse
        // older versions of Houdini.
        // (When x/y are full, RAW and RAWFULL payloads are identical, so
        // relabelling is safe.)
        type = COMPRESS_RAW;
    }

    UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);

    UTwrite(os, &type, 1);

    switch ((CompressionType) type)
    {
        case COMPRESS_RAW:
            len = myRes[2] * myRes[1] * myRes[0];
            UTwrite<T>(os, (T *) myData, len);
            break;

        case COMPRESS_FPREAL16:
            // fp16 payload is written as raw 16-bit words.
            len = myRes[2] * myRes[1] * myRes[0];
            UTwrite(os, (int16 *) myData, len);
            break;

        case COMPRESS_RAWFULL:
            // Only reached when the tile is full in all dimensions.
            len = TILESIZE * TILESIZE * TILESIZE;
            UTwrite<T>(os, (T *) myData, len);
            break;

        case COMPRESS_CONSTANT:
            UTwrite<T>(os, rawConstData(), 1);
            break;

        case COMPRESS_ENGINE:
            UT_ASSERT(!"Invalid compression type");
            break;
    }
}
2399 
2400 template <typename T>
2401 void
2403 {
2404  char type, otype;
2405  int len;
2406 
2407  is.readChar(type);
2408 
2409  // Perform the indirection to find out our native type.
2410  if (type >= 0 && type < compress.entries())
2411  {
2412  otype = type;
2413  type = compress(type);
2414  if (type == -1)
2415  {
2416  std::cerr << "Missing compression engine " << (int) otype << "\n";
2417  }
2418  }
2419 
2420  if (type >= COMPRESS_ENGINE)
2421  {
2422  freeData();
2423  myCompressionType = type;
2424 
2425  if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
2426  {
2427  // Invalid type!
2428  std::cerr << "Invalid compression engine " << (int) otype << "\n";
2429  return;
2430  }
2431 
2432  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2433 
2434  engine->load(is, *this);
2435 
2436  return;
2437  }
2438 
2439  UT_ASSERT(type >= 0);
2440  if (type < 0)
2441  {
2442  return;
2443  }
2444 
2445  freeData();
2446 
2447  myCompressionType = type;
2448 
2449  switch ((CompressionType) myCompressionType)
2450  {
2451  case COMPRESS_RAW:
2452  len = myRes[2] * myRes[1] * myRes[0];
2453  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2454  is.read<T>((T *) myData, len);
2455  break;
2456 
2457  case COMPRESS_FPREAL16:
2458  len = myRes[2] * myRes[1] * myRes[0];
2459  myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2460  is.read((int16 *) myData, len);
2461  break;
2462 
2463  case COMPRESS_RAWFULL:
2464  len = TILESIZE * TILESIZE * TILESIZE;
2465  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2466  is.read<T>((T *) myData, len);
2467  break;
2468 
2469  case COMPRESS_CONSTANT:
2470  if (!inlineConstant())
2471  myData = UT_VOXEL_ALLOC(sizeof(T));
2472  is.read<T>(rawConstData(), 1);
2473  break;
2474 
2475  case COMPRESS_ENGINE:
2476  UT_ASSERT(!"Invalid compression type");
2477  break;
2478  }
2479 
2480  // Recompress raw full that are not complete in z
2481  if (myCompressionType == COMPRESS_RAW &&
2482  myRes[0] == TILESIZE &&
2483  myRes[1] == TILESIZE)
2484  myCompressionType = COMPRESS_RAWFULL;
2485 }
2486 
template <typename T>
bool
// NOTE(review): the doxygen extraction dropped this function's signature
// line (a UT_JSONWriter-based save, per the body) and the argument lines of
// each UT_VoxelArrayJSON::getToken( call below.  The code is left as
// extracted; restore the missing tokens from the original UT_VoxelArray.C.
{
    // Serialize the tile as a JSON array: a compression-type key/int pair
    // followed by a data key and the payload.
    bool ok = true;

    // Always save in our native format...
    if (myCompressionType >= COMPRESS_ENGINE)
    {
        UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);

        if (engine->canSave())
        {
            char type = myCompressionType;

            ok = ok && w.jsonBeginArray();
            // NOTE(review): getToken argument lost in extraction here.
            ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
            ok = ok && w.jsonInt(type);
            // NOTE(review): getToken argument lost in extraction here.
            ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
            ok = ok && engine->save(w, *this);
            ok = ok && w.jsonEndArray();
            return ok;
        }

        // Can't save, must save as raw: decode voxel by voxel.
        char type = COMPRESS_RAW;
        T value;

        ok = ok && w.jsonBeginArray();
        // NOTE(review): getToken argument lost in extraction here.
        ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
        ok = ok && w.jsonInt(type);
        // NOTE(review): getToken argument lost in extraction here.
        ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(

        ok = ok && w.beginUniformArray(xres()*yres()*zres(),
                                       w.jidFromValue(&value));
        for (int z = 0; z < zres(); z++)
            for (int y = 0; y < yres(); y++)
                for (int x = 0; x < xres(); x++)
                {
                    value = (*this)(x, y, z);
                    ok = ok && w.uniformWrite(value);
                }
        ok = ok && w.endUniformArray();
        ok = ok && w.jsonEndArray();
        return ok;
    }

    int len;
    char type = myCompressionType;

    UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);
    ok = ok && w.jsonBeginArray();
    // NOTE(review): getToken argument lost in extraction here.
    ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
    ok = ok && w.jsonInt(type);
    // NOTE(review): getToken argument lost in extraction here.
    ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(

    switch (myCompressionType)
    {
        case COMPRESS_RAW:
            len = myRes[2] * myRes[1] * myRes[0];
            ok = ok && w.jsonUniformArray(len, (T *)myData);
            break;

        case COMPRESS_FPREAL16:
            len = myRes[2] * myRes[1] * myRes[0];
            ok = ok && w.jsonUniformArray(len, (fpreal16 *)myData);
            break;

        case COMPRESS_RAWFULL:
            // Padded layout: TILESIZE strides in x and y.
            len = TILESIZE * TILESIZE * myRes[2];
            ok = ok && w.jsonUniformArray(len, (T *)myData);
            break;

        case COMPRESS_CONSTANT:
            ok = ok && w.jsonValue(rawConstVal());
            break;
    }

    ok = ok && w.jsonEndArray();
    return ok;
}
2574 
template <typename T>
bool
// NOTE(review): the doxygen extraction dropped this function's signature
// line (a UT_JSONParser-based load, per the body), the iterator
// declaration, and the right-hand sides of the two getTileID comparisons.
// The code is left as extracted; restore the missing lines from the
// original UT_VoxelArray.C.
{
    // NOTE(review): the declaration of the array iterator 'it' was lost in
    // extraction here.
    UT_WorkBuffer key;
    int8 type, otype;
    int len;

    // Expect [ compression-key, type-int, data-key, payload ].
    it = p.beginArray();
    if (it.atEnd() || !it.getLowerKey(key))
        return false;
    // NOTE(review): comparison RHS (the expected tile-key id) lost in
    // extraction here.
    if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
        return false;
    if (!p.parseNumber(type))
        return false;

    ++it;

    if (it.atEnd() || !it.getLowerKey(key))
        return false;
    // NOTE(review): comparison RHS (the expected tile-key id) lost in
    // extraction here.
    if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
        return false;

    // Perform the indirection to find out our native type.
    // NOTE(review): if the remap branch is skipped, 'otype' is printed
    // uninitialized below — same defect fixed in the UT_IStream load above.
    if (type >= 0 && type < compress.entries())
    {
        otype = type;
        type = compress(type);
        if (type == -1)
        {
            std::cerr << "Missing compression engine " << (int) otype << "\n";
        }
    }

    if (type >= COMPRESS_ENGINE)
    {
        freeData();
        myCompressionType = type;

        if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
        {
            // Invalid type!
            std::cerr << "Invalid compression engine " << (int) otype << "\n";
            return false;
        }

        UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);

        bool engine_ok = engine->load(p, *this);
        ++it;
        return it.atEnd() && engine_ok;
    }

    UT_ASSERT(type >= 0);
    if (type < 0)
    {
        return false;
    }

    freeData();

    myCompressionType = type;

    switch (myCompressionType)
    {
        case COMPRESS_RAW:
            len = myRes[2] * myRes[1] * myRes[0];
            myData = UT_VOXEL_ALLOC(sizeof(T) * len);
            if (p.parseUniformArray((T *)myData, len) != len)
                return false;
            break;

        case COMPRESS_FPREAL16:
            len = myRes[2] * myRes[1] * myRes[0];
            myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
            if (p.parseUniformArray((fpreal16 *)myData, len) != len)
                return false;
            break;

        case COMPRESS_RAWFULL:
            // Padded layout: TILESIZE strides in x and y.
            len = TILESIZE * TILESIZE * myRes[2];
            myData = UT_VOXEL_ALLOC(sizeof(T) * len);
            if (p.parseUniformArray((T *)myData, len) != len)
                return false;
            break;

        case COMPRESS_CONSTANT:
            if (!inlineConstant())
                myData = UT_VOXEL_ALLOC(sizeof(T));
            if (!p.parseNumber(*rawConstData()))
                return false;
            break;
    }

    ++it;
    // Success only if the array is fully consumed.
    return it.atEnd();
}
2675 
2676 template <typename T>
2677 void
2679 {
2680  int16 ntype = getCompressionEngines().entries();
2681  int i;
2682 
2683  ntype += COMPRESS_ENGINE;
2684 
2685  UTwrite(os, &ntype);
2686 
2687  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2689  UTsaveStringBinary(os, "rawfull", UT_STRING_8BIT_IO);
2690  UTsaveStringBinary(os, "constant", UT_STRING_8BIT_IO);
2691  UTsaveStringBinary(os, "fpreal16", UT_STRING_8BIT_IO);
2692 
2693  ntype -= COMPRESS_ENGINE;
2694  for (i = 0; i < ntype; i++)
2695  {
2696  UTsaveStringBinary(os, getCompressionEngines()(i)->getName(), UT_STRING_8BIT_IO);
2697  }
2698 }
2699 
2700 template <typename T>
2701 void
2703 {
2704  int16 ntype;
2705  int i, idx;
2706 
2707  compress.entries(0);
2708 
2709  is.read(&ntype);
2710 
2711  for (i = 0; i < ntype; i++)
2712  {
2713  UT_String name;
2714 
2716  if (name == "raw")
2717  compress.append(COMPRESS_RAW);
2718  else if (name == "rawfull")
2719  compress.append(COMPRESS_RAWFULL);
2720  else if (name == "constant")
2721  compress.append(COMPRESS_CONSTANT);
2722  else if (name == "fpreal16")
2723  compress.append(COMPRESS_FPREAL16);
2724  else
2725  {
2726  idx = lookupCompressionEngine(name);
2727 
2728  // -1 means a failure to find it in our set..
2729  // this is only a bad thing if a tile actually uses this engine.
2730 
2731  compress.append(idx);
2732  }
2733  }
2734 }
2735 
2736 template <typename T>
2737 bool
2739 {
2740  int16 ntype = getCompressionEngines().entries();
2741  int i;
2742  bool ok = true;
2743 
2744  ntype += COMPRESS_ENGINE;
2745 
2746  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2747  ok = ok && w.beginUniformArray(ntype, UT_JID_STRING);
2748  ok = ok && w.uniformWrite("raw");
2749  ok = ok && w.uniformWrite("rawfull");
2750  ok = ok && w.uniformWrite("constant");
2751  ok = ok && w.uniformWrite("fpreal16");
2752 
2753  ntype -= COMPRESS_ENGINE;
2754  for (i = 0; i < ntype; i++)
2755  {
2756  ok = ok && w.uniformWrite(getCompressionEngines()(i)->getName());
2757  }
2758 
2759  ok = ok && w.endUniformArray();
2760  return ok;
2761 }
2762 
2763 template <typename T>
2764 bool
2766 {
2769  int idx;
2770 
2771  compress.entries(0);
2772  for (it = p.beginArray(); !it.atEnd(); ++it)
2773  {
2774  if (!p.parseString(buffer))
2775  return false;
2776 
2777  if (!buffer.strcmp("raw"))
2778  compress.append(COMPRESS_RAW);
2779  else if (!buffer.strcmp("rawfull"))
2780  compress.append(COMPRESS_RAWFULL);
2781  else if (!buffer.strcmp("constant"))
2782  compress.append(COMPRESS_CONSTANT);
2783  else if (!buffer.strcmp("fpreal16"))
2784  compress.append(COMPRESS_FPREAL16);
2785  else
2786  {
2787  idx = lookupCompressionEngine(buffer.buffer());
2788 
2789  // -1 means a failure to find it in our set..
2790  // this is only a bad thing if a tile actually uses this engine.
2791 
2792  compress.append(idx);
2793  }
2794  }
2795  return true;
2796 }
2797 
2798 
2799 
2800 //
2801 // VoxelArray definitions.
2802 //
2803 
2804 template <typename T>
2806 {
2807  myRes[0] = 0;
2808  myRes[1] = 0;
2809  myRes[2] = 0;
2810  myTileRes[0] = 0;
2811  myTileRes[1] = 0;
2812  myTileRes[2] = 0;
2813 
2814  myTiles = 0;
2815 
2816  myInvRes = 1;
2817 
2818  myBorderValue = 0;
2819  myBorderScale[0] = 0;
2820  myBorderScale[1] = 0;
2821  myBorderScale[2] = 0;
2822  myBorderType = UT_VOXELBORDER_STREAK;
2823 
2824  mySharedMem = 0;
2825  mySharedMemView = 0;
2826 }
2827 
2828 template <typename T>
2830 {
2831  deleteVoxels();
2832 }
2833 
2834 template <typename T>
2836 {
2837  myRes[0] = 0;
2838  myRes[1] = 0;
2839  myRes[2] = 0;
2840  myInvRes = 1;
2841  myTileRes[0] = 0;
2842  myTileRes[1] = 0;
2843  myTileRes[2] = 0;
2844 
2845  myTiles = 0;
2846 
2847  mySharedMem = 0;
2848  mySharedMemView = 0;
2849 
2850  *this = src;
2851 }
2852 
2853 template <typename T>
2854 void
2856  const UT_JobInfo &info)
2857 {
2858  UT_ASSERT(isMatching(src));
2859  UT_VoxelArrayIterator<T> vit(this);
2860  vit.splitByTile(info);
2861  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
2862  {
2863  int i = vit.getLinearTileNum();
2864  myTiles[i] = src.myTiles[i];
2865  }
2866 }
2867 
2868 template <typename T>
2869 const UT_VoxelArray<T> &
2871 {
2872  // Paranoid code:
2873  if (&src == this)
2874  return *this;
2875 
2876  myBorderScale[0] = src.myBorderScale[0];
2877  myBorderScale[1] = src.myBorderScale[1];
2878  myBorderScale[2] = src.myBorderScale[2];
2879  myBorderValue = src.myBorderValue;
2880  myBorderType = src.myBorderType;
2881 
2882  myCompressionOptions = src.myCompressionOptions;
2883 
2884  // Allocate proper size; don't bother setting to zero, since we'll copy the
2885  // data immediately after.
2886  size(src.myRes[0], src.myRes[1], src.myRes[2], false);
2887 
2888  copyData(src);
2889 
2890  return *this;
2891 }
2892 
2893 // If defined, construction and destruction of tiles will be multithreaded.
2894 // Otherwise, new[] and delete[] take care of calling these methods.
2895 // Unfortunately, this doesn't work at the moment...
2896 #define __MULTITHREADED_STRUCTORS__
// Releases all voxel storage and resets the array to the empty state:
// destroys every tile, frees the bulk tile allocation, zeroes the
// resolutions, and deletes any shared-memory bookkeeping.
// NOTE(review): extraction dropped line 2903, which presumably is the
// UTparallelFor(...) call whose lambda starts on the next line -- the
// tiles are destroyed in parallel; confirm against the HDK source.
2897 template <typename T>
2898 void
2900 {
2901 #ifdef __MULTITHREADED_STRUCTORS__
2902  // Free up each tile's heap data, then bulk-deallocate the array.
2904  [&](const UT_BlockedRange<int>& range)
2905  {
// Explicit destructor calls match the placement-new construction
// done in size(); the raw buffer is then released with the matching
// nothrow operator delete.
2906  for(int i = range.begin(); i < range.end(); i++)
2907  {
2908  myTiles[i].~UT_VoxelTile<T>();
2909  }
2910  });
2911  operator delete(myTiles, std::nothrow);
2912 #else
2913  delete [] myTiles;
2914 #endif
2915  myTiles = 0;
2916 
// Back to the empty-array invariant.
2917  myTileRes[0] = myTileRes[1] = myTileRes[2] = 0;
2918  myRes[0] = myRes[1] = myRes[2] = 0;
2919 
// The view must go before the shared memory segment it views.
2920  delete mySharedMemView;
2921  mySharedMemView = 0;
2922 
2923  delete mySharedMem;
2924  mySharedMem = 0;
2925 }
2926 
// Resizes the voxel array to xres x yres x zres. If the size is
// unchanged, optionally zeroes the contents and returns; otherwise all
// existing data is discarded, the tile array is reallocated, and each
// tile's resolution is set (edge tiles get the remainder sizes).
// NOTE(review): extraction dropped line 2965, presumably the
// UTparallelFor(...) call whose lambda starts on the next line --
// tiles are placement-new constructed in parallel; confirm.
2927 template <typename T>
2928 void
2929 UT_VoxelArray<T>::size(int xres, int yres, int zres, bool zero)
2930 {
2931  // Check if the sizes are already right.
2932  if (myRes[0] == xres && myRes[1] == yres && myRes[2] == zres)
2933  {
2934  if (zero)
2935  {
// Build a zero value without requiring T to have a scalar
// constructor: write 0 into its first component slot.
2936  T tzero;
2937  (&tzero)[0] = 0;
2938  constant(tzero);
2939  }
2940  return;
2941  }
2942 
2943  deleteVoxels();
2944 
2945  // Initialize our tiles.
// Round up so partial tiles at the far edges get their own tile.
2946  int tile_res[3];
2947  tile_res[0] = (xres + TILEMASK) >> TILEBITS;
2948  tile_res[1] = (yres + TILEMASK) >> TILEBITS;
2949  tile_res[2] = (zres + TILEMASK) >> TILEBITS;
2950 
// exint cast avoids 32-bit overflow for very large grids.
2951  exint ntiles = ((exint)tile_res[0]) * tile_res[1] * tile_res[2];
2952  if (ntiles)
2953  {
2954 #ifdef __MULTITHREADED_STRUCTORS__
2955  // Allocate the big array of the right size, then use placement new to
2956  // construct each tile.
2957  // Note that it is not guaranteed that the returned pointer is properly
2958  // aligned with raw operator new. However, we only practically need 8-byte
2959  // alignment given the current implementation, which should be respected by
2960  // operator new.
2961  // We have to wait for C++17 for version of operator new that accepts
2962  // alignment requirements...
2963  myTiles = (UT_VoxelTile<T>*) operator new(sizeof(UT_VoxelTile<T>) * ntiles,
2964  std::nothrow);
2966  [&](const UT_BlockedRange<int>& range)
2967  {
2968  for(int k = range.begin(); k < range.end(); k++)
2969  {
2970  new(myTiles + k) UT_VoxelTile<T>();
2971 
2972  // Set the resolution of this tile.
// Decompose the linear tile index into (tilex, tiley, tilez);
// edge tiles are clamped to the leftover voxel counts.
2973  int tilex = k % tile_res[0];
2974  int k2 = k / tile_res[0];
2975  int tiley = k2 % tile_res[1];
2976  int tilez = k2 / tile_res[1];
2977  myTiles[k].setRes(SYSmin(TILESIZE, xres - tilex * TILESIZE),
2978  SYSmin(TILESIZE, yres - tiley * TILESIZE),
2979  SYSmin(TILESIZE, zres - tilez * TILESIZE));
2980  }
2981  });
2982 #else
2983  myTiles = new UT_VoxelTile<T>[ntiles];
2984 #endif
2985 
2986  // Only set resolutions *AFTER* we successfully allocate
2987  myRes[0] = xres;
2988  myRes[1] = yres;
2989  myRes[2] = zres;
2990 
// Guard each axis against division by zero.
2991  myInvRes = 1;
2992  if (xres)
2993  myInvRes.x() = 1.0f / myRes[0];
2994  if (yres)
2995  myInvRes.y() = 1.0f / myRes[1];
2996  if (zres)
2997  myInvRes.z() = 1.0f / myRes[2];
2998 
2999  myTileRes[0] = tile_res[0];
3000  myTileRes[1] = tile_res[1];
3001  myTileRes[2] = tile_res[2];
3002 
3003 #ifndef __MULTITHREADED_STRUCTORS__
// Serial fallback: walk tiles in z/y/x order, assigning full TILESIZE
// resolutions except for the final (edge) tile on each axis.
3004  int i = 0;
3005  for (int tz = 0; tz < myTileRes[2]; tz++)
3006  {
3007  int zr;
3008  if (tz < myTileRes[2]-1)
3009  zr = TILESIZE;
3010  else
3011  zr = zres - tz * TILESIZE;
3012 
3013  for (int ty = 0; ty < myTileRes[1]; ty++)
3014  {
3015  int yr;
3016  if (ty < myTileRes[1]-1)
3017  yr = TILESIZE;
3018  else
3019  yr = yres - ty * TILESIZE;
3020 
3021  int tx, xr = TILESIZE;
3022  for (tx = 0; tx < myTileRes[0]-1; tx++)
3023  {
3024  myTiles[i].setRes(xr, yr, zr);
3025 
3026  i++;
3027  }
3028  xr = xres - tx * TILESIZE;
3029  myTiles[i].setRes(xr, yr, zr);
3030  i++;
3031  }
3032  }
3033 #endif
3034  }
3035  else
// Degenerate (zero-voxel) array: no tile storage at all.
3036  myTiles = 0;
3037 }
3038 
3039 template <typename T>
3040 void
3042 {
3043  // This call will only actually resize if the sizes are different. If it
3044  // does not resize, contents will be left alone.
3045  size(src.getXRes(), src.getYRes(), src.getZRes(), false);
3046 
3047  // Update border conditions.
3048  myBorderType = src.myBorderType;
3049  myBorderScale[0] = src.myBorderScale[0];
3050  myBorderScale[1] = src.myBorderScale[1];
3051  myBorderScale[2] = src.myBorderScale[2];
3052  myBorderValue = src.myBorderValue;
3053 
3054  // Match our compression tolerances
3055  myCompressionOptions = src.myCompressionOptions;
3056 }
3057 
3058 template <typename T>
3059 int64
3061 {
3062  int64 mem = inclusive ? sizeof(*this) : 0;
3063 
3064  int ntiles = numTiles();
3065  for (int i = 0; i < ntiles; i++)
3066  mem += myTiles[i].getMemoryUsage(true);
3067 
3068  if (mySharedMem)
3069  mem += mySharedMem->getMemoryUsage(true);
3070 
3071  if (mySharedMemView)
3072  mem += mySharedMemView->getMemoryUsage(true);
3073 
3074  return mem;
3075 }
3076 
3077 template <typename T>
3078 void
3080 {
3081  UT_VoxelArrayIterator<T> vit(this);
3082  // We don't want split tile as we update the actual tiles here
3083  // so will have false-sharing if we interleaved.
3084  vit.setPartialRange(info.job(), info.numJobs());
3085  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
3086  {
3087  int i = vit.getLinearTileNum();
3088  myTiles[i].makeConstant(t);
3089  }
3090 }
3091 
3092 template <typename T>
3093 bool
3095 {
3096  int i, ntiles;
3097  T cval = 0;
3098  const fpreal tol = SYS_FTOLERANCE_R;
3099 
3100  ntiles = numTiles();
3101  for (i = 0; i < ntiles; i++)
3102  {
3103  if (!myTiles[i].isConstant())
3104  {
3105  return false;
3106  }
3107 
3108  if (!i)
3109  {
3110  // First tile, get the constant value.
3111  cval = myTiles[i].rawConstVal();
3112  }
3113  else
3114  {
3115  // See if we have deviated too much.
3116  if (UT_VoxelTile<T>::dist(cval, myTiles[i].rawConstVal()) > tol)
3117  {
3118  return false;
3119  }
3120  }
3121  }
3122 
3123  // All tiles are both constant and within tolerance of each
3124  // other. Write out our constant value and return true.
3125  if (t)
3126  *t = cval;
3127 
3128  return true;
3129 }
3130 
3131 template <typename T>
3132 bool
3134 {
3135  int i, ntiles;
3136 
3137  ntiles = numTiles();
3138  for (i = 0; i < ntiles; i++)
3139  {
3140  if (myTiles[i].hasNan())
3141  {
3142  return true;
3143  }
3144  }
3145 
3146  return false;
3147 }
3148 
// Samples the array with trilinear interpolation at a position given
// in unit-cube [0,1)^3 coordinates: scales into voxel index space,
// shifts by -0.5 so cell centers land on exact integer indices, then
// delegates to lerpVoxelCoord().
// NOTE(review): the function-name line was lost in extraction --
// confirm which public sampling entry point this is in the HDK header.
3149 template <typename T>
3150 T
3152 {
3153  // We go from the position in the unit cube into the index.
3154  // The center of cells must map to the exact integer indices.
3155  pos.x() *= myRes[0];
3156  pos.y() *= myRes[1];
3157  pos.z() *= myRes[2];
3158  pos.x() -= 0.5;
3159  pos.y() -= 0.5;
3160  pos.z() -= 0.5;
3161 
3162  return lerpVoxelCoord(pos);
3163 }
3164 
// Samples at a voxel-space coordinate: splits the position into
// integer voxel indices plus fractional [0,1) weights, then delegates
// the actual trilinear blend to lerpVoxel().
3165 template <typename T>
3166 T
3168 {
3169  int x, y, z;
3170  // Yes, these have to be 32 becaues split float requires 32!
3171  fpreal32 fx, fy, fz;
3172 
3173  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3174 
3175  return lerpVoxel(x, y, z, fx, fy, fz);
3176 }
3177 
// 2D variant of lerpVoxelCoord: AXIS2D names the flat axis, which is
// skipped during both the coordinate split and the interpolation.
3178 template <typename T>
3179 template <int AXIS2D>
3180 T
3182 {
3183  int x, y, z;
3184  // Yes, these have to be 32 becaues split float requires 32!
3185  fpreal32 fx, fy, fz;
3186 
3187  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3188 
3189  return lerpVoxelAxis<AXIS2D>(x, y, z, fx, fy, fz);
3190 }
3191 
// Trilinear interpolation of the 2x2x2 voxel neighbourhood at (x,y,z)
// with fractional weights (fx,fy,fz). Three paths, fastest first:
//  1. all eight samples inside one tile -> tile->lerp();
//  2. in bounds but crossing a tile edge -> unchecked operator();
//  3. possibly out of bounds -> getValue(), which applies the border.
// Kept byte-identical: the lerp nesting order determines float
// round-off, so a restructure could change results subtly.
3192 template <typename T>
3193 T
3194 UT_VoxelArray<T>::lerpVoxel(int x, int y, int z,
3195  float fx, float fy, float fz) const
3196 {
3197  // Do trilinear interpolation.
3198  T vx, vx1, vy, vy1, vz;
3199 
3200  // Optimization for common cases (values are within the voxel range and
3201  // are all within the same tile)
// Branch-free bounds test: the OR is negative iff any index is
// negative; the AND is negative iff every index+1 is below res.
3202  if ( !((x | y | z) < 0) &&
3203  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3204 
3205  // (x > 0) && (y > 0) && (z > 0) &&
3206  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3207  // report in bounds!
3208  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3209  {
3210  int xm, ym, zm;
3211 
3212  xm = x & TILEMASK;
3213  ym = y & TILEMASK;
3214  zm = z & TILEMASK;
3215 
// If no in-tile offset sits on the last slot, (x..x+1) etc. all
// fall inside a single tile.
3216  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3217  {
3218  const UT_VoxelTile<T> *tile =
3219  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3220 
3221  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
3222  }
3223  else
3224  {
3225  // We cross tile boundaries but we remain within
3226  // the voxel grid. We can thus avoid any bounds checking
3227  // and use operator() rather than getValue.
3228 
3229  // Lerp x:x+1, y, z
3230  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3231  (*this)(x+1, y, z),
3232  fx);
3233  // Lerp x:x+1, y+1, z
3234  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3235  (*this)(x+1, y+1, z),
3236  fx);
3237  // Lerp x:x+1, y:y+1, z
3238  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3239 
3240  // Lerp x:x+1, y, z+1
3241  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3242  (*this)(x+1, y, z+1),
3243  fx);
3244  // Lerp x:x+1, y+1, z+1
3245  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3246  (*this)(x+1, y+1, z+1),
3247  fx);
3248 
3249  // Lerp x:x+1, y:y+1, z+1
3250  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3251 
3252  // Lerp x:x+1, y:y+1, z:z+1
3253  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3254  }
3255  }
3256  else
3257  {
// Slow path: getValue() bounds-checks and applies border rules.
3258  // Lerp x:x+1, y, z
3259  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3260  getValue(x+1, y, z),
3261  fx);
3262  // Lerp x:x+1, y+1, z
3263  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3264  getValue(x+1, y+1, z),
3265  fx);
3266 
3267  // Lerp x:x+1, y:y+1, z
3268  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3269 
3270  // Lerp x:x+1, y, z+1
3271  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3272  getValue(x+1, y, z+1),
3273  fx);
3274  // Lerp x:x+1, y+1, z+1
3275  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3276  getValue(x+1, y+1, z+1),
3277  fx);
3278 
3279  // Lerp x:x+1, y:y+1, z+1
3280  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3281 
3282  // Lerp x:x+1, y:y+1, z:z+1
3283  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3284  }
3285 
3286  return vz;
3287 }
3288 
// Bilinear (2D) specialization of lerpVoxel: AXIS2D names the flat
// axis and every interpolation step along it collapses to a direct
// copy. The same three-path structure applies (single tile, in-bounds
// crossing tiles, bounds-checked getValue). Kept byte-identical:
// float lerp ordering is behaviour.
// NOTE(review): extraction dropped the signature line (3292),
// presumably UT_VoxelArray<T>::lerpVoxelAxis(int x, int y, int z, --
// confirm.
3289 template <typename T>
3290 template <int AXIS2D>
3291 T
3293  float fx, float fy, float fz) const
3294 {
3295  // Do bilinear interpolation.
3296  T vx, vx1, vy, vy1, vz;
3297 
// Accumulate the branch-free bounds test only over the two active
// axes; the flat axis never goes out of range.
3298  int lesscomp = 0, greatercomp = -1;
3299 
3300  if (AXIS2D != 0)
3301  {
3302  lesscomp |= x;
3303  greatercomp &= (x - myRes[0]+1);
3304  }
3305  if (AXIS2D != 1)
3306  {
3307  lesscomp |= y;
3308  greatercomp &= (y - myRes[1]+1);
3309  }
3310  if (AXIS2D != 2)
3311  {
3312  lesscomp |= z;
3313  greatercomp &= (z - myRes[2]+1);
3314  }
3315 
3316  // Optimization for common cases (values are within the voxel range and
3317  // are all within the same tile)
3318  if ( !(lesscomp < 0) && (greatercomp < 0) )
3319  {
3320  int xm, ym, zm;
3321 
3322  xm = x & TILEMASK;
3323  ym = y & TILEMASK;
3324  zm = z & TILEMASK;
3325 
3326  if ((AXIS2D == 0 || xm != TILEMASK) &&
3327  (AXIS2D == 1 || ym != TILEMASK) &&
3328  (AXIS2D == 2 || zm != TILEMASK))
3329  {
3330  const UT_VoxelTile<T> *tile =
3331  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3332  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3333  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3334 
3335  vz = tile->template lerpAxis<AXIS2D>(xm, ym, zm, fx, fy, fz);
3336  }
3337  else
3338  {
3339  // We cross tile boundaries but we remain within
3340  // the voxel grid. We can thus avoid any bounds checking
3341  // and use operator() rather than getValue.
3342 
3343  // Lerp x:x+1, y, z
3344  if (AXIS2D != 0)
3345  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3346  (*this)(x+1, y, z),
3347  fx);
3348  else
3349  vx = (*this)(x, y, z);
3350 
3351  if (AXIS2D != 1)
3352  {
3353  // Lerp x:x+1, y+1, z
3354  if (AXIS2D != 0)
3355  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3356  (*this)(x+1, y+1, z),
3357  fx);
3358  else
3359  vx1 = (*this)(x, y+1, z);
3360  // Lerp x:x+1, y:y+1, z
3361  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3362  }
3363  else
3364  vy = vx;
3365 
3366  if (AXIS2D != 2)
3367  {
3368  // Lerp x:x+1, y, z+1
3369  if (AXIS2D != 0)
3370  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3371  (*this)(x+1, y, z+1),
3372  fx);
3373  else
3374  vx = (*this)(x, y, z+1);
3375 
3376  if (AXIS2D != 1)
3377  {
3378  // Lerp x:x+1, y+1, z+1
3379  if (AXIS2D != 0)
3380  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3381  (*this)(x+1, y+1, z+1),
3382  fx);
3383  else
3384  vx1 = (*this)(x, y+1, z+1);
3385  // Lerp x:x+1, y:y+1, z+1
3386  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3387  }
3388  else
3389  vy1 = vx;
3390 
3391  // Lerp x:x+1, y:y+1, z:z+1
3392  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3393  }
3394  else
3395  vz = vy;
3396  }
3397  }
3398  else
3399  {
// Slow path: getValue() bounds-checks and applies border rules.
3400  // Lerp x:x+1, y, z
3401  if (AXIS2D != 0)
3402  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3403  getValue(x+1, y, z),
3404  fx);
3405  else
3406  vx = getValue(x, y, z);
3407 
3408  if (AXIS2D != 1)
3409  {
3410  // Lerp x:x+1, y+1, z
3411  if (AXIS2D != 0)
3412  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3413  getValue(x+1, y+1, z),
3414  fx);
3415  else
3416  vx1 = getValue(x, y+1, z);
3417  // Lerp x:x+1, y:y+1, z
3418  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3419  }
3420  else
3421  vy = vx;
3422 
3423  if (AXIS2D != 2)
3424  {
3425  // Lerp x:x+1, y, z+1
3426  if (AXIS2D != 0)
3427  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3428  getValue(x+1, y, z+1),
3429  fx);
3430  else
3431  vx = getValue(x, y, z+1);
3432 
3433  if (AXIS2D != 1)
3434  {
3435  // Lerp x:x+1, y+1, z+1
3436  if (AXIS2D != 0)
3437  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3438  getValue(x+1, y+1, z+1),
3439  fx);
3440  else
3441  vx1 = getValue(x, y+1, z+1);
3442  // Lerp x:x+1, y:y+1, z+1
3443  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3444  }
3445  else
3446  vy1 = vx;
3447 
3448  // Lerp x:x+1, y:y+1, z:z+1
3449  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3450  }
3451  else
3452  vz = vy;
3453  }
3454 
3455  return vz;
3456 }
3457 
// Unit-cube entry point for the lerp-with-extrema query: converts the
// [0,1)^3 position to voxel coordinates (cell centers on integers),
// then forwards to lerpVoxelCoordMinMax(). Outputs the interpolated
// value plus the min and max of the contributing samples.
// NOTE(review): extraction dropped the name line (3460) and the
// output-parameter portion of the signature -- confirm in the header.
3458 template <typename T>
3459 void
3461  UT_Vector3F pos) const
3462 {
3463  // We go from the position in the unit cube into the index.
3464  // The center of cells must map to the exact integer indices.
3465  pos.x() *= myRes[0];
3466  pos.y() *= myRes[1];
3467  pos.z() *= myRes[2];
3468  pos.x() -= 0.5;
3469  pos.y() -= 0.5;
3470  pos.z() -= 0.5;
3471 
3472  lerpVoxelCoordMinMax(lerp, lmin, lmax, pos);
3473 }
3474 
// Voxel-space lerp-with-extrema: splits the coordinate into integer
// indices plus fractional weights and forwards to lerpVoxelMinMax().
3475 template <typename T>
3476 void
3478  UT_Vector3F pos) const
3479 {
3480  int x, y, z;
3481  // Yes, these have to be 32 becaues split float requires 32!
3482  fpreal32 fx, fy, fz;
3483 
3484  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3485 
3486  lerpVoxelMinMax(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3487 }
3488 
3489 
3490 template <typename T>
3491 template <int AXIS2D>
3492 void
3494  UT_Vector3F pos) const
3495 {
3496  int x, y, z;
3497  // Yes, these have to be 32 becaues split float requires 32!
3498  fpreal32 fx, fy, fz;
3499 
3500  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3501 
3502  lerpVoxelMinMaxAxis<AXIS2D>(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3503 }
3504 
3505 
3506 template <typename T>
3507 void
3509  T &lerp, T &smin, T &smax,
3510  int x, int y, int z,
3511  float fx, float fy, float fz) const
3512 {
3513  T samples[8];
3514 
3515  if (extractSample(x, y, z, samples))
3516  {
3517  lerp = smin = smax = samples[0];
3518  return;
3519  }
3520 
3521  lerp = lerpSample(samples, fx, fy, fz);
3522 
3523  T smin1, smax1;
3524 
3525  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3526  smin, smax);
3527  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3528  smin1, smax1);
3529 
3530  smin = SYSmin(smin, smin1);
3531  smax = SYSmax(smax, smax1);
3532 }
3533 
// 2D variant of lerpVoxelMinMax: the extrema are taken only over the
// four samples that actually participate for the given flat axis
// (the sample array layout is x-minor: +1 = +x, +2 = +y, +4 = +z).
3534 template <typename T>
3535 template <int AXIS2D>
3536 void
3538  T &lerp, T &smin, T &smax,
3539  int x, int y, int z,
3540  float fx, float fy, float fz) const
3541 {
3542  T samples[8];
3543 
3544  if (extractSampleAxis<AXIS2D>(x, y, z, samples))
3545  {
3546  lerp = smin = smax = samples[0];
3547  return;
3548  }
3549 
3550  lerp = lerpSampleAxis<AXIS2D>(samples, fx, fy, fz);
3551 
// Pick the four slots that vary on the two active axes.
3552  if (AXIS2D == 0)
3553  SYSminmax(samples[0], samples[2], samples[4], samples[6],
3554  smin, smax);
3555  else if (AXIS2D == 1)
3556  SYSminmax(samples[0], samples[1], samples[4], samples[5],
3557  smin, smax);
3558  else if (AXIS2D == 2)
3559  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3560  smin, smax);
3561  else
3562  {
// No flat axis: fall back to the full eight-sample extrema.
3563  T smin1, smax1;
3564 
3565  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3566  smin, smax);
3567  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3568  smin1, smax1);
3569 
3570  smin = SYSmin(smin, smin1);
3571  smax = SYSmax(smax, smax1);
3572  }
3573 }
3574 
// Fills samples[0..7] with the 2x2x2 neighbourhood at (x,y,z), layout
// x-minor (+1 = +x, +2 = +y, +4 = +z). Returns true when the whole
// neighbourhood is known constant (single-tile fast path), in which
// case only samples[0] is meaningful. Same three-path structure as
// lerpVoxel: single tile, in-bounds across tiles, bounds-checked.
// NOTE(review): name line (3577) missing -- presumably
// UT_VoxelArray<T>::extractSample(int x, int y, int z,; confirm.
3575 template <typename T>
3576 bool
3578  T *samples) const
3579 {
3580  // Optimization for common cases (values are within the voxel range and
3581  // are all within the same tile)
3582  if ( !((x | y | z) < 0) &&
3583  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3584 
3585  // (x > 0) && (y > 0) && (z > 0) &&
3586  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3587  // report in bounds!
3588  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3589  {
3590  int xm, ym, zm;
3591 
3592  xm = x & TILEMASK;
3593  ym = y & TILEMASK;
3594  zm = z & TILEMASK;
3595 
3596  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3597  {
3598  const UT_VoxelTile<T> *tile =
3599  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3600 
3601  return tile->extractSample(xm, ym, zm, samples);
3602  }
3603  else
3604  {
3605  // We cross tile boundaries but we remain within
3606  // the voxel grid. We can thus avoid any bounds checking
3607  // and use operator() rather than getValue.
3608  samples[0] = (*this)(x, y, z);
3609  samples[1] = (*this)(x+1, y, z);
3610  samples[2+0] = (*this)(x, y+1, z);
3611  samples[2+1] = (*this)(x+1, y+1, z);
3612  samples[4+0] = (*this)(x, y, z+1);
3613  samples[4+1] = (*this)(x+1, y, z+1);
3614  samples[4+2+0] = (*this)(x, y+1, z+1);
3615  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3616  }
3617  }
3618  else
3619  {
// Out of bounds somewhere: getValue() applies the border rules.
3620  samples[0] = getValue(x, y, z);
3621  samples[1] = getValue(x+1, y, z);
3622  samples[2+0] = getValue(x, y+1, z);
3623  samples[2+1] = getValue(x+1, y+1, z);
3624  samples[4+0] = getValue(x, y, z+1);
3625  samples[4+1] = getValue(x+1, y, z+1);
3626  samples[4+2+0] = getValue(x, y+1, z+1);
3627  samples[4+2+1] = getValue(x+1, y+1, z+1);
3628  }
3629 
3630  return false;
3631 }
3632 
3633 
3634 template <typename T>
3635 template <int AXIS2D>
3636 bool
3638  T *samples) const
3639 {
3640  // Optimization for common cases (values are within the voxel range and
3641  // are all within the same tile)
3642  int lesscomp = 0, greatercomp = -1;
3643 
3644  if (AXIS2D != 0)
3645  {
3646  lesscomp |= x;
3647  greatercomp &= (x - myRes[0]+1);
3648  }
3649  if (AXIS2D != 1)
3650  {
3651  lesscomp |= y;
3652  greatercomp &= (y - myRes[1]+1);
3653  }
3654  if (AXIS2D != 2)
3655  {
3656  lesscomp |= z;
3657  greatercomp &= (z - myRes[2]+1);
3658  }
3659 
3660  // Optimization for common cases (values are within the voxel range and
3661  // are all within the same tile)
3662  if ( !(lesscomp < 0) && (greatercomp < 0) )
3663  {
3664  int xm, ym, zm;
3665 
3666  xm = x & TILEMASK;
3667  ym = y & TILEMASK;
3668  zm = z & TILEMASK;
3669 
3670  if ((AXIS2D == 0 || xm != TILEMASK) &&
3671  (AXIS2D == 1 || ym != TILEMASK) &&
3672  (AXIS2D == 2 || zm != TILEMASK))
3673  {
3674  const UT_VoxelTile<T> *tile =
3675  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3676  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3677  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3678 
3679  return tile->template extractSampleAxis<AXIS2D>(xm, ym, zm, samples);
3680  }
3681  else
3682  {
3683  // We cross tile boundaries but we remain within
3684  // the voxel grid. We can thus avoid any bounds checking
3685  // and use operator() rather than getValue.
3686  samples[0] = (*this)(x, y, z);
3687  if (AXIS2D != 0)
3688  samples[1] = (*this)(x+1, y, z);
3689  if (AXIS2D != 1)
3690  {
3691  samples[2+0] = (*this)(x, y+1, z);
3692  if (AXIS2D != 0)
3693  samples[2+1] = (*this)(x+1, y+1, z);
3694  }
3695  if (AXIS2D != 2)
3696  {
3697  samples[4+0] = (*this)(x, y, z+1);
3698  if (AXIS2D != 0)
3699  samples[4+1] = (*this)(x+1, y, z+1);
3700  if (AXIS2D != 1)
3701  {
3702  samples[4+2+0] = (*this)(x, y+1, z+1);
3703  if (AXIS2D != 0)
3704  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3705  }
3706  }
3707  }
3708  }
3709  else
3710  {
3711  samples[0] = getValue(x, y, z);
3712  if (AXIS2D != 0)
3713  samples[1] = getValue(x+1, y, z);
3714  if (AXIS2D != 1)
3715  {
3716  samples[2+0] = getValue(x, y+1, z);
3717  if (AXIS2D != 0)
3718  samples[2+1] = getValue(x+1, y+1, z);
3719  }
3720  if (AXIS2D != 2)
3721  {
3722  samples[4+0] = getValue(x, y, z+1);
3723  if (AXIS2D != 0)
3724  samples[4+1] = getValue(x+1, y, z+1);
3725  if (AXIS2D != 1)
3726  {
3727  samples[4+2+0] = getValue(x, y+1, z+1);
3728  if (AXIS2D != 0)
3729  samples[4+2+1] = getValue(x+1, y+1, z+1);
3730  }
3731  }
3732  }
3733 
3734  return false;
3735 }
3736 
// Fills samples[0..6] with the face-adjacent "plus" stencil around
// (x,y,z): -x,+x, -y,+y, -z,+z neighbours, then the center in
// samples[6]. The fast path requires the full stencil strictly inside
// one tile (all in-tile offsets away from both tile faces). Returns
// true on a constant-tile fast path (samples[0] meaningful).
3737 template <typename T>
3738 bool
3740  T *samples) const
3741 {
3742  // Optimization for common cases (values are within the voxel range and
3743  // are all within the same tile)
// The (x-1)|(y-1)|(z-1) test also rejects any coordinate at 0,
// since the stencil reaches one voxel in the negative direction.
3744  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3745  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3746 
3747  // (x > 0) && (y > 0) && (z > 0) &&
3748  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3749  // report in bounds!
3750  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3751  {
3752  int xm, ym, zm;
3753 
3754  xm = x & TILEMASK;
3755  ym = y & TILEMASK;
3756  zm = z & TILEMASK;
3757 
3758  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3759  {
3760  const UT_VoxelTile<T> *tile =
3761  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3762 
3763  return tile->extractSamplePlus(xm, ym, zm, samples);
3764  }
3765  else
3766  {
3767  // We cross tile boundaries but we remain within
3768  // the voxel grid. We can thus avoid any bounds checking
3769  // and use operator() rather than getValue.
3770  samples[0] = (*this)(x-1, y, z);
3771  samples[1] = (*this)(x+1, y, z);
3772  samples[2+0] = (*this)(x, y-1, z);
3773  samples[2+1] = (*this)(x, y+1, z);
3774  samples[4+0] = (*this)(x, y, z-1);
3775  samples[4+1] = (*this)(x, y, z+1);
3776  samples[6] = (*this)(x, y, z);
3777  }
3778  }
3779  else
3780  {
// Out of bounds somewhere: getValue() applies the border rules.
3781  samples[0] = getValue(x-1, y, z);
3782  samples[1] = getValue(x+1, y, z);
3783  samples[2+0] = getValue(x, y-1, z);
3784  samples[2+1] = getValue(x, y+1, z);
3785  samples[4+0] = getValue(x, y, z-1);
3786  samples[4+1] = getValue(x, y, z+1);
3787  samples[6] = getValue(x, y, z);
3788  }
3789 
3790  return false;
3791 }
3792 
// Fills samples[0..26] with the full 3x3x3 cube centered on (x,y,z),
// ordered z-major then y then x (dz,dy,dx loops from -1 to 1). Fast
// path requires the cube strictly inside one tile. Returns true on a
// constant-tile fast path (samples[0] meaningful).
3793 template <typename T>
3794 bool
3796  T *samples) const
3797 {
3798  // Optimization for common cases (values are within the voxel range and
3799  // are all within the same tile)
// Rejects coordinates at 0 as well, since the cube reaches -1.
3800  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3801  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3802 
3803  // (x > 0) && (y > 0) && (z > 0) &&
3804  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3805  // report in bounds!
3806  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3807  {
3808  int xm, ym, zm;
3809 
3810  xm = x & TILEMASK;
3811  ym = y & TILEMASK;
3812  zm = z & TILEMASK;
3813 
3814  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3815  {
3816  const UT_VoxelTile<T> *tile =
3817  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3818 
3819  return tile->extractSampleCube(xm, ym, zm, samples);
3820  }
3821  else
3822  {
3823  // We cross tile boundaries but we remain within
3824  // the voxel grid. We can thus avoid any bounds checking
3825  // and use operator() rather than getValue.
3826  int sampidx = 0;
3827  for (int dz = -1; dz <= 1; dz++)
3828  {
3829  for (int dy = -1; dy <= 1; dy++)
3830  {
3831  for (int dx = -1; dx <= 1; dx++)
3832  {
3833  samples[sampidx++] = (*this)(x+dx, y+dy, z+dz);
3834  }
3835  }
3836  }
3837  }
3838  }
3839  else
3840  {
// Out of bounds somewhere: getValue() applies the border rules.
3841  int sampidx = 0;
3842  for (int dz = -1; dz <= 1; dz++)
3843  {
3844  for (int dy = -1; dy <= 1; dy++)
3845  {
3846  for (int dx = -1; dx <= 1; dx++)
3847  {
3848  samples[sampidx++] = getValue(x+dx, y+dy, z+dz);
3849  }
3850  }
3851  }
3852  }
3853 
3854  return false;
3855 }
// Trilinearly blends a pre-extracted 2x2x2 sample array (layout from
// extractSample: +1 = +x, +2 = +y, +4 = +z) with weights (fx,fy,fz).
// The disabled #else branch is a SIMD sketch of the same reduction.
3856 template <typename T>
3857 T
3859  float fx, float fy, float fz) const
3860 {
3861 #if 1
3862  // Do trilinear interpolation.
3863  T vx, vx1, vy, vy1, vz;
3864 
3865  // Lerp x:x+1, y, z
3866  vx = UT_VoxelTile<T>::lerpValues(samples[0],
3867  samples[1],
3868  fx);
3869  // Lerp x:x+1, y+1, z
3870  vx1= UT_VoxelTile<T>::lerpValues(samples[2],
3871  samples[2+1],
3872  fx);
3873  // Lerp x:x+1, y:y+1, z
3874  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3875 
3876  // Lerp x:x+1, y, z+1
3877  vx = UT_VoxelTile<T>::lerpValues(samples[4],
3878  samples[4+1],
3879  fx);
3880  // Lerp x:x+1, y+1, z+1
3881  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2],
3882  samples[4+2+1],
3883  fx);
3884 
3885  // Lerp x:x+1, y:y+1, z+1
3886  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3887 
3888  // Lerp x:x+1, y:y+1, z:z+1
3889  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3890 
3891  return vz;
3892 #else
// Dead SIMD variant: lerp z, then y, then x via vector swizzles.
3893  v4uf a, b, vfx, vfy, vfz;
3894 
3895  a = v4uf(&samples[0]);
3896  b = v4uf(&samples[4]);
3897 
3898  vfx = v4uf(fx);
3899  vfy = v4uf(fy);
3900  vfz = v4uf(fz);
3901 
3902  b -= a;
3903  a = madd(b, fz, a);
3904 
3905  b = a.swizzle<2, 3, 0, 1>();
3906  b -= a;
3907  a = madd(b, fy, a);
3908 
3909  b = a.swizzle<1, 2, 3, 0>();
3910  b -= a;
3911  a = madd(b, fx, a);
3912 
3913  return a[0];
3914 #endif
3915 }
3916 
// 2D variant of lerpSample: skips every blend along the flat AXIS2D
// axis (those steps become direct copies), reading only the sample
// slots that extractSampleAxis<AXIS2D>() actually filled.
3917 template <typename T>
3918 template <int AXIS2D>
3919 T
3921  float fx, float fy, float fz) const
3922 {
3923  // Do trilinear interpolation.
3924  T vx, vx1, vy, vy1, vz;
3925 
3926  // Lerp x:x+1, y, z
3927  if (AXIS2D != 0)
3928  vx = UT_VoxelTile<T>::lerpValues(samples[0], samples[1], fx);
3929  else
3930  vx = samples[0];
3931 
3932  if (AXIS2D != 1)
3933  {
3934  // Lerp x:x+1, y+1, z
3935  if (AXIS2D != 0)
3936  vx1= UT_VoxelTile<T>::lerpValues(samples[2], samples[2+1], fx);
3937  else
3938  vx1= samples[2];
3939 
3940  // Lerp x:x+1, y:y+1, z
3941  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3942  }
3943  else
3944  vy = vx;
3945 
3946  if (AXIS2D != 2)
3947  {
3948  // Lerp x:x+1, y, z+1
3949  if (AXIS2D != 0)
3950  vx = UT_VoxelTile<T>::lerpValues(samples[4], samples[4+1], fx);
3951  else
3952  vx = samples[4];
3953 
3954  if (AXIS2D != 1)
3955  {
3956  // Lerp x:x+1, y+1, z+1
3957  if (AXIS2D != 0)
3958  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2], samples[4+2+1], fx);
3959  else
3960  vx1= samples[4+2];
3961 
3962  // Lerp x:x+1, y:y+1, z+1
3963  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3964  }
3965  else
3966  vy1 = vx;
3967 
3968  // Lerp x:x+1, y:y+1, z:z+1
3969  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3970  }
3971  else
3972  vz = vy;
3973 
3974  return vz;
3975 }
3976 
// Fully-inlined trilinear sampling at a unit-cube position: converts
// to voxel coordinates, splits into integer indices and fractional
// weights, then performs the same three-path interpolation as
// lerpVoxel() without the extra call. Kept byte-identical: lerp
// nesting order is float-round-off-sensitive.
// NOTE(review): the function-name line (3979) was lost in extraction
// -- confirm which sampling overload this is in the HDK header.
3977 template <typename T>
3978 T
3980 {
3981  int x, y, z;
3982  // Yes, these have to be 32 becaues split float requires 32!
3983  fpreal32 fx, fy, fz;
3984 
3985  // We go from the position in the unit cube into the index.
3986  // The center of cells must map to the exact integer indices.
3987  pos.x() *= myRes[0];
3988  pos.y() *= myRes[1];
3989  pos.z() *= myRes[2];
3990  pos.x() -= 0.5;
3991  pos.y() -= 0.5;
3992  pos.z() -= 0.5;
3993 
3994  // Determine integer & fractional components.
3995  fx = pos.x();
3996  SYSfastSplitFloat(fx, x);
3997  fy = pos.y();
3998  SYSfastSplitFloat(fy, y);
3999  fz = pos.z();
4000  SYSfastSplitFloat(fz, z);
4001 
4002  // Do trilinear interpolation.
4003  T vx, vx1, vy, vy1, vz;
4004 
4005  // NOTE:
4006  // If you get a crash reading one of these getValues, note
4007  // that if you are reading from the same voxel array that you
4008  // are writing to, and doing so in a multi-threaded, tile-by-tile
4009  // approach, just because your positions match doesn't mean you
4010  // won't access neighbouring tiles. To avoid this, perform
4011  // the obvious optimization of checking for aligned tiles
4012  // and using direct indices.
4013  // (I have avoided using if (!fx && !fy && !fz) as the test
4014  // as that is both potentially inaccurate (round off in conversion
4015  // may mess us up) and said optimzation is a useful one anyways)
4016 
4017  // Optimization for common cases (values are within the voxel range and
4018  // are all within the same tile)
4019  if ( !((x | y | z) < 0) &&
4020  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
4021  // if ( (x > 0) && (y > 0) && (z > 0) &&
4022  // Do not use x+1 < foo as if x is MAX_INT that will falsely
4023  // report in bounds!
4024  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
4025  {
4026  int xm, ym, zm;
4027 
4028  xm = x & TILEMASK;
4029  ym = y & TILEMASK;
4030  zm = z & TILEMASK;
4031 
4032  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
4033  {
4034  const UT_VoxelTile<T> *tile =
4035  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
4036 
4037  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
4038  }
4039  else
4040  {
4041  // We cross tile boundaries but we remain within
4042  // the voxel grid. We can thus avoid any bounds checking
4043  // and use operator() rather than getValue.
4044 
4045  // Lerp x:x+1, y, z
4046  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
4047  (*this)(x+1, y, z),
4048  fx);
4049  // Lerp x:x+1, y+1, z
4050  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
4051  (*this)(x+1, y+1, z),
4052  fx);
4053  // Lerp x:x+1, y:y+1, z
4054  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4055 
4056  // Lerp x:x+1, y, z+1
4057  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
4058  (*this)(x+1, y, z+1),
4059  fx);
4060  // Lerp x:x+1, y+1, z+1
4061  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
4062  (*this)(x+1, y+1, z+1),
4063  fx);
4064 
4065  // Lerp x:x+1, y:y+1, z+1
4066  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4067 
4068  // Lerp x:x+1, y:y+1, z:z+1
4069  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4070  }
4071  }
4072  else
4073  {
// Slow path: getValue() bounds-checks and applies border rules.
4074  // Lerp x:x+1, y, z
4075  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
4076  getValue(x+1, y, z),
4077  fx);
4078  // Lerp x:x+1, y+1, z
4079  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
4080  getValue(x+1, y+1, z),
4081  fx);
4082 
4083  // Lerp x:x+1, y:y+1, z
4084  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4085 
4086  // Lerp x:x+1, y, z+1
4087  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
4088  getValue(x+1, y, z+1),
4089  fx);
4090  // Lerp x:x+1, y+1, z+1
4091  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
4092  getValue(x+1, y+1, z+1),
4093  fx);
4094 
4095  // Lerp x:x+1, y:y+1, z+1
4096  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4097 
4098  // Lerp x:x+1, y:y+1, z:z+1
4099  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4100  }
4101 
4102  return vz;
4103 }
4104 
4105 #if 0
// NOTE(review): Disabled SIMD (v4uf/v4ui) variant of the trilinear sampling
// entry point above. Kept for reference only; it is compiled out and the
// scalar path is used instead. The function-name line is not visible here
// (stripped signature) -- presumably the same evaluate/lerp entry point.
4106 template <typename T>
4107 T
4109 {
4110  int x, y, z;
4111 
4112  v4ui idx;
4113 
4114  // We go from the position in the unit cube into the index.
4115  // The center of cells must map to the exact integer indices.
4116  pos *= v4uf((float) myRes[0], (float) myRes[1], (float) myRes[2], 0.0f);
4117 
4118  // Because we use truncation we'll get inaccurate results
4119  // at the zero boundary, we thus bump up by two
4120  pos += 1.5;
4121 
4122  idx = pos.splitFloat();
4123 
4124  // And counteract that bump
4125  idx -= 2;
4126 
4127  x = idx[0];
4128  y = idx[1];
4129  z = idx[2];
4130 
4131  // Do trilinear interpolation.
4132  // A | B
4133  // 0 +z +y +yz | +x +xz +xy +xyz
4134 
4135  v4uf a, b, fx, fy, fz;
4136 
4137  int xm, ym, zm;
4138 
4139  xm = x & TILEMASK;
4140  ym = y & TILEMASK;
4141  zm = z & TILEMASK;
4142 
4143  // Optimization for common case (values are within the voxel range and
4144  // are all within the same tile)
4145  if ( (x > 0) & (y > 0) & (z > 0) &
4146  (x < myRes[0]-1) & (y < myRes[1]-1) & (z < myRes[2]-1) &
4147  (xm != TILEMASK) & (ym != TILEMASK) & (zm != TILEMASK) )
4148 /*
4149  if (isValidIndex(x, y, z) && isValidIndex(x+1, y+1, z+1) &&
4150  (x & TILEMASK) != TILEMASK &&
4151  (y & TILEMASK) != TILEMASK &&
4152  (z & TILEMASK) != TILEMASK)
4153 */
4154  {
4155  const UT_VoxelTile<T> *tile =
4156  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
4157 
4158  return tile->lerp(pos, xm, ym, zm);
4159  }
4160  else
4161  {
// Gather the 8 cell corners into two SIMD registers:
// a = {0, +z, +y, +yz}, b = {+x, +xz, +xy, +xyz} (see diagram above).
4162  a = v4uf( getValue(x, y, z),
4163  getValue(x, y, z+1),
4164  getValue(x, y+1, z),
4165  getValue(x, y+1, z+1) );
4166  b = v4uf( getValue(x+1, y, z),
4167  getValue(x+1, y, z+1),
4168  getValue(x+1, y+1, z),
4169  getValue(x+1, y+1, z+1) );
4170  }
4171 
4172  fx = pos.swizzle<0, 0, 0, 0>();
4173  fy = pos.swizzle<1, 1, 1, 1>();
4174  fz = pos.swizzle<2, 2, 2, 2>();
4175 
// Successive lerps: fold b into a along x, then pairwise along y,
// then along z; lane 0 ends up holding the fully-interpolated value.
4176  b -= a;
4177  a = madd(b, fx, a);
4178 
4179  b = a.swizzle<2, 3, 0, 1>();
4180  b -= a;
4181  a = madd(b, fy, a);
4182 
4183  b = a.swizzle<1, 2, 3, 0>();
4184  b -= a;
4185  a = madd(b, fz, a);
4186 
4187  return a[0];
4188 }
4189 #endif
4190 
/// Normalizes the start of a [start, end) filter window for periodic
/// (wrapping) iteration: a negative start means the window begins before
/// index 0, so the whole window is shifted up by one period of @a res to
/// bring start into [0, res). Both @a start and @a end are updated in
/// place; the (possibly shifted) start is returned.
static inline int
firstTile(int &start, int &end, int res)
{
    const bool wraps = (start < 0);

    if (wraps)
    {
        start += res;
        end   += res;
    }
    return start;
}
4201 
4202 static inline int
4203 nextTile(int &tile, int &pstart, int &start, int &end, int res)
4204 {
4205  int pend;
4206 
4207  if (pstart >= res)
4208  {
4209  pstart -= res;
4210  start -= res;
4211  end -= res;
4212  }
4213  tile = pstart >> TILEBITS;
4214  pend = SYSmin((tile+1) * TILESIZE, end, res);
4215 
4216  UT_ASSERT(pstart >= 0 && pstart < res);
4217  UT_ASSERT(pend > 0 && pstart <= res);
4218  UT_ASSERT(pend-pstart > 0);
4219  UT_ASSERT(pend-pstart <= TILESIZE);
4220 
4221  return pend;
4222 }
4223 
// Filtered (kernel-weighted) sample of the voxel array at a unit-cube
// position. The signature line is stripped in this rendering; the visible
// parameters are a position, a UT_Filter, a filter radius, and clampaxis
// (an axis forced to clamp wrapping) -- presumably the evaluate() overload
// declared in UT_VoxelArray.h; confirm against the header.
4224 template <typename T>
4225 T
4227  fpreal radius, int clampaxis) const
4228 {
4229  UT_Vector3 tpos;
4230  UT_FilterWindow win[3];
4231  UT_FilterWrap wrap[3];
4232  UT_VoxelTile<T> *tile;
4233  const float *weights[3];
4234  fpreal visible;
4235  int start[3], end[3], size[3];
4236  int pstart[3], pend[3];
4237  int tx, ty, tz, i;
4238  T result;
4239 
4240  if (!myTiles)
4241  {
4242  // If the array is empty, just use the border value.
4243  return myBorderValue;
4244  }
4245 
// Map this array's border mode onto the closest UT_FilterWrap mode.
4246  UT_FilterWrap basewrap;
4247  switch (myBorderType)
4248  {
4249  case UT_VOXELBORDER_CONSTANT: basewrap = UT_WRAP_BORDER; break;
4250  case UT_VOXELBORDER_REPEAT: basewrap = UT_WRAP_REPEAT; break;
4251  case UT_VOXELBORDER_STREAK: basewrap = UT_WRAP_CLAMP; break;
4252 
4253  // NOTE: This is incorrect!
4254  case UT_VOXELBORDER_EXTRAP: basewrap = UT_WRAP_CLAMP; break;
4255  default: basewrap = UT_WRAP_CLAMP; break;
4256  }
4257  for (i = 0; i < 3; i++)
4258  {
4259  if (i == clampaxis)
4260  wrap[i] = UT_WRAP_CLAMP;
4261  else
4262  wrap[i] = basewrap;
4263  }
4264 
// Zero-initialize the accumulator. memset is used so this also works for
// the vector instantiations (UT_Vector2/3/4) without a T(0) constructor.
4265  memset(&result, 0, sizeof(result));
4266 
4267  radius *= filter.getSupport();
4268 
4269  // Make a local copy of the position so that we can modify it.
4270  tpos = pos;
4271  visible = 1;
// Build a separable 1D filter window per axis in voxel space.
4272  for (i = 0; i < 3; i++)
4273  {
4274  tpos[i] = tpos[i]*myRes[i];
4275  if (!win[i].setWeights(filter, tpos[i], radius, myRes[i], wrap[i]))
4276  return result;
4277 
4278  weights[i] = win[i].getWeights();
4279  start[i] = win[i].getStart() % myRes[i];
4280  size[i] = win[i].getSize();
4281 
4282  UT_ASSERT(start[i] >= 0);
4283  UT_ASSERT(size[i] <= myRes[i]);
4284 
4285  end[i] = start[i] + size[i];
4286  visible *= win[i].getVisible();
4287  }
4288 
// Accumulate filtered results, walking the (possibly wrapped) window
// tile run by tile run on each axis via firstTile()/nextTile() so each
// tile does a contiguous weightedSum.
4289  // Accumulate filtered results
4290  pstart[2] = firstTile(start[2], end[2], myRes[2]);
4291  while (pstart[2] < end[2])
4292  {
4293  pend[2] = nextTile(tz, pstart[2], start[2], end[2], myRes[2]);
4294  pstart[1] = firstTile(start[1], end[1], myRes[1]);
4295  while (pstart[1] < end[1])
4296  {
4297  pend[1] = nextTile(ty, pstart[1], start[1], end[1], myRes[1]);
4298  pstart[0] = firstTile(start[0], end[0], myRes[0]);
4299  while (pstart[0] < end[0])
4300  {
4301  pend[0] = nextTile(tx, pstart[0], start[0], end[0], myRes[0]);
4302  tile = getTile(tx, ty, tz);
4303  UT_ASSERT(tile);
4304  tile->weightedSum(pstart, pend, weights, start, result);
4305  pstart[0] = pend[0];
4306  }
4307  pstart[1] = pend[1];
4308  }
4309  pstart[2] = pend[2];
4310  }
4311 
// Any portion of the filter support that fell outside the array (for the
// border-wrap mode) is blended in from the constant border value.
4312  if (visible < 1)
4313  {
4314  result += (1-visible)*myBorderValue;
4315  }
4316 
4317  return result;
4318 }
4319 
// Resamples a source voxel array into this one using the named filter
// type. The signature line is stripped here; presumably
// UT_VoxelArray<T>::resample(const UT_VoxelArray<T> &src, ...) -- confirm
// against the header. Delegates the actual work to resamplethread().
4320 template <typename T>
4321 void
4323  UT_FilterType filtertype,
4324  float filterwidthscale,
4325  int clampaxis)
4326 {
4327  fpreal radius;
4328  UT_Filter *filter;
4329 
// getFilter/releaseFilter are paired acquire/release calls; the filter
// must be released on every path (there are no early returns here).
4330  filter = UT_Filter::getFilter(filtertype);
4331 
// Filter radius scales with the worst-axis downsampling ratio, and is
// never smaller than one source voxel.
4332  radius = SYSmax( src.getXRes() / (fpreal)getXRes(),
4333  src.getYRes() / (fpreal)getYRes(),
4334  src.getZRes() / (fpreal)getZRes(),
4335  1.0f );
4336  radius *= 0.5 * filterwidthscale;
4337 
4338  resamplethread(src, filter, radius, clampaxis);
4339 
4340  UT_Filter::releaseFilter(filter);
4341 }
4342 
// Copies this job's share of the voxel data into a flat array with the
// given y/z strides. 2D arrays (a unit axis whose stride collapses) are
// routed to the faster axis-specialized flatten.
4343 template <typename T>
4344 void
4345 UT_VoxelArray<T>::flattenPartial(T *flatarray, exint ystride, exint zstride,
4346  const UT_JobInfo &info) const
4347 {
4348  // Check to see if we are 2d, if so, we wish to use the
4349  // axis flatten.
4350  if (getXRes() == 1 && ystride == 1)
4351  {
4352  flattenPartialAxis<0>(flatarray, zstride, info);
4353  }
4354  else if (getYRes() == 1 && zstride == ystride)
4355  {
4356  flattenPartialAxis<1>(flatarray, zstride, info);
4357  }
4358  else if (getZRes() == 1)
4359  {
4360  flattenPartialAxis<2>(flatarray, ystride, info);
4361  }
4362  else
4363  {
// (Stripped line: declares the voxel iterator, presumably
// UT_VoxelArrayIterator<T> vit; -- confirm against original source.)
4365 
4366  // Const cast.
4367  vit.setArray((UT_VoxelArray<T> *)this);
4368  // We can't use splitByTile as then our writes will fight
4369  // over 4k pages.
4370  vit.setPartialRange(info.job(), info.numJobs());
4371 
4372  for (vit.rewind(); !vit.atEnd(); vit.advance())
4373  {
4374  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4375  }
4376  }
4377 }
4378 
4379 
// Flattens a 2D voxel array (one axis has resolution 1, selected by
// AXIS2D) into a flat 2D destination. Work is distributed by handing each
// job a TILESIZE-high stripe of destination rows via info.nextTask().
4380 template <typename T>
4381 template <int AXIS2D>
4382 void
4384  const UT_JobInfo &info) const
4385 {
4386  // The x/y refer to the destination x/y.
4387  // The actual source index depends on AXIS2D.
4388  const int ax = (AXIS2D == 0) ? 1 : 0;
4389  const int ay = (AXIS2D == 2) ? 1 : 2;
4390  int tileidx[3] = { 0, 0, 0 };
4391 
4392  while (1)
4393  {
// Dynamic scheduling: each task is one stripe of TILESIZE dest rows.
4394  exint tiley = info.nextTask();
4395  exint ystart = tiley * TILESIZE;
4396  if (ystart >= getRes(ay))
4397  break;
4398 
4399  T *stripe = &flatarray[ystart * ystride];
4400 
4401  int yres = SYSmin(getRes(ay) - ystart, TILESIZE);
4402 
// Walk the row of source tiles covering this stripe, fast-pathing
// constant and simply-compressed tiles with memcpy.
4403  for (int tilex = 0, ntilex = getTileRes(ax); tilex < ntilex; tilex++)
4404  {
4405  tileidx[ax] = tilex;
4406  tileidx[ay] = tiley;
4407  auto tile = getTile(tileidx[0], tileidx[1], tileidx[2]);
4408 
4409  int xres = tile->getRes(ax);
4410  T *stripey = stripe;
4411  if (tile->isConstant())
4412  {
// Constant tile: rawData() holds a single value; replicate it.
4413  const T *srcdata = tile->rawData();
4414 
4415  for (int y = 0; y < yres; y++)
4416  {
4417  for (int x = 0; x < xres; x++)
4418  {
4419  memcpy(&stripey[x], srcdata, sizeof(T));
4420  }
4421  stripey += ystride;
4422  }
4423  }
4424  else if (tile->isSimpleCompression())
4425  {
4426  const T *srcdata = tile->rawData();
4427 
// Row-at-a-time memcpy; the TILESIZE case is split out so the
// copy length is a compile-time constant.
4428  if (xres != TILESIZE)
4429  {
4430  for (int y = 0; y < yres; y++)
4431  {
4432  memcpy(stripey, srcdata, sizeof(T) * xres);
4433  srcdata += xres;
4434  stripey += ystride;
4435  }
4436  }
4437  else
4438  {
4439  for (int y = 0; y < yres; y++)
4440  {
4441  memcpy(stripey, srcdata, sizeof(T) * TILESIZE);
4442  srcdata += TILESIZE;
4443  stripey += ystride;
4444  }
4445  }
4446  }
4447  else
4448  {
// General (compressed) tile: decode voxel by voxel via operator().
4449  for (int y = 0; y < yres; y++)
4450  {
4451  int idx[3] = { 0, 0, 0 };
4452  idx[ay] = y;
4453  for (int x = 0; x < xres; x++)
4454  {
4455  idx[ax] = x;
4456  stripey[x] = (*tile)(idx[0], idx[1], idx[2]);
4457  }
4458  stripey += ystride;
4459  }
4460  }
4461 
4462  stripe += TILESIZE;
4463  }
4464  }
4465 }
4466 
4467 
// UT_Vector4F specialization of the fixed-point GL flatten (function name
// stripped in this rendering; presumably flattenGLFixed8Partial -- confirm
// against the header). Each voxel's RGBA is scaled by 255 and clamped into
// four consecutive uint8s.
4468 template <>
4469 inline void
4471  exint ystride, exint zstride,
4472  UT_Vector4F,
4473  const UT_JobInfo &info) const
4474 {
// (Stripped line: declares the voxel iterator over this array.)
4476 
4477  // Const cast.
4478  vit.setArray(SYSconst_cast(this));
4479  // We can't use splitByTile as then our writes will fight
4480  // over 4k pages.
4481  vit.setPartialRange(info.job(), info.numJobs());
4482 
4483  for (vit.rewind(); !vit.atEnd(); vit.advance())
4484  {
4485  UT_Vector4F v = vit.getValue();
// Four output bytes per voxel, hence the *4 on the linear index.
4486  int idx = (vit.x() + vit.y()*ystride + vit.z()*zstride) * 4;
4487 
4488  flatarray[idx] = (uint8) SYSclamp(v.x() * 255.0f, 0.0f, 255.0f);
4489  flatarray[idx+1] = (uint8) SYSclamp(v.y() * 255.0f, 0.0f, 255.0f);
4490  flatarray[idx+2] = (uint8) SYSclamp(v.z() * 255.0f, 0.0f, 255.0f);
4491  flatarray[idx+3] = (uint8) SYSclamp(v.w() * 255.0f, 0.0f, 255.0f);
4492  }
4493 }
4494 
// Generic fallback for the fixed-point GL flatten: only explicit
// specializations (e.g. the UT_Vector4F one above) are meaningful, so the
// generic body is a deliberate assert trap.
4495 template <typename T>
4496 void
4498  exint ystride, exint zstride,
4499  T, const UT_JobInfo &info) const
4500 {
4501  UT_ASSERT(!"This template requires specific instantiations.");
4502 }
4503 
// UT_Vector4F specialization of a GL flatten (name stripped in this
// rendering): straight copy of each voxel into the flat array, no value
// conversion.
4504 template <>
4505 inline void
4507  exint ystride, exint zstride,
4508  UT_Vector4F,
4509  const UT_JobInfo &info) const
4510 {
// (Stripped line: declares the voxel iterator over this array.)
4512 
4513  // Const cast.
4514  vit.setArray(SYSconst_cast(this));
4515  // We can't use splitByTile as then our writes will fight
4516  // over 4k pages.
4517  vit.setPartialRange(info.job(), info.numJobs());
4518 
4519  for (vit.rewind(); !vit.atEnd(); vit.advance())
4520  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4521 }
4522 
// Generic fallback for the above GL flatten: only explicit specializations
// are supported; the generic body is a deliberate assert trap.
4523 template <typename T>
4524 void
4526  exint ystride, exint zstride,
4527  T, const UT_JobInfo &info) const
4528 {
4529  UT_ASSERT(!"This template requires specific instantiations.");
4530 }
4531 
// UT_Vector4F specialization of a GL flatten (name stripped in this
// rendering) that flushes near-denormal components to zero before copying,
// to work around a GPU driver bug described below.
4532 template <>
4533 inline void
4535  exint ystride, exint zstride,
4536  UT_Vector4F,
4537  const UT_JobInfo &info) const
4538 {
// (Stripped line: declares the voxel iterator over this array.)
4540 
4541  // Const cast.
4542  vit.setArray(SYSconst_cast(this));
4543  // We can't use splitByTile as then our writes will fight
4544  // over 4k pages.
4545  vit.setPartialRange(info.job(), info.numJobs());
4546 
4547  for (vit.rewind(); !vit.atEnd(); vit.advance())
4548  {
4549  UT_Vector4F v = vit.getValue();
4550 
4551  // NOTE: This works around an Nvidia driver bug on OSX, and older
4552  // Nvidia drivers on other platforms. The problem was that very
4553  // small FP values were causing huge random values to appear in
4554  // the 3D Texture.
4555  if(SYSabs(v.x()) < 1e-9)
4556  v.x() = 0.0;
4557  if(SYSabs(v.y()) < 1e-9)
4558  v.y() = 0.0;
4559  if(SYSabs(v.z()) < 1e-9)
4560  v.z() = 0.0;
4561  if(SYSabs(v.w()) < 1e-9)
4562  v.w() = 0.0;
4563 
4564  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = v;
4565  }
4566 }
4567 
// Generic fallback for the denormal-flushing GL flatten above: only
// explicit specializations are supported; the generic body is a deliberate
// assert trap.
4568 template <typename T>
4569 void
4571  exint ystride, exint zstride,
4572  T,const UT_JobInfo &info) const
4573 {
4574  UT_ASSERT(!"This template requires specific instantiations.");
4575 }
4576 
4577 
// Inverse of flattenPartial (function name stripped in this rendering):
// reads values out of a flat array and writes them back into this voxel
// array, tile by tile, re-compressing each tile as the iterator leaves it.
4578 template <typename T>
4579 void
4581  exint ystride, exint zstride,
4582  const UT_JobInfo &info)
4583 {
// (Stripped line: declares the write iterator over this array.)
4585 
4586  vit.setArray(this);
// splitByTile is safe here (unlike the flatten paths) because each job
// writes only to its own tiles of the voxel array.
4587  vit.splitByTile(info);
4588  vit.setCompressOnExit(true);
4589 
4590  for (vit.rewind(); !vit.atEnd(); vit.advance())
4591  {
4592  vit.setValue(flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride]);
4593  }
4594 }
4595 
// Flattens the listed tiles (by linear tile index) into dstdata with the
// given element stride, returning the advanced destination pointer.
// Function name stripped in this rendering -- presumably extractTiles;
// confirm against the header.
4596 template <typename T>
4597 template <typename S>
4598 S *
4600  const UT_IntArray &tilelist) const
4601 {
4602  for (int i = 0; i < tilelist.entries(); i++)
4603  {
4604  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
4605  const UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
4606 
4607  tile->flatten(dstdata, stride);
4608  dstdata += tile->numVoxels() * stride;
4609  }
4610  return dstdata;
4611 }
4612 
// Flattens a mixed list of full and partial tiles into dstdata. Full tiles
// (numvoxel matches the tile) are flattened wholesale; partial tiles are
// gathered voxel-by-voxel through the parallel ix/iy/iz index arrays,
// which hold absolute voxel coordinates consumed in order via srcidx.
// Returns the advanced destination pointer.
4613 template <typename T>
4614 template <typename S, typename IDX>
4615 S *
4617  const IDX *ix, const IDX *iy, const IDX *iz,
4618  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const
4619 {
4620  int srcidx = 0;
4621 
4622  for (auto && tiledata : tilelist)
4623  {
4624  int tileidx = tiledata.tileidx;
4625  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
4626 
4627  const UT_VoxelTile<T> *tile = getLinearTile(tileidx);
4628  int tilevoxel = tile->numVoxels();
4629 
4630  if (tilevoxel == tiledata.numvoxel)
4631  {
4632  // Full tile.
4633  tile->flatten(dstdata, stride);
4634  dstdata += tiledata.numvoxel * stride;
4635  srcidx += tiledata.numvoxel;
4636  }
4637  else
4638  {
4639  // Partial tile...
// Convert the linear tile index into the tile's base voxel coords
// so the absolute ix/iy/iz indices can be made tile-relative.
4640  int basex, basey, basez;
4641  linearTileToXYZ(tileidx, basex, basey, basez);
4642 
4643  basex <<= TILEBITS;
4644  basey <<= TILEBITS;
4645  basez <<= TILEBITS;
4646 
4647  if (tile->isSimpleCompression())
4648  {
4649  const T *src = tile->rawData();
4650  if (tile->isConstant())
4651  {
// Constant tile: every requested voxel gets the single stored value.
4652  S cval = *src;
4653  for (int i = 0; i < tiledata.numvoxel; i++)
4654  {
4655  *dstdata = cval;
4656  dstdata += stride;
4657  srcidx++;
4658  }
4659  }
4660  else
4661  {
// Raw tile: index the uncompressed data directly.
4662  int w = tile->xres();
4663  int h = tile->yres();
4664  for (int i = 0; i < tiledata.numvoxel; i++)
4665  {
4666  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4667  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4668  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4669  *dstdata = src[ (ix[srcidx] - basex)
4670  + (iy[srcidx] - basey) * w
4671  + (iz[srcidx] - basez) * w * h];
4672  dstdata += stride;
4673  srcidx++;
4674  }
4675  }
4676  }
4677  else
4678  {
// Compressed tile: decode each voxel through operator().
4679  for (int i = 0; i < tiledata.numvoxel; i++)
4680  {
4681  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4682  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4683  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4684  *dstdata = (*tile)(ix[srcidx] - basex,
4685  iy[srcidx] - basey,
4686  iz[srcidx] - basez);
4687  dstdata += stride;
4688  srcidx++;
4689  }
4690  }
4691  }
4692  }
4693  return dstdata;
4694 }
4695 
// Inverse of the tile-list flatten above: writes srcdata back into the
// listed tiles (by linear index), optionally re-compressing each tile.
// Returns the advanced source pointer.
4696 template <typename T>
4697 template <typename S>
4698 const S *
4700  const UT_IntArray &tilelist)
4701 {
4702  bool docompress = getCompressionOptions().compressionEnabled();
4703  for (int i = 0; i < tilelist.entries(); i++)
4704  {
4705  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
4706  UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
4707 
4708  tile->writeData(srcdata, stride);
4709  if (docompress)
4710  tile->tryCompress(getCompressionOptions());
4711  srcdata += tile->numVoxels() * stride;
4712  }
4713  return srcdata;
4714 }
4715 
// Inverse of the mixed extract above: writes srcdata back into a mixed
// list of full and partial tiles. Partial tiles are scattered
// voxel-by-voxel through the absolute ix/iy/iz index arrays (consumed in
// order via srcidx). Each touched tile may be re-compressed afterwards.
// Returns the advanced source pointer.
4716 template <typename T>
4717 template <typename S, typename IDX>
4718 const S *
4720  const IDX *ix, const IDX *iy, const IDX *iz,
4721  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist)
4722 {
4723  bool docompress = getCompressionOptions().compressionEnabled();
4724  int srcidx = 0;
4725 
4726  for (auto && tiledata : tilelist)
4727  {
4728  int tileidx = tiledata.tileidx;
4729  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
4730 
4731  UT_VoxelTile<T> *tile = getLinearTile(tileidx);
4732  int tilevoxel = tile->numVoxels();
4733 
4734  if (tilevoxel == tiledata.numvoxel)
4735  {
// Full tile: bulk write.
4736  tile->writeData(srcdata, stride);
4737  srcdata += tiledata.numvoxel * stride;
4738  srcidx += tiledata.numvoxel;
4739  }
4740  else
4741  {
4742  // Partial tile.
4743  int basex, basey, basez;
4744  linearTileToXYZ(tileidx, basex, basey, basez);
4745 
4746  basex <<= TILEBITS;
4747  basey <<= TILEBITS;
4748  basez <<= TILEBITS;
4749 
4750  for (int i = 0; i < tiledata.numvoxel; i++)
4751  {
4752  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4753  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4754  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4755  tile->setValue(ix[srcidx] - basex,
4756  iy[srcidx] - basey,
4757  iz[srcidx] - basez,
4758  *srcdata);
4759  srcdata += stride;
4760  srcidx++;
4761  }
4762  }
4763 
4764  if (docompress)
4765  tile->tryCompress(getCompressionOptions());
4766  }
4767  return srcdata;
4768 }
4769 
4770 ///
4771 /// This helper class can be used for iterating over a voxel array in relation to another
4772 /// one. In particular, dst is the destination voxel array that is to be iterated over; its
4773 /// tile at [X, Y, Z] is taken to be coincident with tile [X + xoff, Y + yoff, Z + zoff] of
4774 /// src. Tiles of dst are partitioned into two sets:
4775 /// - BP (boundary pass): these are tiles of dst that are coincident with tiles of src that
4776 /// are at the boundary (so they affect extrapolation);
4777 /// - IP (internal pass): these tiles are coincident with internal tiles of src (those that
4778 /// are irrelevant for extrapolation).
4779 /// Once created, an object of this class can return the number of tiles in each set and
4780 /// convert from a linear 0-N index to [X, Y, Z] coordinates that identify the tile within
4781 /// dst.
4782 ///
4783 /// This class can be roughly used as follows... Main function:
4784 ///
4785 /// __linearTileIndexConverter index(dst, src, xoff, yoff, zoff);
4786 /// customFunctionBP(..., index);
4787 /// customFunctionIP(..., index);
4788 ///
4789 /// BPPartial(..., const __linearTileIndexConverter& index, const UT_JobInfo& i) function:
4790 ///
4791 /// int t0, t1;
4792 /// i.divideWork(index.getNTilesBP(), t0, t1);
4793 /// for (int j = t0; j < t1; j++)
4794 /// {
4795 /// int x, y, z;
4796 /// index.toLinearBP(j, x, y, z);
4797 /// UT_VoxelTile<T>* dtile = src.getTile(x, y, z);
4798 /// ...
4799 /// }
4800 ///
4801 /// With a similar implementation for IP. See moveTilesWithOffset() for an example.
4802 ///
4804 {
4805 public:
4806  /// Creates an index converter from the given voxel arrays. This object partitions tiles
4807  /// of dst into those that are affected by
// (Sentence above is truncated in the original; the class-level comment
// preceding this block describes the BP/IP partition in full.)
4808  template <typename T>
4810  const UT_VoxelArray<T>* src,
4811  int xoff, int yoff, int zoff)
4812  {
4813  myXMax = dst->getTileRes(0);
4814  myYMax = dst->getTileRes(1);
4815  myZMax = dst->getTileRes(2);
4816 
4817  // If source has a constant border, no tile affects extrapolation; so all tiles
4818  // can be put into IP. Otherwise, the outer layer of src's tiles are part of BP.
// The Skip* members describe the IP "hole" carved out of the full
// [0, my*Max) tile box: Skip*Min is where it starts, Skip*Len its length.
4819  int m = (src->getBorder() == UT_VOXELBORDER_CONSTANT) ? 0 : 1;
4820  myXSkipMin = SYSmax(m - xoff, 0);
4821  myXSkipLen =
4822  SYSmax(SYSmin(myXMax, src->getTileRes(0) - xoff - m) - myXSkipMin, 0);
4823  myYSkipMin = SYSmax(m - yoff, 0);
4824  myYSkipLen =
4825  SYSmax(SYSmin(myYMax, src->getTileRes(1) - yoff - m) - myYSkipMin, 0);
4826  myZSkipMin = SYSmax(m - zoff, 0);
4827  myZSkipLen =
4828  SYSmax(SYSmin(myZMax, src->getTileRes(2) - zoff - m) - myZSkipMin, 0);
4829  }
4830 
4831  /// Returns the number of tiles in each part.
4832  int getNTilesBP() const
4833  {
4834  return myXMax * myYMax * myZMax - getNTilesIP();
4835  }
4836  int getNTilesIP() const
4837  {
4838  return myXSkipLen * myYSkipLen * myZSkipLen;
4839  }
4840 
4841  /// Converts a linear index (between 0 and getNTiles*P()-1) to the [X, Y, Z] tile
4842  /// coordinate with respect to dst.
4843  void toLinearBP(int k, int& x, int& y, int& z) const
4844  {
4845  // Number of voxels before the first Z slice with a hole.
4846  const int check_0 = myXMax * myYMax * myZSkipMin;
4847  // Check if we are before the first Z slice of the hole or if there is no hole.
4848  if (myXSkipLen == 0 || myYSkipLen == 0 || myZSkipLen == 0 || k < check_0)
4849  {
4850  _toRegularLinear(k, myXMax, myYMax, x, y, z);
4851  return;
4852  }
4853  k -= check_0;
4854 
4855  // Number of voxels per Z-slice with a hole.
4856  const int check_1a = myXMax * myYMax - myXSkipLen * myYSkipLen;
4857  // Total number of voxels in Z-slices with a hole.
4858  const int check_1b = check_1a * myZSkipLen;
4859  // Check if we are past the hole...
4860  if (k >= check_1b)
4861  {
4862  _toRegularLinear(k - check_1b, myXMax, myYMax, x, y, z);
4863  z += myZSkipMin + myZSkipLen;
4864  return;
4865  }
4866 
4867  // We are in the holed slices... First, determine the Z-slice.
4868  z = k / check_1a + myZSkipMin;
4869  // The remainder (index within the slice).
4870  k = k % check_1a;
4871  // Check if we are before the first Y slice with a hole.
4872  const int check_2 = myXMax * myYSkipMin;
4873  if (k < check_2)
4874  {
4875  y = k / myXMax;
4876  x = k % myXMax;
4877  return;
4878  }
4879  k -= check_2;
4880  // Check if we are past the last Y slice with a hole.
4881  const int check_3 = (myXMax - myXSkipLen) * myYSkipLen;
4882  if (k >= check_3)
4883  {
4884  k -= check_3;
4885  y = k / myXMax + myYSkipMin + myYSkipLen;
4886  x = k % myXMax;
4887  return;
4888  }
4889 
4890  // We are in the holed slices. Find the y coordinate.
4891  y = k / (myXMax - myXSkipLen) + myYSkipMin;
4892  x = k % (myXMax - myXSkipLen);
4893  if (x >= myXSkipMin)
4894  x += myXSkipLen;
4895  }
4896  void toLinearIP(int k, int& x, int& y, int& z) const
4897  {
// IP tiles form a dense box, so the conversion is a plain row-major
// decode offset by the hole's minimum corner.
4898  _toRegularLinear(k, myXSkipLen, myYSkipLen, x, y, z);
4899  x += myXSkipMin;
4900  y += myYSkipMin;
4901  z += myZSkipMin;
4902  }
4903 
4904 protected:
// Row-major decode of linear index k into (x, y, z) for an xdim-by-ydim
// slab (z unbounded).
4905  static void _toRegularLinear(int k, int xdim, int ydim, int& x, int& y, int& z)
4906  {
4907  x = k % xdim;
4908  k = k / xdim;
4909  y = k % ydim;
4910  z = k / ydim;
4911  }
4912 
4913 protected:
// (Stripped lines: member declarations, presumably myXMax/myYMax/myZMax
// and my{X,Y,Z}SkipMin/my{X,Y,Z}SkipLen -- confirm against the original.)
4917 };
4918 
// Moves tiles from src into this array with a tile-granular offset
// (function name stripped in this rendering; presumably
// moveTilesWithOffset). Internal (IP) tiles may steal src's tile data by
// pointer swap; boundary (BP) tiles are copied because their src tiles
// still influence extrapolated border lookups used by other threads. The
// two passes run concurrently via UTparallelInvoke.
4919 template <typename T>
4920 void
4922  int tileoffy, int tileoffz)
4923 {
4924  __linearTileIndexConverter index(this, &src, tileoffx, tileoffy, tileoffz);
4925  UTparallelInvoke(true, [&]
4926  {
4927  // Divide up the work (only internal tiles). Source tiles for these guys can be
4928  // safely changed without affecting other threads.
// (Stripped line: presumably UTparallelFor over [0, index.getNTilesIP()).)
4930  [&](const UT_BlockedRange<int>& range)
4931  {
4932  for (int i = range.begin(); i < range.end(); i++)
4933  {
4934  // Get the location of the current tile...
4935  int xyz[3];
4936  index.toLinearIP(i, xyz[0], xyz[1], xyz[2]);
4937 
4938  // Get the tiles and exchange data.
4939  UT_VoxelTile<T>* tile = getTile(xyz[0], xyz[1], xyz[2]);
4940  UT_VoxelTile<T>* srctile = src.getTile(xyz[0] + tileoffx,
4941  xyz[1] + tileoffy,
4942  xyz[2] + tileoffz);
4943 
4944  // If tiles are of the same size, simply exchange the data. We know these
4945  // tiles do not affect results outside of src.
4946  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
4947  && tile->zres() == srctile->zres())
4948  {
4949  // The tiles are of the same size, so we can just exchange the data
4950  // pointers.
4951  UTswap(tile->myData, srctile->myData);
4952  UTswap(tile->myCompressionType, srctile->myCompressionType);
4953  UTswap(tile->myForeignData, srctile->myForeignData);
4954  }
4955  else
4956  {
4957  // Otherwise, manually fill in the values.
4958  int offv[] = {(xyz[0] + tileoffx) * TILESIZE,
4959  (xyz[1] + tileoffy) * TILESIZE,
4960  (xyz[2] + tileoffz) * TILESIZE};
4961  for (int z = 0; z < tile->zres(); z++)
4962  {
4963  for (int y = 0; y < tile->yres(); y++)
4964  {
4965  for (int x = 0; x < tile->xres(); x++)
4966  {
4967  tile->setValue(x, y, z, src.getValue(x + offv[0],
4968  y + offv[1],
4969  z + offv[2]));
4970  }
4971  }
4972  }
4973  }
4974  }
4975  });
4976  }, [&]
4977  {
4978  // Is the border constant?
4979  const bool const_src_border = (src.getBorder() == UT_VOXELBORDER_CONSTANT);
4980  const T border_val = src.getBorderValue();
4981 
4982  // Offsets in terms of tiles.
4983  const int off[] = {tileoffx, tileoffy, tileoffz};
4984  // Tile resolution of the source.
4985  const int src_tileres[] = {src.getTileRes(0), src.getTileRes(1), src.getTileRes(2)};
4986 
4987  // Divide up the work (only boundary tiles). Source tiles for these guys cannot be
4988  // modified safely.
// (Stripped line: presumably UTparallelFor over [0, index.getNTilesBP()).)
4990  [&](const UT_BlockedRange<int>& range)
4991  {
4992  for (int i = range.begin(); i < range.end(); i++)
4993  {
4994  // Get the location of the current tile...
4995  int xyz[3];
4996  index.toLinearBP(i, xyz[0], xyz[1], xyz[2]);
4997 
4998  // Get the current tile...
4999  UT_VoxelTile<T>* tile = getTile(xyz[0], xyz[1], xyz[2]);
5000  bool outside = false;
5001  for (int j = 0; j < 3; j++)
5002  {
// xyz is shifted into source tile space here; it is the source
// coordinate from this point on.
5003  xyz[j] += off[j];
5004  outside = outside || (xyz[j] < 0 || xyz[j] >= src_tileres[j]);
5005  }
5006 
5007  // If we are completely outside, and borders are constant, we can go ahead and
5008  // make this tile constant.
5009  if (outside && const_src_border)
5010  {
5011  tile->makeConstant(border_val);
5012  }
5013  else
5014  {
5015  // Otherwise, manually fill in the values.
5016  int offv[] = {xyz[0] * TILESIZE, xyz[1] * TILESIZE, xyz[2] * TILESIZE};
5017  for (int z = 0; z < tile->zres(); z++)
5018  {
5019  for (int y = 0; y < tile->yres(); y++)
5020  {
5021  for (int x = 0; x < tile->xres(); x++)
5022  {
5023  tile->setValue(x, y, z, src.getValue(x + offv[0],
5024  y + offv[1],
5025  z + offv[2]));
5026  }
5027  }
5028  }
5029  }
5030  }
5031  });
5032  });
5033 }
5034 
// Copies src into this array shifted by a voxel offset (function name
// stripped in this rendering; presumably copyWithOffset). Fast-paths the
// case where src is a single constant value whose border agrees, then
// delegates to the threaded copyWithOffsetInternal.
5035 template <typename T>
5036 void
5038  int offx, int offy, int offz)
5039 {
5040  UT_VoxelBorderType srcborder;
5041  T srcborderval;
5042 
5043  srcborder = src.getBorder();
5044  srcborderval = src.getBorderValue();
5045 
// EXTRAP borders are excluded from the constant fast path: even a
// constant-valued array extrapolates to non-constant values outside.
5046  if (srcborder != UT_VOXELBORDER_EXTRAP)
5047  {
5048  // These borders may be constant friendly
5049  T srcval;
5050 
5051  if (src.isConstant(&srcval))
5052  {
5053  if (srcborder != UT_VOXELBORDER_CONSTANT ||
5054  SYSisEqual(srcborderval, srcval))
5055  {
5056  // We can trivially make ourselves constant.
5057  constant(srcval);
5058  return;
5059  }
5060  }
5061  }
5062 
5063  copyWithOffsetInternal(src, offx, offy, offz);
5064 }
5065 
// Threaded worker for copyWithOffset (function name stripped in this
// rendering; presumably copyWithOffsetInternal...Partial). Each job walks
// its own destination tiles; per tile it tries, in order: whole-tile
// assignment (offset is tile-aligned and the source tile matches in
// size), constant collapse (all overlapping source tiles are the same
// constant), fragment-wise copyFragment, and finally per-voxel getValue
// for out-of-bounds regions.
5066 template <typename T>
5067 void
5069  int offx, int offy, int offz,
5070  const UT_JobInfo &info)
5071 {
// (Stripped line: declares the write iterator over this array.)
5073  UT_Vector3I off(offx, offy, offz);
5074  bool can_copy_tiles = ((offx & TILEMASK) == 0) && ((offy & TILEMASK) == 0)
5075  && ((offz & TILEMASK) == 0);
5076 
5077  vit.setArray(this);
5078  vit.splitByTile(info);
5079  vit.setCompressOnExit(true);
5080 
5081  // Iterate over all tiles
5082  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5083  {
5084  // Check out this tile
5085  UT_VoxelTile<T> *tile = vit.getTile();
5086  UT_VoxelTile<T> *srctile;
5087  int tx, ty, tz;
5088 
5089  UT_Vector3I start, end, srctileidx, srctileoff, srcend, srcendtile;
5090  vit.getTileVoxels(start, end);
5091 
5092  // If offsets are all multiples of TILESIZE, it is possible to do direct copies,
5093  // assuming source tile is inside and of the same size.
5094  if (can_copy_tiles)
5095  {
5096  // Start by getting the source tile index and ensuring it is inside the
5097  // source array.
5098  bool inside = true;
5099  for (int i = 0; i < 3; i++)
5100  {
5101  srctileidx(i) = vit.myTilePos[i] + (off(i) >> TILEBITS);
5102  inside = inside && srctileidx(i) >= 0
5103  && srctileidx(i) < src.getTileRes(i);
5104  }
5105  // If the tile is inside, we make sure the tile sizes are the same, in which
5106  // case we can do a copy and move on.
5107  if (inside)
5108  {
5109  srctile = src.getTile(srctileidx.x(), srctileidx.y(), srctileidx.z());
5110  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
5111  && tile->zres() == srctile->zres())
5112  {
5113  *tile = *srctile;
5114  continue;
5115  }
5116  }
5117  }
5118 
// General path: the destination tile overlaps up to 2 source tiles per
// axis. Compute the first overlapping source tile and the intra-tile
// offset of the copy.
5119  srctileidx = start;
5120  srctileidx += off;
5121  srctileidx.x() >>= TILEBITS;
5122  srctileidx.y() >>= TILEBITS;
5123  srctileidx.z() >>= TILEBITS;
5124  srctileoff.x() = off.x() & TILEMASK;
5125  srctileoff.y() = off.y() & TILEMASK;
5126  srctileoff.z() = off.z() & TILEMASK;
5127 
5128  srcend = start;
5129  // We are very careful here to be inclusive, so we don't trigger
5130  // the next tile. This is the largest index we expect to index.
5131  srcend.x() += tile->xres() - 1;
5132  srcend.y() += tile->yres() - 1;
5133  srcend.z() += tile->zres() - 1;
5134  srcend += off;
5135  srcendtile = srcend;
5136  srcendtile.x() >>= TILEBITS;
5137  srcendtile.y() >>= TILEBITS;
5138  srcendtile.z() >>= TILEBITS;
5139 
5140  UT_ASSERT(srcendtile.x() == srctileidx.x() ||
5141  srcendtile.x() == srctileidx.x()+1);
5142  UT_ASSERT(srcendtile.y() == srctileidx.y() ||
5143  srcendtile.y() == srctileidx.y()+1);
5144  UT_ASSERT(srcendtile.z() == srctileidx.z() ||
5145  srcendtile.z() == srctileidx.z()+1);
5146 
5147  // Check if we are fully in bounds.
5148  if (srctileidx.x() >= 0 &&
5149  srctileidx.y() >= 0 &&
5150  srctileidx.z() >= 0 &&
5151  srcendtile.x() < src.getTileRes(0) &&
5152  srcendtile.y() < src.getTileRes(1) &&
5153  srcendtile.z() < src.getTileRes(2) &&
5154  srcend.x() < src.getXRes() &&
5155  srcend.y() < src.getYRes() &&
5156  srcend.z() < src.getZRes())
5157  {
5158  bool allconst = true, firsttile = true;
5159  T constval, cval;
5160  // Check if we are all constant...
5161  for (tz = srctileidx.z(); allconst && tz <= srcendtile.z(); tz++)
5162  for (ty = srctileidx.y(); allconst && ty <= srcendtile.y(); ty++)
5163  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
5164  {
5165  srctile = src.getTile(tx, ty, tz);
5166  if (!srctile->isConstant())
5167  {
5168  allconst = false;
5169  break;
5170  }
5171  cval = (*srctile)(0, 0, 0);
5172  if (firsttile)
5173  {
5174  firsttile = false;
5175  constval = cval;
5176  }
5177  if (!SYSisEqual(cval, constval))
5178  {
5179  allconst = false;
5180  break;
5181  }
5182  }
5183  if (allconst)
5184  {
5185  // Should have found at least one tile!
5186  UT_ASSERT(!firsttile);
5187  tile->makeConstant(constval);
5188 
5189  // Onto the next tile!
5190  continue;
5191  }
5192  }
5193 
5194  // All of the fragments aren't constant, or aren't all inside
5195  // our range. So we have to work fragment at a time..
5196  for (tz = srctileidx.z(); tz <= srcendtile.z(); tz++)
5197  for (ty = srctileidx.y(); ty <= srcendtile.y(); ty++)
5198  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
5199  {
5200  int destx, desty, destz;
5201  int srcx, srcy, srcz;
5202 
// Each axis splits the destination tile at the point where the
// source crosses a tile boundary; the first fragment starts at
// dest 0 / src srctileoff, the second at the complement.
5203  destx = (tx == srctileidx.x()) ? 0 : (TILESIZE-srctileoff.x());
5204  desty = (ty == srctileidx.y()) ? 0 : (TILESIZE-srctileoff.y());
5205  destz = (tz == srctileidx.z()) ? 0 : (TILESIZE-srctileoff.z());
5206  srcx = (tx == srctileidx.x()) ? srctileoff.x() : 0;
5207  srcy = (ty == srctileidx.y()) ? srctileoff.y() : 0;
5208  srcz = (tz == srctileidx.z()) ? srctileoff.z() : 0;
5209 #if 1
5210  if (tx >= 0 &&
5211  ty >= 0 &&
5212  tz >= 0 &&
5213  tx < src.getTileRes(0) &&
5214  ty < src.getTileRes(1) &&
5215  tz < src.getTileRes(2) &&
5216  ((tx != src.getTileRes(0)-1) ||
5217  srcend.x() < src.getXRes()) &&
5218  ((ty != src.getTileRes(1)-1) ||
5219  srcend.y() < src.getYRes()) &&
5220  ((tz != src.getTileRes(2)-1) ||
5221  srcend.z() < src.getZRes())
5222  )
5223  {
5224  srctile = src.getTile(tx, ty, tz);
5225  // In bounds
5226  tile->copyFragment(
5227  destx, desty, destz,
5228  *srctile,
5229  srcx, srcy, srcz
5230  );
5231  }
5232  else
5233 #endif
5234  {
5235  // Out of bounds!
// Per-voxel fallback: getValue() applies the source's border
// behaviour for indices outside the source array.
5236  int maxd = SYSmin(tile->zres(),
5237  TILESIZE-srcz);
5238  int maxh = SYSmin(tile->yres(),
5239  TILESIZE-srcy);
5240  int maxw = SYSmin(tile->xres(),
5241  TILESIZE-srcx);
5242  for (int z = destz; z < maxd; z++)
5243  {
5244  for (int y = desty; y < maxh; y++)
5245  {
5246  for (int x = destx; x < maxw; x++)
5247  {
5248  T val;
5249 
5250  val = src.getValue(x + vit.x() + offx,
5251  y + vit.y() + offy,
5252  z + vit.z() + offz);
5253  tile->setValue(x, y, z, val);
5254  }
5255  }
5256  }
5257  }
5258  }
5259  }
5260 }
5261 
5262 template <typename T>
5263 void
5265  const UT_Filter *filter, float radius,
5266  int clampaxis,
5267  const UT_JobInfo &info)
5268 {
5270  UT_Vector3 pos;
5271  UT_Vector3 ratio;
5272  UT_Interrupt *boss = UTgetInterrupt();
5273 
5274  vit.setArray(this);
5275  vit.splitByTile(info);
5276  vit.setCompressOnExit(true);
5277  vit.setInterrupt(boss);
5278 
5279  ratio.x() = 1.0f / getXRes();
5280  ratio.y() = 1.0f / getYRes();
5281  ratio.z() = 1.0f / getZRes();
5282 
5283  for (vit.rewind(); !vit.atEnd(); vit.advance())
5284  {
5285  pos.x() = vit.x()+0.5f;
5286  pos.y() = vit.y()+0.5f;
5287  pos.z() = vit.z()+0.5f;
5288  pos *= ratio;
5289 
5290  vit.setValue(src.evaluate(pos, *filter, radius, clampaxis));
5291  }
5292 }
5293 
5294 template <typename T>
5295 bool
5296 UT_VoxelArray<T>::posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
5297 {
5298  // We go from the position in the unit cube into the index.
5299  // The center of cells must map to the exact integer indices.
5300  pos.x() *= myRes[0];
5301  pos.y() *= myRes[1];
5302  pos.z() *= myRes[2];
5303 
5304  // The centers of cells are now mapped .5 too high. Ie, the true
5305  // center of cell index (0,0,0) would be (0.5,0.5,0.5) This, however,
5306  // is exactly what we want for rounding.
5307  x = (int) SYSfloor(pos.x());
5308  y = (int) SYSfloor(pos.y());
5309  z = (int) SYSfloor(pos.z());
5310 
5311  // Determine if out of bounds.
5312  return isValidIndex(x, y, z);
5313 }
5314 
5315 template <typename T>
5316 bool
5318 {
5319  // We go from the position in the unit cube into the index.
5320  // The center of cells must map to the exact integer indices.
5321  pos.x() *= myRes[0];
5322  pos.y() *= myRes[1];
5323  pos.z() *= myRes[2];
5324 
5325  // The centers of cells are now mapped .5 too high. Ie, the true
5326  // center of cell index (0,0,0) would be (0.5,0.5,0.5)
5327  pos.x() -= 0.5;
5328  pos.y() -= 0.5;
5329  pos.z() -= 0.5;
5330 
5331  ipos = pos;
5332 
5333  // Determine if out of bounds.
5334  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5335  pos.y() < 0 || pos.y() >= myRes[1] ||
5336  pos.z() < 0 || pos.z() >= myRes[2])
5337  return false;
5338 
5339  return true;
5340 }
5341 
5342 template <typename T>
5343 bool
5345 {
5346  // We go from the position in the unit cube into the index.
5347  // The center of cells must map to the exact integer indices.
5348  pos.x() *= myRes[0];
5349  pos.y() *= myRes[1];
5350  pos.z() *= myRes[2];
5351 
5352  // The centers of cells are now mapped .5 too high. Ie, the true
5353  // center of cell index (0,0,0) would be (0.5,0.5,0.5) This, however,
5354  // is exactly what we want for rounding.
5355  x = (exint) SYSfloor(pos.x());
5356  y = (exint) SYSfloor(pos.y());
5357  z = (exint) SYSfloor(pos.z());
5358 
5359  // Determine if out of bounds.
5360  return isValidIndex(x, y, z);
5361 }
5362 
5363 template <typename T>
5364 bool
5366 {
5367  // We go from the position in the unit cube into the index.
5368  // The center of cells must map to the exact integer indices.
5369  pos.x() *= myRes[0];
5370  pos.y() *= myRes[1];
5371  pos.z() *= myRes[2];
5372 
5373  // The centers of cells are now mapped .5 too high. Ie, the true
5374  // center of cell index (0,0,0) would be (0.5,0.5,0.5)
5375  pos.x() -= 0.5;
5376  pos.y() -= 0.5;
5377  pos.z() -= 0.5;
5378 
5379  ipos = pos;
5380 
5381  // Determine if out of bounds.
5382  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5383  pos.y() < 0 || pos.y() >= myRes[1] ||
5384  pos.z() < 0 || pos.z() >= myRes[2])
5385  return false;
5386 
5387  return true;
5388 }
5389 
5390 template <typename T>
5391 bool
5392 UT_VoxelArray<T>::indexToPos(int x, int y, int z, UT_Vector3F &pos) const
5393 {
5394  pos.x() = x;
5395  pos.y() = y;
5396  pos.z() = z;
5397 
5398  // Move the indices to the centers of the cells.
5399  pos.x() += 0.5;
5400  pos.y() += 0.5;
5401  pos.z() += 0.5;
5402 
5403  // And scale into the unit cube.
5404  pos *= myInvRes;
5405 
5406  // Return true if the original coordinates were in range.
5407  return isValidIndex(x, y, z);
5408 }
5409 
5410 template <typename T>
5411 bool
5413 {
5414  pos.x() = x;
5415  pos.y() = y;
5416  pos.z() = z;
5417 
5418  // Move the indices to the centers of the cells.
5419  pos.x() += 0.5;
5420  pos.y() += 0.5;
5421  pos.z() += 0.5;
5422 
5423  // And scale into the unit cube.
5424  pos *= myInvRes;
5425 
5426  // Return true if the original coordinates were in range.
5427  return isValidIndex(x, y, z);
5428 }
5429 
5430 template <typename T>
5431 void
5433 {
5434  pos = index;
5435 
5436  // Move the indices to the centers of the cells.
5437  pos.x() += 0.5;
5438  pos.y() += 0.5;
5439  pos.z() += 0.5;
5440 
5441  // And scale into the unit cube.
5442  pos *= myInvRes;
5443 }
5444 
5445 template <typename T>
5446 void
5448 {
5449  pos = index;
5450 
5451  // Move the indices to the centers of the cells.
5452  pos.x() += 0.5;
5453  pos.y() += 0.5;
5454  pos.z() += 0.5;
5455 
5456  // And scale into the unit cube.
5457  pos *= myInvRes;
5458 }
5459 
5460 template <typename T>
5461 void
5463 {
5464  myBorderType = type;
5465  myBorderValue = t;
5466 }
5467 
5468 template <typename T>
5469 void
5471 {
5472  myBorderScale[0] = sx;
5473  myBorderScale[1] = sy;
5474  myBorderScale[2] = sz;
5475 }
5476 
5477 template <typename T>
5478 void
5480 {
5481  UT_VoxelArrayIterator<T> vit(this);
5482  vit.splitByTile(info);
5483  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5484  {
5485  int i = vit.getLinearTileNum();
5486  myTiles[i].tryCompress(getCompressionOptions());
5487  }
5488 }
5489 
5490 template <typename T>
5491 void
5493 {
5494  UT_VoxelArrayIterator<T> vit(this);
5495  vit.splitByTile(info);
5496  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5497  {
5498  int i = vit.getLinearTileNum();
5499  myTiles[i].uncompress();
5500  }
5501 }
5502 
5503 template <typename T>
5504 void
5506 {
5507  UT_VoxelArrayIterator<T> vit(this);
5508  vit.splitByTile(info);
5509  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5510  {
5511  int i = vit.getLinearTileNum();
5512  if (!myTiles[i].isConstant())
5513  myTiles[i].uncompress();
5514  }
5515 }
5516 
5517 template <typename T>
5518 void
5519 UT_VoxelArray<T>::saveData(std::ostream &os) const
5520 {
5521  T cval;
5522  char version;
5523 
5524  // First determine if we are fully constant.
5525  if (isConstant(&cval))
5526  {
5527  // Save a constant array.
5528  version = 0;
5529  UTwrite(os, &version, 1);
5530  UTwrite<T>(os, &cval);
5531  return;
5532  }
5533 
5534  // Compressed tiles.
5535  version = 1;
5536  UTwrite(os, &version, 1);
5537 
5538  // Store list of compression types
5540 
5541  int i, ntiles;
5542 
5543  ntiles = numTiles();
5544  for (i = 0; i < ntiles; i++)
5545  {
5546  myTiles[i].save(os);
5547  }
5548 }
5549 
5550 
5551 template <typename T>
5552 void
5554 {
5555  T cval;
5556  char version;
5557 
5558  is.readChar(version);
5559 
5560  // First determine if we are fully constant.
5561  if (version == 0)
5562  {
5563  // Save a constant array.
5564  is.read<T>(&cval);
5565 
5566  constant(cval);
5567  return;
5568  }
5569 
5570  if (version == 1)
5571  {
5572  UT_IntArray compressions;
5573 
5574  // Store list of compression types
5575  UT_VoxelTile<T>::loadCompressionTypes(is, compressions);
5576 
5577  int i, ntiles;
5578 
5579  ntiles = numTiles();
5580  for (i = 0; i < ntiles; i++)
5581  {
5582  myTiles[i].load(is, compressions);
5583  }
5584  }
5585 }
5586 
5587 template <typename T>
5588 bool
5589 UT_VoxelArray<T>::saveData(UT_JSONWriter &w, const char *shared_mem_owner) const
5590 {
5591  bool ok = true;
5592  T cval;
5593  int8 version;
5594 
5595  // First determine if we are fully constant.
5596  if (isConstant(&cval))
5597  {
5598  ok = ok && w.jsonBeginArray();
5599  // Save a constant array.
5600  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5602  ok = ok && w.jsonValue(cval);
5603  ok = ok && w.jsonEndArray();
5604  return ok;
5605  }
5606 
5607  if (shared_mem_owner)
5608  {
5609  SYS_SharedMemory *shm;
5610  shm = copyToSharedMemory(shared_mem_owner);
5611  if (shm)
5612  {
5613  ok = ok && w.jsonBeginArray();
5614  // Save a shared memory array.
5615  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5617  ok = ok && w.jsonString(shm->id());
5618  ok = ok && w.jsonEndArray();
5619  return ok;
5620  }
5621  // Fall back on raw write.
5622  }
5623 
5624 
5625  // Compressed tiles.
5626  ok = ok && w.jsonBeginArray();
5627  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5629  ok = ok && w.jsonBeginArray();
5630  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5632  version = 1;
5633  ok = ok && w.jsonValue(version);
5634 
5635  // Store list of compression types
5636  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5639 
5640  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5642  ok = ok && w.jsonBeginArray();
5643  int i, ntiles;
5644 
5645  ntiles = numTiles();
5646  for (i = 0; i < ntiles; i++)
5647  {
5648  ok = ok && myTiles[i].save(w);
5649  }
5650  ok = ok && w.jsonEndArray();
5651 
5652  ok = ok && w.jsonEndArray();
5653 
5654  ok = ok && w.jsonEndArray();
5655  return ok;
5656 }
5657 
5658 template <typename T>
5659 bool
5661 {
5662  T cval;
5663  int8 version;
5664  UT_WorkBuffer key;
5665  int key_id;
5666  bool array_error = false;
5667 
5668  delete mySharedMemView;
5669  mySharedMemView = 0;
5670 
5671  delete mySharedMem;
5672  mySharedMem = 0;
5673 
5674  if (!p.parseBeginArray(array_error) || array_error)
5675  return false;
5676 
5677  if (!p.parseString(key))
5678  return false;
5679  key_id = UT_VoxelArrayJSON::getArrayID(key.buffer());
5681  {
5682  if (!p.parseNumber(cval))
5683  return false;
5684 
5685  constant(cval);
5686  }
5687  else if (key_id == UT_VoxelArrayJSON::ARRAY_SHAREDARRAY)
5688  {
5689  UT_WorkBuffer shm_id;
5690  if (!p.parseString(shm_id))
5691  return false;
5692 
5693  if (!populateFromSharedMemory(shm_id.buffer()))
5694  {
5695  // If the shared memory disappears before we get a chance to read
5696  // it, don't abort, just set a constant value. That way Mantra has
5697  // a chance to recover. Not the most elegant solution but works for
5698  // now.
5699  T cval;
5700  cval = 0;
5701  constant(cval);
5702  }
5703  }
5704  else if (key_id == UT_VoxelArrayJSON::ARRAY_TILEDARRAY)
5705  {
5706  UT_JSONParser::traverser it, tile_it;
5707  UT_IntArray compressions;
5708  int i, ntiles;
5709 
5710 
5711  for (it = p.beginArray(); !it.atEnd(); ++it)
5712  {
5713  if (!it.getLowerKey(key))
5714  return false;
5715  switch (UT_VoxelArrayJSON::getArrayID(key.buffer()))
5716  {
5718  if (!p.parseNumber(version))
5719  return false;
5720  break;
5722  // Store list of compression types
5723  if (!UT_VoxelTile<T>::loadCompressionTypes(p, compressions))
5724  return false;
5725  break;
5726 
5728  ntiles = numTiles();
5729  for (tile_it = p.beginArray(), i = 0; !tile_it.atEnd();
5730  ++tile_it, ++i)
5731  {
5732  if (i < ntiles)
5733  {
5734  if (!myTiles[i].load(p, compressions))
5735  return false;
5736  }
5737  else
5738  {
5739  UT_VoxelTile<T> dummy_tile;
5740  dummy_tile.setRes(TILESIZE, TILESIZE, TILESIZE);
5741  if (!dummy_tile.load(p, compressions))
5742  return false;
5743  }
5744  }
5745  if (i != ntiles)
5746  return false;
5747  break;
5748  default:
5749  p.addWarning("Unexpected key for voxel data: %s",
5750  key.buffer());
5751  if (!p.skipNextObject())
5752  return false;
5753  }
5754  }
5755  }
5756  else
5757  {
5758  p.addWarning("Unexpected voxel key: %s\n", key.buffer());
5759  p.skipNextObject();
5760  return false;
5761  }
5762 
5763  if (!p.parseEndArray(array_error) || array_error)
5764  return false;
5765 
5766  return true;
5767 }
5768 
5769 template<typename T>
5771 UT_VoxelArray<T>::copyToSharedMemory(const char *shared_mem_owner) const
5772 {
5774 
5775  int ntiles;
5776 
5777  ntiles = numTiles();
5778 
5779  // Tally up the size we need.
5780  exint total_mem_size;
5781 
5782  total_mem_size = sizeof(exint); // Tile count.
5783 
5784  UT_Array<exint> tile_sizes;
5785  UT_Array<exint> next_block_offsets;
5786 
5787  for (int i = 0; i < ntiles; i++)
5788  {
5789  exint tile_size, block_size;
5790  if (myTiles[i].isConstant())
5791  tile_size = sizeof(T);
5792  else
5793  tile_size = myTiles[i].getDataLength();
5794 
5795  tile_sizes.append(tile_size);
5796 
5797  block_size = SYSroundUpToMultipleOf<exint>(tile_size, sizeof(exint));
5798  block_size += sizeof(exint) * 2; // Offset + compression type
5799 
5800  if (i < (ntiles-1))
5801  next_block_offsets.append(block_size / sizeof(exint));
5802  else
5803  next_block_offsets.append(0);
5804 
5805  total_mem_size += block_size;
5806  }
5807 
5808  // Start with an empty shared memory.
5809  SYS_SharedMemory *shared_mem;
5810  UT_WorkBuffer shared_mem_key;
5811 
5812  shared_mem_key.sprintf("%s:%p", shared_mem_owner, this);
5813 
5814  shared_mem = shmgr.get(shared_mem_key.buffer());
5815 
5816  // Does the existing block have the same number of tiles and same
5817  // offsets? In that case we don't reset the shared memory, just write
5818  // the tiles out again. There's very little adverse effect in changing
5819  // the voxel tile data.
5820  bool same_meta_data = false;
5821  if (shared_mem->size() >= sizeof(exint))
5822  {
5823  same_meta_data = true;
5824  {
5825  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
5826 
5827  exint *ptr_ds = (exint *)sv_tc.data();
5828  if (*ptr_ds != ntiles)
5829  same_meta_data = false;
5830  }
5831 
5832  if (same_meta_data)
5833  {
5834  // Check if the offsets + compression type is the same.
5835  exint offset = sizeof(exint);
5836  for (int i = 0; i < ntiles; i++)
5837  {
5838  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
5839  sizeof(exint) * 2);
5840 
5841  exint *ptr_tile = (exint *)sv_tile.data();
5842  if (ptr_tile[0] != next_block_offsets(i) ||
5843  ptr_tile[1] != (exint)myTiles[i].myCompressionType)
5844  {
5845  same_meta_data = false;
5846  break;
5847  }
5848  offset += next_block_offsets(i) * sizeof(exint);
5849  }
5850  }
5851  }
5852 
5853  if (!same_meta_data)
5854  {
5855  if (!shared_mem->reset(total_mem_size) ||
5856  shared_mem->size() != total_mem_size)
5857  {
5858  return NULL;
5859  }
5860  }
5861 
5862  {
5863  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
5864 
5865  exint *ptr_ds = (exint *)sv_tc.data();
5866  *ptr_ds = ntiles;
5867  }
5868 
5869  exint offset = sizeof(exint);
5870  for (int i = 0; i < ntiles; i++)
5871  {
5872  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
5873  sizeof(exint) * 2 + tile_sizes(i));
5874 
5875  exint *ptr_tile = (exint *)sv_tile.data();
5876 
5877  // Offset to next tile in exint units.
5878  ptr_tile[0] = next_block_offsets(i);
5879  ptr_tile[1] = myTiles[i].myCompressionType;
5880 
5881  ::memcpy(&ptr_tile[2], myTiles[i].rawData(), tile_sizes(i));
5882 
5883  offset += ptr_tile[0] * sizeof(exint);
5884  }
5885 
5886  return shared_mem;
5887 }
5888 
5889 template<typename T>
5890 bool
5892 {
5893  mySharedMem = new SYS_SharedMemory(id, /*read_only=*/true);
5894  if (!mySharedMem->size())
5895  {
5896  delete mySharedMem;
5897  mySharedMem = 0;
5898  return false;
5899  }
5900 
5901  exint ntiles;
5902  {
5903  SYS_SharedMemoryView sv_tc(*mySharedMem, 0, sizeof(exint));
5904  ntiles = *(const exint *)sv_tc.data();
5905  }
5906  if (ntiles != numTiles())
5907  {
5908  delete mySharedMem;
5909  mySharedMem = 0;
5910  return false;
5911  }
5912 
5913  mySharedMemView = new SYS_SharedMemoryView(*mySharedMem, sizeof(exint));
5914 
5915  exint *data = (exint *)mySharedMemView->data();
5916 
5917  for (int i = 0; i < ntiles; i++)
5918  {
5919  exint offset = data[0];
5920  int8 ctype = (int8)data[1];
5921 
5922  myTiles[i].setForeignData(&data[2], ctype);
5923 
5924  data += offset;
5925  }
5926 
5927  return true;
5928 }
5929 
5930 
5931 
5932 
5933 //
5934 // UT_VoxelMipMap functions
5935 //
5936 template <typename T>
5938 {
5939  initializePrivate();
5940 }
5941 
5942 template <typename T>
5944 {
5945  destroyPrivate();
5946 }
5947 
5948 template <typename T>
5950 {
5951  initializePrivate();
5952 
5953  *this = src;
5954 }
5955 
5956 template <typename T>
5957 const UT_VoxelMipMap<T> &
5959 {
5961  int level, i;
5962 
5963  if (&src == this)
5964  return *this;
5965 
5966  destroyPrivate();
5967 
5968  // We do a deep copy. We have learned from UT_String that
5969  // shallow copies will destroy us in the end.
5970  myOwnBase = true;
5971  myBaseLevel = new UT_VoxelArray<T>;
5972  *myBaseLevel = *src.myBaseLevel;
5973 
5974  myNumLevels = src.myNumLevels;
5975 
5976  for (i = 0; i < src.myLevels.entries(); i++)
5977  {
5978  levels = new UT_VoxelArray<T> *[myNumLevels];
5979  myLevels.append(levels);
5980 
5981  for (level = 0; level < myNumLevels; level++)
5982  {
5983  levels[level] = new UT_VoxelArray<T>;
5984  *levels[level] = *src.myLevels(i)[level];
5985  }
5986  }
5987  return *this;
5988 }
5989 
5990 template <typename T>
5991 void
5993  mipmaptype function)
5994 {
5995  UT_Array<mipmaptype> functions;
5996 
5997  functions.append(function);
5998  build(baselevel, functions);
5999 }
6000 
6001 template <typename T>
6002 void
6004  const UT_Array<mipmaptype> &functions)
6005 {
6006  destroyPrivate();
6007 
6008  // Setup our baselevel.
6009  myBaseLevel = baselevel;
6010  myOwnBase = false;
6011 
6012  // Now build from bottom up. First, calculate the number
6013  // of levels we'll need.
6014  myNumLevels = 0;
6015  int maxres, level;
6016 
6017  maxres = SYSmax(myBaseLevel->getXRes(), myBaseLevel->getYRes());
6018  maxres = SYSmax(maxres, myBaseLevel->getZRes());
6019 
6020  // std::cerr << "Max res " << maxres << std::endl;
6021 
6022  while (maxres > 1)
6023  {
6024  myNumLevels++;
6025  maxres++;
6026  maxres >>= 1;
6027  }
6028 
6029  // std::cerr << "Levels: " << myNumLevels << std::endl;
6030 
6031  // Create our desired voxel array list.
6032  for (int i = 0; i < functions.entries(); i++)
6033  {
6034  mipmaptype function = functions(i);
6035 
6036  UT_VoxelArray<T> **levels = new UT_VoxelArray<T> *[myNumLevels];
6037  myLevels.append(levels);
6038 
6039  UT_VoxelArray<T> *lastlevel = myBaseLevel;
6040  for (level = myNumLevels-1; level >= 0; level--)
6041  {
6042  // Find our resolutions.
6043  int xres = (lastlevel->getXRes() + 1) >> 1;
6044  int yres = (lastlevel->getYRes() + 1) >> 1;
6045  int zres = (lastlevel->getZRes() + 1) >> 1;
6046 
6047  levels[level] = new UT_VoxelArray<T>;
6048  levels[level]->size(xres, yres, zres);
6049 
6050  downsample(*levels[level], *lastlevel, function);
6051 
6052  lastlevel = levels[level];
6053  }
6054  }
6055 }
6056 
6057 template <typename T>
6058 void
6061  const UT_VoxelArray<T> &src,
6062  mipmaptype function,
6063  const UT_JobInfo &info)
6064 {
6065  UT_VoxelArrayIterator<T> vit(&dst);
6066 
6067  vit.splitByTile(info);
6068  vit.setCompressOnExit(true);
6069 
6070  for (vit.rewind(); !vit.atEnd(); vit.advance())
6071  {
6072  int x = 2*vit.x();
6073  int y = 2*vit.y();
6074  int z = 2*vit.z();
6075  T sam[8];
6076 
6077  if (src.extractSample(x, y, z, sam))
6078  {
6079  vit.setValue(sam[0]);
6080  }
6081  else
6082  {
6083  vit.setValue(
6084  mixValues(
6085  mixValues(
6086  mixValues(sam[0], sam[1], function),
6087  mixValues(sam[2], sam[3], function), function),
6088  mixValues(
6089  mixValues(sam[4], sam[5], function),
6090  mixValues(sam[6], sam[7], function), function),
6091  function));
6092  }
6093  }
6094 }
6095 
6096 
6097 template <typename T>
6098 int64
6100 {
6101  int64 mem = inclusive ? sizeof(*this) : 0;
6102 
6103  mem += myLevels.getMemoryUsage(false);
6104  mem += myLevels.entries() * myNumLevels * sizeof(UT_VoxelArray<T>*);
6105  for (exint j = 0; j < myLevels.entries(); j++)
6106  {
6107  for (int i = 0; i < myNumLevels; i++)
6108  mem += myLevels(j)[i]->getMemoryUsage(true);
6109  }
6110 
6111  return mem;
6112 }
6113 
6114 template <typename T>
6115 void
6116 UT_VoxelMipMap<T>::traverseTopDown(Callback function, void *data) const
6117 {
6118  doTraverse(0, 0, 0, 0, function, data);
6119 }
6120 
6121 template <typename T>
6122 void
6123 UT_VoxelMipMap<T>::doTraverse(int x, int y, int z, int level,
6124  Callback function, void *data) const
6125 {
6126  UT_StackBuffer<T> tval(myLevels.entries());
6127  bool isfinal;
6128  UT_VoxelArray<T> *vox;
6129  int shift;
6130 
6131  if (level == myNumLevels)
6132  {
6133  isfinal = true;
6134  vox = myBaseLevel;
6135  tval[0] = (*vox)(x, y, z);
6136  for (int i = 1; i < myLevels.entries(); i++)
6137  tval[i] = tval[0];
6138  }
6139  else
6140  {
6141  isfinal = false;
6142  for (int i = 0; i < myLevels.entries(); i++)
6143  {
6144  vox = myLevels(i)[level];
6145  tval[i] = (*vox)(x, y, z);
6146  }
6147  }
6148 
6149  shift = myNumLevels - level;
6150 
6152 
6153  box.initBounds(SYSmin(x << shift, myBaseLevel->getXRes()),
6154  SYSmin(y << shift, myBaseLevel->getYRes()),
6155  SYSmin(z << shift, myBaseLevel->getZRes()));
6156  box.enlargeBounds(SYSmin((x+1) << shift, myBaseLevel->getXRes()),
6157  SYSmin((y+1) << shift, myBaseLevel->getYRes()),
6158  SYSmin((z+1) << shift, myBaseLevel->getZRes()));
6159 
6160  if (!function(tval, box, isfinal, data))
6161  {
6162  // Function asked for an early exit.
6163  // Give it.
6164  return;
6165  }
6166 
6167  // We now want to proceed to the next level.
6168  if (isfinal)
6169  return;
6170 
6171  level++;
6172  shift--;
6173  x <<= 1;
6174  y <<= 1;
6175  z <<= 1;
6176 
6177  bool xinc, yinc, zinc;
6178 
6179  // Determine which of our children are valid.
6180  if ( ((x+1) << shift) < myBaseLevel->getXRes() )
6181  xinc = true;
6182  else
6183  xinc = false;
6184  if ( ((y+1) << shift) < myBaseLevel->getYRes() )
6185  yinc = true;
6186  else
6187  yinc = false;
6188  if ( ((z+1) << shift) < myBaseLevel->getZRes() )
6189  zinc = true;
6190  else
6191  zinc = false;
6192 
6193  //
6194  // Traverse all valid children. Note that this is deliberately a gray
6195  // order traversal for full nodes.
6196  //
6197 
6198  doTraverse(x, y, z, level, function, data);
6199 
6200  if (yinc)
6201  {
6202  doTraverse(x, y+1, z, level, function, data);
6203  if (zinc)
6204  doTraverse(x, y+1, z+1, level, function, data);
6205  }
6206  if (zinc)
6207  doTraverse(x, y, z+1, level, function, data);
6208 
6209  if (xinc)
6210  {
6211  if (zinc)
6212  doTraverse(x+1, y, z+1, level, function, data);
6213  doTraverse(x+1, y, z, level, function, data);
6214  if (yinc)
6215  {
6216  doTraverse(x+1, y+1, z, level, function, data);
6217  if (zinc)
6218  doTraverse(x+1, y+1, z+1, level, function, data);
6219  }
6220  }
6221 }
6222 
6223 template <typename T>
6224 template <typename OP>
6225 void
6227 {
6228  doTraverse(0, 0, 0, numLevels()-1, op);
6229 }
6230 
6231 template <typename T>
6232 template <typename OP>
6233 void
6234 UT_VoxelMipMap<T>::doTraverse(int x, int y, int z, int level,
6235  OP &op) const
6236 {
6237  int shift;
6238 
6239  shift = level;
6240 
6242 
6243  box.initBounds(SYSmin(x << shift, myBaseLevel->getXRes()),
6244  SYSmin(y << shift, myBaseLevel->getYRes()),
6245  SYSmin(z << shift, myBaseLevel->getZRes()));
6246  box.enlargeBounds(SYSmin((x+1) << shift, myBaseLevel->getXRes()),
6247  SYSmin((y+1) << shift, myBaseLevel->getYRes()),
6248  SYSmin((z+1) << shift, myBaseLevel->getZRes()));
6249 
6250  if (!op(box, level))
6251  {
6252  // Function asked for an early exit.
6253  // Give it.
6254  return;
6255  }
6256 
6257  level--;
6258  // We now want to proceed to the next level.
6259  if (level < 0)
6260  return;
6261 
6262  x <<= 1;
6263  y <<= 1;
6264  z <<= 1;
6265 
6266  bool xinc, yinc, zinc;
6267 
6268  // Determine which of our children are valid.
6269  if ( ((x+1) << level) < myBaseLevel->getXRes() )
6270  xinc = true;
6271  else
6272  xinc = false;
6273  if ( ((y+1) << level) < myBaseLevel->getYRes() )
6274  yinc = true;
6275  else
6276  yinc = false;
6277  if ( ((z+1) << level) < myBaseLevel->getZRes() )
6278  zinc = true;
6279  else
6280  zinc = false;
6281 
6282  //
6283  // Traverse all valid children. Note that this is deliberately a gray
6284  // order traversal for full nodes.
6285  //
6286 
6287  doTraverse(x, y, z, level, op);
6288 
6289  if (yinc)
6290  {
6291  doTraverse(x, y+1, z, level, op);
6292  if (zinc)
6293  doTraverse(x, y+1, z+1, level, op);
6294  }
6295  if (zinc)
6296  doTraverse(x, y, z+1, level, op);
6297 
6298  if (xinc)
6299  {
6300  if (zinc)
6301  doTraverse(x+1, y, z+1, level, op);
6302  doTraverse(x+1, y, z, level, op);
6303  if (yinc)
6304  {
6305  doTraverse(x+1, y+1, z, level, op);
6306  if (zinc)
6307  doTraverse(x+1, y+1, z+1, level, op);
6308  }
6309  }
6310 }
6311 
6312 template <typename T>
6313 template <typename OP>
6314 void
6316 {
6317  doTraverseSorted(0, 0, 0, numLevels()-1, op);
6318 }
6319 
// NOTE(review): the class-declaration line (doxygen 6320) was lost in
// extraction, so the record's name is unknown here. It is a small
// value/key pair ordered by 'value' via the operator< below, used by
// doTraverseSorted to order child traversal — confirm the name against
// the HDK source.
 6321 {
 6322 public:
 6323  float value;
 6324  int key;
 6325 };
6326 
// NOTE(review): the operator's signature line (doxygen 6327) was lost in
// extraction. It compares two of the sort records above by ascending
// 'value', giving std::sort-style ordering for doTraverseSorted —
// confirm the parameter type name against the HDK source.
 6328 {
 6329  return lhs.value < rhs.value;
 6330 }
6331 
6332 template <typename T>
6333 template <typename OP>
6334 void
6335 UT_VoxelMipMap<T>::doTraverseSorted(int x, int y, int z, int level,
6336  OP &op) const
6337 {
6339 
6340  box.initBounds(SYSmin(x << level, myBaseLevel->getXRes()),
6341