HDK
UT_VoxelArray.C
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.C ( UT Library, C++)
7  *
8  * COMMENTS:
9  * Tiled Voxel Array Implementation.
10  */
11 
12 #include "UT_VoxelArray.h"
13 #include "UT_BoundingBox.h"
14 #include "UT_Filter.h"
15 #include "UT_IStream.h"
16 #include "UT_JSONParser.h"
17 #include "UT_JSONWriter.h"
18 #include "UT_SharedMemoryManager.h"
19 #include "UT_StackBuffer.h"
20 #include "UT_VoxelArrayJSON.h"
21 #include "UT_Debug.h"
22 #include <SYS/SYS_Align.h>
23 #include <SYS/SYS_CallIf.h>
24 #include <SYS/SYS_Math.h>
25 
26 ///
27 /// fpreal16 conversion functions
28 ///
29 inline fpreal16
30 UTvoxelConvertFP16(fpreal16 a) { return a; }
31 inline fpreal16
32 UTvoxelConvertFP16(fpreal32 a) { return a; }
33 inline fpreal16
34 UTvoxelConvertFP16(fpreal64 a) { return a; }
35 inline fpreal16
36 UTvoxelConvertFP16(int8 a) { return a; }
37 inline fpreal16
38 UTvoxelConvertFP16(int16 a) { return a; }
39 inline fpreal16
40 UTvoxelConvertFP16(int32 a) { return a; }
41 inline fpreal16
42 UTvoxelConvertFP16(int64 a) { return a; }
43 inline fpreal16
44 UTvoxelConvertFP16(UT_Vector2 a) { return a.x(); }
45 inline fpreal16
46 UTvoxelConvertFP16(UT_Vector3 a) { return a.x(); }
47 inline fpreal16
48 UTvoxelConvertFP16(UT_Vector4 a) { return a.x(); }
49 
50 template <typename T>
51 inline T UTvoxelConvertFromFP16(fpreal16 v)
52 {
53  return T(v);
54 }
55 
56 template <>
57 inline UT_Vector3F UTvoxelConvertFromFP16(fpreal16 v)
58 {
59  return UT_Vector3F(v, v, v);
60 }
61 template <>
62 inline UT_Vector3D UTvoxelConvertFromFP16(fpreal16 v)
63 {
64  return UT_Vector3D(v, v, v);
65 }
66 template <>
67 inline UT_Vector4F UTvoxelConvertFromFP16(fpreal16 v)
68 {
69  return UT_Vector4F(v, v, v, v);
70 }
71 template <>
72 inline UT_Vector4D UTvoxelConvertFromFP16(fpreal16 v)
73 {
74  return UT_Vector4D(v, v, v, v);
75 }
76 
77 ///
78 /// VoxelTileCompress definitions
79 ///
80 template <typename T>
81 void
82 UT_VoxelTileCompress<T>::findMinMax(const UT_VoxelTile<T> &tile,
83  T &min, T &max) const
84 {
85  int x, y, z;
86 
87  min = getValue(tile, 0, 0, 0);
88  max = min;
89 
90  // Generic approach is to just use the getValue() ability.
91  for (z = 0; z < tile.zres(); z++)
92  {
93  for (y = 0; y < tile.yres(); y++)
94  {
95  for (x = 0; x < tile.xres(); x++)
96  {
97  tile.expandMinMax(getValue(tile, x, y, z), min, max);
98  }
99  }
100  }
101  return;
102 }
103 
104 //
105 // VoxelTile definitions.
106 //
107 
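//
// Overview of tile storage (summarizing the methods below): each UT_VoxelTile
// holds a myRes[0] x myRes[1] x myRes[2] block of voxels whose layout is
// selected by myCompressionType:
//   COMPRESS_RAW      - tightly packed x-major array of T.
//   COMPRESS_RAWFULL  - x and y dimensions padded out to TILESIZE.
//   COMPRESS_CONSTANT - a single value, stored inside the myData pointer
//                       itself whenever sizeof(T) <= sizeof(T*).
//   COMPRESS_FPREAL16 - a half-precision (fpreal16) copy of the data.
// Values of COMPRESS_ENGINE and above index the registered
// UT_VoxelTileCompress engines.
//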
108 template <typename T>
109 UT_VoxelTile<T>::UT_VoxelTile()
110 {
111  myRes[0] = 0;
112  myRes[1] = 0;
113  myRes[2] = 0;
114 
115  myCompressionType = COMPRESS_CONSTANT;
116  myForeignData = false;
117 
118  if (sizeof(T) <= sizeof(T*))
119  {
120  myData = 0;
121  }
122  else
123  {
124  myData = UT_VOXEL_ALLOC(sizeof(T));
125 
126  // It is not accidental that this is not a static_cast!
127  // There isn't a UT_Vector3(fpreal) constructor (as we have a foolish
128  // UT_Vector3(fpreal *) constructor that would cause massive problems)
129  // but there is a UT_Vector3 operator=(fpreal)!
130  ((T *)myData)[0] = 0;
131  }
132 }
133 
134 template <typename T>
135 UT_VoxelTile<T>::~UT_VoxelTile()
136 {
137  freeData();
138 }
139 
140 template <typename T>
141 UT_VoxelTile<T>::UT_VoxelTile(const UT_VoxelTile<T> &src)
142 {
143  myData = 0;
144 
145  // Use assignment operator.
146  *this = src;
147 }
148 
149 template <typename T>
150 const UT_VoxelTile<T> &
151 UT_VoxelTile<T>::operator=(const UT_VoxelTile<T> &src)
152 {
153  if (&src == this)
154  return *this;
155 
156  freeData();
157 
158  myRes[0] = src.myRes[0];
159  myRes[1] = src.myRes[1];
160  myRes[2] = src.myRes[2];
161 
162  myCompressionType = src.myCompressionType;
163  switch (myCompressionType)
164  {
165  case COMPRESS_RAW:
166  myData = UT_VOXEL_ALLOC(
167  sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
168  memcpy(myData, src.myData, sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
169  break;
170  case COMPRESS_CONSTANT:
171  if (inlineConstant())
172  {
173  myData = src.myData;
174  }
175  else
176  {
177  myData = UT_VOXEL_ALLOC(sizeof(T));
178  memcpy(myData, src.myData, sizeof(T));
179  }
180  break;
181  case COMPRESS_RAWFULL:
182  myData = UT_VOXEL_ALLOC(
183  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
184  memcpy(myData, src.myData,
185  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
186  break;
187  case COMPRESS_FPREAL16:
188  myData = UT_VOXEL_ALLOC(
189  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2]);
190  memcpy(myData, src.myData,
191  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2]);
192  break;
193  default:
194  {
195  UT_VoxelTileCompress<T> *engine;
196 
197  engine = getCompressionEngine(myCompressionType);
198  myData = UT_VOXEL_ALLOC(engine->getDataLength(src));
199  memcpy(myData, src.myData, engine->getDataLength(src));
200  break;
201  }
202  }
203 
204  return *this;
205 }
206 
207 template <typename T>
208 bool
209 UT_VoxelTile<T>::writeThrough(int x, int y, int z, T t)
210 {
211  switch (myCompressionType)
212  {
213  case COMPRESS_RAW:
214  // Do the assignment.
215  ((T *)myData)[ ((z * myRes[1]) + y) * myRes[0] + x ] = t;
216  return true;
217 
218  case COMPRESS_CONSTANT:
219  if (rawConstVal() == t)
220  {
221  // Trivially true.
222  return true;
223  }
224  return false;
225 
226  case COMPRESS_RAWFULL:
227  ((T *)myData)[ ((z * TILESIZE) + y) * TILESIZE + x ] = t;
228  return true;
229 
230  case COMPRESS_FPREAL16:
231  return false;
232  }
233 
234  // Use the compression engine.
235  UT_VoxelTileCompress<T> *engine;
236 
237  engine = getCompressionEngine(myCompressionType);
238  return engine->writeThrough(*this, x, y, z, t);
239 }
240 
241 template <typename T>
242 T
243 UT_VoxelTile<T>::lerp(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
244 {
245  T vx, vx1, vy, vy1, vz;
246 
247  switch (myCompressionType)
248  {
249  case COMPRESS_RAW:
250  {
251  T *data = (T *) myData;
252  int offset = (z * myRes[1] + y) * myRes[0] + x;
253  int yinc = myRes[0];
254  int zinc = myRes[0] * myRes[1];
255 
256  // Lerp x:x+1, y, z
257  vx = lerpValues(data[offset], data[offset+1], fx);
258  // Lerp x:x+1, y+1, z
259  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
260 
261  // Lerp x:x+1, y:y+1, z
262  vy = lerpValues(vx, vx1, fy);
263 
264  // Lerp x:x+1, y, z+1
265  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
266  // Lerp x:x+1, y+1, z+1
267  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
268 
269  // Lerp x:x+1, y:y+1, z+1
270  vy1 = lerpValues(vx, vx1, fy);
271 
272  // Lerp x:x+1, y:y+1, z:z+1
273  vz = lerpValues(vy, vy1, fz);
274  break;
275  }
276  case COMPRESS_RAWFULL:
277  {
278  T *data = (T *) myData;
279  int offset = (z * TILESIZE + y) * TILESIZE + x;
280  int yinc = TILESIZE;
281  int zinc = TILESIZE * TILESIZE;
282 
283  // Lerp x:x+1, y, z
284  vx = lerpValues(data[offset], data[offset+1], fx);
285  // Lerp x:x+1, y+1, z
286  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
287 
288  // Lerp x:x+1, y:y+1, z
289  vy = lerpValues(vx, vx1, fy);
290 
291  // Lerp x:x+1, y, z+1
292  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
293  // Lerp x:x+1, y+1, z+1
294  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
295 
296  // Lerp x:x+1, y:y+1, z+1
297  vy1 = lerpValues(vx, vx1, fy);
298 
299  // Lerp x:x+1, y:y+1, z:z+1
300  vz = lerpValues(vy, vy1, fz);
301  break;
302  }
303  case COMPRESS_FPREAL16:
304  {
305  fpreal16 *data = (fpreal16 *) myData;
306  int offset = (z * myRes[1] + y) * myRes[0] + x;
307  int yinc = myRes[0];
308  int zinc = myRes[0] * myRes[1];
309  fpreal16 vx, vx1, vy, vy1, vz;
310 
311  // Lerp x:x+1, y, z
312  vx = SYSlerp(data[offset], data[offset+1], fx);
313  // Lerp x:x+1, y+1, z
314  vx1 = SYSlerp(data[offset+yinc], data[offset+yinc+1], fx);
315 
316  // Lerp x:x+1, y:y+1, z
317  vy = SYSlerp(vx, vx1, fy);
318 
319  // Lerp x:x+1, y, z+1
320  vx = SYSlerp(data[offset+zinc], data[offset+zinc+1], fx);
321  // Lerp x:x+1, y+1, z+1
322  vx1 = SYSlerp(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
323 
324  // Lerp x:x+1, y:y+1, z+1
325  vy1 = SYSlerp(vx, vx1, fy);
326 
327  // Lerp x:x+1, y:y+1, z:z+1
328  vz = SYSlerp(vy, vy1, fz);
329 
330  return UTvoxelConvertFromFP16<T>(vz);
331  }
332  case COMPRESS_CONSTANT:
333  {
334  // This is quite trivial to do a trilerp on.
335  vz = rawConstVal();
336  break;
337  }
338 
339  default:
340  {
341  UT_VoxelTileCompress<T> *engine;
342 
343  engine = getCompressionEngine(myCompressionType);
344  // Lerp x:x+1, y, z
345  vx = lerpValues(engine->getValue(*this, x, y, z),
346  engine->getValue(*this, x+1, y, z),
347  fx);
348  // Lerp x:x+1, y+1, z
349  vx1 = lerpValues(engine->getValue(*this, x, y+1, z),
350  engine->getValue(*this, x+1, y+1, z),
351  fx);
352 
353  // Lerp x:x+1, y:y+1, z
354  vy = lerpValues(vx, vx1, fy);
355 
356  // Lerp x:x+1, y, z+1
357  vx = lerpValues(engine->getValue(*this, x, y, z+1),
358  engine->getValue(*this, x+1, y, z+1),
359  fx);
360  // Lerp x:x+1, y+1, z+1
361  vx1 = lerpValues(engine->getValue(*this, x, y+1, z+1),
362  engine->getValue(*this, x+1, y+1, z+1),
363  fx);
364 
365  // Lerp x:x+1, y:y+1, z+1
366  vy1 = lerpValues(vx, vx1, fy);
367 
368  // Lerp x:x+1, y:y+1, z:z+1
369  vz = lerpValues(vy, vy1, fz);
370  break;
371  }
372  }
373 
374  return vz;
375 }
376 
377 template <typename T>
378 template <int AXIS2D>
379 T
380 UT_VoxelTile<T>::lerpAxis(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
381 {
382  T vx, vx1, vy, vy1, vz;
383 
384  switch (myCompressionType)
385  {
386  case COMPRESS_RAW:
387  {
388  T *data = (T *) myData;
389  int offset = (z * myRes[1] + y) * myRes[0] + x;
390  int yinc = myRes[0];
391  int zinc = myRes[0] * myRes[1];
392 
393  // Lerp x:x+1, y, z
394  if (AXIS2D != 0)
395  vx = lerpValues(data[offset],
396  data[offset+1],
397  fx);
398  else
399  vx = data[offset];
400 
401  if (AXIS2D != 1)
402  {
403  // Lerp x:x+1, y+1, z
404  if (AXIS2D != 0)
405  vx1= lerpValues(data[offset+yinc],
406  data[offset+yinc+1],
407  fx);
408  else
409  vx1 = data[offset+yinc];
410  // Lerp x:x+1, y:y+1, z
411  vy = lerpValues(vx, vx1, fy);
412  }
413  else
414  vy = vx;
415 
416  if (AXIS2D != 2)
417  {
418  // Lerp x:x+1, y, z+1
419  if (AXIS2D != 0)
420  vx = lerpValues(data[offset+zinc],
421  data[offset+zinc+1],
422  fx);
423  else
424  vx = data[offset+zinc];
425 
426  if (AXIS2D != 1)
427  {
428  // Lerp x:x+1, y+1, z+1
429  if (AXIS2D != 0)
430  vx1= lerpValues(data[offset+zinc+yinc],
431  data[offset+zinc+yinc+1],
432  fx);
433  else
434  vx1 = data[offset+zinc+yinc];
435  // Lerp x:x+1, y:y+1, z+1
436  vy1 = lerpValues(vx, vx1, fy);
437  }
438  else
439  vy1 = vx;
440 
441  // Lerp x:x+1, y:y+1, z:z+1
442  vz = lerpValues(vy, vy1, fz);
443  }
444  else
445  vz = vy;
446  break;
447  }
448  case COMPRESS_RAWFULL:
449  {
450  T *data = (T *) myData;
451  int offset = (z * TILESIZE + y) * TILESIZE + x;
452  int yinc = TILESIZE;
453  int zinc = TILESIZE * TILESIZE;
454 
455  // Lerp x:x+1, y, z
456  if (AXIS2D != 0)
457  vx = lerpValues(data[offset],
458  data[offset+1],
459  fx);
460  else
461  vx = data[offset];
462 
463  if (AXIS2D != 1)
464  {
465  // Lerp x:x+1, y+1, z
466  if (AXIS2D != 0)
467  vx1= lerpValues(data[offset+yinc],
468  data[offset+yinc+1],
469  fx);
470  else
471  vx1 = data[offset+yinc];
472  // Lerp x:x+1, y:y+1, z
473  vy = lerpValues(vx, vx1, fy);
474  }
475  else
476  vy = vx;
477 
478  if (AXIS2D != 2)
479  {
480  // Lerp x:x+1, y, z+1
481  if (AXIS2D != 0)
482  vx = lerpValues(data[offset+zinc],
483  data[offset+zinc+1],
484  fx);
485  else
486  vx = data[offset+zinc];
487 
488  if (AXIS2D != 1)
489  {
490  // Lerp x:x+1, y+1, z+1
491  if (AXIS2D != 0)
492  vx1= lerpValues(data[offset+zinc+yinc],
493  data[offset+zinc+yinc+1],
494  fx);
495  else
496  vx1 = data[offset+zinc+yinc];
497  // Lerp x:x+1, y:y+1, z+1
498  vy1 = lerpValues(vx, vx1, fy);
499  }
500  else
501  vy1 = vx;
502 
503  // Lerp x:x+1, y:y+1, z:z+1
504  vz = lerpValues(vy, vy1, fz);
505  }
506  else
507  vz = vy;
508  break;
509  }
510  case COMPRESS_FPREAL16:
511  {
512  fpreal16 *data = (fpreal16 *) myData;
513  int offset = (z * myRes[1] + y) * myRes[0] + x;
514  int yinc = myRes[0];
515  int zinc = myRes[0] * myRes[1];
516  fpreal16 vx, vx1, vy, vy1, vz;
517 
518  // Lerp x:x+1, y, z
519  if (AXIS2D != 0)
520  vx = SYSlerp(data[offset],
521  data[offset+1],
522  fx);
523  else
524  vx = data[offset];
525 
526  if (AXIS2D != 1)
527  {
528  // Lerp x:x+1, y+1, z
529  if (AXIS2D != 0)
530  vx1= SYSlerp(data[offset+yinc],
531  data[offset+yinc+1],
532  fx);
533  else
534  vx1 = data[offset+yinc];
535  // Lerp x:x+1, y:y+1, z
536  vy = SYSlerp(vx, vx1, fy);
537  }
538  else
539  vy = vx;
540 
541  if (AXIS2D != 2)
542  {
543  // Lerp x:x+1, y, z+1
544  if (AXIS2D != 0)
545  vx = SYSlerp(data[offset+zinc],
546  data[offset+zinc+1],
547  fx);
548  else
549  vx = data[offset+zinc];
550 
551  if (AXIS2D != 1)
552  {
553  // Lerp x:x+1, y+1, z+1
554  if (AXIS2D != 0)
555  vx1= SYSlerp(data[offset+zinc+yinc],
556  data[offset+zinc+yinc+1],
557  fx);
558  else
559  vx1 = data[offset+zinc+yinc];
560  // Lerp x:x+1, y:y+1, z+1
561  vy1 = SYSlerp(vx, vx1, fy);
562  }
563  else
564  vy1 = vx;
565 
566  // Lerp x:x+1, y:y+1, z:z+1
567  vz = SYSlerp(vy, vy1, fz);
568  }
569  else
570  vz = vy;
571 
572  return UTvoxelConvertFromFP16<T>(vz);
573  }
574  case COMPRESS_CONSTANT:
575  {
576  // This is quite trivial to do a bilerp on.
577  vz = rawConstVal();
578  break;
579  }
580 
581  default:
582  {
583  UT_VoxelTileCompress<T> *engine;
584 
585  engine = getCompressionEngine(myCompressionType);
586  // Lerp x:x+1, y, z
587  if (AXIS2D != 0)
588  vx = lerpValues(engine->getValue(*this, x, y, z),
589  engine->getValue(*this, x+1, y, z),
590  fx);
591  else
592  vx = engine->getValue(*this, x, y, z);
593 
594  if (AXIS2D != 1)
595  {
596  // Lerp x:x+1, y+1, z
597  if (AXIS2D != 0)
598  vx1= lerpValues(engine->getValue(*this, x, y+1, z),
599  engine->getValue(*this, x+1, y+1, z),
600  fx);
601  else
602  vx1 = engine->getValue(*this, x, y+1, z);
603  // Lerp x:x+1, y:y+1, z
604  vy = lerpValues(vx, vx1, fy);
605  }
606  else
607  vy = vx;
608 
609  if (AXIS2D != 2)
610  {
611  // Lerp x:x+1, y, z+1
612  if (AXIS2D != 0)
613  vx = lerpValues(engine->getValue(*this, x, y, z+1),
614  engine->getValue(*this, x+1, y, z+1),
615  fx);
616  else
617  vx = engine->getValue(*this, x, y, z+1);
618 
619  if (AXIS2D != 1)
620  {
621  // Lerp x:x+1, y+1, z+1
622  if (AXIS2D != 0)
623  vx1= lerpValues(engine->getValue(*this, x, y+1, z+1),
624  engine->getValue(*this, x+1, y+1, z+1),
625  fx);
626  else
627  vx1 = engine->getValue(*this, x, y+1, z+1);
628  // Lerp x:x+1, y:y+1, z+1
629  vy1 = lerpValues(vx, vx1, fy);
630  }
631  else
632  vy1 = vx;
633 
634  // Lerp x:x+1, y:y+1, z:z+1
635  vz = lerpValues(vy, vy1, fz);
636  }
637  else
638  vz = vy;
639  break;
640  }
641  }
642 
643  return vz;
644 }
645 
646 template <typename T>
647 bool
648 UT_VoxelTile<T>::extractSample(int x, int y, int z, T *sample) const
649 {
650  switch (myCompressionType)
651  {
652  case COMPRESS_RAW:
653  {
654  T *data = (T *) myData;
655  int offset = (z * myRes[1] + y) * myRes[0] + x;
656  int yinc = myRes[0];
657  int zinc = myRes[0] * myRes[1];
658 
659  sample[0] = data[offset];
660  sample[1] = data[offset+1];
661  sample[2+0] = data[offset+yinc];
662  sample[2+1] = data[offset+yinc+1];
663  sample[4+0] = data[offset+zinc];
664  sample[4+1] = data[offset+zinc+1];
665  sample[4+2+0] = data[offset+zinc+yinc];
666  sample[4+2+1] = data[offset+zinc+yinc+1];
667  break;
668  }
669  case COMPRESS_RAWFULL:
670  {
671  T *data = (T *) myData;
672  int offset = (z * TILESIZE + y) * TILESIZE + x;
673  int yinc = TILESIZE;
674  int zinc = TILESIZE * TILESIZE;
675 
676  sample[0] = data[offset];
677  sample[1] = data[offset+1];
678  sample[2+0] = data[offset+yinc];
679  sample[2+1] = data[offset+yinc+1];
680  sample[4+0] = data[offset+zinc];
681  sample[4+1] = data[offset+zinc+1];
682  sample[4+2+0] = data[offset+zinc+yinc];
683  sample[4+2+1] = data[offset+zinc+yinc+1];
684  break;
685  }
686  case COMPRESS_FPREAL16:
687  {
688  fpreal16 *data = (fpreal16 *) myData;
689  int offset = (z * myRes[1] + y) * myRes[0] + x;
690  int yinc = myRes[0];
691  int zinc = myRes[0] * myRes[1];
692 
693  sample[0] = data[offset];
694  sample[1] = data[offset+1];
695  sample[2+0] = data[offset+yinc];
696  sample[2+1] = data[offset+yinc+1];
697  sample[4+0] = data[offset+zinc];
698  sample[4+1] = data[offset+zinc+1];
699  sample[4+2+0] = data[offset+zinc+yinc];
700  sample[4+2+1] = data[offset+zinc+yinc+1];
701  break;
702  }
703  case COMPRESS_CONSTANT:
704  {
705  sample[0] = rawConstVal();
706  return true;
707  }
708 
709  default:
710  {
711  UT_VoxelTileCompress<T> *engine;
712 
713  engine = getCompressionEngine(myCompressionType);
714  // Lerp x:x+1, y, z
715  sample[0] = engine->getValue(*this, x, y, z);
716  sample[1] = engine->getValue(*this, x+1, y, z);
717  sample[2+0] = engine->getValue(*this, x, y+1, z);
718  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
719  sample[4+0] = engine->getValue(*this, x, y, z+1);
720  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
721  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
722  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
723  break;
724  }
725  }
726  return false;
727 }
728 
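// extractSample() gathers the 2x2x2 voxel neighbourhood used for a trilerp:
// sample[dz*4 + dy*2 + dx] receives the voxel at (x+dx, y+dy, z+dz).
// A return value of true means the tile is constant and only sample[0]
// was written; false means all eight entries are valid.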
729 template <typename T>
730 bool
731 UT_VoxelTile<T>::extractSamplePlus(int x, int y, int z, T *sample) const
732 {
733  switch (myCompressionType)
734  {
735  case COMPRESS_RAW:
736  {
737  T *data = (T *) myData;
738  int offset = (z * myRes[1] + y) * myRes[0] + x;
739  int yinc = myRes[0];
740  int zinc = myRes[0] * myRes[1];
741 
742  sample[0] = data[offset-1];
743  sample[1] = data[offset+1];
744  sample[2+0] = data[offset-yinc];
745  sample[2+1] = data[offset+yinc];
746  sample[4+0] = data[offset-zinc];
747  sample[4+1] = data[offset+zinc];
748  sample[6] = data[offset];
749  break;
750  }
751  case COMPRESS_RAWFULL:
752  {
753  T *data = (T *) myData;
754  int offset = (z * TILESIZE + y) * TILESIZE + x;
755  int yinc = TILESIZE;
756  int zinc = TILESIZE * TILESIZE;
757 
758  sample[0] = data[offset-1];
759  sample[1] = data[offset+1];
760  sample[2+0] = data[offset-yinc];
761  sample[2+1] = data[offset+yinc];
762  sample[4+0] = data[offset-zinc];
763  sample[4+1] = data[offset+zinc];
764  sample[6] = data[offset];
765  break;
766  }
767  case COMPRESS_FPREAL16:
768  {
769  fpreal16 *data = (fpreal16 *) myData;
770  int offset = (z * myRes[1] + y) * myRes[0] + x;
771  int yinc = myRes[0];
772  int zinc = myRes[0] * myRes[1];
773 
774  sample[0] = data[offset-1];
775  sample[1] = data[offset+1];
776  sample[2+0] = data[offset-yinc];
777  sample[2+1] = data[offset+yinc];
778  sample[4+0] = data[offset-zinc];
779  sample[4+1] = data[offset+zinc];
780  sample[6] = data[offset];
781  break;
782  }
783  case COMPRESS_CONSTANT:
784  {
785  sample[0] = rawConstVal();
786  return true;
787  }
788 
789  default:
790  {
791  UT_VoxelTileCompress<T> *engine;
792 
793  engine = getCompressionEngine(myCompressionType);
794  // Lerp x:x+1, y, z
795  sample[0] = engine->getValue(*this, x-1, y, z);
796  sample[1] = engine->getValue(*this, x+1, y, z);
797  sample[2+0] = engine->getValue(*this, x, y-1, z);
798  sample[2+1] = engine->getValue(*this, x, y+1, z);
799  sample[4+0] = engine->getValue(*this, x, y, z+1);
800  sample[4+1] = engine->getValue(*this, x, y, z-1);
801  sample[6] = engine->getValue(*this, x, y, z);
802  break;
803  }
804  }
805  return false;
806 }
807 
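// extractSamplePlus() gathers the 7-point "plus" stencil around (x, y, z):
// sample[0..5] hold the two neighbours along each of x, y and z, and
// sample[6] holds the centre value. As with extractSample(), a true return
// means the tile is constant and only sample[0] was written.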
808 template <typename T>
809 bool
810 UT_VoxelTile<T>::extractSampleCube(int x, int y, int z, T *sample) const
811 {
812  switch (myCompressionType)
813  {
814  case COMPRESS_RAW:
815  {
816  T *data = (T *) myData;
817  int offset = (z * myRes[1] + y) * myRes[0] + x;
818  int yinc = myRes[0];
819  int zinc = myRes[0] * myRes[1];
820  int sampidx = 0;
821 
822  offset -= zinc;
823  for (int dz = -1; dz <= 1; dz++)
824  {
825  offset -= yinc;
826  for (int dy = -1; dy <= 1; dy++)
827  {
828  sample[sampidx] = data[offset-1];
829  sample[sampidx+1] = data[offset];
830  sample[sampidx+2] = data[offset+1];
831  sampidx += 3;
832  offset += yinc;
833  }
834  offset -= yinc * 3;
835  offset += zinc;
836  }
837  break;
838  }
839  case COMPRESS_RAWFULL:
840  {
841  T *data = (T *) myData;
842  int offset = (z * TILESIZE + y) * TILESIZE + x;
843  int yinc = TILESIZE;
844  int zinc = TILESIZE * TILESIZE;
845  int sampidx = 0;
846 
847  offset -= zinc;
848  for (int dz = -1; dz <= 1; dz++)
849  {
850  offset -= yinc;
851  for (int dy = -1; dy <= 1; dy++)
852  {
853  sample[sampidx] = data[offset-1];
854  sample[sampidx+1] = data[offset];
855  sample[sampidx+2] = data[offset+1];
856  sampidx += 3;
857  offset += yinc;
858  }
859  offset -= yinc * 3;
860  offset += zinc;
861  }
862  break;
863  }
864  case COMPRESS_FPREAL16:
865  {
866  fpreal16 *data = (fpreal16 *) myData;
867  int offset = (z * myRes[1] + y) * myRes[0] + x;
868  int yinc = myRes[0];
869  int zinc = myRes[0] * myRes[1];
870  int sampidx = 0;
871 
872  offset -= zinc;
873  for (int dz = -1; dz <= 1; dz++)
874  {
875  offset -= yinc;
876  for (int dy = -1; dy <= 1; dy++)
877  {
878  sample[sampidx] = data[offset-1];
879  sample[sampidx+1] = data[offset];
880  sample[sampidx+2] = data[offset+1];
881  sampidx += 3;
882  offset += yinc;
883  }
884  offset -= yinc * 3;
885  offset += zinc;
886  }
887  break;
888  }
889  case COMPRESS_CONSTANT:
890  {
891  sample[0] = rawConstVal();
892  return true;
893  }
894 
895  default:
896  {
897  UT_VoxelTileCompress<T> *engine;
898 
899  engine = getCompressionEngine(myCompressionType);
900  int sampidx = 0;
901  // Lerp x:x+1, y, z
902  for (int dz = -1; dz <= 1; dz++)
903  {
904  for (int dy = -1; dy <= 1; dy++)
905  {
906  for (int dx = -1; dx <= 1; dx++)
907  {
908  sample[sampidx++] = engine->getValue(*this, x+dx, y+dy, z+dz);
909  }
910  }
911  }
912  break;
913  }
914  }
915  return false;
916 }
917 
918 template <typename T>
919 template <int AXIS2D>
920 bool
921 UT_VoxelTile<T>::extractSampleAxis(int x, int y, int z, T *sample) const
922 {
923  switch (myCompressionType)
924  {
925  case COMPRESS_RAW:
926  {
927  T *data = (T *) myData;
928  int offset = (z * myRes[1] + y) * myRes[0] + x;
929  int yinc = myRes[0];
930  int zinc = myRes[0] * myRes[1];
931 
932  sample[0] = data[offset];
933  if (AXIS2D != 0)
934  sample[1] = data[offset+1];
935  if (AXIS2D != 1)
936  {
937  sample[2+0] = data[offset+yinc];
938  if (AXIS2D != 0)
939  sample[2+1] = data[offset+yinc+1];
940  }
941  if (AXIS2D != 2)
942  {
943  sample[4+0] = data[offset+zinc];
944  if (AXIS2D != 0)
945  sample[4+1] = data[offset+zinc+1];
946  if (AXIS2D != 1)
947  {
948  sample[4+2+0] = data[offset+zinc+yinc];
949  if (AXIS2D != 0)
950  sample[4+2+1] = data[offset+zinc+yinc+1];
951  }
952  }
953  break;
954  }
955  case COMPRESS_RAWFULL:
956  {
957  T *data = (T *) myData;
958  int offset = (z * TILESIZE + y) * TILESIZE + x;
959  int yinc = TILESIZE;
960  int zinc = TILESIZE * TILESIZE;
961 
962  sample[0] = data[offset];
963  if (AXIS2D != 0)
964  sample[1] = data[offset+1];
965  if (AXIS2D != 1)
966  {
967  sample[2+0] = data[offset+yinc];
968  if (AXIS2D != 0)
969  sample[2+1] = data[offset+yinc+1];
970  }
971  if (AXIS2D != 2)
972  {
973  sample[4+0] = data[offset+zinc];
974  if (AXIS2D != 0)
975  sample[4+1] = data[offset+zinc+1];
976  if (AXIS2D != 1)
977  {
978  sample[4+2+0] = data[offset+zinc+yinc];
979  if (AXIS2D != 0)
980  sample[4+2+1] = data[offset+zinc+yinc+1];
981  }
982  }
983  break;
984  }
985  case COMPRESS_FPREAL16:
986  {
987  fpreal16 *data = (fpreal16 *) myData;
988  int offset = (z * myRes[1] + y) * myRes[0] + x;
989  int yinc = myRes[0];
990  int zinc = myRes[0] * myRes[1];
991 
992  sample[0] = data[offset];
993  if (AXIS2D != 0)
994  sample[1] = data[offset+1];
995  if (AXIS2D != 1)
996  {
997  sample[2+0] = data[offset+yinc];
998  if (AXIS2D != 0)
999  sample[2+1] = data[offset+yinc+1];
1000  }
1001  if (AXIS2D != 2)
1002  {
1003  sample[4+0] = data[offset+zinc];
1004  if (AXIS2D != 0)
1005  sample[4+1] = data[offset+zinc+1];
1006  if (AXIS2D != 1)
1007  {
1008  sample[4+2+0] = data[offset+zinc+yinc];
1009  if (AXIS2D != 0)
1010  sample[4+2+1] = data[offset+zinc+yinc+1];
1011  }
1012  }
1013  break;
1014  }
1015  case COMPRESS_CONSTANT:
1016  {
1017  sample[0] = rawConstVal();
1018  return true;
1019  }
1020 
1021  default:
1022  {
1023  UT_VoxelTileCompress<T> *engine;
1024 
1025  engine = getCompressionEngine(myCompressionType);
1026  // Lerp x:x+1, y, z
1027  sample[0] = engine->getValue(*this, x, y, z);
1028  if (AXIS2D != 0)
1029  sample[1] = engine->getValue(*this, x+1, y, z);
1030  if (AXIS2D != 1)
1031  {
1032  sample[2+0] = engine->getValue(*this, x, y+1, z);
1033  if (AXIS2D != 0)
1034  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
1035  }
1036  if (AXIS2D != 2)
1037  {
1038  sample[4+0] = engine->getValue(*this, x, y, z+1);
1039  if (AXIS2D != 0)
1040  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
1041  if (AXIS2D != 1)
1042  {
1043  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
1044  if (AXIS2D != 0)
1045  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
1046  }
1047  }
1048  break;
1049  }
1050  }
1051  return false;
1052 }
1053 
1054 #if 0
1055 template <typename T>
1056 T
1057 UT_VoxelTile<T>::lerp(v4uf frac, int x, int y, int z) const
1058 {
1059  v4uf a, b;
1060 
1061  switch (myCompressionType)
1062  {
1063  case COMPRESS_RAW:
1064  case COMPRESS_RAWFULL:
1065  {
1066  T *data = (T *) myData;
1067  int offset = (z * myRes[1] + y) * myRes[0] + x;
1068  int yinc = myRes[0];
1069  int zinc = myRes[0] * myRes[1];
1070 
1071  a = v4uf( data[offset],
1072  data[offset+zinc],
1073  data[offset+yinc],
1074  data[offset+yinc+zinc] );
1075  b = v4uf( data[offset+1],
1076  data[offset+zinc+1],
1077  data[offset+yinc+1],
1078  data[offset+yinc+zinc+1] );
1079  break;
1080  }
1081 
1082  case COMPRESS_CONSTANT:
1083  {
1084  // This is quite trivial to do a trilerp on.
1085  return rawConstVal();
1086  }
1087 
1088  default:
1089  {
1090  UT_VoxelTileCompress<T> *engine;
1091 
1092  engine = getCompressionEngine(myCompressionType);
1093  // Lerp x:x+1, y, z
1094  a = v4uf( engine->getValue(*this, x, y, z),
1095  engine->getValue(*this, x, y, z+1),
1096  engine->getValue(*this, x, y+1, z),
1097  engine->getValue(*this, x, y+1, z+1) );
1098  b = v4uf( engine->getValue(*this, x+1, y, z),
1099  engine->getValue(*this, x+1, y, z+1),
1100  engine->getValue(*this, x+1, y+1, z),
1101  engine->getValue(*this, x+1, y+1, z+1) );
1102  break;
1103  }
1104  }
1105 
1106  v4uf fx, fy, fz;
1107 
1108  fx = frac.swizzle<0, 0, 0, 0>();
1109  fy = frac.swizzle<1, 1, 1, 1>();
1110  fz = frac.swizzle<2, 2, 2, 2>();
1111 
1112  b -= a;
1113  a = madd(b, fx, a);
1114 
1115  b = a.swizzle<2, 3, 0, 1>();
1116  b -= a;
1117  a = madd(b, fy, a);
1118 
1119  b = a.swizzle<1, 2, 3, 0>();
1120  b -= a;
1121  a = madd(b, fz, a);
1122 
1123  return a[0];
1124 }
1125 #endif
1126 
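// fillCacheLine() exposes one full x-run of the tile at scanline (y, z).
// For RAW and RAWFULL tiles (when forcecopy is false) it returns a pointer
// directly into the tile data with stride 1; for constant tiles it can
// return the constant itself with stride 0; otherwise the run is
// decompressed into the caller's cacheline buffer.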
1127 template <typename T>
1128 T *
1129 UT_VoxelTile<T>::fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
1130 {
1131  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1132  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1133 
1134  T *src;
1135  int i, xres = myRes[0];
1136 
1137  // All the directly handled types exit directly from this switch.
1138  switch (myCompressionType)
1139  {
1140  case COMPRESS_RAW:
1141  stride = 1;
1142  src = (T *)myData + (z * myRes[1] + y) * xres;
1143  if (!forcecopy)
1144  return &src[x];
1145 
1146  for (i = 0; i < xres; i++)
1147  cacheline[i] = src[i];
1148 
1149  return &cacheline[x];
1150 
1151  case COMPRESS_FPREAL16:
1152  {
1153  fpreal16 *src;
1154 
1155  stride = 1;
1156  src = (fpreal16 *)myData + (z * myRes[1] + y) * xres;
1157 
1158  for (i = 0; i < xres; i++)
1159  cacheline[i] = src[i];
1160 
1161  return &cacheline[x];
1162  }
1163 
1164 
1165  case COMPRESS_CONSTANT:
1166  src = rawConstData();
1167  if (!forcecopy && !strideofone)
1168  {
1169  stride = 0;
1170  return src;
1171  }
1172  stride = 1;
1173  // Fill out a constant value
1174  for (i = 0; i < xres; i++)
1175  cacheline[i] = *src;
1176 
1177  return &cacheline[x];
1178 
1179 
1180  case COMPRESS_RAWFULL:
1181  stride = 1;
1182  src = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1183  if (!forcecopy)
1184  return &src[x];
1185 
1186  for (i = 0; i < xres; i++)
1187  cacheline[i] = src[i];
1188 
1189  return &cacheline[x];
1190  }
1191 
1192  // By default use the compression engine.
1193  UT_VoxelTileCompress<T> *engine;
1194 
1195  engine = getCompressionEngine(myCompressionType);
1196 
1197  // We could add support for a direct cacheline fill to accelerate
1198  // this case as well.
1199  stride = 1;
1200  for (i = 0; i < xres; i++)
1201  cacheline[i] = engine->getValue(*this, i, y, z);
1202 
1203  return &cacheline[x];
1204 }
1205 
1206 template <typename T>
1207 void
1208 UT_VoxelTile<T>::writeCacheLine(T *cacheline, int y, int z)
1209 {
1210  UT_ASSERT_P(y >= 0 && z >= 0);
1211  UT_ASSERT_P(y < myRes[1] && z < myRes[2]);
1212 
1213  T *dst, value;
1214  int i, xres = myRes[0];
1215 
1216  // All the directly handled types exit directly from this switch.
1217  switch (myCompressionType)
1218  {
1219  case COMPRESS_RAW:
1220  dst = (T *)myData + (z * myRes[1] + y) * xres;
1221  for (i = 0; i < xres; i++)
1222  *dst++ = *cacheline++;
1223  return;
1224 
1225  case COMPRESS_CONSTANT:
1226  value = rawConstVal();
1227  for (i = 0; i < xres; i++)
1228  if (cacheline[i] != value)
1229  break;
1230  // If everything was equal, our write is trivial.
1231  if (i == xres)
1232  return;
1233 
1234  break;
1235 
1236  case COMPRESS_RAWFULL:
1237  dst = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1238  for (i = 0; i < TILESIZE; i++)
1239  *dst++ = *cacheline++;
1240 
1241  return;
1242  }
1243 
1244  // Switch back to invoking writeThrough. Ideally we'd have
1245  // a version that can handle a whole cache line at once
1246  for (i = 0; i < xres; i++)
1247  if (!writeThrough(i, y, z, cacheline[i]))
1248  break;
1249 
1250  // Determine if we failed to write everything through
1251  if (i != xres)
1252  {
1253  // Uncompress and reinvoke ourselves.
1254  uncompress();
1255  writeCacheLine(cacheline, y, z);
1256  }
1257 }
1258 
1259 template <typename T>
1260 void
1261 UT_VoxelTile<T>::copyFragment(int dstx, int dsty, int dstz,
1262  const UT_VoxelTile<T> &srctile,
1263  int srcx, int srcy, int srcz)
1264 {
1265  int w = SYSmin(xres() - dstx, srctile.xres() - srcx);
1266  int h = SYSmin(yres() - dsty, srctile.yres() - srcy);
1267  int d = SYSmin(zres() - dstz, srctile.zres() - srcz);
1268 
1269 #if 1
1270  if (srctile.isSimpleCompression())
1271  {
1272  T *dst;
1273  const T *src;
1274  int srcinc;
1275 
1276  src = srctile.rawData();
1277  srcinc = srctile.isConstant() ? 0 : 1;
1278 
1279  // Ensure we are easy to write to.
1280  uncompress();
1281 
1282  dst = rawData();
1283  dst += dstx + (dsty + dstz*yres())*xres();
1284 
1285  if (srcinc)
1286  src += srcx + (srcy + srcz*srctile.yres())*srctile.xres();
1287 
1288  for (int z = 0; z < d; z++)
1289  {
1290  for (int y = 0; y < h; y++)
1291  {
1292  if (srcinc)
1293  {
1294  for (int x = 0; x < w; x++)
1295  dst[x] = src[x];
1296  }
1297  else
1298  {
1299  for (int x = 0; x < w; x++)
1300  dst[x] = *src;
1301  }
1302  dst += xres();
1303  if (srcinc)
1304  src += srctile.xres();
1305  }
1306  dst += (yres()-h) * xres();
1307  if (srcinc)
1308  src += (srctile.yres() - h) * srctile.xres();
1309  }
1310 
1311  return;
1312  }
1313 #endif
1314 
1315  // Fail safe approach.
1316  for (int z = 0; z < d; z++)
1317  for (int y = 0; y < h; y++)
1318  for (int x = 0; x < w; x++)
1319  {
1320  setValue(dstx+x, dsty+y, dstz+z,
1321  srctile(srcx+x, srcy+y, srcz+z));
1322  }
1323 }
1324 
1325 template <typename T>
1326 template <typename S>
1327 void
1328 UT_VoxelTile<T>::flatten(S *dst, int stride) const
1329 {
1330  int w = xres();
1331  int h = yres();
1332  int d = zres();
1333 
1334  if (isSimpleCompression())
1335  {
1336  const T *src;
1337  int srcinc;
1338 
1339  src = rawData();
1340  srcinc = isConstant() ? 0 : 1;
1341 
1342  if (stride == 1)
1343  {
1344  if (srcinc == 1)
1345  {
1346  // Super trivial!
1347  for (int i = 0; i < w * h * d; i++)
1348  {
1349  *dst++ = T(*src++);
1350  }
1351  }
1352  else
1353  {
1354  // Constant, also trivial!
1355  T cval = T(*src);
1356 
1357  for (int i = 0; i < w * h * d; i++)
1358  {
1359  *dst++ = cval;
1360  }
1361  }
1362  }
1363  else
1364  {
1365  for (int z = 0; z < d; z++)
1366  {
1367  for (int y = 0; y < h; y++)
1368  {
1369  if (srcinc)
1370  {
1371  for (int x = 0; x < w; x++)
1372  {
1373  *dst = S(src[x]);
1374  dst += stride;
1375  }
1376  }
1377  else
1378  {
1379  for (int x = 0; x < w; x++)
1380  {
1381  *dst = S(*src);
1382  dst += stride;
1383  }
1384  }
1385  if (srcinc)
1386  src += w;
1387  }
1388  }
1389  }
1390 
1391  return;
1392  }
1393 
1394  // Fail safe approach.
1395  for (int z = 0; z < d; z++)
1396  for (int y = 0; y < h; y++)
1397  for (int x = 0; x < w; x++)
1398  {
1399  *dst = S((*this)(x, y, z));
1400  dst += stride;
1401  }
1402 }
1403 
1404 template <typename T>
1405 template <typename S>
1406 void
1407 UT_VoxelTile<T>::writeData(const S *srcdata, int srcstride)
1408 {
1409  int w = xres();
1410  int h = yres();
1411  int d = zres();
1412  int i, n = w * h * d;
1413 
1414  // Check if src is constant
1415  S compare = srcdata[0];
1416  int srcoff = srcstride;
1417 
1418  if (srcstride == 0)
1419  {
1420  // Trivially constant
1421  makeConstant(T(compare));
1422  return;
1423  }
1424 
1425  for (i = 1; i < n; i++)
1426  {
1427  if (srcdata[srcoff] != compare)
1428  break;
1429  srcoff += srcstride;
1430  }
1431 
1432  if (i == n)
1433  {
1434  // Constant source!
1435  makeConstant(compare);
1436  return;
1437  }
1438 
1439  // Create a raw tile, expanding constants
1440  uncompress();
1441 
1442  if (srcstride == 1)
1443  {
1444  T *dst = rawData();
1445  for (i = 0; i < n; i++)
1446  {
1447  *dst++ = T(*srcdata++);
1448  }
1449  }
1450  else
1451  {
1452  T *dst = rawData();
1453 
1454  srcoff = 0;
1455  for (i = 0; i < n; i++)
1456  {
1457  dst[i] = T(srcdata[srcoff]);
1458  srcoff += srcstride;
1459  }
1460  }
1461 }
1462 
1463 template <typename T>
1464 void
1465 UT_VoxelTile<T>::setValue(int x, int y, int z, T t)
1466 {
1467  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1468  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1469 
1470  // Determine if assignment is compatible with current
1471  // compression technique.
1472  if (writeThrough(x, y, z, t))
1473  {
1474  return;
1475  }
1476  // Can't write to our current type of tile with the
1477  // given value, so abandon.
1478  uncompress();
1479 
1480  // Attempt to write through again. This must succeed
1481  // as we should now be uncompressed.
1482  UT_VERIFY_P(writeThrough(x, y, z, t));
1483 }
1484 
1485 template <typename T>
1486 void
1487 UT_VoxelTile<T>::uncompress()
1488 {
1489  switch (myCompressionType)
1490  {
1491  case COMPRESS_RAW:
1492  // Trivial.
1493  return;
1494 
1495  case COMPRESS_CONSTANT:
1496  {
1497  // Must expand the tile!
1498  T cval;
1499  int i, n;
1500 
1501  cval = rawConstVal();
1502  freeData();
1503 
1504  myCompressionType = COMPRESS_RAW;
1505 
1506  if (myRes[0] == TILESIZE &&
1507  myRes[1] == TILESIZE)
1508  {
1509  // Flag that we can do fast lookups on this tile.
1510  myCompressionType = COMPRESS_RAWFULL;
1511  }
1512 
1513  n = myRes[0] * myRes[1] * myRes[2];
1514  myData = UT_VOXEL_ALLOC(sizeof(T) * n);
1515 
1516  for (i = 0; i < n; i++)
1517  {
1518  ((T *)myData)[i] = cval;
1519  }
1520  return;
1521  }
1522  case COMPRESS_RAWFULL:
1523  {
1524  T *raw;
1525  int x, y, z, i, n;
1526 
1527  if (myRes[0] == TILESIZE &&
1528  myRes[1] == TILESIZE)
1529  {
1530  // Trivial
1531  return;
1532  }
1533 
1534  // Need to contract to the actual compact size.
1535  n = myRes[0] * myRes[1] * myRes[2];
1536  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1537  i = 0;
1538  for (z = 0; z < myRes[2]; z++)
1539  {
1540  for (y = 0; y < myRes[1]; y++)
1541  {
1542  for (x = 0; x < myRes[0]; x++)
1543  {
1544  raw[i++] = ((T *)myData)[x+(y+z*TILESIZE)*TILESIZE];
1545  }
1546  }
1547  }
1548 
1549  freeData();
1550  myCompressionType = COMPRESS_RAW;
1551  myData = raw;
1552 
1553  return;
1554  }
1555 
1556  case COMPRESS_FPREAL16:
1557  {
1558  T *raw;
1559  int x, y, z, i, n;
1560  fpreal16 *src = (fpreal16 *) myData;
1561 
1562  n = myRes[0] * myRes[1] * myRes[2];
1563  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1564  i = 0;
1565  for (z = 0; z < myRes[2]; z++)
1566  {
1567  for (y = 0; y < myRes[1]; y++)
1568  {
1569  for (x = 0; x < myRes[0]; x++)
1570  {
1571  raw[i] = src[i];
1572  i++;
1573  }
1574  }
1575  }
1576  freeData();
1577  myCompressionType = COMPRESS_RAW;
1578  myData = raw;
1579  return;
1580  }
1581  }
1582 
1583  // Use the compression engine.
1584  UT_VoxelTileCompress<T> *engine;
1585 
1586  engine = getCompressionEngine(myCompressionType);
1587 
1588  // We use the read ability to set our values.
1589  int x, y, z, i;
1590  T *raw;
1591 
1592  raw = (T *) UT_VOXEL_ALLOC(sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
1593  i = 0;
1594  for (z = 0; z < myRes[2]; z++)
1595  {
1596  for (y = 0; y < myRes[1]; y++)
1597  {
1598  for (x = 0; x < myRes[0]; x++)
1599  {
1600  raw[i++] = engine->getValue(*this, x, y, z);
1601  }
1602  }
1603  }
1604 
1605  freeData();
1606 
1607  // Now copy in the result
1608  myCompressionType = COMPRESS_RAW;
1609  if (myRes[0] == TILESIZE &&
1610  myRes[1] == TILESIZE)
1611  {
1612  // Flag that we can do fast lookups on this tile.
1613  myCompressionType = COMPRESS_RAWFULL;
1614  }
1615 
1616  myData = raw;
1617 }
1618 
1619 template <typename T>
1620 void
1621 UT_VoxelTile<T>::uncompressFull()
1622 {
1623  T *raw;
1624  int x, y, z, i;
1625 
1626  if (myCompressionType == COMPRESS_RAWFULL)
1627  return;
1628 
1629  uncompress();
1630 
1631  UT_ASSERT(myCompressionType == COMPRESS_RAW);
1632 
1633  // The RAWFULL format only differs from RAW when the tile dimensions
1634  // are smaller than the maximum tile size.
1635  if (myRes[0] < TILESIZE || myRes[1] < TILESIZE)
1636  {
1637  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
1638  i = 0;
1639  for (z = 0; z < myRes[2]; z++)
1640  {
1641  for (y = 0; y < myRes[1]; y++)
1642  {
1643  for (x = 0; x < myRes[0]; x++)
1644  {
1645  raw[x+(y+z*TILESIZE)*TILESIZE] = ((T *)myData)[i++];
1646  }
1647  }
1648  }
1649  freeData();
1650  myData = raw;
1651  }
1652  myCompressionType = COMPRESS_RAWFULL;
1653 }
1654 
1655 template <typename T>
1656 void
1657 UT_VoxelTile<T>::makeRawUninitialized()
1658 {
1659  if (isRaw() || isRawFull())
1660  return;
1661 
1662  freeData();
1663 
1664  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
1665  myCompressionType = COMPRESS_RAWFULL;
1666  else
1667  myCompressionType = COMPRESS_RAW;
1668 
1669  myData = UT_VOXEL_ALLOC(sizeof(T) * numVoxels());
1670 }
1671 
1672 template <typename T>
1673 void
1674 UT_VoxelTile<T>::findAverage(T &avg) const
1675 {
1676  float irx, iry, irz;
1677 
1678  irx = 1.0 / myRes[0];
1679  iry = 1.0 / myRes[1];
1680  irz = 1.0 / myRes[2];
1681  switch (myCompressionType)
1682  {
1683  case COMPRESS_RAW:
1684  {
1685  int i;
1686  const T *data = (const T*) myData;
1687 
1688  i = 0;
1689  T zavg;
1690  zavg = 0;
1691  for (int z = 0; z < myRes[2]; z++)
1692  {
1693  T yavg;
1694  yavg = 0;
1695  for (int y = 0; y < myRes[1]; y++)
1696  {
1697  T xavg;
1698  xavg = 0;
1699  for (int x = 0; x < myRes[0]; x++)
1700  {
1701  xavg += data[i++];
1702  }
1703  xavg *= irx;
1704  yavg += xavg;
1705  }
1706  yavg *= iry;
1707  zavg += yavg;
1708  }
1709  zavg *= irz;
1710 
1711  avg = zavg;
1712  return;
1713  }
1714 
1715  case COMPRESS_FPREAL16:
1716  {
1717  int i;
1718  const fpreal16 *data = (fpreal16 *) myData;
1719 
1720  i = 0;
1721  fpreal16 zavg = 0;
1722  for (int z = 0; z < myRes[2]; z++)
1723  {
1724  fpreal16 yavg = 0;
1725  for (int y = 0; y < myRes[1]; y++)
1726  {
1727  fpreal16 xavg = 0;
1728  for (int x = 0; x < myRes[0]; x++)
1729  {
1730  xavg += data[i++];
1731  }
1732  xavg *= irx;
1733  yavg += xavg;
1734  }
1735  yavg *= iry;
1736  zavg += yavg;
1737  }
1738  zavg *= irz;
1739 
1740  avg = zavg;
1741  return;
1742  }
1743 
1744  case COMPRESS_CONSTANT:
1745  avg = rawConstVal();
1746  return;
1747 
1748  case COMPRESS_RAWFULL:
1749  {
1750  int offset;
1751  const T *data = (const T*) myData;
1752 
1753  offset = 0;
1754  T zavg;
1755  zavg = 0;
1756  for (int z = 0; z < myRes[2]; z++)
1757  {
1758  T yavg;
1759  yavg = 0;
1760  for (int y = 0; y < myRes[1]; y++)
1761  {
1762  T xavg;
1763  xavg = 0;
1764  for (int x = 0; x < myRes[0]; x++)
1765  {
1766  xavg += data[x+offset];
1767  }
1768  xavg *= irx;
1769  yavg += xavg;
1770  offset += TILESIZE;
1771  }
1772  yavg *= iry;
1773  zavg += yavg;
1774  }
1775  zavg *= irz;
1776 
1777  avg = zavg;
1778  return;
1779  }
1780 
1781  default:
1782  {
1783  T zavg;
1784  zavg = 0;
1785  for (int z = 0; z < myRes[2]; z++)
1786  {
1787  T yavg;
1788  yavg = 0;
1789  for (int y = 0; y < myRes[1]; y++)
1790  {
1791  T xavg;
1792  xavg = 0;
1793  for (int x = 0; x < myRes[0]; x++)
1794  {
1795  xavg += (*this)(x, y, z);
1796  }
1797  xavg *= irx;
1798  yavg += xavg;
1799  }
1800  yavg *= iry;
1801  zavg += yavg;
1802  }
1803  zavg *= irz;
1804 
1805  avg = zavg;
1806  return;
1807  }
1808  }
1809 }
1810 
1811 template <typename T>
1812 void
1813 UT_VoxelTile<T>::findMinMax(T &min, T &max) const
1814 {
1815  switch (myCompressionType)
1816  {
1817  case COMPRESS_RAW:
1818  {
1819  int n = myRes[0] * myRes[1] * myRes[2];
1820  int i;
1821 
1822  min = max = *(T*)myData;
1823  for (i = 1; i < n; i++)
1824  {
1825  expandMinMax( ((T*)myData)[i], min, max );
1826  }
1827  return;
1828  }
1829 
1830  case COMPRESS_FPREAL16:
1831  {
1832  int n = myRes[0] * myRes[1] * myRes[2];
1833  int i;
1834  fpreal16 *src = (fpreal16 *)myData;
1835 
1836  min = max = *src;
1837  for (i = 1; i < n; i++)
1838  {
1839  T val;
1840  val = src[i];
1841  expandMinMax( val, min, max );
1842  }
1843  return;
1844  }
1845 
1846  case COMPRESS_CONSTANT:
1847  min = max = rawConstVal();
1848  return;
1849 
1850  case COMPRESS_RAWFULL:
1851  {
1852  int x, y, z, offset;
1853 
1854  min = max = *(T*)myData;
1855  offset = 0;
1856  for (z = 0; z < myRes[2]; z++)
1857  {
1858  for (y = 0; y < myRes[1]; y++)
1859  {
1860  for (x = 0; x < myRes[0]; x++)
1861  {
1862  expandMinMax(
1863  ((T*)myData)[x+offset],
1864  min, max );
1865  }
1866  offset += TILESIZE;
1867  }
1868  }
1869  return;
1870  }
1871 
1872  default:
1873  {
1874  // Use the compression engine.
1875  UT_VoxelTileCompress<T> *engine;
1876 
1877  engine = getCompressionEngine(myCompressionType);
1878 
1879  engine->findMinMax(*this, min, max);
1880  return;
1881  }
1882  }
1883 }
1884 
1885 template <typename T>
1886 bool
1887 UT_VoxelTile<T>::hasNan() const
1888 {
1889  switch (myCompressionType)
1890  {
1891  case COMPRESS_RAW:
1892  case COMPRESS_RAWFULL:
1893  {
1894  int n = myRes[0] * myRes[1] * myRes[2];
1895  int i;
1896 
1897  for (i = 0; i < n; i++)
1898  {
1899  if (SYSisNan( ((T*)myData)[i] ))
1900  return true;
1901  }
1902  return false;
1903  }
1904 
1905  case COMPRESS_FPREAL16:
1906  {
1907  return false;
1908  }
1909 
1910  case COMPRESS_CONSTANT:
1911  if (SYSisNan(rawConstVal()))
1912  return true;
1913  return false;
1914 
1915  default:
1916  {
1917  // Use the compression engine.
1918  UT_VoxelTileCompress<T> *engine;
1919  int x, y, z;
1920 
1921  engine = getCompressionEngine(myCompressionType);
1922 
1923  for (z = 0; z < myRes[2]; z++)
1924  for (y = 0; y < myRes[1]; y++)
1925  for (x = 0; x < myRes[0]; x++)
1926  if (SYSisNan(engine->getValue(*this, x, y, z)))
1927  {
1928  return true;
1929  }
1930 
1931  return false;
1932  }
1933  }
1934 }
1935 
1936 template <typename T>
1937 bool
1938 UT_VoxelTile<T>::tryCompress(const UT_VoxelCompressOptions &options)
1939 {
1940  T min, max;
1941  bool losslessonly = (options.myQuantizeTol == 0.0);
1942  int i;
1943  UT_VoxelTileCompress<T> *engine;
1944 
1945  // This is as easy as it gets.
1946  if (myCompressionType == COMPRESS_CONSTANT)
1947  return false;
1948 
1949  findMinMax(min, max);
1950 
1951  // See if we can be made into a constant tile.
1952  if (dist(min, max) <= options.myConstantTol)
1953  {
1954  // Ignore if we are already constant.
1955  if (myCompressionType == COMPRESS_CONSTANT)
1956  return false;
1957 
1958  // We are fully constant!
1959  if (min != max)
1960  {
1961  T avg;
1962  findAverage(avg);
1963  makeConstant(avg);
1964  }
1965  else
1966  {
1967  makeConstant(min);
1968  }
1969  return true;
1970  }
1971 
1972  bool result = false;
1973 
1974  if (options.myAllowFP16)
1975  {
1976  if (myCompressionType == COMPRESS_RAW ||
1977  myCompressionType == COMPRESS_RAWFULL)
1978  {
1979  makeFpreal16();
1980  result = true;
1981  }
1982  }
1983 
1984  for (i = 0; i < getCompressionEngines().entries(); i++)
1985  {
1986  engine = getCompressionEngines()(i);
1987 
1988  // Ignore possibly expensive lossy compressions.
1989  if (losslessonly && !engine->isLossless())
1990  continue;
1991 
1992  if (engine->tryCompress(*this, options, min, max))
1993  {
1994  myCompressionType = i + COMPRESS_ENGINE;
1995  result = true;
1996  // We keep testing in case another compression engine
1997  // can get a better number...
1998  }
1999  }
2000 
2001 // If we are RAW compressed, check to see if we could become
2002  // RAWFULL for faster access.
2003  if (myCompressionType == COMPRESS_RAW)
2004  {
2005  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
2006  {
2007  myCompressionType = COMPRESS_RAWFULL;
2008  }
2009  }
2010 
2011 // Return whether any compression was applied.
2012  return result;
2013 }
2014 
2015 template <typename T>
2016 void
2017 UT_VoxelTile<T>::makeConstant(T t)
2018 {
2019  if (!isConstant())
2020  {
2021  freeData();
2022 
2023  if (!inlineConstant())
2024  myData = UT_VOXEL_ALLOC(sizeof(T));
2025  }
2026 
2027  myCompressionType = COMPRESS_CONSTANT;
2028  *rawConstData() = t;
2029 }
2030 
2031 template <typename T>
2032 void
2033 UT_VoxelTile<T>::makeFpreal16()
2034 {
2035  if (isConstant())
2036  {
2037  return;
2038  }
2039 
2040  if (myCompressionType == COMPRESS_FPREAL16)
2041  return;
2042 
2043  // Get our new data.
2044  int len = myRes[2] * myRes[1] * myRes[0];
2045  fpreal16 *data = (fpreal16 *)UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2046 
2047  if (myCompressionType == COMPRESS_RAW ||
2048  myCompressionType == COMPRESS_RAWFULL)
2049  {
2050  for (int i = 0; i < len; i++)
2051  {
2052  data[i] = UTvoxelConvertFP16( ((T*)myData)[i] );
2053  }
2054  }
2055  else
2056  {
2057  // Apply any converters.
2058  int i = 0;
2059 
2060  for (int z = 0; z < myRes[2]; z++)
2061  {
2062  for (int y = 0; y < myRes[1]; y++)
2063  {
2064  for (int x = 0; x < myRes[0]; x++)
2065  {
2066  data[i++] = UTvoxelConvertFP16( (*this)(x, y, z));
2067  }
2068  }
2069  }
2070  }
2071 
2072  freeData();
2073  myData = data;
2074  myCompressionType = COMPRESS_FPREAL16;
2075 }
2076 
2077 template <typename T>
2078 int64
2079 UT_VoxelTile<T>::getMemoryUsage(bool inclusive) const
2080 {
2081  int64 mem = inclusive ? sizeof(*this) : 0;
2082  mem += getDataLength();
2083  return mem;
2084 }
2085 
2086 template <typename T>
2087 int64
2088 UT_VoxelTile<T>::getDataLength() const
2089 {
2090  exint usage;
2091 
2092  switch (myCompressionType)
2093  {
2094  case COMPRESS_RAW:
2095  usage = sizeof(T) * xres() * yres() * zres();
2096  break;
2097  case COMPRESS_FPREAL16:
2098  usage = sizeof(fpreal16) * xres() * yres() * zres();
2099  break;
2100  case COMPRESS_CONSTANT:
2101  if (inlineConstant())
2102  usage = 0;
2103  else
2104  usage = sizeof(T);
2105  break;
2106  case COMPRESS_RAWFULL:
2107  usage = sizeof(T) * TILESIZE * TILESIZE * zres();
2108  break;
2109  default:
2110  {
2111  // Use the compression engine.
2112  UT_VoxelTileCompress<T> *engine;
2113  engine = getCompressionEngine(myCompressionType);
2114  usage = engine->getDataLength(*this);
2115  break;
2116  }
2117  }
2118  return usage;
2119 }
2120 
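// weightedSum() accumulates a separable weighted sum over the voxel box
// [pstart, pend), which is assumed to lie within this tile. weights[axis]
// is indexed by the global coordinate minus start[axis]; tile-local
// coordinates are recovered by masking pstart with TILEMASK. Constant
// tiles simply add the constant scaled by the product of the per-axis
// weight sums.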
2121 template <typename T>
2122 void
2123 UT_VoxelTile<T>::weightedSum(int pstart[3], int pend[3],
2124  const float *weights[3], int start[3],
2125  T &result)
2126 {
2127  int ix, iy, iz, i;
2128  int px, py, pz;
2129  int ixstart, ixend;
2130  int tstart[3];
2131  fpreal w, pw;
2132  T psumx, psumy;
2133 
2134  switch (myCompressionType)
2135  {
2136  case COMPRESS_CONSTANT:
2137  {
2138  w = 1;
2139  for (i = 0; i < 3; i++)
2140  {
2141  pw = 0;
2142  for (ix = 0; ix < pend[i]-pstart[i]; ix++)
2143  pw += weights[i][ix+pstart[i]-start[i]];
2144  w *= pw;
2145  }
2146  result += w * rawConstVal();
2147  break;
2148  }
2149 
2150  case COMPRESS_RAW:
2151  {
2152  tstart[0] = pstart[0] & TILEMASK;
2153  tstart[1] = pstart[1] & TILEMASK;
2154  tstart[2] = pstart[2] & TILEMASK;
2155  ixstart = pstart[0]-start[0];
2156  ixend = pend[0]-start[0];
2157  pz = tstart[2];
2158  UT_ASSERT(pz < myRes[2]);
2159  UT_ASSERT(ixend - ixstart <= myRes[0]);
2160  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2161  {
2162  psumy = 0;
2163  py = tstart[1];
2164  UT_ASSERT(py < myRes[1]);
2165  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2166  {
2167  psumx = 0;
2168  px = ((pz * myRes[1]) + py) * myRes[0] + tstart[0];
2169  for (ix = ixstart; ix < ixend; ix++, px++)
2170  {
2171  psumx += weights[0][ix]* ((T*)myData)[px];
2172  }
2173  psumy += weights[1][iy-start[1]] * psumx;
2174  }
2175  result += weights[2][iz-start[2]] * psumy;
2176  }
2177  break;
2178  }
2179 
2180  case COMPRESS_FPREAL16:
2181  {
2182  fpreal16 *src = (fpreal16 *) myData;
2183 
2184  tstart[0] = pstart[0] & TILEMASK;
2185  tstart[1] = pstart[1] & TILEMASK;
2186  tstart[2] = pstart[2] & TILEMASK;
2187  ixstart = pstart[0]-start[0];
2188  ixend = pend[0]-start[0];
2189  pz = tstart[2];
2190  UT_ASSERT(pz < myRes[2]);
2191  UT_ASSERT(ixend - ixstart <= myRes[0]);
2192  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2193  {
2194  psumy = 0;
2195  py = tstart[1];
2196  UT_ASSERT(py < myRes[1]);
2197  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2198  {
2199  psumx = 0;
2200  px = ((pz * myRes[1]) + py) * myRes[0] + tstart[0];
2201  for (ix = ixstart; ix < ixend; ix++, px++)
2202  {
2203  T val;
2204  val = src[px];
2205  psumx += weights[0][ix]* val;
2206  }
2207  psumy += weights[1][iy-start[1]] * psumx;
2208  }
2209  result += weights[2][iz-start[2]] * psumy;
2210  }
2211  break;
2212  }
2213  case COMPRESS_RAWFULL:
2214  {
2215  tstart[0] = pstart[0] & TILEMASK;
2216  tstart[1] = pstart[1] & TILEMASK;
2217  tstart[2] = pstart[2] & TILEMASK;
2218  ixstart = pstart[0]-start[0];
2219  ixend = pend[0]-start[0];
2220  pz = tstart[2];
2221  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2222  {
2223  psumy = 0;
2224  py = tstart[1];
2225  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2226  {
2227  psumx = 0;
2228  px = ((pz * TILESIZE) + py) * TILESIZE + tstart[0];
2229  for (ix = ixstart; ix < ixend; ix++, px++)
2230  {
2231  psumx += weights[0][ix]* ((T*)myData)[px];
2232  }
2233  psumy += weights[1][iy-start[1]] * psumx;
2234  }
2235  result += weights[2][iz-start[2]] * psumy;
2236  }
2237  break;
2238  }
2239 
2240  default:
2241  {
2242  // For all other compression types, we use our
2243  // getValue() accessor rather than trying to
2244  // do anything fancy.
2245  tstart[0] = pstart[0] & TILEMASK;
2246  tstart[1] = pstart[1] & TILEMASK;
2247  tstart[2] = pstart[2] & TILEMASK;
2248  ixstart = pstart[0]-start[0];
2249  ixend = pend[0]-start[0];
2250  pz = tstart[2];
2251  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2252  {
2253  psumy = 0;
2254  py = tstart[1];
2255  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2256  {
2257  psumx = 0;
2258  px = tstart[0];
2259  for (ix = ixstart; ix < ixend; ix++, px++)
2260  {
2261  psumx += weights[0][ix] *
2262  (*this)(px, py, pz);
2263  }
2264  psumy += weights[1][iy-start[1]] * psumx;
2265  }
2266  result += weights[2][iz-start[2]] * psumy;
2267  }
2268  break;
2269  }
2270  }
2271 }
2272 
2273 template <typename T>
2274 void
2275 UT_VoxelTile<T>::registerCompressionEngine(UT_VoxelTileCompress<T> *engine)
2276 {
2277  int i;
2278 
2279 // Replace old copy, if any.
2280  for (i = 0; i < getCompressionEngines().entries(); i++)
2281  {
2282  if (!strcmp(engine->getName(), getCompressionEngines()(i)->getName()))
2283  {
2284  getCompressionEngines()(i) = engine;
2285  return;
2286  }
2287  }
2288 
2289  getCompressionEngines().append(engine);
2290 }
2291 
2292 template <typename T>
2293 int
2294 UT_VoxelTile<T>::lookupCompressionEngine(const char *name)
2295 {
2296  int i;
2297 
2298  if (!name)
2299  return -1;
2300 
2301  for (i = 0; i < getCompressionEngines().entries(); i++)
2302  {
2303  if (!strcmp(name, getCompressionEngines()(i)->getName()))
2304  {
2305  return i + COMPRESS_ENGINE;
2306  }
2307  }
2308 
2309  return -1;
2310 }
2311 
2312 template <typename T>
2313 UT_VoxelTileCompress<T> *
2314 UT_VoxelTile<T>::getCompressionEngine(int index)
2315 {
2316  index -= COMPRESS_ENGINE;
2317 
2318  return getCompressionEngines()(index);
2319 }
2320 
2321 template <typename T>
2322 void
2323 UT_VoxelTile<T>::save(std::ostream &os) const
2324 {
2325  // Always save in our native format...
2326  if (myCompressionType >= COMPRESS_ENGINE)
2327  {
2328  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2329 
2330  if (engine->canSave())
2331  {
2332  char type = myCompressionType;
2333 
2334  UTwrite(os, &type, 1);
2335  engine->save(os, *this);
2336  return;
2337  }
2338 
2339  // Can't save, must save as raw.
2340  char type = COMPRESS_RAW;
2341  T value;
2342 
2343  UTwrite(os, &type, 1);
2344 
2345  for (int z = 0; z < zres(); z++)
2346  for (int y = 0; y < yres(); y++)
2347  for (int x = 0; x < xres(); x++)
2348  {
2349  value = (*this)(x, y, z);
2350  UTwrite<T>(os, &value, 1);
2351  }
2352  return;
2353  }
2354 
2355  int len;
2356  char type = myCompressionType;
2357 
2358  if (type == COMPRESS_RAWFULL &&
2359  myRes[0] == TILESIZE &&
2360  myRes[1] == TILESIZE &&
2361  myRes[2] != TILESIZE)
2362  {
2363  // Forbid saving this as a raw full as that will confuse
2364  // older versions of Houdini.
2365  type = COMPRESS_RAW;
2366  }
2367 
2368  UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);
2369 
2370  UTwrite(os, &type, 1);
2371 
2372  switch ((CompressionType) type)
2373  {
2374  case COMPRESS_RAW:
2375  len = myRes[2] * myRes[1] * myRes[0];
2376  UTwrite<T>(os, (T *) myData, len);
2377  break;
2378 
2379  case COMPRESS_FPREAL16:
2380  len = myRes[2] * myRes[1] * myRes[0];
2381  UTwrite(os, (int16 *) myData, len);
2382  break;
2383 
2384  case COMPRESS_RAWFULL:
2385  len = TILESIZE * TILESIZE * TILESIZE;
2386  UTwrite<T>(os, (T *) myData, len);
2387  break;
2388 
2389  case COMPRESS_CONSTANT:
2390  UTwrite<T>(os, rawConstData(), 1);
2391  break;
2392 
2393  case COMPRESS_ENGINE:
2394  UT_ASSERT(!"Invalid compression type");
2395  break;
2396  }
2397 }
2398 
2399 template <typename T>
2400 void
2401 UT_VoxelTile<T>::load(UT_IStream &is, const UT_IntArray &compress)
2402 {
2403  char type, otype;
2404  int len;
2405 
2406  is.readChar(type);
2407 
2408  // Perform the indirection to find out our native type.
2409  if (type >= 0 && type < compress.entries())
2410  {
2411  otype = type;
2412  type = compress(type);
2413  if (type == -1)
2414  {
2415  std::cerr << "Missing compression engine " << (int) otype << "\n";
2416  }
2417  }
2418 
2419  if (type >= COMPRESS_ENGINE)
2420  {
2421  freeData();
2422  myCompressionType = type;
2423 
2424  if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
2425  {
2426  // Invalid type!
2427  std::cerr << "Invalid compression engine " << (int) otype << "\n";
2428  return;
2429  }
2430 
2431  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2432 
2433  engine->load(is, *this);
2434 
2435  return;
2436  }
2437 
2438  UT_ASSERT(type >= 0);
2439  if (type < 0)
2440  {
2441  return;
2442  }
2443 
2444  freeData();
2445 
2446  myCompressionType = type;
2447 
2448  switch ((CompressionType) myCompressionType)
2449  {
2450  case COMPRESS_RAW:
2451  len = myRes[2] * myRes[1] * myRes[0];
2452  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2453  is.read<T>((T *) myData, len);
2454  break;
2455 
2456  case COMPRESS_FPREAL16:
2457  len = myRes[2] * myRes[1] * myRes[0];
2458  myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2459  is.read((int16 *) myData, len);
2460  break;
2461 
2462  case COMPRESS_RAWFULL:
2463  len = TILESIZE * TILESIZE * TILESIZE;
2464  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2465  is.read<T>((T *) myData, len);
2466  break;
2467 
2468  case COMPRESS_CONSTANT:
2469  if (!inlineConstant())
2470  myData = UT_VOXEL_ALLOC(sizeof(T));
2471  is.read<T>(rawConstData(), 1);
2472  break;
2473 
2474  case COMPRESS_ENGINE:
2475  UT_ASSERT(!"Invalid compression type");
2476  break;
2477  }
2478 
2479 // Tag RAW tiles that span a full tile in x and y as RAWFULL,
2480 // even if they are incomplete in z.
2480  if (myCompressionType == COMPRESS_RAW &&
2481  myRes[0] == TILESIZE &&
2482  myRes[1] == TILESIZE)
2483  myCompressionType = COMPRESS_RAWFULL;
2484 }
2485 
2486 template <typename T>
2487 bool
2488 UT_VoxelTile<T>::saveData(UT_JSONWriter &w) const
2489 {
2490  bool ok = true;
2491 
2492  // Always save in our native format...
2493  if (myCompressionType >= COMPRESS_ENGINE)
2494  {
2495  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2496 
2497  if (engine->canSave())
2498  {
2499  char type = myCompressionType;
2500 
2501  ok = ok && w.jsonBeginArray();
2502  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2504  ok = ok && w.jsonInt(type);
2505  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2507  ok = ok && engine->save(w, *this);
2508  ok = ok && w.jsonEndArray();
2509  return ok;
2510  }
2511 
2512  // Can't save, must save as raw.
2513  char type = COMPRESS_RAW;
2514  T value;
2515 
2516  ok = ok && w.jsonBeginArray();
2517  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2519  ok = ok && w.jsonInt(type);
2520  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2522 
2523  ok = ok && w.beginUniformArray(xres()*yres()*zres(),
2524  w.jidFromValue(&value));
2525  for (int z = 0; z < zres(); z++)
2526  for (int y = 0; y < yres(); y++)
2527  for (int x = 0; x < xres(); x++)
2528  {
2529  value = (*this)(x, y, z);
2530  ok = ok && w.uniformWrite(value);
2531  }
2532  ok = ok && w.endUniformArray();
2533  ok = ok && w.jsonEndArray();
2534  return ok;
2535  }
2536 
2537  int len;
2538  char type = myCompressionType;
2539 
2540  UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);
2541  ok = ok && w.jsonBeginArray();
2542  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2544  ok = ok && w.jsonInt(type);
2545  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2547 
2548  switch (myCompressionType)
2549  {
2550  case COMPRESS_RAW:
2551  len = myRes[2] * myRes[1] * myRes[0];
2552  ok = ok && w.jsonUniformArray(len, (T *)myData);
2553  break;
2554 
2555  case COMPRESS_FPREAL16:
2556  len = myRes[2] * myRes[1] * myRes[0];
2557  ok = ok && w.jsonUniformArray(len, (fpreal16 *)myData);
2558  break;
2559 
2560  case COMPRESS_RAWFULL:
2561  len = TILESIZE * TILESIZE * myRes[2];
2562  ok = ok && w.jsonUniformArray(len, (T *)myData);
2563  break;
2564 
2565  case COMPRESS_CONSTANT:
2566  ok = ok && w.jsonValue(rawConstVal());
2567  break;
2568  }
2569 
2570  ok = ok && w.jsonEndArray();
2571  return ok;
2572 }
2573 
2574 template <typename T>
2575 bool
2577 {
2578  UT_JSONParser::iterator it;
2579  UT_WorkBuffer key;
2580  int8 type, otype;
2581  int len;
2582 
2583  it = p.beginArray();
2584  if (it.atEnd() || !it.getLowerKey(key))
2585  return false;
2586  if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
2588  return false;
2589  if (!p.parseNumber(type))
2590  return false;
2591 
2592  ++it;
2593 
2594  if (it.atEnd() || !it.getLowerKey(key))
2595  return false;
2596  if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
2598  return false;
2599 
2600  // Perform the indirection to find out our native type.
2601  if (type >= 0 && type < compress.entries())
2602  {
2603  otype = type;
2604  type = compress(type);
2605  if (type == -1)
2606  {
2607  std::cerr << "Missing compression engine " << (int) otype << "\n";
2608  }
2609  }
2610 
2611  if (type >= COMPRESS_ENGINE)
2612  {
2613  freeData();
2614  myCompressionType = type;
2615 
2616  if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
2617  {
2618  // Invalid type!
2619  std::cerr << "Invalid compression engine " << (int) otype << "\n";
2620  return false;
2621  }
2622 
2623  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2624 
2625  bool engine_ok = engine->load(p, *this);
2626  ++it;
2627  return it.atEnd() && engine_ok;
2628  }
2629 
2630  UT_ASSERT(type >= 0);
2631  if (type < 0)
2632  {
2633  return false;
2634  }
2635 
2636  freeData();
2637 
2638  myCompressionType = type;
2639 
2640  switch (myCompressionType)
2641  {
2642  case COMPRESS_RAW:
2643  len = myRes[2] * myRes[1] * myRes[0];
2644  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2645  if (p.parseUniformArray((T *)myData, len) != len)
2646  return false;
2647  break;
2648 
2649  case COMPRESS_FPREAL16:
2650  len = myRes[2] * myRes[1] * myRes[0];
2651  myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2652  if (p.parseUniformArray((fpreal16 *)myData, len) != len)
2653  return false;
2654  break;
2655 
2656  case COMPRESS_RAWFULL:
2657  len = TILESIZE * TILESIZE * myRes[2];
2658  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2659  if (p.parseUniformArray((T *)myData, len) != len)
2660  return false;
2661  break;
2662 
2663  case COMPRESS_CONSTANT:
2664  if (!inlineConstant())
2665  myData = UT_VOXEL_ALLOC(sizeof(T));
2666  if (!p.parseNumber(*rawConstData()))
2667  return false;
2668  break;
2669  }
2670 
2671  ++it;
2672  return it.atEnd();
2673 }
2674 
2675 template <typename T>
2676 void
2678 {
2679  int16 ntype = getCompressionEngines().entries();
2680  int i;
2681 
2682  ntype += COMPRESS_ENGINE;
2683 
2684  UTwrite(os, &ntype);
2685 
2686  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2687  UTsaveStringBinary(os, "raw", UT_STRING_8BIT_IO);
2688  UTsaveStringBinary(os, "rawfull", UT_STRING_8BIT_IO);
2689  UTsaveStringBinary(os, "constant", UT_STRING_8BIT_IO);
2690  UTsaveStringBinary(os, "fpreal16", UT_STRING_8BIT_IO);
2691 
2692  ntype -= COMPRESS_ENGINE;
2693  for (i = 0; i < ntype; i++)
2694  {
2695  UTsaveStringBinary(os, getCompressionEngines()(i)->getName(), UT_STRING_8BIT_IO);
2696  }
2697 }
2698 
2699 template <typename T>
2700 void
2702 {
2703  int16 ntype;
2704  int i, idx;
2705 
2706  compress.entries(0);
2707 
2708  is.read(&ntype);
2709 
2710  for (i = 0; i < ntype; i++)
2711  {
2712  UT_String name;
2713 
2715  if (name == "raw")
2716  compress.append(COMPRESS_RAW);
2717  else if (name == "rawfull")
2718  compress.append(COMPRESS_RAWFULL);
2719  else if (name == "constant")
2720  compress.append(COMPRESS_CONSTANT);
2721  else if (name == "fpreal16")
2722  compress.append(COMPRESS_FPREAL16);
2723  else
2724  {
2725  idx = lookupCompressionEngine(name);
2726 
2727  // -1 means a failure to find it in our set..
2728  // this is only a bad thing if a tile actually uses this engine.
2729 
2730  compress.append(idx);
2731  }
2732  }
2733 }
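
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): loadCompressionTypes()
// turns the list of names stored in the stream into a table that maps a
// stored type index to this build's native type, which is the table that
// load() consults above.  A missing engine maps to -1 and only matters if
// a tile actually uses it.  Names and containers here are generic
// stand-ins, not the HDK types.
#include <cstddef>
#include <string>
#include <vector>

static std::vector<int>
buildRemapTable(const std::vector<std::string> &stored_names,
                const std::vector<std::string> &native_names)
{
    std::vector<int> remap;
    remap.reserve(stored_names.size());
    for (const std::string &name : stored_names)
    {
        int idx = -1;                           // engine not present in this build
        for (std::size_t i = 0; i < native_names.size(); i++)
        {
            if (native_names[i] == name)
            {
                idx = int(i);
                break;
            }
        }
        remap.push_back(idx);
    }
    return remap;
}
// ----------------------------------------------------------------------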
2734 
2735 template <typename T>
2736 bool
2738 {
2739  int16 ntype = getCompressionEngines().entries();
2740  int i;
2741  bool ok = true;
2742 
2743  ntype += COMPRESS_ENGINE;
2744 
2745  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2746  ok = ok && w.beginUniformArray(ntype, UT_JID_STRING);
2747  ok = ok && w.uniformWrite("raw");
2748  ok = ok && w.uniformWrite("rawfull");
2749  ok = ok && w.uniformWrite("constant");
2750  ok = ok && w.uniformWrite("fpreal16");
2751 
2752  ntype -= COMPRESS_ENGINE;
2753  for (i = 0; i < ntype; i++)
2754  {
2755  ok = ok && w.uniformWrite(getCompressionEngines()(i)->getName());
2756  }
2757 
2758  ok = ok && w.endUniformArray();
2759  return ok;
2760 }
2761 
2762 template <typename T>
2763 bool
2765 {
2766  UT_JSONParser::iterator it;
2767  UT_WorkBuffer buffer;
2768  int idx;
2769 
2770  compress.entries(0);
2771  for (it = p.beginArray(); !it.atEnd(); ++it)
2772  {
2773  if (!p.parseString(buffer))
2774  return false;
2775 
2776  if (!buffer.strcmp("raw"))
2777  compress.append(COMPRESS_RAW);
2778  else if (!buffer.strcmp("rawfull"))
2779  compress.append(COMPRESS_RAWFULL);
2780  else if (!buffer.strcmp("constant"))
2781  compress.append(COMPRESS_CONSTANT);
2782  else if (!buffer.strcmp("fpreal16"))
2783  compress.append(COMPRESS_FPREAL16);
2784  else
2785  {
2786  idx = lookupCompressionEngine(buffer.buffer());
2787 
2788  // -1 means a failure to find it in our set..
2789  // this is only a bad thing if a tile actually uses this engine.
2790 
2791  compress.append(idx);
2792  }
2793  }
2794  return true;
2795 }
2796 
2797 
2798 
2799 //
2800 // VoxelArray definitions.
2801 //
2802 
2803 template <typename T>
2805 {
2806  myRes[0] = 0;
2807  myRes[1] = 0;
2808  myRes[2] = 0;
2809  myTileRes[0] = 0;
2810  myTileRes[1] = 0;
2811  myTileRes[2] = 0;
2812 
2813  myTiles = 0;
2814 
2815  myInvRes = 1;
2816 
2817  myBorderValue = 0;
2818  myBorderScale[0] = 0;
2819  myBorderScale[1] = 0;
2820  myBorderScale[2] = 0;
2821  myBorderType = UT_VOXELBORDER_STREAK;
2822 
2823  mySharedMem = 0;
2824  mySharedMemView = 0;
2825 }
2826 
2827 template <typename T>
2829 {
2830  deleteVoxels();
2831 }
2832 
2833 template <typename T>
2835 {
2836  myRes[0] = 0;
2837  myRes[1] = 0;
2838  myRes[2] = 0;
2839  myInvRes = 1;
2840  myTileRes[0] = 0;
2841  myTileRes[1] = 0;
2842  myTileRes[2] = 0;
2843 
2844  myTiles = 0;
2845 
2846  mySharedMem = 0;
2847  mySharedMemView = 0;
2848 
2849  *this = src;
2850 }
2851 
2852 template <typename T>
2853 void
2855  const UT_JobInfo &info)
2856 {
2857  UT_ASSERT(isMatching(src));
2858  UT_VoxelArrayIterator<T> vit(this);
2859  vit.splitByTile(info);
2860  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
2861  {
2862  int i = vit.getLinearTileNum();
2863  myTiles[i] = src.myTiles[i];
2864  }
2865 }
2866 
2867 template <typename T>
2868 const UT_VoxelArray<T> &
2870 {
2871  // Paranoid code:
2872  if (&src == this)
2873  return *this;
2874 
2875  // Clear out my own data (this is done again by size, but maybe
2876  // not with resize?)
2877  deleteVoxels();
2878 
2879  myBorderScale[0] = src.myBorderScale[0];
2880  myBorderScale[1] = src.myBorderScale[1];
2881  myBorderScale[2] = src.myBorderScale[2];
2882  myBorderValue = src.myBorderValue;
2883  myBorderType = src.myBorderType;
2884 
2885  myCompressionOptions = src.myCompressionOptions;
2886 
2887  // Allocate proper size.
2888  size(src.myRes[0], src.myRes[1], src.myRes[2]);
2889 
2890  copyData(src);
2891 
2892  return *this;
2893 }
2894 
2895 template <typename T>
2896 void
2898 {
2899  delete [] myTiles;
2900  myTiles = 0;
2901 
2902  myTileRes[0] = myTileRes[1] = myTileRes[2] = 0;
2903  myRes[0] = myRes[1] = myRes[2] = 0;
2904 
2905  delete mySharedMemView;
2906  mySharedMemView = 0;
2907 
2908  delete mySharedMem;
2909  mySharedMem = 0;
2910 }
2911 
2912 template <typename T>
2913 void
2914 UT_VoxelArray<T>::size(int xres, int yres, int zres)
2915 {
2916  deleteVoxels();
2917 
2918  // Initialize our tiles.
2919  int tile_res[3];
2920  tile_res[0] = (xres + TILEMASK) >> TILEBITS;
2921  tile_res[1] = (yres + TILEMASK) >> TILEBITS;
2922  tile_res[2] = (zres + TILEMASK) >> TILEBITS;
2923 
2924  exint ntiles = ((exint)tile_res[0]) * tile_res[1] * tile_res[2];
2925  if (ntiles)
2926  {
2927  myTiles = new UT_VoxelTile<T> [ntiles];
2928 
2929  // Only set resolutions *AFTER* we successfully allocate
2930  myRes[0] = xres;
2931  myRes[1] = yres;
2932  myRes[2] = zres;
2933 
2934  myInvRes = 1;
2935  if (xres)
2936  myInvRes.x() = 1.0f / myRes[0];
2937  if (yres)
2938  myInvRes.y() = 1.0f / myRes[1];
2939  if (zres)
2940  myInvRes.z() = 1.0f / myRes[2];
2941 
2942  myTileRes[0] = tile_res[0];
2943  myTileRes[1] = tile_res[1];
2944  myTileRes[2] = tile_res[2];
2945 
2946  int i = 0;
2947  for (int tz = 0; tz < myTileRes[2]; tz++)
2948  {
2949  int zr;
2950  if (tz < myTileRes[2]-1)
2951  zr = TILESIZE;
2952  else
2953  zr = zres - tz * TILESIZE;
2954 
2955  for (int ty = 0; ty < myTileRes[1]; ty++)
2956  {
2957  int yr;
2958  if (ty < myTileRes[1]-1)
2959  yr = TILESIZE;
2960  else
2961  yr = yres - ty * TILESIZE;
2962 
2963  int tx, xr = TILESIZE;
2964  for (tx = 0; tx < myTileRes[0]-1; tx++)
2965  {
2966  myTiles[i].setRes(xr, yr, zr);
2967 
2968  i++;
2969  }
2970  xr = xres - tx * TILESIZE;
2971  myTiles[i].setRes(xr, yr, zr);
2972  i++;
2973  }
2974  }
2975  }
2976  else
2977  myTiles = 0;
2978 }
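
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): how size() carves a
// resolution into tiles.  It assumes the usual power-of-two tile edge
// (16 is used here purely for illustration); every tile is full-sized
// except possibly the last one along each axis, so a 35x16x1 array gets a
// 3x1x1 tile grid whose last x-tile is only 3 voxels wide.
#include <cstdio>

int main()
{
    const int TILESIZE = 16, TILEBITS = 4, TILEMASK = TILESIZE - 1;  // assumed values
    const int res[3] = { 35, 16, 1 };

    for (int axis = 0; axis < 3; axis++)
    {
        int ntiles = (res[axis] + TILEMASK) >> TILEBITS;
        for (int t = 0; t < ntiles; t++)
        {
            int tres = (t < ntiles - 1) ? TILESIZE : res[axis] - t * TILESIZE;
            std::printf("axis %d, tile %d: %d voxels\n", axis, t, tres);
        }
    }
    return 0;
}
// ----------------------------------------------------------------------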
2979 
2980 template <typename T>
2981 void
2983 {
2984  // Only resize if our res is different.
2985  if (src.getXRes() != getXRes() ||
2986  src.getYRes() != getYRes() ||
2987  src.getZRes() != getZRes())
2988  {
2989  size(src.getXRes(), src.getYRes(), src.getZRes());
2990  }
2991 
2992  // Update border conditions.
2993  myBorderType = src.myBorderType;
2994  myBorderScale[0] = src.myBorderScale[0];
2995  myBorderScale[1] = src.myBorderScale[1];
2996  myBorderScale[2] = src.myBorderScale[2];
2997  myBorderValue = src.myBorderValue;
2998 
2999  // Match our compression tolerances
3000  myCompressionOptions = src.myCompressionOptions;
3001 }
3002 
3003 template <typename T>
3004 int64
3006 {
3007  int64 mem = inclusive ? sizeof(*this) : 0;
3008 
3009  int ntiles = numTiles();
3010  for (int i = 0; i < ntiles; i++)
3011  mem += myTiles[i].getMemoryUsage(true);
3012 
3013  if (mySharedMem)
3014  mem += mySharedMem->getMemoryUsage(true);
3015 
3016  if (mySharedMemView)
3017  mem += mySharedMemView->getMemoryUsage(true);
3018 
3019  return mem;
3020 }
3021 
3022 template <typename T>
3023 void
3025 {
3026  UT_VoxelArrayIterator<T> vit(this);
3027  // We don't want to split by tile as we update the actual tiles here,
3028  // so we would get false sharing if we interleaved.
3029  vit.setPartialRange(info.job(), info.numJobs());
3030  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
3031  {
3032  int i = vit.getLinearTileNum();
3033  myTiles[i].makeConstant(t);
3034  }
3035 }
3036 
3037 template <typename T>
3038 bool
3040 {
3041  int i, ntiles;
3042  T cval = 0;
3043  const fpreal tol = SYS_FTOLERANCE_R;
3044 
3045  ntiles = numTiles();
3046  for (i = 0; i < ntiles; i++)
3047  {
3048  if (!myTiles[i].isConstant())
3049  {
3050  return false;
3051  }
3052 
3053  if (!i)
3054  {
3055  // First tile, get the constant value.
3056  cval = myTiles[i].rawConstVal();
3057  }
3058  else
3059  {
3060  // See if we have deviated too much.
3061  if (UT_VoxelTile<T>::dist(cval, myTiles[i].rawConstVal()) > tol)
3062  {
3063  return false;
3064  }
3065  }
3066  }
3067 
3068  // All tiles are both constant and within tolerance of each
3069  // other. Write out our constant value and return true.
3070  if (t)
3071  *t = cval;
3072 
3073  return true;
3074 }
3075 
3076 template <typename T>
3077 bool
3079 {
3080  int i, ntiles;
3081 
3082  ntiles = numTiles();
3083  for (i = 0; i < ntiles; i++)
3084  {
3085  if (myTiles[i].hasNan())
3086  {
3087  return true;
3088  }
3089  }
3090 
3091  return false;
3092 }
3093 
3094 template <typename T>
3095 T
3097 {
3098  // We go from the position in the unit cube into the index.
3099  // The center of cells must map to the exact integer indices.
3100  pos.x() *= myRes[0];
3101  pos.y() *= myRes[1];
3102  pos.z() *= myRes[2];
3103  pos.x() -= 0.5;
3104  pos.y() -= 0.5;
3105  pos.z() -= 0.5;
3106 
3107  return lerpVoxelCoord(pos);
3108 }
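
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): the unit-cube lookup maps
// a position so that cell centers land exactly on integer indices.  With
// res = 10, pos = 0.55 becomes 0.55 * 10 - 0.5 = 5.0, i.e. the center of
// voxel 5 with a zero fractional weight.  std::floor stands in for the
// SYSfastSplitFloat() used by the real code in the common in-range case.
#include <cmath>
#include <cstdio>

int main()
{
    const int   res = 10;
    const float pos = 0.55f;                 // unit-cube coordinate

    float coord = pos * res - 0.5f;          // voxel-space coordinate
    int   idx   = int(std::floor(coord));    // integer voxel index
    float frac  = coord - float(idx);        // lerp weight toward idx + 1

    std::printf("voxel %d, fraction %g\n", idx, frac);   // voxel 5, fraction 0
    return 0;
}
// ----------------------------------------------------------------------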
3109 
3110 template <typename T>
3111 T
3113 {
3114  int x, y, z;
3115  // Yes, these have to be 32 because split float requires 32!
3116  fpreal32 fx, fy, fz;
3117 
3118  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3119 
3120  return lerpVoxel(x, y, z, fx, fy, fz);
3121 }
3122 
3123 template <typename T>
3124 template <int AXIS2D>
3125 T
3127 {
3128  int x, y, z;
3129  // Yes, these have to be 32 because split float requires 32!
3130  fpreal32 fx, fy, fz;
3131 
3132  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3133 
3134  return lerpVoxelAxis<AXIS2D>(x, y, z, fx, fy, fz);
3135 }
3136 
3137 template <typename T>
3138 T
3139 UT_VoxelArray<T>::lerpVoxel(int x, int y, int z,
3140  float fx, float fy, float fz) const
3141 {
3142  // Do trilinear interpolation.
3143  T vx, vx1, vy, vy1, vz;
3144 
3145  // Optimization for common cases (values are within the voxel range and
3146  // are all within the same tile)
3147  if ( !((x | y | z) < 0) &&
3148  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3149 
3150  // (x > 0) && (y > 0) && (z > 0) &&
3151  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3152  // report in bounds!
3153  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3154  {
3155  int xm, ym, zm;
3156 
3157  xm = x & TILEMASK;
3158  ym = y & TILEMASK;
3159  zm = z & TILEMASK;
3160 
3161  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3162  {
3163  const UT_VoxelTile<T> *tile =
3164  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3165 
3166  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
3167  }
3168  else
3169  {
3170  // We cross tile boundaries but we remain within
3171  // the voxel grid. We can thus avoid any bounds checking
3172  // and use operator() rather than getValue.
3173 
3174  // Lerp x:x+1, y, z
3175  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3176  (*this)(x+1, y, z),
3177  fx);
3178  // Lerp x:x+1, y+1, z
3179  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3180  (*this)(x+1, y+1, z),
3181  fx);
3182  // Lerp x:x+1, y:y+1, z
3183  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3184 
3185  // Lerp x:x+1, y, z+1
3186  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3187  (*this)(x+1, y, z+1),
3188  fx);
3189  // Lerp x:x+1, y+1, z+1
3190  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3191  (*this)(x+1, y+1, z+1),
3192  fx);
3193 
3194  // Lerp x:x+1, y:y+1, z+1
3195  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3196 
3197  // Lerp x:x+1, y:y+1, z:z+1
3198  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3199  }
3200  }
3201  else
3202  {
3203  // Lerp x:x+1, y, z
3204  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3205  getValue(x+1, y, z),
3206  fx);
3207  // Lerp x:x+1, y+1, z
3208  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3209  getValue(x+1, y+1, z),
3210  fx);
3211 
3212  // Lerp x:x+1, y:y+1, z
3213  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3214 
3215  // Lerp x:x+1, y, z+1
3216  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3217  getValue(x+1, y, z+1),
3218  fx);
3219  // Lerp x:x+1, y+1, z+1
3220  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3221  getValue(x+1, y+1, z+1),
3222  fx);
3223 
3224  // Lerp x:x+1, y:y+1, z+1
3225  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3226 
3227  // Lerp x:x+1, y:y+1, z:z+1
3228  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3229  }
3230 
3231  return vz;
3232 }
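
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): the fast-path test above
// verifies 0 <= v and v < res - 1 on all three axes with two sign-bit
// checks.  (x | y | z) is negative iff any coordinate is negative, and an
// AND of three values is negative only if every one of them is, so each
// (v - res + 1) must be negative, which guarantees v and v + 1 are both
// valid indices.
#include <cassert>

static bool
lerpIndicesInRange(int x, int y, int z, int resx, int resy, int resz)
{
    return !((x | y | z) < 0) &&
           (((x - resx + 1) & (y - resy + 1) & (z - resz + 1)) < 0);
}

int main()
{
    assert( lerpIndicesInRange( 0, 0, 0, 16, 16, 16));  // lower corner is fine
    assert(!lerpIndicesInRange(15, 0, 0, 16, 16, 16));  // x + 1 would leave the grid
    assert(!lerpIndicesInRange(-1, 3, 3, 16, 16, 16));  // negative index
    return 0;
}
// ----------------------------------------------------------------------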
3233 
3234 template <typename T>
3235 template <int AXIS2D>
3236 T
3238  float fx, float fy, float fz) const
3239 {
3240  // Do bilinear interpolation.
3241  T vx, vx1, vy, vy1, vz;
3242 
3243  int lesscomp = 0, greatercomp = -1;
3244 
3245  if (AXIS2D != 0)
3246  {
3247  lesscomp |= x;
3248  greatercomp &= (x - myRes[0]+1);
3249  }
3250  if (AXIS2D != 1)
3251  {
3252  lesscomp |= y;
3253  greatercomp &= (y - myRes[1]+1);
3254  }
3255  if (AXIS2D != 2)
3256  {
3257  lesscomp |= z;
3258  greatercomp &= (z - myRes[2]+1);
3259  }
3260 
3261  // Optimization for common cases (values are within the voxel range and
3262  // are all within the same tile)
3263  if ( !(lesscomp < 0) && (greatercomp < 0) )
3264  {
3265  int xm, ym, zm;
3266 
3267  xm = x & TILEMASK;
3268  ym = y & TILEMASK;
3269  zm = z & TILEMASK;
3270 
3271  if ((AXIS2D == 0 || xm != TILEMASK) &&
3272  (AXIS2D == 1 || ym != TILEMASK) &&
3273  (AXIS2D == 2 || zm != TILEMASK))
3274  {
3275  const UT_VoxelTile<T> *tile =
3276  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3277  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3278  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3279 
3280  vz = tile->template lerpAxis<AXIS2D>(xm, ym, zm, fx, fy, fz);
3281  }
3282  else
3283  {
3284  // We cross tile boundaries but we remain within
3285  // the voxel grid. We can thus avoid any bounds checking
3286  // and use operator() rather than getValue.
3287 
3288  // Lerp x:x+1, y, z
3289  if (AXIS2D != 0)
3290  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3291  (*this)(x+1, y, z),
3292  fx);
3293  else
3294  vx = (*this)(x, y, z);
3295 
3296  if (AXIS2D != 1)
3297  {
3298  // Lerp x:x+1, y+1, z
3299  if (AXIS2D != 0)
3300  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3301  (*this)(x+1, y+1, z),
3302  fx);
3303  else
3304  vx1 = (*this)(x, y+1, z);
3305  // Lerp x:x+1, y:y+1, z
3306  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3307  }
3308  else
3309  vy = vx;
3310 
3311  if (AXIS2D != 2)
3312  {
3313  // Lerp x:x+1, y, z+1
3314  if (AXIS2D != 0)
3315  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3316  (*this)(x+1, y, z+1),
3317  fx);
3318  else
3319  vx = (*this)(x, y, z+1);
3320 
3321  if (AXIS2D != 1)
3322  {
3323  // Lerp x:x+1, y+1, z+1
3324  if (AXIS2D != 0)
3325  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3326  (*this)(x+1, y+1, z+1),
3327  fx);
3328  else
3329  vx1 = (*this)(x, y+1, z+1);
3330  // Lerp x:x+1, y:y+1, z+1
3331  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3332  }
3333  else
3334  vy1 = vx;
3335 
3336  // Lerp x:x+1, y:y+1, z:z+1
3337  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3338  }
3339  else
3340  vz = vy;
3341  }
3342  }
3343  else
3344  {
3345  // Lerp x:x+1, y, z
3346  if (AXIS2D != 0)
3347  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3348  getValue(x+1, y, z),
3349  fx);
3350  else
3351  vx = getValue(x, y, z);
3352 
3353  if (AXIS2D != 1)
3354  {
3355  // Lerp x:x+1, y+1, z
3356  if (AXIS2D != 0)
3357  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3358  getValue(x+1, y+1, z),
3359  fx);
3360  else
3361  vx1 = getValue(x, y+1, z);
3362  // Lerp x:x+1, y:y+1, z
3363  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3364  }
3365  else
3366  vy = vx;
3367 
3368  if (AXIS2D != 2)
3369  {
3370  // Lerp x:x+1, y, z+1
3371  if (AXIS2D != 0)
3372  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3373  getValue(x+1, y, z+1),
3374  fx);
3375  else
3376  vx = getValue(x, y, z+1);
3377 
3378  if (AXIS2D != 1)
3379  {
3380  // Lerp x:x+1, y+1, z+1
3381  if (AXIS2D != 0)
3382  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3383  getValue(x+1, y+1, z+1),
3384  fx);
3385  else
3386  vx1 = getValue(x, y+1, z+1);
3387  // Lerp x:x+1, y:y+1, z+1
3388  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3389  }
3390  else
3391  vy1 = vx;
3392 
3393  // Lerp x:x+1, y:y+1, z:z+1
3394  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3395  }
3396  else
3397  vz = vy;
3398  }
3399 
3400  return vz;
3401 }
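
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): with AXIS2D fixed at
// compile time the interpolation above collapses to a bilinear (or lower)
// lerp, because the flattened axis contributes a single sample.  This
// stand-alone helper shows the AXIS2D == 2 case, i.e. a field that is
// flat in z.
static float
bilinearFlatZ(float s00, float s10, float s01, float s11, float fx, float fy)
{
    float vx  = s00 + (s10 - s00) * fx;     // lerp along x at y
    float vx1 = s01 + (s11 - s01) * fx;     // lerp along x at y + 1
    return vx + (vx1 - vx) * fy;            // lerp along y
}
// ----------------------------------------------------------------------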
3402 
3403 template <typename T>
3404 void
3406  UT_Vector3F pos) const
3407 {
3408  // We go from the position in the unit cube into the index.
3409  // The center of cells must map to the exact integer indices.
3410  pos.x() *= myRes[0];
3411  pos.y() *= myRes[1];
3412  pos.z() *= myRes[2];
3413  pos.x() -= 0.5;
3414  pos.y() -= 0.5;
3415  pos.z() -= 0.5;
3416 
3417  lerpVoxelCoordMinMax(lerp, lmin, lmax, pos);
3418 }
3419 
3420 template <typename T>
3421 void
3423  UT_Vector3F pos) const
3424 {
3425  int x, y, z;
3426  // Yes, these have to be 32 because split float requires 32!
3427  fpreal32 fx, fy, fz;
3428 
3429  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3430 
3431  lerpVoxelMinMax(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3432 }
3433 
3434 
3435 template <typename T>
3436 template <int AXIS2D>
3437 void
3439  UT_Vector3F pos) const
3440 {
3441  int x, y, z;
3442  // Yes, these have to be 32 because split float requires 32!
3443  fpreal32 fx, fy, fz;
3444 
3445  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3446 
3447  lerpVoxelMinMaxAxis<AXIS2D>(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3448 }
3449 
3450 
3451 template <typename T>
3452 void
3454  T &lerp, T &smin, T &smax,
3455  int x, int y, int z,
3456  float fx, float fy, float fz) const
3457 {
3458  T samples[8];
3459 
3460  if (extractSample(x, y, z, samples))
3461  {
3462  lerp = smin = smax = samples[0];
3463  return;
3464  }
3465 
3466  lerp = lerpSample(samples, fx, fy, fz);
3467 
3468  T smin1, smax1;
3469 
3470  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3471  smin, smax);
3472  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3473  smin1, smax1);
3474 
3475  smin = SYSmin(smin, smin1);
3476  smax = SYSmax(smax, smax1);
3477 }
3478 
3479 template <typename T>
3480 template <int AXIS2D>
3481 void
3483  T &lerp, T &smin, T &smax,
3484  int x, int y, int z,
3485  float fx, float fy, float fz) const
3486 {
3487  T samples[8];
3488 
3489  if (extractSampleAxis<AXIS2D>(x, y, z, samples))
3490  {
3491  lerp = smin = smax = samples[0];
3492  return;
3493  }
3494 
3495  lerp = lerpSampleAxis<AXIS2D>(samples, fx, fy, fz);
3496 
3497  if (AXIS2D == 0)
3498  SYSminmax(samples[0], samples[2], samples[4], samples[6],
3499  smin, smax);
3500  else if (AXIS2D == 1)
3501  SYSminmax(samples[0], samples[1], samples[4], samples[5],
3502  smin, smax);
3503  else if (AXIS2D == 2)
3504  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3505  smin, smax);
3506  else
3507  {
3508  T smin1, smax1;
3509 
3510  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3511  smin, smax);
3512  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3513  smin1, smax1);
3514 
3515  smin = SYSmin(smin, smin1);
3516  smax = SYSmax(smax, smax1);
3517  }
3518 }
3519 
3520 template <typename T>
3521 bool
3523  T *samples) const
3524 {
3525  // Optimization for common cases (values are within the voxel range and
3526  // are all within the same tile)
3527  if ( !((x | y | z) < 0) &&
3528  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3529 
3530  // (x > 0) && (y > 0) && (z > 0) &&
3531  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3532  // report in bounds!
3533  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3534  {
3535  int xm, ym, zm;
3536 
3537  xm = x & TILEMASK;
3538  ym = y & TILEMASK;
3539  zm = z & TILEMASK;
3540 
3541  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3542  {
3543  const UT_VoxelTile<T> *tile =
3544  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3545 
3546  return tile->extractSample(xm, ym, zm, samples);
3547  }
3548  else
3549  {
3550  // We cross tile boundaries but we remain within
3551  // the voxel grid. We can thus avoid any bounds checking
3552  // and use operator() rather than getValue.
3553  samples[0] = (*this)(x, y, z);
3554  samples[1] = (*this)(x+1, y, z);
3555  samples[2+0] = (*this)(x, y+1, z);
3556  samples[2+1] = (*this)(x+1, y+1, z);
3557  samples[4+0] = (*this)(x, y, z+1);
3558  samples[4+1] = (*this)(x+1, y, z+1);
3559  samples[4+2+0] = (*this)(x, y+1, z+1);
3560  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3561  }
3562  }
3563  else
3564  {
3565  samples[0] = getValue(x, y, z);
3566  samples[1] = getValue(x+1, y, z);
3567  samples[2+0] = getValue(x, y+1, z);
3568  samples[2+1] = getValue(x+1, y+1, z);
3569  samples[4+0] = getValue(x, y, z+1);
3570  samples[4+1] = getValue(x+1, y, z+1);
3571  samples[4+2+0] = getValue(x, y+1, z+1);
3572  samples[4+2+1] = getValue(x+1, y+1, z+1);
3573  }
3574 
3575  return false;
3576 }
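
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): extractSample() fills the
// eight cell corners in x-fastest order, so bit 0 of the sample index is
// the +x offset, bit 1 the +y offset and bit 2 the +z offset.
#include <cstdio>

int main()
{
    for (int i = 0; i < 8; i++)
    {
        int dx = (i >> 0) & 1;
        int dy = (i >> 1) & 1;
        int dz = (i >> 2) & 1;
        std::printf("samples[%d] = value(x+%d, y+%d, z+%d)\n", i, dx, dy, dz);
    }
    return 0;
}
// ----------------------------------------------------------------------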
3577 
3578 
3579 template <typename T>
3580 template <int AXIS2D>
3581 bool
3583  T *samples) const
3584 {
3585  // Optimization for common cases (values are within the voxel range and
3586  // are all within the same tile)
3587  int lesscomp = 0, greatercomp = -1;
3588 
3589  if (AXIS2D != 0)
3590  {
3591  lesscomp |= x;
3592  greatercomp &= (x - myRes[0]+1);
3593  }
3594  if (AXIS2D != 1)
3595  {
3596  lesscomp |= y;
3597  greatercomp &= (y - myRes[1]+1);
3598  }
3599  if (AXIS2D != 2)
3600  {
3601  lesscomp |= z;
3602  greatercomp &= (z - myRes[2]+1);
3603  }
3604 
3605  // Optimization for common cases (values are within the voxel range and
3606  // are all within the same tile)
3607  if ( !(lesscomp < 0) && (greatercomp < 0) )
3608  {
3609  int xm, ym, zm;
3610 
3611  xm = x & TILEMASK;
3612  ym = y & TILEMASK;
3613  zm = z & TILEMASK;
3614 
3615  if ((AXIS2D == 0 || xm != TILEMASK) &&
3616  (AXIS2D == 1 || ym != TILEMASK) &&
3617  (AXIS2D == 2 || zm != TILEMASK))
3618  {
3619  const UT_VoxelTile<T> *tile =
3620  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3621  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3622  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3623 
3624  return tile->template extractSampleAxis<AXIS2D>(xm, ym, zm, samples);
3625  }
3626  else
3627  {
3628  // We cross tile boundaries but we remain within
3629  // the voxel grid. We can thus avoid any bounds checking
3630  // and use operator() rather than getValue.
3631  samples[0] = (*this)(x, y, z);
3632  if (AXIS2D != 0)
3633  samples[1] = (*this)(x+1, y, z);
3634  if (AXIS2D != 1)
3635  {
3636  samples[2+0] = (*this)(x, y+1, z);
3637  if (AXIS2D != 0)
3638  samples[2+1] = (*this)(x+1, y+1, z);
3639  }
3640  if (AXIS2D != 2)
3641  {
3642  samples[4+0] = (*this)(x, y, z+1);
3643  if (AXIS2D != 0)
3644  samples[4+1] = (*this)(x+1, y, z+1);
3645  if (AXIS2D != 1)
3646  {
3647  samples[4+2+0] = (*this)(x, y+1, z+1);
3648  if (AXIS2D != 0)
3649  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3650  }
3651  }
3652  }
3653  }
3654  else
3655  {
3656  samples[0] = getValue(x, y, z);
3657  if (AXIS2D != 0)
3658  samples[1] = getValue(x+1, y, z);
3659  if (AXIS2D != 1)
3660  {
3661  samples[2+0] = getValue(x, y+1, z);
3662  if (AXIS2D != 0)
3663  samples[2+1] = getValue(x+1, y+1, z);
3664  }
3665  if (AXIS2D != 2)
3666  {
3667  samples[4+0] = getValue(x, y, z+1);
3668  if (AXIS2D != 0)
3669  samples[4+1] = getValue(x+1, y, z+1);
3670  if (AXIS2D != 1)
3671  {
3672  samples[4+2+0] = getValue(x, y+1, z+1);
3673  if (AXIS2D != 0)
3674  samples[4+2+1] = getValue(x+1, y+1, z+1);
3675  }
3676  }
3677  }
3678 
3679  return false;
3680 }
3681 
3682 template <typename T>
3683 bool
3685  T *samples) const
3686 {
3687  // Optimization for common cases (values are within the voxel range and
3688  // are all within the same tile)
3689  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3690  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3691 
3692  // (x > 0) && (y > 0) && (z > 0) &&
3693  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3694  // report in bounds!
3695  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3696  {
3697  int xm, ym, zm;
3698 
3699  xm = x & TILEMASK;
3700  ym = y & TILEMASK;
3701  zm = z & TILEMASK;
3702 
3703  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3704  {
3705  const UT_VoxelTile<T> *tile =
3706  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3707 
3708  return tile->extractSamplePlus(xm, ym, zm, samples);
3709  }
3710  else
3711  {
3712  // We cross tile boundaries but we remain within
3713  // the voxel grid. We can thus avoid any bounds checking
3714  // and use operator() rather than getValue.
3715  samples[0] = (*this)(x-1, y, z);
3716  samples[1] = (*this)(x+1, y, z);
3717  samples[2+0] = (*this)(x, y-1, z);
3718  samples[2+1] = (*this)(x, y+1, z);
3719  samples[4+0] = (*this)(x, y, z-1);
3720  samples[4+1] = (*this)(x, y, z+1);
3721  samples[6] = (*this)(x, y, z);
3722  }
3723  }
3724  else
3725  {
3726  samples[0] = getValue(x-1, y, z);
3727  samples[1] = getValue(x+1, y, z);
3728  samples[2+0] = getValue(x, y-1, z);
3729  samples[2+1] = getValue(x, y+1, z);
3730  samples[4+0] = getValue(x, y, z-1);
3731  samples[4+1] = getValue(x, y, z+1);
3732  samples[6] = getValue(x, y, z);
3733  }
3734 
3735  return false;
3736 }
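
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): extractSamplePlus() returns
// the six axis neighbours (-x, +x, -y, +y, -z, +z) followed by the centre
// value, the natural layout for a central-difference gradient.  The
// voxelsize parameter is a hypothetical per-axis spacing supplied by the
// caller, not something provided here.
static void
centralGradient(const float samples[7], const float voxelsize[3], float grad[3])
{
    grad[0] = (samples[1] - samples[0]) / (2.0f * voxelsize[0]);  // d/dx
    grad[1] = (samples[3] - samples[2]) / (2.0f * voxelsize[1]);  // d/dy
    grad[2] = (samples[5] - samples[4]) / (2.0f * voxelsize[2]);  // d/dz
    // samples[6], the centre value, is unused by the gradient but is handy
    // for stencils such as the Laplacian.
}
// ----------------------------------------------------------------------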
3737 
3738 template <typename T>
3739 bool
3741  T *samples) const
3742 {
3743  // Optimization for common cases (values are within the voxel range and
3744  // are all within the same tile)
3745  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3746  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3747 
3748  // (x > 0) && (y > 0) && (z > 0) &&
3749  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3750  // report in bounds!
3751  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3752  {
3753  int xm, ym, zm;
3754 
3755  xm = x & TILEMASK;
3756  ym = y & TILEMASK;
3757  zm = z & TILEMASK;
3758 
3759  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3760  {
3761  const UT_VoxelTile<T> *tile =
3762  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3763 
3764  return tile->extractSampleCube(xm, ym, zm, samples);
3765  }
3766  else
3767  {
3768  // We cross tile boundaries but we remain within
3769  // the voxel grid. We can thus avoid any bounds checking
3770  // and use operator() rather than getValue.
3771  int sampidx = 0;
3772  for (int dz = -1; dz <= 1; dz++)
3773  {
3774  for (int dy = -1; dy <= 1; dy++)
3775  {
3776  for (int dx = -1; dx <= 1; dx++)
3777  {
3778  samples[sampidx++] = (*this)(x+dx, y+dy, z+dz);
3779  }
3780  }
3781  }
3782  }
3783  }
3784  else
3785  {
3786  int sampidx = 0;
3787  for (int dz = -1; dz <= 1; dz++)
3788  {
3789  for (int dy = -1; dy <= 1; dy++)
3790  {
3791  for (int dx = -1; dx <= 1; dx++)
3792  {
3793  samples[sampidx++] = getValue(x+dx, y+dy, z+dz);
3794  }
3795  }
3796  }
3797  }
3798 
3799  return false;
3800 }
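
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): extractSampleCube() fills
// the 3x3x3 neighbourhood with dx varying fastest, so offset (dx, dy, dz)
// in [-1, 1]^3 lands at index (dz + 1) * 9 + (dy + 1) * 3 + (dx + 1) and
// the centre voxel is samples[13].
#include <cstdio>

int main()
{
    int sampidx = 0;
    for (int dz = -1; dz <= 1; dz++)
        for (int dy = -1; dy <= 1; dy++)
            for (int dx = -1; dx <= 1; dx++, sampidx++)
                std::printf("samples[%2d] = value(x%+d, y%+d, z%+d)\n",
                            sampidx, dx, dy, dz);
    return 0;
}
// ----------------------------------------------------------------------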
3801 template <typename T>
3802 T
3804  float fx, float fy, float fz) const
3805 {
3806 #if 1
3807  // Do trilinear interpolation.
3808  T vx, vx1, vy, vy1, vz;
3809 
3810  // Lerp x:x+1, y, z
3811  vx = UT_VoxelTile<T>::lerpValues(samples[0],
3812  samples[1],
3813  fx);
3814  // Lerp x:x+1, y+1, z
3815  vx1= UT_VoxelTile<T>::lerpValues(samples[2],
3816  samples[2+1],
3817  fx);
3818  // Lerp x:x+1, y:y+1, z
3819  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3820 
3821  // Lerp x:x+1, y, z+1
3822  vx = UT_VoxelTile<T>::lerpValues(samples[4],
3823  samples[4+1],
3824  fx);
3825  // Lerp x:x+1, y+1, z+1
3826  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2],
3827  samples[4+2+1],
3828  fx);
3829 
3830  // Lerp x:x+1, y:y+1, z+1
3831  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3832 
3833  // Lerp x:x+1, y:y+1, z:z+1
3834  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3835 
3836  return vz;
3837 #else
3838  v4uf a, b, vfx, vfy, vfz;
3839 
3840  a = v4uf(&samples[0]);
3841  b = v4uf(&samples[4]);
3842 
3843  vfx = v4uf(fx);
3844  vfy = v4uf(fy);
3845  vfz = v4uf(fz);
3846 
3847  b -= a;
3848  a = madd(b, fz, a);
3849 
3850  b = a.swizzle<2, 3, 0, 1>();
3851  b -= a;
3852  a = madd(b, fy, a);
3853 
3854  b = a.swizzle<1, 2, 3, 0>();
3855  b -= a;
3856  a = madd(b, fx, a);
3857 
3858  return a[0];
3859 #endif
3860 }
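
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): the cascade of lerps above
// is the usual trilinear expansion.  An equivalent closed form weights
// each corner by the product of its per-axis weights, using the same
// sample order as extractSample() (bit 0 = +x, bit 1 = +y, bit 2 = +z).
static float
trilinear(const float s[8], float fx, float fy, float fz)
{
    float result = 0.0f;
    for (int i = 0; i < 8; i++)
    {
        float wx = (i & 1) ? fx : 1.0f - fx;
        float wy = (i & 2) ? fy : 1.0f - fy;
        float wz = (i & 4) ? fz : 1.0f - fz;
        result += wx * wy * wz * s[i];
    }
    return result;
}
// ----------------------------------------------------------------------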
3861 
3862 template <typename T>
3863 template <int AXIS2D>
3864 T
3866  float fx, float fy, float fz) const
3867 {
3868  // Do trilinear interpolation.
3869  T vx, vx1, vy, vy1, vz;
3870 
3871  // Lerp x:x+1, y, z
3872  if (AXIS2D != 0)
3873  vx = UT_VoxelTile<T>::lerpValues(samples[0], samples[1], fx);
3874  else
3875  vx = samples[0];
3876 
3877  if (AXIS2D != 1)
3878  {
3879  // Lerp x:x+1, y+1, z
3880  if (AXIS2D != 0)
3881  vx1= UT_VoxelTile<T>::lerpValues(samples[2], samples[2+1], fx);
3882  else
3883  vx1= samples[2];
3884 
3885  // Lerp x:x+1, y:y+1, z
3886  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3887  }
3888  else
3889  vy = vx;
3890 
3891  if (AXIS2D != 2)
3892  {
3893  // Lerp x:x+1, y, z+1
3894  if (AXIS2D != 0)
3895  vx = UT_VoxelTile<T>::lerpValues(samples[4], samples[4+1], fx);
3896  else
3897  vx = samples[4];
3898 
3899  if (AXIS2D != 1)
3900  {
3901  // Lerp x:x+1, y+1, z+1
3902  if (AXIS2D != 0)
3903  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2], samples[4+2+1], fx);
3904  else
3905  vx1= samples[4+2];
3906 
3907  // Lerp x:x+1, y:y+1, z+1
3908  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3909  }
3910  else
3911  vy1 = vx;
3912 
3913  // Lerp x:x+1, y:y+1, z:z+1
3914  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3915  }
3916  else
3917  vz = vy;
3918 
3919  return vz;
3920 }
3921 
3922 template <typename T>
3923 T
3925 {
3926  int x, y, z;
3927  // Yes, these have to be 32 because split float requires 32!
3928  fpreal32 fx, fy, fz;
3929 
3930  // We go from the position in the unit cube into the index.
3931  // The center of cells must map to the exact integer indices.
3932  pos.x() *= myRes[0];
3933  pos.y() *= myRes[1];
3934  pos.z() *= myRes[2];
3935  pos.x() -= 0.5;
3936  pos.y() -= 0.5;
3937  pos.z() -= 0.5;
3938 
3939  // Determine integer & fractional components.
3940  fx = pos.x();
3941  SYSfastSplitFloat(fx, x);
3942  fy = pos.y();
3943  SYSfastSplitFloat(fy, y);
3944  fz = pos.z();
3945  SYSfastSplitFloat(fz, z);
3946 
3947  // Do trilinear interpolation.
3948  T vx, vx1, vy, vy1, vz;
3949 
3950  // NOTE:
3951  // If you get a crash reading one of these getValues, note
3952  // that if you are reading from the same voxel array that you
3953  // are writing to, and doing so in a multi-threaded, tile-by-tile
3954  // approach, just because your positions match doesn't mean you
3955  // won't access neighbouring tiles. To avoid this, perform
3956  // the obvious optimization of checking for aligned tiles
3957  // and using direct indices.
3958  // (I have avoided using if (!fx && !fy && !fz) as the test
3959  // as that is both potentially inaccurate (round off in conversion
3960  // may mess us up) and said optimization is a useful one anyway)
3961 
3962  // Optimization for common cases (values are within the voxel range and
3963  // are all within the same tile)
3964  if ( !((x | y | z) < 0) &&
3965  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3966  // if ( (x > 0) && (y > 0) && (z > 0) &&
3967  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3968  // report in bounds!
3969  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3970  {
3971  int xm, ym, zm;
3972 
3973  xm = x & TILEMASK;
3974  ym = y & TILEMASK;
3975  zm = z & TILEMASK;
3976 
3977  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3978  {
3979  const UT_VoxelTile<T> *tile =
3980  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3981 
3982  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
3983  }
3984  else
3985  {
3986  // We cross tile boundaries but we remain within
3987  // the voxel grid. We can thus avoid any bounds checking
3988  // and use operator() rather than getValue.
3989 
3990  // Lerp x:x+1, y, z
3991  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3992  (*this)(x+1, y, z),
3993  fx);
3994  // Lerp x:x+1, y+1, z
3995  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3996  (*this)(x+1, y+1, z),
3997  fx);
3998  // Lerp x:x+1, y:y+1, z
3999  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4000 
4001  // Lerp x:x+1, y, z+1
4002  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
4003  (*this)(x+1, y, z+1),
4004  fx);
4005  // Lerp x:x+1, y+1, z+1
4006  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
4007  (*this)(x+1, y+1, z+1),
4008  fx);
4009 
4010  // Lerp x:x+1, y:y+1, z+1
4011  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4012 
4013  // Lerp x:x+1, y:y+1, z:z+1
4014  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4015  }
4016  }
4017  else
4018  {
4019  // Lerp x:x+1, y, z
4020  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
4021  getValue(x+1, y, z),
4022  fx);
4023  // Lerp x:x+1, y+1, z
4024  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
4025  getValue(x+1, y+1, z),
4026  fx);
4027 
4028  // Lerp x:x+1, y:y+1, z
4029  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4030 
4031  // Lerp x:x+1, y, z+1
4032  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
4033  getValue(x+1, y, z+1),
4034  fx);
4035  // Lerp x:x+1, y+1, z+1
4036  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
4037  getValue(x+1, y+1, z+1),
4038  fx);
4039 
4040  // Lerp x:x+1, y:y+1, z+1
4041  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4042 
4043  // Lerp x:x+1, y:y+1, z:z+1
4044  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4045  }
4046 
4047  return vz;
4048 }
4049 
4050 #if 0
4051 template <typename T>
4052 T
4054 {
4055  int x, y, z;
4056 
4057  v4ui idx;
4058 
4059  // We go from the position in the unit cube into the index.
4060  // The center of cells must map to the exact integer indices.
4061  pos *= v4uf((float) myRes[0], (float) myRes[1], (float) myRes[2], 0.0f);
4062 
4063  // Because we use truncation we'll get inaccurate results
4064  // at the zero boundary, we thus bump up by two
4065  pos += 1.5;
4066 
4067  idx = pos.splitFloat();
4068 
4069  // And counteract that bump
4070  idx -= 2;
4071 
4072  x = idx[0];
4073  y = idx[1];
4074  z = idx[2];
4075 
4076  // Do trilinear interpolation.
4077  // A | B
4078  // 0 +z +y +yz | +x +xz +xy +xyz
4079 
4080  v4uf a, b, fx, fy, fz;
4081 
4082  int xm, ym, zm;
4083 
4084  xm = x & TILEMASK;
4085  ym = y & TILEMASK;
4086  zm = z & TILEMASK;
4087 
4088  // Optimization for common case (values are within the voxel range and
4089  // are all within the same tile)
4090  if ( (x > 0) & (y > 0) & (z > 0) &
4091  (x < myRes[0]-1) & (y < myRes[1]-1) & (z < myRes[2]-1) &
4092  (xm != TILEMASK) & (ym != TILEMASK) & (zm != TILEMASK) )
4093 /*
4094  if (isValidIndex(x, y, z) && isValidIndex(x+1, y+1, z+1) &&
4095  (x & TILEMASK) != TILEMASK &&
4096  (y & TILEMASK) != TILEMASK &&
4097  (z & TILEMASK) != TILEMASK)
4098 */
4099  {
4100  const UT_VoxelTile<T> *tile =
4101  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
4102 
4103  return tile->lerp(pos, xm, ym, zm);
4104  }
4105  else
4106  {
4107  a = v4uf( getValue(x, y, z),
4108  getValue(x, y, z+1),
4109  getValue(x, y+1, z),
4110  getValue(x, y+1, z+1) );
4111  b = v4uf( getValue(x+1, y, z),
4112  getValue(x+1, y, z+1),
4113  getValue(x+1, y+1, z),
4114  getValue(x+1, y+1, z+1) );
4115  }
4116 
4117  fx = pos.swizzle<0, 0, 0, 0>();
4118  fy = pos.swizzle<1, 1, 1, 1>();
4119  fz = pos.swizzle<2, 2, 2, 2>();
4120 
4121  b -= a;
4122  a = madd(b, fx, a);
4123 
4124  b = a.swizzle<2, 3, 0, 1>();
4125  b -= a;
4126  a = madd(b, fy, a);
4127 
4128  b = a.swizzle<1, 2, 3, 0>();
4129  b -= a;
4130  a = madd(b, fz, a);
4131 
4132  return a[0];
4133 }
4134 #endif
4135 
4136 static inline int
4137 firstTile(int &start, int &end, int res)
4138 {
4139  if (start < 0)
4140  {
4141  start += res;
4142  end += res;
4143  }
4144  return start;
4145 }
4146 
4147 static inline int
4148 nextTile(int &tile, int &pstart, int &start, int &end, int res)
4149 {
4150  int pend;
4151 
4152  if (pstart >= res)
4153  {
4154  pstart -= res;
4155  start -= res;
4156  end -= res;
4157  }
4158  tile = pstart >> TILEBITS;
4159  pend = SYSmin((tile+1) * TILESIZE, end, res);
4160 
4161  UT_ASSERT(pstart >= 0 && pstart < res);
4162  UT_ASSERT(pend > 0 && pend <= res);
4163  UT_ASSERT(pend-pstart > 0);
4164  UT_ASSERT(pend-pstart <= TILESIZE);
4165 
4166  return pend;
4167 }
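
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): how firstTile()/nextTile()
// walk a possibly wrapped filter window one tile-aligned span at a time.
// With res = 32, a tile edge of 16 (assumed here for illustration) and a
// window covering indices [-3, 5), the loop visits [29, 32) in tile 1 and
// then wraps around to [0, 5) in tile 0.
#include <algorithm>
#include <cstdio>

int main()
{
    const int res = 32, TILESIZE = 16, TILEBITS = 4;     // assumed tile size
    int start = -3, end = 5;

    if (start < 0) { start += res; end += res; }         // firstTile()
    int pstart = start;
    while (pstart < end)
    {
        if (pstart >= res) { pstart -= res; start -= res; end -= res; }
        int tile = pstart >> TILEBITS;
        int pend = std::min({ (tile + 1) * TILESIZE, end, res });    // nextTile()
        std::printf("tile %d: indices [%d, %d)\n", tile, pstart, pend);
        pstart = pend;
    }
    return 0;
}
// ----------------------------------------------------------------------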
4168 
4169 template <typename T>
4170 T
4172  fpreal radius, int clampaxis) const
4173 {
4174  UT_Vector3 tpos;
4175  UT_FilterWindow win[3];
4176  UT_FilterWrap wrap[3];
4177  UT_VoxelTile<T> *tile;
4178  const float *weights[3];
4179  fpreal visible;
4180  int start[3], end[3], size[3];
4181  int pstart[3], pend[3];
4182  int tx, ty, tz, i;
4183  T result;
4184 
4185  if (!myTiles)
4186  {
4187  // If the array is empty, just use the border value.
4188  return myBorderValue;
4189  }
4190 
4191  UT_FilterWrap basewrap;
4192  switch (myBorderType)
4193  {
4194  case UT_VOXELBORDER_CONSTANT: basewrap = UT_WRAP_BORDER; break;
4195  case UT_VOXELBORDER_REPEAT: basewrap = UT_WRAP_REPEAT; break;
4196  case UT_VOXELBORDER_STREAK: basewrap = UT_WRAP_CLAMP; break;
4197 
4198  // NOTE: This is incorrect!
4199  case UT_VOXELBORDER_EXTRAP: basewrap = UT_WRAP_CLAMP; break;
4200  default: basewrap = UT_WRAP_CLAMP; break;
4201  }
4202  for (i = 0; i < 3; i++)
4203  {
4204  if (i == clampaxis)
4205  wrap[i] = UT_WRAP_CLAMP;
4206  else
4207  wrap[i] = basewrap;
4208  }
4209 
4210  memset(&result, 0, sizeof(result));
4211 
4212  radius *= filter.getSupport();
4213 
4214  // Make a local copy of the position so that we can modify it.
4215  tpos = pos;
4216  visible = 1;
4217  for (i = 0; i < 3; i++)
4218  {
4219  tpos[i] = tpos[i]*myRes[i];
4220  if (!win[i].setWeights(filter, tpos[i], radius, myRes[i], wrap[i]))
4221  return result;
4222 
4223  weights[i] = win[i].getWeights();
4224  start[i] = win[i].getStart() % myRes[i];
4225  size[i] = win[i].getSize();
4226 
4227  UT_ASSERT(start[i] >= 0);
4228  UT_ASSERT(size[i] <= myRes[i]);
4229 
4230  end[i] = start[i] + size[i];
4231  visible *= win[i].getVisible();
4232  }
4233 
4234  // Accumulate filtered results
4235  pstart[2] = firstTile(start[2], end[2], myRes[2]);
4236  while (pstart[2] < end[2])
4237  {
4238  pend[2] = nextTile(tz, pstart[2], start[2], end[2], myRes[2]);
4239  pstart[1] = firstTile(start[1], end[1], myRes[1]);
4240  while (pstart[1] < end[1])
4241  {
4242  pend[1] = nextTile(ty, pstart[1], start[1], end[1], myRes[1]);
4243  pstart[0] = firstTile(start[0], end[0], myRes[0]);
4244  while (pstart[0] < end[0])
4245  {
4246  pend[0] = nextTile(tx, pstart[0], start[0], end[0], myRes[0]);
4247  tile = getTile(tx, ty, tz);
4248  UT_ASSERT(tile);
4249  tile->weightedSum(pstart, pend, weights, start, result);
4250  pstart[0] = pend[0];
4251  }
4252  pstart[1] = pend[1];
4253  }
4254  pstart[2] = pend[2];
4255  }
4256 
4257  if (visible < 1)
4258  {
4259  result += (1-visible)*myBorderValue;
4260  }
4261 
4262  return result;
4263 }
4264 
4265 template <typename T>
4266 void
4268  UT_FilterType filtertype,
4269  float filterwidthscale,
4270  int clampaxis)
4271 {
4272  fpreal radius;
4273  UT_Filter *filter;
4274 
4275  filter = UT_Filter::getFilter(filtertype);
4276 
4277  radius = SYSmax( src.getXRes() / (fpreal)getXRes(),
4278  src.getYRes() / (fpreal)getYRes(),
4279  src.getZRes() / (fpreal)getZRes(),
4280  1.0f );
4281  radius *= 0.5 * filterwidthscale;
4282 
4283  resamplethread(src, filter, radius, clampaxis);
4284 
4285  UT_Filter::releaseFilter(filter);
4286 }
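
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): the base filter radius used
// by resample() is half of a destination voxel's footprint measured in
// source voxels, never less than half a source voxel, and is later scaled
// by the filter's own support inside the filtered evaluate.  Downsampling
// 128^3 to 64^3 with a width scale of 1 therefore gives a base radius of
// 1 source voxel.
#include <algorithm>
#include <cstdio>

int main()
{
    const double src[3] = { 128, 128, 128 };
    const double dst[3] = {  64,  64,  64 };
    const double filterwidthscale = 1.0;

    double radius = std::max({ src[0] / dst[0], src[1] / dst[1],
                               src[2] / dst[2], 1.0 });
    radius *= 0.5 * filterwidthscale;

    std::printf("base filter radius: %g source voxels\n", radius);   // prints 1
    return 0;
}
// ----------------------------------------------------------------------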
4287 
4288 template <typename T>
4289 void
4290 UT_VoxelArray<T>::flattenPartial(T *flatarray, exint ystride, exint zstride,
4291  const UT_JobInfo &info) const
4292 {
4293  // Check to see if we are 2d, if so, we wish to use the
4294  // axis flatten.
4295  if (getXRes() == 1 && ystride == 1)
4296  {
4297  flattenPartialAxis<0>(flatarray, zstride, info);
4298  }
4299  else if (getYRes() == 1 && zstride == ystride)
4300  {
4301  flattenPartialAxis<1>(flatarray, zstride, info);
4302  }
4303  else if (getZRes() == 1)
4304  {
4305  flattenPartialAxis<2>(flatarray, ystride, info);
4306  }
4307  else
4308  {
4309  UT_VoxelArrayIterator<T> vit;
4310 
4311  // Const cast.
4312  vit.setArray((UT_VoxelArray<T> *)this);
4313  // We can't use splitByTile as then our writes will fight
4314  // over 4k pages.
4315  vit.setPartialRange(info.job(), info.numJobs());
4316 
4317  for (vit.rewind(); !vit.atEnd(); vit.advance())
4318  {
4319  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4320  }
4321  }
4322 }
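
// ----------------------------------------------------------------------
// Illustrative sketch (not part of this file): the strides handed to the
// flatten methods describe the destination layout, so a dense x-fastest
// buffer uses ystride = xres and zstride = xres * yres.  flatIndex() is a
// hypothetical helper matching the writes performed above; for a 35x16x9
// array that means ystride = 35, zstride = 560 and 5040 elements in all.
#include <cstddef>

static std::size_t
flatIndex(int x, int y, int z, std::size_t ystride, std::size_t zstride)
{
    return std::size_t(x) + std::size_t(y) * ystride + std::size_t(z) * zstride;
}
// ----------------------------------------------------------------------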
4323 
4324 
4325 template <typename T>
4326 template <int AXIS2D>
4327 void
4329  const UT_JobInfo &info) const
4330 {
4331  // The x/y refer to the destination x/y.
4332  // The actual source index depends on AXIS2D.
4333  const int ax = (AXIS2D == 0) ? 1 : 0;
4334  const int ay = (AXIS2D == 2) ? 1 : 2;
4335  int tileidx[3] = { 0, 0, 0 };
4336 
4337  while (1)
4338  {
4339  exint tiley = info.nextTask();
4340  exint ystart = tiley * TILESIZE;
4341  if (ystart >= getRes(ay))
4342  break;
4343 
4344  T *stripe = &flatarray[ystart * ystride];
4345 
4346  int yres = SYSmin(getRes(ay) - ystart, TILESIZE);
4347 
4348  for (int tilex = 0, ntilex = getTileRes(ax); tilex < ntilex; tilex++)
4349  {
4350  tileidx[ax] = tilex;
4351  tileidx[ay] = tiley;
4352  auto tile = getTile(tileidx[0], tileidx[1], tileidx[2]);
4353 
4354  int xres = tile->getRes(ax);
4355  T *stripey = stripe;
4356  if (tile->isConstant())
4357  {
4358  const T *srcdata = tile->rawData();
4359 
4360  for (int y = 0; y < yres; y++)
4361  {
4362  for (int x = 0; x < xres; x++)
4363  {
4364  memcpy(&stripey[x], srcdata, sizeof(T));
4365  }
4366  stripey += ystride;
4367  }
4368  }
4369  else if (tile->isSimpleCompression())
4370  {
4371  const T *srcdata = tile->rawData();
4372 
4373  if (xres != TILESIZE)
4374  {
4375  for (int y = 0; y < yres; y++)
4376  {
4377  memcpy(stripey, srcdata, sizeof(T) * xres);
4378  srcdata += xres;
4379  stripey += ystride;
4380  }
4381  }
4382  else
4383  {
4384  for (int y = 0; y < yres; y++)
4385  {
4386  memcpy(stripey, srcdata, sizeof(T) * TILESIZE);
4387  srcdata += TILESIZE;
4388  stripey += ystride;
4389  }
4390  }
4391  }
4392  else
4393  {
4394  for (int y = 0; y < yres; y++)
4395  {
4396  int idx[3] = { 0, 0, 0 };
4397  idx[ay] = y;
4398  for (int x = 0; x < xres; x++)
4399  {
4400  idx[ax] = x;
4401  stripey[x] = (*tile)(idx[0], idx[1], idx[2]);
4402  }
4403  stripey += ystride;
4404  }
4405  }
4406 
4407  stripe += TILESIZE;
4408  }
4409  }
4410 }
4411 
4412 
4413 template <>
4414 inline void
4416  exint ystride, exint zstride,
4417  UT_Vector4F,
4418  const UT_JobInfo &info) const
4419 {
4420  UT_VoxelArrayIterator<UT_Vector4F> vit;
4421 
4422  // Const cast.
4423  vit.setArray(SYSconst_cast(this));
4424  // We can't use splitByTile as then our writes will fight
4425  // over 4k pages.
4426  vit.setPartialRange(info.job(), info.numJobs());
4427 
4428  for (vit.rewind(); !vit.atEnd(); vit.advance())
4429  {
4430  UT_Vector4F v = vit.getValue();
4431  int idx = (vit.x() + vit.y()*ystride + vit.z()*zstride) * 4;
4432 
4433  flatarray[idx] = (uint8) SYSclamp(v.x() * 255.0f, 0.0f, 255.0f);
4434  flatarray[idx+1] = (uint8) SYSclamp(v.y() * 255.0f, 0.0f, 255.0f);
4435  flatarray[idx+2] = (uint8) SYSclamp(v.z() * 255.0f, 0.0f, 255.0f);
4436  flatarray[idx+3] = (uint8) SYSclamp(v.w() * 255.0f, 0.0f, 255.0f);
4437  }
4438 }
4439 
4440 template <typename T>
4441 void
4443  exint ystride, exint zstride,
4444  T, const UT_JobInfo &info) const
4445 {
4446  UT_ASSERT(!"This template requires specific instantiations.");
4447 }
4448 
4449 template <>
4450 inline void
4452  exint ystride, exint zstride,
4453  UT_Vector4F,
4454  const UT_JobInfo &info) const
4455 {
4456  UT_VoxelArrayIterator<UT_Vector4F> vit;
4457 
4458  // Const cast.
4459  vit.setArray(SYSconst_cast(this));
4460  // We can't use splitByTile as then our writes will fight
4461  // over 4k pages.
4462  vit.setPartialRange(info.job(), info.numJobs());
4463 
4464  for (vit.rewind(); !vit.atEnd(); vit.advance())
4465  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4466 }
4467 
4468 template <typename T>
4469 void
4471  exint ystride, exint zstride,
4472  T, const UT_JobInfo &info) const
4473 {
4474  UT_ASSERT(!"This template requires specific instantiations.");
4475 }
4476 
4477 template <>
4478 inline void
4480  exint ystride, exint zstride,
4481  UT_Vector4F,
4482  const UT_JobInfo &info) const
4483 {
4484  UT_VoxelArrayIterator<UT_Vector4F> vit;
4485 
4486  // Const cast.
4487  vit.setArray(SYSconst_cast(this));
4488  // We can't use splitByTile as then our writes will fight
4489  // over 4k pages.
4490  vit.setPartialRange(info.job(), info.numJobs());
4491 
4492  for (vit.rewind(); !vit.atEnd(); vit.advance())
4493  {
4494  UT_Vector4F v = vit.getValue();
4495 
4496  // NOTE: This works around an Nvidia driver bug on OSX, and older
4497  // Nvidia drivers on other platforms. The problem was that very
4498  // small FP values were causing huge random values to appear in
4499  // the 3D Texture.
4500  if(SYSabs(v.x()) < 1e-9)
4501  v.x() = 0.0;
4502  if(SYSabs(v.y()) < 1e-9)
4503  v.y() = 0.0;
4504  if(SYSabs(v.z()) < 1e-9)
4505  v.z() = 0.0;
4506  if(SYSabs(v.w()) < 1e-9)
4507  v.w() = 0.0;
4508 
4509  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = v;
4510  }
4511 }
4512 
4513 template <typename T>
4514 void
4516  exint ystride, exint zstride,
4517  T,const UT_JobInfo &info) const
4518 {
4519  UT_ASSERT(!"This template requires specific instantiations.");
4520 }
4521 
4522 
4523 template <typename T>
4524 void
4526  exint ystride, exint zstride,
4527  const UT_JobInfo &info)
4528 {
4529  UT_VoxelArrayIterator<T> vit;
4530 
4531  vit.setArray(this);
4532  vit.splitByTile(info);
4533  vit.setCompressOnExit(true);
4534 
4535  for (vit.rewind(); !vit.atEnd(); vit.advance())
4536  {
4537  vit.setValue(flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride]);
4538  }
4539 }
4540 
4541 template <typename T>
4542 template <typename S>
4543 S *
4545  const UT_IntArray &tilelist) const
4546 {
4547  for (int i = 0; i < tilelist.entries(); i++)
4548  {
4549  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
4550  const UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
4551 
4552  tile->flatten(dstdata, stride);
4553  dstdata += tile->numVoxels() * stride;
4554  }
4555  return dstdata;
4556 }
4557 
4558 template <typename T>
4559 template <typename S, typename IDX>
4560 S *
4562  const IDX *ix, const IDX *iy, const IDX *iz,
4563  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const
4564 {
4565  int srcidx = 0;
4566 
4567  for (auto && tiledata : tilelist)
4568  {
4569  int tileidx = tiledata.tileidx;
4570  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
4571 
4572  const UT_VoxelTile<T> *tile = getLinearTile(tileidx);
4573  int tilevoxel = tile->numVoxels();
4574 
4575  if (tilevoxel == tiledata.numvoxel)
4576  {
4577  // Full tile.
4578  tile->flatten(dstdata, stride);
4579  dstdata += tiledata.numvoxel * stride;
4580  srcidx += tiledata.numvoxel;
4581  }
4582  else
4583  {
4584  // Partial tile...
4585  int basex, basey, basez;
4586  linearTileToXYZ(tileidx, basex, basey, basez);
4587 
4588  basex <<= TILEBITS;
4589  basey <<= TILEBITS;
4590  basez <<= TILEBITS;
4591 
4592  if (tile->isSimpleCompression())
4593  {
4594  const T *src = tile->rawData();
4595  if (tile->isConstant())
4596  {
4597  S cval = *src;
4598  for (int i = 0; i < tiledata.numvoxel; i++)
4599  {
4600  *dstdata = cval;
4601  dstdata += stride;
4602  srcidx++;
4603  }
4604  }
4605  else
4606  {
4607  int w = tile->xres();
4608  int h = tile->yres();
4609  for (int i = 0; i < tiledata.numvoxel; i++)
4610  {
4611  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4612  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4613  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4614  *dstdata = src[ (ix[srcidx] - basex)
4615  + (iy[srcidx] - basey) * w
4616  + (iz[srcidx] - basez) * w * h];
4617  dstdata += stride;
4618  srcidx++;
4619  }
4620  }
4621  }
4622  else
4623  {
4624  for (int i = 0; i < tiledata.numvoxel; i++)
4625  {
4626  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4627  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4628  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4629  *dstdata = (*tile)(ix[srcidx] - basex,
4630  iy[srcidx] - basey,
4631  iz[srcidx] - basez);
4632  dstdata += stride;
4633  srcidx++;
4634  }
4635  }
4636  }
4637  }
4638  return dstdata;
4639 }
4640 
4641 template <typename T>
4642 template <typename S>
4643 const S *
4645  const UT_IntArray &tilelist)
4646 {
4647  bool docompress = getCompressionOptions().compressionEnabled();
4648  for (int i = 0; i < tilelist.entries(); i++)
4649  {
4650  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
4651  UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
4652 
4653  tile->writeData(srcdata, stride);
4654  if (docompress)
4655  tile->tryCompress(getCompressionOptions());
4656  srcdata += tile->numVoxels() * stride;
4657  }
4658  return srcdata;
4659 }
4660 
4661 template <typename T>
4662 template <typename S, typename IDX>
4663 const S *
4665  const IDX *ix, const IDX *iy, const IDX *iz,
4666  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist)
4667 {
4668  bool docompress = getCompressionOptions().compressionEnabled();
4669  int srcidx = 0;
4670 
4671  for (auto && tiledata : tilelist)
4672  {
4673  int tileidx = tiledata.tileidx;
4674  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
4675 
4676  UT_VoxelTile<T> *tile = getLinearTile(tileidx);
4677  int tilevoxel = tile->numVoxels();
4678 
4679  if (tilevoxel == tiledata.numvoxel)
4680  {
4681  tile->writeData(srcdata, stride);
4682  srcdata += tiledata.numvoxel * stride;
4683  srcidx += tiledata.numvoxel;
4684  }
4685  else
4686  {
4687  // Partial tile.
4688  int basex, basey, basez;
4689  linearTileToXYZ(tileidx, basex, basey, basez);
4690 
4691  basex <<= TILEBITS;
4692  basey <<= TILEBITS;
4693  basez <<= TILEBITS;
4694 
4695  for (int i = 0; i < tiledata.numvoxel; i++)
4696  {
4697  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4698  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4699  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4700  tile->setValue(ix[srcidx] - basex,
4701  iy[srcidx] - basey,
4702  iz[srcidx] - basez,
4703  *srcdata);
4704  srcdata += stride;
4705  srcidx++;
4706  }
4707  }
4708 
4709  if (docompress)
4710  tile->tryCompress(getCompressionOptions());
4711  }
4712  return srcdata;
4713 }
4714 
4715 template <typename T>
4716 void
4718  int tileoffy, int tileoffz,
4719  const UT_JobInfo &info)
4720 {
4721 // This macro fills in the values in tile (UT_VoxelTile<T>*) between the given indices
4722 // from the src array, with offsets stored in inds (array of integers).
4723 #define FILL_IN_VALUES(x0, x1, y0, y1, z0, z1) \
4724  for (int z = z0; z < z1; z++) \
4725  { \
4726  for (int y = y0; y < y1; y++) \
4727  { \
4728  for (int x = x0; x < x1; x++) \
4729  { \
4730  tile->setValue(x, y, z, src.getValue(x + inds[0], y + inds[1], \
4731  z + inds[2])); \
4732  } \
4733  } \
4734  }
4735 
4736  // Is the border constant?
4737  const bool const_src_border = (src.getBorder() == UT_VOXELBORDER_CONSTANT);
4738  const T border_val = src.getBorderValue();
4739 
4740  // Offsets in terms of tiles.
4741  const int off[] = {tileoffx, tileoffy, tileoffz};
4742  // Tile resolution of the source array.
4743  const int src_tileres[] = {src.getTileRes(0), src.getTileRes(1), src.getTileRes(2)};
4744 
4745  // Divide up the work among threads and get at it.
4746  int t0, t1;
4747  info.divideWork(numTiles(), t0, t1);
4748  int xyz[3];
4749  for (int i = t0; i < t1; i++)
4750  {
4751  // The tile we'll be working on.
4752  UT_VoxelTile<T>* tile = getLinearTile(i);
4753 
4754  // Convert the linear tile number to xyz indices, then offset those indices
4755  // and check if we are still inside the source array.
4756  linearTileToXYZ(i, xyz[0], xyz[1], xyz[2]);
4757  bool inside = true;
4758  for (int j = 0; j < 3; j++)
4759  {
4760  xyz[j] += off[j];
4761  inside = inside && xyz[j] >= 0 && xyz[j] < src_tileres[j];
4762  }
4763 
4764  if (inside)
4765  {
4766  // We are inside the source array; start by getting the corresponding source
4767  // tile.
4768  UT_VoxelTile<T>* srctile = src.getTile(xyz[0], xyz[1], xyz[2]);
4769 
4770  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
4771  && tile->zres() == srctile->zres())
4772  {
4773  // The tiles are of the same size, so we can just exchange the data
4774  // pointers.
4775  UTswap(tile->myData, srctile->myData);
4776  UTswap(tile->myCompressionType, srctile->myCompressionType);
4777  UTswap(tile->myForeignData, srctile->myForeignData);
4778  }
4779  else if (const_src_border && srctile->isConstant()
4780  && SYSisEqual((*srctile)(0, 0, 0), border_val))
4781  {
4782  // Our source tile is incomplete, but it is constant and equal to the
4783  // constant border value. Thus, the destination tile can be made constant.
4784  tile->makeConstant(border_val);
4785  }
4786  else
4787  {
4788 #if 0
4789  // Start by copying as much of the data from the source tile as possible.
4790  tile->copyFragment(0, 0, 0, *srctile, 0, 0, 0);
4791 
4792  // This flag will be true if the destination tile is larger along all
4793  // dimensions than the source tile.
4794  bool compl_box = true;
4795  // Starting voxel index for this tile, with respect to the source array.
4796  int inds[] = {xyz[0] * TILESIZE, xyz[1] * TILESIZE, xyz[2] * TILESIZE};
4797 
4798  // For each axis, we set the values for the block of voxels that lies
4799  // outside the source tile (only along that axis).
4800  for (int axis = 0; axis < 3; axis++)
4801  {
4802  // Are there any voxels outside source tile along this axis?
4803  bool needs_work = (tile->getRes(axis) > srctile->getRes(axis));
4804  compl_box = compl_box && needs_work;
4805  // If there are such voxels, set their values; we have to use
4806  // getValue(), since these voxels are outside the source array.
4807  if (needs_work)
4808  {
4809  // Along axis, start at srctile's resolution; along the other
4810  // two axes, end at srctile's resolution.
4811  int starts[] = {0, 0, 0};
4812  int ends[] = {tile->xres(), tile->yres(), tile->zres()};
4813  for (int j = 0; j < 3; j++)
4814  {
4815  if (j == axis)
4816  starts[j] = srctile->getRes(j);
4817  else
4818  ends[j] = SYSmin(ends[j], srctile->getRes(j));
4819  }
4820 
4821  FILL_IN_VALUES(starts[0], ends[0], starts[1], ends[1], starts[2],
4822  ends[2])
4823  }
4824  }
4825  // If there is a block of voxels that was not covered by the loops above,
4826  // fill their values in as well.
4827  if (compl_box)
4828  {
4829  FILL_IN_VALUES(srctile->xres(), tile->xres(), srctile->yres(),
4830  tile->yres(), srctile->zres(), tile->zres())
4831  }
4832 #else
4833  // Safe path for copying voxel data.
4834  int inds[] = {xyz[0] * TILESIZE, xyz[1] * TILESIZE, xyz[2] * TILESIZE};
4835  FILL_IN_VALUES(0, tile->xres(), 0, tile->yres(), 0, tile->zres())
4836 #endif
4837  }
4838  }
4839  else if (const_src_border)
4840  {
4841  // We are outside the source array, but it's got a constant border value!
4842  tile->makeConstant(border_val);
4843  }
4844  else
4845  {
4846  // The whole tile is outside the source array and border value is not constant...
4847  int inds[] = {xyz[0] * TILESIZE, xyz[1] * TILESIZE, xyz[2] * TILESIZE};
4848  FILL_IN_VALUES(0, tile->xres(), 0, tile->yres(), 0, tile->zres())
4849  }
4850  }
4851 }
4852 
4853 template <typename T>
4854 void
4855 UT_VoxelArray<T>::copyWithOffset(const UT_VoxelArray<T> &src,
4856  int offx, int offy, int offz)
4857 {
4858  UT_VoxelBorderType srcborder;
4859  T srcborderval;
4860 
4861  srcborder = src.getBorder();
4862  srcborderval = src.getBorderValue();
4863 
4864  if (srcborder != UT_VOXELBORDER_EXTRAP)
4865  {
4866  // These borders may be constant friendly
4867  T srcval;
4868 
4869  if (src.isConstant(&srcval))
4870  {
4871  if (srcborder != UT_VOXELBORDER_CONSTANT ||
4872  SYSisEqual(srcborderval, srcval))
4873  {
4874  // We can trivially make ourselves constant.
4875  constant(srcval);
4876  return;
4877  }
4878  }
4879  }
4880 
4881  copyWithOffsetInternal(src, offx, offy, offz);
4882 }
4883 
4884 template <typename T>
4885 void
4886 UT_VoxelArray<T>::copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
4887  int offx, int offy, int offz,
4888  const UT_JobInfo &info)
4889 {
4890  UT_VoxelArrayIterator<T> vit;
4891  UT_Vector3I off(offx, offy, offz);
4892  bool can_copy_tiles = ((offx & TILEMASK) == 0) && ((offy & TILEMASK) == 0)
4893  && ((offz & TILEMASK) == 0);
4894 
4895  vit.setArray(this);
4896  vit.splitByTile(info);
4897  vit.setCompressOnExit(true);
4898 
4899  // Iterate over all tiles
4900  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
4901  {
4902  // Check out this tile
4903  UT_VoxelTile<T> *tile = vit.getTile();
4904  UT_VoxelTile<T> *srctile;
4905  int tx, ty, tz;
4906 
4907  UT_Vector3I start, end, srctileidx, srctileoff, srcend, srcendtile;
4908  vit.getTileVoxels(start, end);
4909 
4910  // If offsets are all multiples of TILESIZE, it is possible to do direct copies,
4911  // assuming source tile is inside and of the same size.
4912  if (can_copy_tiles)
4913  {
4914  // Start by getting the source tile index and ensuring it is inside the
4915  // source array.
4916  bool inside = true;
4917  for (int i = 0; i < 3; i++)
4918  {
4919  srctileidx(i) = vit.myTilePos[i] + (off(i) >> TILEBITS);
4920  inside = inside && srctileidx(i) >= 0
4921  && srctileidx(i) < src.getTileRes(i);
4922  }
4923  // If the tile is inside, we make sure the tile sizes are the same, in which
4924  // case we can do a copy and move on.
4925  if (inside)
4926  {
4927  srctile = src.getTile(srctileidx.x(), srctileidx.y(), srctileidx.z());
4928  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
4929  && tile->zres() == srctile->zres())
4930  {
4931  *tile = *srctile;
4932  continue;
4933  }
4934  }
4935  }
4936 
4937  srctileidx = start;
4938  srctileidx += off;
4939  srctileidx.x() >>= TILEBITS;
4940  srctileidx.y() >>= TILEBITS;
4941  srctileidx.z() >>= TILEBITS;
4942  srctileoff.x() = off.x() & TILEMASK;
4943  srctileoff.y() = off.y() & TILEMASK;
4944  srctileoff.z() = off.z() & TILEMASK;
4945 
4946  srcend = start;
4947  // We are very careful here to be inclusive, so we don't trigger
4948  // the next tile. This is the largest index we expect to access.
4949  srcend.x() += tile->xres() - 1;
4950  srcend.y() += tile->yres() - 1;
4951  srcend.z() += tile->zres() - 1;
4952  srcend += off;
4953  srcendtile = srcend;
4954  srcendtile.x() >>= TILEBITS;
4955  srcendtile.y() >>= TILEBITS;
4956  srcendtile.z() >>= TILEBITS;
4957 
4958  UT_ASSERT(srcendtile.x() == srctileidx.x() ||
4959  srcendtile.x() == srctileidx.x()+1);
4960  UT_ASSERT(srcendtile.y() == srctileidx.y() ||
4961  srcendtile.y() == srctileidx.y()+1);
4962  UT_ASSERT(srcendtile.z() == srctileidx.z() ||
4963  srcendtile.z() == srctileidx.z()+1);
4964 
4965  // Check if we are fully in bounds.
4966  if (srctileidx.x() >= 0 &&
4967  srctileidx.y() >= 0 &&
4968  srctileidx.z() >= 0 &&
4969  srcendtile.x() < src.getTileRes(0) &&
4970  srcendtile.y() < src.getTileRes(1) &&
4971  srcendtile.z() < src.getTileRes(2) &&
4972  srcend.x() < src.getXRes() &&
4973  srcend.y() < src.getYRes() &&
4974  srcend.z() < src.getZRes())
4975  {
4976  bool allconst = true, firsttile = true;
4977  T constval, cval;
4978  // Check if we are all constant...
4979  for (tz = srctileidx.z(); allconst && tz <= srcendtile.z(); tz++)
4980  for (ty = srctileidx.y(); allconst && ty <= srcendtile.y(); ty++)
4981  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
4982  {
4983  srctile = src.getTile(tx, ty, tz);
4984  if (!srctile->isConstant())
4985  {
4986  allconst = false;
4987  break;
4988  }
4989  cval = (*srctile)(0, 0, 0);
4990  if (firsttile)
4991  {
4992  firsttile = false;
4993  constval = cval;
4994  }
4995  if (!SYSisEqual(cval, constval))
4996  {
4997  allconst = false;
4998  break;
4999  }
5000  }
5001  if (allconst)
5002  {
5003  // Should have found at least one tile!
5004  UT_ASSERT(!firsttile);
5005  tile->makeConstant(constval);
5006 
5007  // Onto the next tile!
5008  continue;
5009  }
5010  }
5011 
5012  // Not all of the fragments are constant, or they aren't all inside
5013  // our range, so we have to work one fragment at a time.
5014  for (tz = srctileidx.z(); tz <= srcendtile.z(); tz++)
5015  for (ty = srctileidx.y(); ty <= srcendtile.y(); ty++)
5016  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
5017  {
5018  int destx, desty, destz;
5019  int srcx, srcy, srcz;
5020 
5021  destx = (tx == srctileidx.x()) ? 0 : (TILESIZE-srctileoff.x());
5022  desty = (ty == srctileidx.y()) ? 0 : (TILESIZE-srctileoff.y());
5023  destz = (tz == srctileidx.z()) ? 0 : (TILESIZE-srctileoff.z());
5024  srcx = (tx == srctileidx.x()) ? srctileoff.x() : 0;
5025  srcy = (ty == srctileidx.y()) ? srctileoff.y() : 0;
5026  srcz = (tz == srctileidx.z()) ? srctileoff.z() : 0;
5027 #if 1
5028  if (tx >= 0 &&
5029  ty >= 0 &&
5030  tz >= 0 &&
5031  tx < src.getTileRes(0) &&
5032  ty < src.getTileRes(1) &&
5033  tz < src.getTileRes(2) &&
5034  ((tx != src.getTileRes(0)-1) ||
5035  srcend.x() < src.getXRes()) &&
5036  ((ty != src.getTileRes(1)-1) ||
5037  srcend.y() < src.getYRes()) &&
5038  ((tz != src.getTileRes(2)-1) ||
5039  srcend.z() < src.getZRes())
5040  )
5041  {
5042  srctile = src.getTile(tx, ty, tz);
5043  // In bounds
5044  tile->copyFragment(
5045  destx, desty, destz,
5046  *srctile,
5047  srcx, srcy, srcz
5048  );
5049  }
5050  else
5051 #endif
5052  {
5053  // Out of bounds!
5054  int maxd = SYSmin(tile->zres(),
5055  TILESIZE-srcz);
5056  int maxh = SYSmin(tile->yres(),
5057  TILESIZE-srcy);
5058  int maxw = SYSmin(tile->xres(),
5059  TILESIZE-srcx);
5060  for (int z = destz; z < maxd; z++)
5061  {
5062  for (int y = desty; y < maxh; y++)
5063  {
5064  for (int x = destx; x < maxw; x++)
5065  {
5066  T val;
5067 
5068  val = src.getValue(x + vit.x() + offx,
5069  y + vit.y() + offy,
5070  z + vit.z() + offz);
5071  tile->setValue(x, y, z, val);
5072  }
5073  }
5074  }
5075  }
5076  }
5077  }
5078 }
5079 
5080 template <typename T>
5081 void
5083  const UT_Filter *filter, float radius,
5084  int clampaxis,
5085  const UT_JobInfo &info)
5086 {
5087  UT_VoxelArrayIterator<T> vit;
5088  UT_Vector3 pos;
5089  UT_Vector3 ratio;
5090  UT_Interrupt *boss = UTgetInterrupt();
5091 
5092  vit.setArray(this);
5093  vit.splitByTile(info);
5094  vit.setCompressOnExit(true);
5095  vit.setInterrupt(boss);
5096 
5097  ratio.x() = 1.0f / getXRes();
5098  ratio.y() = 1.0f / getYRes();
5099  ratio.z() = 1.0f / getZRes();
5100 
5101  for (vit.rewind(); !vit.atEnd(); vit.advance())
5102  {
5103  pos.x() = vit.x()+0.5f;
5104  pos.y() = vit.y()+0.5f;
5105  pos.z() = vit.z()+0.5f;
5106  pos *= ratio;
5107 
5108  vit.setValue(src.evaluate(pos, *filter, radius, clampaxis));
5109  }
5110 }
5111 
5112 template <typename T>
5113 bool
5114 UT_VoxelArray<T>::posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
5115 {
5116  // We go from the position in the unit cube into the index.
5117  // The center of cells must map to the exact integer indices.
5118  pos.x() *= myRes[0];
5119  pos.y() *= myRes[1];
5120  pos.z() *= myRes[2];
5121 
5122  // The centers of cells are now mapped 0.5 too high. I.e., the true
5123  // center of cell index (0,0,0) would be (0.5,0.5,0.5). This, however,
5124  // is exactly what we want for rounding.
5125  x = (int) SYSfloor(pos.x());
5126  y = (int) SYSfloor(pos.y());
5127  z = (int) SYSfloor(pos.z());
5128 
5129  // Determine if out of bounds.
5130  return isValidIndex(x, y, z);
5131 }
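// Worked example (editor's sketch, not part of the original source): with a
// 10^3 array, the unit-cube position (0.05, 0.55, 0.999) scales to
// (0.5, 5.5, 9.99) and floors to voxel (0, 5, 9); the center of cell (0,0,0)
// sits at 0.05 = (0 + 0.5) / 10, which is why flooring after the scale is the
// correct rounding.
#if 0
static void
utVoxelPosToIndexExample()  // hypothetical helper, illustration only
{
    UT_VoxelArray<fpreal32> vol;
    vol.size(10, 10, 10);

    int ix, iy, iz;
    const bool inside = vol.posToIndex(UT_Vector3(0.05f, 0.55f, 0.999f),
                                       ix, iy, iz);
    // inside == true and (ix, iy, iz) == (0, 5, 9).
}
#endif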
5132 
5133 template <typename T>
5134 bool
5135 UT_VoxelArray<T>::posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const
5136 {
5137  // We go from the position in the unit cube into the index.
5138  // The center of cells must map to the exact integer indices.
5139  pos.x() *= myRes[0];
5140  pos.y() *= myRes[1];
5141  pos.z() *= myRes[2];
5142 
5143  // The centers of cells are now mapped 0.5 too high. I.e., the true
5144  // center of cell index (0,0,0) would be (0.5,0.5,0.5).
5145  pos.x() -= 0.5;
5146  pos.y() -= 0.5;
5147  pos.z() -= 0.5;
5148 
5149  ipos = pos;
5150 
5151  // Determine if out of bounds.
5152  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5153  pos.y() < 0 || pos.y() >= myRes[1] ||
5154  pos.z() < 0 || pos.z() >= myRes[2])
5155  return false;
5156 
5157  return true;
5158 }
5159 
5160 template <typename T>
5161 bool
5162 UT_VoxelArray<T>::posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const
5163 {
5164  // We go from the position in the unit cube into the index.
5165  // The center of cells must map to the exact integer indices.
5166  pos.x() *= myRes[0];
5167  pos.y() *= myRes[1];
5168  pos.z() *= myRes[2];
5169 
5170  // The centers of cells are now mapped 0.5 too high. I.e., the true
5171  // center of cell index (0,0,0) would be (0.5,0.5,0.5). This, however,
5172  // is exactly what we want for rounding.
5173  x = (exint) SYSfloor(pos.x());
5174  y = (exint) SYSfloor(pos.y());
5175  z = (exint) SYSfloor(pos.z());
5176 
5177  // Determine if out of bounds.
5178  return isValidIndex(x, y, z);
5179 }
5180 
5181 template <typename T>
5182 bool
5183 UT_VoxelArray<T>::posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const
5184 {
5185  // We go from the position in the unit cube into the index.
5186  // The center of cells must map to the exact integer indices.
5187  pos.x() *= myRes[0];
5188  pos.y() *= myRes[1];
5189  pos.z() *= myRes[2];
5190 
5191  // The centers of cells are now mapped 0.5 too high. I.e., the true
5192  // center of cell index (0,0,0) would be (0.5,0.5,0.5).
5193  pos.x() -= 0.5;
5194  pos.y() -= 0.5;
5195  pos.z() -= 0.5;
5196 
5197  ipos = pos;
5198 
5199  // Determine if out of bounds.
5200  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5201  pos.y() < 0 || pos.y() >= myRes[1] ||
5202  pos.z() < 0 || pos.z() >= myRes[2])
5203  return false;
5204 
5205  return true;
5206 }
5207 
5208 template <typename T>
5209 bool
5210 UT_VoxelArray<T>::indexToPos(int x, int y, int z, UT_Vector3F &pos) const
5211 {
5212  pos.x() = x;
5213  pos.y() = y;
5214  pos.z() = z;
5215 
5216  // Move the indices to the centers of the cells.
5217  pos.x() += 0.5;
5218  pos.y() += 0.5;
5219  pos.z() += 0.5;
5220 
5221  // And scale into the unit cube.
5222  pos *= myInvRes;
5223 
5224  // Return true if the original coordinates were in range.
5225  return isValidIndex(x, y, z);
5226 }
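// Round-trip note (illustrative, not in the original source): indexToPos()
// inverts the rounding above; for a 10^3 array, index (0, 5, 9) maps back to
// the cell center (0.05, 0.55, 0.95).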
5227 
5228 template <typename T>
5229 bool
5230 UT_VoxelArray<T>::indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const
5231 {
5232  pos.x() = x;
5233  pos.y() = y;
5234  pos.z() = z;
5235 
5236  // Move the indices to the centers of the cells.
5237  pos.x() += 0.5;
5238  pos.y() += 0.5;
5239  pos.z() += 0.5;
5240 
5241  // And scale into the unit cube.
5242  pos *= myInvRes;
5243 
5244  // Return true if the original coordinates were in range.
5245  return isValidIndex(x, y, z);
5246 }
5247 
5248 template <typename T>
5249 void
5250 UT_VoxelArray<T>::findexToPos(UT_Vector3F index, UT_Vector3F &pos) const
5251 {
5252  pos = index;
5253 
5254  // Move the indices to the centers of the cells.
5255  pos.x() += 0.5;
5256  pos.y() += 0.5;
5257  pos.z() += 0.5;
5258 
5259  // And scale into the unit cube.
5260  pos *= myInvRes;
5261 }
5262 
5263 template <typename T>
5264 void
5265 UT_VoxelArray<T>::findexToPos(UT_Vector3D index, UT_Vector3D &pos) const
5266 {
5267  pos = index;
5268 
5269  // Move the indices to the centers of the cells.
5270  pos.x() += 0.5;
5271  pos.y() += 0.5;
5272  pos.z() += 0.5;
5273 
5274  // And scale into the unit cube.
5275  pos *= myInvRes;
5276 }
5277 
5278 template <typename T>
5279 void
5280 UT_VoxelArray<T>::setBorder(UT_VoxelBorderType type, T t)
5281 {
5282  myBorderType = type;
5283  myBorderValue = t;
5284 }
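// Usage sketch (illustrative, not part of the original source): a constant
// border makes out-of-range lookups return a fixed value, which the
// copy/offset routines above exploit to turn fully-outside tiles into
// constant tiles.
#if 0
static void
utVoxelSetBorderExample()  // hypothetical helper, illustration only
{
    UT_VoxelArray<fpreal32> density;
    density.size(32, 32, 32);
    density.setBorder(UT_VOXELBORDER_CONSTANT, 0.0f);
}
#endif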
5285 
5286 template <typename T>
5287 void
5288 UT_VoxelArray<T>::setBorderScale(T sx, T sy, T sz)
5289 {
5290  myBorderScale[0] = sx;
5291  myBorderScale[1] = sy;
5292  myBorderScale[2] = sz;
5293 }
5294 
5295 template <typename T>
5296 void
5297 UT_VoxelArray<T>::collapseAllTilesPartial(const UT_JobInfo &info)
5298 {
5299  UT_VoxelArrayIterator<T> vit(this);
5300  vit.splitByTile(info);
5301  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5302  {
5303  int i = vit.getLinearTileNum();
5304  myTiles[i].tryCompress(getCompressionOptions());
5305  }
5306 }
5307 
5308 template <typename T>
5309 void
5310 UT_VoxelArray<T>::expandAllTilesPartial(const UT_JobInfo &info)
5311 {
5312  UT_VoxelArrayIterator<T> vit(this);
5313  vit.splitByTile(info);
5314  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5315  {
5316  int i = vit.getLinearTileNum();
5317  myTiles[i].uncompress();
5318  }
5319 }
5320 
5321 template <typename T>
5322 void
5323 UT_VoxelArray<T>::expandAllNonConstTilesPartial(const UT_JobInfo &info)
5324 {
5325  UT_VoxelArrayIterator<T> vit(this);
5326  vit.splitByTile(info);
5327  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5328  {
5329  int i = vit.getLinearTileNum();
5330  if (!myTiles[i].isConstant())
5331  myTiles[i].uncompress();
5332  }
5333 }
5334 
5335 template <typename T>
5336 void
5337 UT_VoxelArray<T>::saveData(std::ostream &os) const
5338 {
5339  T cval;
5340  char version;
5341 
5342  // First determine if we are fully constant.
5343  if (isConstant(&cval))
5344  {
5345  // Save a constant array.
5346  version = 0;
5347  UTwrite(os, &version, 1);
5348  UTwrite<T>(os, &cval);
5349  return;
5350  }
5351 
5352  // Compressed tiles.
5353  version = 1;
5354  UTwrite(os, &version, 1);
5355 
5356  // Store list of compression types
5357  UT_VoxelTile<T>::saveCompressionTypes(os);
5358 
5359  int i, ntiles;
5360 
5361  ntiles = numTiles();
5362  for (i = 0; i < ntiles; i++)
5363  {
5364  myTiles[i].save(os);
5365  }
5366 }
5367 
5368 
5369 template <typename T>
5370 void
5371 UT_VoxelArray<T>::loadData(UT_IStream &is)
5372 {
5373  T cval;
5374  char version;
5375 
5376  is.readChar(version);
5377 
5378  // First determine if we are fully constant.
5379  if (version == 0)
5380  {
5381  // Save a constant array.
5382  is.read<T>(&cval);
5383 
5384  constant(cval);
5385  return;
5386  }
5387 
5388  if (version == 1)
5389  {
5390  UT_IntArray compressions;
5391 
5392  // Store list of compression types
5393  UT_VoxelTile<T>::loadCompressionTypes(is, compressions);
5394 
5395  int i, ntiles;
5396 
5397  ntiles = numTiles();
5398  for (i = 0; i < ntiles; i++)
5399  {
5400  myTiles[i].load(is, compressions);
5401  }
5402  }
5403 }
5404 
5405 template <typename T>
5406 bool
5407 UT_VoxelArray<T>::saveData(UT_JSONWriter &w, const char *shared_mem_owner) const
5408 {
5409  bool ok = true;
5410  T cval;
5411  int8 version;
5412 
5413  // First determine if we are fully constant.
5414  if (isConstant(&cval))
5415  {
5416  ok = ok && w.jsonBeginArray();
5417  // Save a constant array.
5418  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5419  UT_VoxelArrayJSON::ARRAY_CONSTANTARRAY));
5420  ok = ok && w.jsonValue(cval);
5421  ok = ok && w.jsonEndArray();
5422  return ok;
5423  }
5424 
5425  if (shared_mem_owner)
5426  {
5427  SYS_SharedMemory *shm;
5428  shm = copyToSharedMemory(shared_mem_owner);
5429  if (shm)
5430  {
5431  ok = ok && w.jsonBeginArray();
5432  // Save a shared memory array.
5433  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5434  UT_VoxelArrayJSON::ARRAY_SHAREDARRAY));
5435  ok = ok && w.jsonString(shm->id());
5436  ok = ok && w.jsonEndArray();
5437  return ok;
5438  }
5439  // Fall back on raw write.
5440  }
5441 
5442 
5443  // Compressed tiles.
5444  ok = ok && w.jsonBeginArray();
5445  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5446  UT_VoxelArrayJSON::ARRAY_TILEDARRAY));
5447  ok = ok && w.jsonBeginArray();
5448  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5450  version = 1;
5451  ok = ok && w.jsonValue(version);
5452 
5453  // Store list of compression types
5454  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5457 
5458  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5460  ok = ok && w.jsonBeginArray();
5461  int i, ntiles;
5462 
5463  ntiles = numTiles();
5464  for (i = 0; i < ntiles; i++)
5465  {
5466  ok = ok && myTiles[i].save(w);
5467  }
5468  ok = ok && w.jsonEndArray();
5469 
5470  ok = ok && w.jsonEndArray();
5471 
5472  ok = ok && w.jsonEndArray();
5473  return ok;
5474 }
5475 
5476 template <typename T>
5477 bool
5479 {
5480  T cval;
5481  int8 version;
5482  UT_WorkBuffer key;
5483  int key_id;
5484  bool array_error = false;
5485 
5486  delete mySharedMemView;
5487  mySharedMemView = 0;
5488 
5489  delete mySharedMem;
5490  mySharedMem = 0;
5491 
5492  if (!p.parseBeginArray(array_error) || array_error)
5493  return false;
5494 
5495  if (!p.parseString(key))
5496  return false;
5497  key_id = UT_VoxelArrayJSON::getArrayID(key.buffer());
5498  if (key_id == UT_VoxelArrayJSON::ARRAY_CONSTANTARRAY)
5499  {
5500  if (!p.parseNumber(cval))
5501  return false;
5502 
5503  constant(cval);
5504  }
5505  else if (key_id == UT_VoxelArrayJSON::ARRAY_SHAREDARRAY)
5506  {
5507  UT_WorkBuffer shm_id;
5508  if (!p.parseString(shm_id))
5509  return false;
5510 
5511  if (!populateFromSharedMemory(shm_id.buffer()))
5512  {
5513  // If the shared memory disappears before we get a chance to read
5514  // it, don't abort, just set a constant value. That way Mantra has
5515  // a chance to recover. Not the most elegant solution but works for
5516  // now.
5517  T cval;
5518  cval = 0;
5519  constant(cval);
5520  }
5521  }
5522  else if (key_id == UT_VoxelArrayJSON::ARRAY_TILEDARRAY)
5523  {
5524  UT_JSONParser::traverser it, tile_it;
5525  UT_IntArray compressions;
5526  int i, ntiles;
5527 
5528 
5529  for (it = p.beginArray(); !it.atEnd(); ++it)
5530  {
5531  if (!it.getLowerKey(key))
5532  return false;
5533  switch (UT_VoxelArrayJSON::getArrayID(key.buffer()))
5534  {
5536  if (!p.parseNumber(version))
5537  return false;
5538  break;
5540  // Store list of compression types
5541  if (!UT_VoxelTile<T>::loadCompressionTypes(p, compressions))
5542  return false;
5543  break;
5544 
5546  ntiles = numTiles();
5547  for (tile_it = p.beginArray(), i = 0; !tile_it.atEnd();
5548  ++tile_it, ++i)
5549  {
5550  if (i < ntiles)
5551  {
5552  if (!myTiles[i].load(p, compressions))
5553  return false;
5554  }
5555  else
5556  {
5557  UT_VoxelTile<T> dummy_tile;
5558  dummy_tile.setRes(TILESIZE, TILESIZE, TILESIZE);
5559  if (!dummy_tile.load(p, compressions))
5560  return false;
5561  }
5562  }
5563  if (i != ntiles)
5564  return false;
5565  break;
5566  default:
5567  p.addWarning("Unexpected key for voxel data: %s",
5568  key.buffer());
5569  if (!p.skipNextObject())
5570  return false;
5571  }
5572  }
5573  }
5574  else
5575  {
5576  p.addWarning("Unexpected voxel key: %s\n", key.buffer());
5577  p.skipNextObject();
5578  return false;
5579  }
5580 
5581  if (!p.parseEndArray(array_error) || array_error)
5582  return false;
5583 
5584  return true;
5585 }
5586 
5587 template<typename T>
5588 SYS_SharedMemory *
5589 UT_VoxelArray<T>::copyToSharedMemory(const char *shared_mem_owner) const
5590 {
5592 
5593  int ntiles;
5594 
5595  ntiles = numTiles();
5596 
5597  // Tally up the size we need.
5598  exint total_mem_size;
5599 
5600  total_mem_size = sizeof(exint); // Tile count.
5601 
5602  UT_Array<exint> tile_sizes;
5603  UT_Array<exint> next_block_offsets;
5604 
5605  for (int i = 0; i < ntiles; i++)
5606  {
5607  exint tile_size, block_size;
5608  if (myTiles[i].isConstant())
5609  tile_size = sizeof(T);
5610  else
5611  tile_size = myTiles[i].getDataLength();
5612 
5613  tile_sizes.append(tile_size);
5614 
5615  block_size = SYSroundUpToMultipleOf<exint>(tile_size, sizeof(exint));
5616  block_size += sizeof(exint) * 2; // Offset + compression type
5617 
5618  if (i < (ntiles-1))
5619  next_block_offsets.append(block_size / sizeof(exint));
5620  else
5621  next_block_offsets.append(0);
5622 
5623  total_mem_size += block_size;
5624  }
5625 
5626  // Start with an empty shared memory.
5627  SYS_SharedMemory *shared_mem;
5628  UT_WorkBuffer shared_mem_key;
5629 
5630  shared_mem_key.sprintf("%s:%p", shared_mem_owner, this);
5631 
5632  shared_mem = shmgr.get(shared_mem_key.buffer());
5633 
5634  // Does the existing block have the same number of tiles and same
5635  // offsets? In that case we don't reset the shared memory, just write
5636  // the tiles out again. There's very little adverse effect in changing
5637  // the voxel tile data.
5638  bool same_meta_data = false;
5639  if (shared_mem->size() >= sizeof(exint))
5640  {
5641  same_meta_data = true;
5642  {
5643  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
5644 
5645  exint *ptr_ds = (exint *)sv_tc.data();
5646  if (*ptr_ds != ntiles)
5647  same_meta_data = false;
5648  }
5649 
5650  if (same_meta_data)
5651  {
5652  // Check if the offsets + compression type is the same.
5653  exint offset = sizeof(exint);
5654  for (int i = 0; i < ntiles; i++)
5655  {
5656  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
5657  sizeof(exint) * 2);
5658 
5659  exint *ptr_tile = (exint *)sv_tile.data();
5660  if (ptr_tile[0] != next_block_offsets(i) ||
5661  ptr_tile[1] != (exint)myTiles[i].myCompressionType)
5662  {
5663  same_meta_data = false;
5664  break;
5665  }
5666  offset += next_block_offsets(i) * sizeof(exint);
5667  }
5668  }
5669  }
5670 
5671  if (!same_meta_data)
5672  {
5673  if (!shared_mem->reset(total_mem_size) ||
5674  shared_mem->size() != total_mem_size)
5675  {
5676  return NULL;
5677  }
5678  }
5679 
5680  {
5681  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
5682 
5683  exint *ptr_ds = (exint *)sv_tc.data();
5684  *ptr_ds = ntiles;
5685  }
5686 
5687  exint offset = sizeof(exint);
5688  for (int i = 0; i < ntiles; i++)
5689  {
5690  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
5691  sizeof(exint) * 2 + tile_sizes(i));
5692 
5693  exint *ptr_tile = (exint *)sv_tile.data();
5694 
5695  // Offset to next tile in exint units.
5696  ptr_tile[0] = next_block_offsets(i);
5697  ptr_tile[1] = myTiles[i].myCompressionType;
5698 
5699  ::memcpy(&ptr_tile[2], myTiles[i].rawData(), tile_sizes(i));
5700 
5701  offset += ptr_tile[0] * sizeof(exint);
5702  }
5703 
5704  return shared_mem;
5705 }
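// Layout written above (and read back by populateFromSharedMemory() below),
// summarized for reference; everything is stored in exint-sized slots:
//
//   slot 0:    tile count
//   per tile:  [offset to next block, in exints][compression type]
//              [raw tile data, padded up to a multiple of sizeof(exint)]
//
// The final tile stores 0 as its next-block offset.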
5706 
5707 template<typename T>
5708 bool
5709 UT_VoxelArray<T>::populateFromSharedMemory(const char *id)
5710 {
5711  mySharedMem = new SYS_SharedMemory(id, /*read_only=*/true);
5712  if (!mySharedMem->size())
5713  {
5714  delete mySharedMem;
5715  mySharedMem = 0;
5716  return false;
5717  }
5718 
5719  exint ntiles;
5720  {
5721  SYS_SharedMemoryView sv_tc(*mySharedMem, 0, sizeof(exint));
5722  ntiles = *(const exint *)sv_tc.data();
5723  }
5724  if (ntiles != numTiles())
5725  {
5726  delete mySharedMem;
5727  mySharedMem = 0;
5728  return false;
5729  }
5730 
5731  mySharedMemView = new SYS_SharedMemoryView(*mySharedMem, sizeof(exint));
5732 
5733  exint *data = (exint *)mySharedMemView->data();
5734 
5735  for (int i = 0; i < ntiles; i++)
5736  {
5737  exint offset = data[0];
5738  int8 ctype = (int8)data[1];
5739 
5740  myTiles[i].setForeignData(&data[2], ctype);
5741 
5742  data += offset;
5743  }
5744 
5745  return true;
5746 }
5747 
5748 
5749 
5750 
5751 //
5752 // UT_VoxelMipMap functions
5753 //
5754 template <typename T>
5755 UT_VoxelMipMap<T>::UT_VoxelMipMap()
5756 {
5757  initializePrivate();
5758 }
5759 
5760 template <typename T>
5761 UT_VoxelMipMap<T>::~UT_VoxelMipMap()
5762 {
5763  destroyPrivate();
5764 }
5765 
5766 template <typename T>
5767 UT_VoxelMipMap<T>::UT_VoxelMipMap(const UT_VoxelMipMap<T> &src)
5768 {
5769  initializePrivate();
5770 
5771  *this = src;
5772 }
5773 
5774 template <typename T>
5775 const UT_VoxelMipMap<T> &
5776 UT_VoxelMipMap<T>::operator=(const UT_VoxelMipMap<T> &src)
5777 {
5778  UT_VoxelArray<T> **levels;
5779  int level, i;
5780 
5781  if (&src == this)
5782  return *this;
5783 
5784  destroyPrivate();
5785 
5786  // We do a deep copy. We have learned from UT_String that
5787  // shallow copies will destroy us in the end.
5788  myOwnBase = true;
5789  myBaseLevel = new UT_VoxelArray<T>;
5790  *myBaseLevel = *src.myBaseLevel;
5791 
5792  myNumLevels = src.myNumLevels;
5793 
5794  for (i = 0; i < src.myLevels.entries(); i++)
5795  {
5796  levels = new UT_VoxelArray<T> *[myNumLevels];
5797  myLevels.append(levels);
5798 
5799  for (level = 0; level < myNumLevels; level++)
5800  {
5801  levels[level] = new UT_VoxelArray<T>;
5802  *levels[level] = *src.myLevels(i)[level];
5803  }
5804  }
5805  return *this;
5806 }
5807 
5808 template <typename T>
5809 void
5810 UT_VoxelMipMap<T>::build(UT_VoxelArray<T> *baselevel,
5811  mipmaptype function)
5812 {
5813  UT_Array<mipmaptype> functions;
5814 
5815  functions.append(function);
5816  build(baselevel, functions);
5817 }
5818 
5819 template <typename T>
5820 void
5821 UT_VoxelMipMap<T>::build(UT_VoxelArray<T> *baselevel,
5822  const UT_Array<mipmaptype> &functions)
5823 {
5824  destroyPrivate();
5825 
5826  // Setup our baselevel.
5827  myBaseLevel = baselevel;
5828  myOwnBase = false;
5829 
5830  // Now build from bottom up. First, calculate the number
5831  // of levels we'll need.
5832  myNumLevels = 0;
5833  int maxres, level;
5834 
5835  maxres = SYSmax(myBaseLevel->getXRes(), myBaseLevel->getYRes());
5836  maxres = SYSmax(maxres, myBaseLevel->getZRes());
5837 
5838  //cerr << "Max res " << maxres << endl;
5839 
5840  while (maxres > 1)
5841  {
5842  myNumLevels++;
5843  maxres++;
5844  maxres >>= 1;
5845  }
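 // Worked example (illustrative): for a 9^3 base level, maxres goes
 // 9 -> 5 -> 3 -> 2 -> 1, so myNumLevels ends up 4, i.e. ceil(log2(maxres)).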
5846 
5847  //cerr << "Levels: " << myNumLevels << endl;
5848 
5849  // Create our desired voxel array list.
5850  for (int i = 0; i < functions.entries(); i++)
5851  {
5852  mipmaptype function = functions(i);
5853 
5854  UT_VoxelArray<T> **levels = new UT_VoxelArray<T> *[myNumLevels];
5855  myLevels.append(levels);
5856 
5857  UT_VoxelArray<T> *lastlevel = myBaseLevel;
5858  for (level = myNumLevels-1; level >= 0; level--)
5859  {
5860  // Find our resolutions.
5861  int xres = (lastlevel->getXRes() + 1) >> 1;
5862  int yres = (lastlevel->getYRes() + 1) >> 1;
5863  int zres = (lastlevel->getZRes() + 1) >> 1;
5864 
5865  levels[level] = new UT_VoxelArray<T>;
5866  levels[level]->size(xres, yres, zres);
5867 
5868  downsample(*levels[level], *lastlevel, function);
5869 
5870  lastlevel = levels[level];
5871  }
5872  }
5873 }
5874 
5875 template <typename T>
5876 void
5879  const UT_VoxelArray<T> &src,
5880  mipmaptype function,
5881  const UT_JobInfo &info)
5882 {
5883  UT_VoxelArrayIterator<T> vit(&dst);
5884 
5885  vit.splitByTile(info);
5886  vit.setCompressOnExit(true);
5887 
5888  for (vit.rewind(); !vit.atEnd(); vit.advance())
5889  {
5890  int x = 2*vit.x();
5891  int y = 2*vit.y();
5892  int z = 2*vit.z();
5893  T sam[8];
5894 
5895  if (src.extractSample(x, y, z, sam))
5896  {
5897  vit.setValue(sam[0]);
5898  }
5899  else
5900  {
5901  vit.setValue(
5902  mixValues(
5903  mixValues(
5904  mixValues(sam[0], sam[1], function),
5905  mixValues(sam[2], sam[3], function), function),
5906  mixValues(
5907  mixValues(sam[4], sam[5], function),
5908  mixValues(sam[6], sam[7], function), function),
5909  function));
5910  }
5911  }
5912 }
5913 
5914 
5915 template <typename T>
5916 int64
5917 UT_VoxelMipMap<T>::getMemoryUsage(bool inclusive) const
5918 {
5919  int64 mem = inclusive ? sizeof(*this) : 0;
5920 
5921  mem += myLevels.getMemoryUsage(false);
5922  mem += myLevels.entries() * myNumLevels * sizeof(UT_VoxelArray<T>*);
5923  for (exint j = 0; j < myLevels.entries(); j++)
5924  {
5925  for (int i = 0; i < myNumLevels; i++)
5926  mem += myLevels(j)[i]->getMemoryUsage(true);
5927  }
5928 
5929  return mem;
5930 }
5931 
5932 template <typename T>
5933 void
5934 UT_VoxelMipMap<T>::traverseTopDown(Callback function, void *data) const
5935 {
5936  doTraverse(0, 0, 0, 0, function, data);
5937 }
5938 
5939 template <typename T>
5940 void
5941 UT_VoxelMipMap<T>::doTraverse(int x, int y, int z, int level,
5942  Callback function, void *data) const
5943 {
5944  UT_StackBuffer<T> tval(myLevels.entries());
5945  bool isfinal;
5946  UT_VoxelArray<T> *vox;
5947  int shift;
5948 
5949  if (level == myNumLevels)
5950  {
5951  isfinal = true;
5952  vox = myBaseLevel;
5953  tval[0] = (*vox)(x, y, z);
5954  for (int i = 1; i < myLevels.entries(); i++)
5955  tval[i] = tval[0];
5956  }
5957  else
5958  {
5959  isfinal = false;
5960  for (int i = 0; i < myLevels.entries(); i++)
5961  {
5962  vox = myLevels(i)[level];
5963  tval[i] = (*vox)(x, y, z);
5964  }
5965  }
5966 
5967  shift = myNumLevels - level;
5968 
5969  UT_BoundingBox box;
5970 
5971  box.initBounds(SYSmin(x << shift, myBaseLevel->getXRes()),
5972  SYSmin(y << shift, myBaseLevel->getYRes()),
5973  SYSmin(z << shift, myBaseLevel->getZRes()));
5974  box.enlargeBounds(SYSmin((x+1) << shift, myBaseLevel->getXRes()),
5975  SYSmin((y+1) << shift, myBaseLevel->getYRes()),
5976  SYSmin((z+1) << shift, myBaseLevel->getZRes()));
5977 
5978  if (!function(tval, box, isfinal, data))
5979  {
5980  // Function asked for an early exit.
5981  // Give it.
5982  return;
5983  }
5984 
5985  // We now want to proceed to the next level.
5986  if (isfinal)
5987  return;
5988 
5989  level++;
5990  shift--;
5991  x <<= 1;
5992  y <<= 1;
5993  z <<= 1;
5994 
5995  bool xinc, yinc, zinc;
5996 
5997  // Determine which of our children are valid.
5998  if ( ((x+1) << shift) < myBaseLevel->getXRes() )
5999  xinc = true;
6000  else
6001  xinc = false;
6002  if ( ((y+1) << shift) < myBaseLevel->getYRes() )
6003  yinc = true;
6004  else
6005  yinc = false;
6006  if ( ((z+1) << shift) < myBaseLevel->getZRes() )
6007  zinc = true;
6008  else
6009  zinc = false;
6010 
6011  //
6012  // Traverse all valid children. Note that this is deliberately a gray
6013  // order traversal for full nodes.
6014  //
6015 
6016  doTraverse(x, y, z, level, function, data);
6017 
6018  if (yinc)
6019  {
6020  doTraverse(x, y+1, z, level, function, data);
6021  if (zinc)
6022  doTraverse(x, y+1, z+1, level, function, data);
6023  }
6024  if (zinc)
6025  doTraverse(x, y, z+1, level, function, data);
6026 
6027  if (xinc)
6028  {
6029  if (zinc)
6030  doTraverse(x+1, y, z+1, level, function, data);
6031  doTraverse(x+1, y, z, level, function, data);
6032  if (yinc)
6033  {
6034  doTraverse(x+1, y+1, z, level, function, data);
6035  if (zinc)
6036  doTraverse(x+1, y+1, z+1, level, function, data);
6037  }
6038  }
6039 }
6040 
6041 template <typename T>
6042 template <typename OP>
6043 void
6044 UT_VoxelMipMap<T>::traverseTopDown(OP &op) const
6045 {
6046  doTraverse(0, 0, 0, numLevels()-1, op);
6047 }
6048 
6049 template <typename T>
6050 template <typename OP>
6051 void
6052 UT_VoxelMipMap<T>::doTraverse(int x, int y, int z, int level,
6053  OP &op) const
6054 {
6055  int shift;
6056 
6057  shift = level;
6058 
6059  UT_BoundingBoxI box;
6060 
6061  box.initBounds(SYSmin(x << shift, myBaseLevel->getXRes()),
6062  SYSmin(y << shift, myBaseLevel->getYRes()),
6063  SYSmin(z << shift, myBaseLevel->getZRes()));
6064  box.enlargeBounds(SYSmin((x+1) << shift, myBaseLevel->getXRes()),
6065  SYSmin((y+1) << shift, myBaseLevel->getYRes()),
6066  SYSmin((z+1) << shift, myBaseLevel->getZRes()));
6067 
6068  if (!op(box, level))
6069  {
6070  // Function asked for an early exit.
6071  // Give it.
6072  return;
6073  }
6074 
6075  level--;
6076  // We now want to proceed to the next level.
6077  if (level < 0)
6078  return;
6079 
6080  x <<= 1;
6081  y <<= 1;
6082  z <<= 1;
6083 
6084  bool xinc, yinc, zinc;
6085 
6086  // Determine which of our children are valid.
6087  if ( ((x+1) << level) < myBaseLevel->getXRes() )
6088  xinc = true;
6089  else
6090  xinc = false;
6091  if ( ((y+1) << level) < myBaseLevel->getYRes() )
6092  yinc = true;
6093  else
6094  yinc = false;
6095  if ( ((z+1) << level) < myBaseLevel->getZRes() )
6096  zinc = true;
6097  else
6098  zinc = false;
6099 
6100  //
6101  // Traverse all valid children. Note that this is deliberately a gray
6102  // order traversal for full nodes.
6103  //
6104 
6105  doTraverse(x, y, z, level, op);
6106 
6107  if (yinc)
6108  {
6109  doTraverse(x, y+1, z, level, op);
6110  if (zinc)
6111  doTraverse(x, y+1, z+1, level, op);
6112  }
6113  if (zinc)
6114  doTraverse(x, y, z+1, level, op);
6115 
6116  if (xinc)
6117  {
6118  if (zinc)
6119  doTraverse(x+1, y, z+1, level, op);
6120  doTraverse(x+1, y, z, level, op);
6121  if (yinc)
6122  {
6123  doTraverse(x+1, y+1, z, level, op);
6124  if (zinc)
6125  doTraverse(x+1, y+1, z+1, level, op);
6126  }
6127  }
6128 }
6129 
6130 template <typename T>
6131 template <typename OP>
6132 void
6133 UT_VoxelMipMap<T>::traverseTopDownSorted(OP &op) const
6134 {
6135  doTraverseSorted(0, 0, 0, numLevels()-1, op);
6136 }
6137 
6138 class ut_VoxelMipMapSortCompare
6139 {
6140 public:
6141  float value;
6142  int key;
6143 };
6144 
6145 inline bool operator<(const ut_VoxelMipMapSortCompare &lhs, const ut_VoxelMipMapSortCompare &rhs)
6146 {
6147  return lhs.value < rhs.value;
6148 }
6149 
6150 template <typename T>
6151 template <typename OP>
6152 void
6153 UT_VoxelMipMap<T>::doTraverseSorted(int x, int y, int z, int level,
6154  OP &op) const
6155 {
6156  UT_BoundingBoxI box;
6157 
6158  box.initBounds(SYSmin(x << level, myBaseLevel->getXRes()),
6159  SYSmin(y << level, myBaseLevel->getYRes()),
6160  SYSmin(z << level, myBaseLevel->getZRes()));
6161  box.enlargeBounds(SYSmin((x+1) << level, myBaseLevel->getXRes()),
6162  SYSmin((y+1) << level, myBaseLevel->getYRes()),
6163  SYSmin((z+1) << level, myBaseLevel->getZRes()));
6164 
6165  if (!op(box, level))
6166  {
6167  // Function asked for an early exit.
6168  // Give it.
6169  return;
6170  }
6171 
6172  level--;
6173  // We now want to proceed to the next level.
6174  if (level < 0)
6175  return;
6176 
6177  x <<= 1;
6178  y <<= 1;
6179  z <<= 1;
6180 
6181  bool xinc, yinc, zinc;
6182 
6183  // Determine which of our children are valid.
6184  if ( ((x+1) << level) < myBaseLevel->getXRes() )
6185  xinc = true;
6186  else
6187  xinc = false;
6188  if ( ((y+1) << level) < myBaseLevel->getYRes() )
6189  yinc = true;
6190  else
6191  yinc = false;
6192  if ( ((z+1) << level) < myBaseLevel->getZRes() )
6193  zinc = true;
6194  else
6195  zinc = false;
6196 
6197  UT_BoundingBoxI boxes[8];
6198  int numboxes = 0;
6199 
6200  // Build all of our bounding boxes.
6201  for (int dz = 0; dz < 2; dz++)
6202  {
6203  if (dz && !zinc)
6204  continue;
6205  for (int dy = 0; dy < 2; dy++)
6206  {
6207  if (dy && !yinc)
6208  continue;
6209  for (int dx = 0; dx < 2; dx++)
6210  {
6211  if (dx && !xinc)
6212  continue;
6213  boxes[numboxes].initBounds(
6214  SYSmin((x+dx) << level, myBaseLevel->getXRes()),
6215  SYSmin((y+dy) << level, myBaseLevel->getYRes()),
6216  SYSmin((z+dz) << level, myBaseLevel->getZRes()));
6217  boxes[numboxes].enlargeBounds(
6218  SYSmin((x+dx+1) << level, myBaseLevel->getXRes()),
6219  SYSmin((y+dy+1) << level, myBaseLevel->getYRes()),
6220  SYSmin((z+dz+1) << level, myBaseLevel->getZRes()));
6221  numboxes++;
6222  }
6223  }
6224  }
6225 
6226  ut_VoxelMipMapSortCompare sortstats[8];
6227  for (int i = 0; i < numboxes; i++)
6228  {
6229  sortstats[i].value = op.sortValue(boxes[i], level);
6230  sortstats[i].key = i;
6231  }
6232  std::stable_sort(sortstats, &sortstats[numboxes]);
6233 
6234  for (int i = 0; i < numboxes; i++)
6235  {
6236  int whichbox = sortstats[i].key;
6237  doTraverseSorted(boxes[whichbox](0,0)>>level, boxes[whichbox](1,0)>>level, boxes[whichbox](2,0)>>level, level, op);
6238  }
6239 }
6240 
6241 template <typename T>
6242 void
6243 UT_VoxelMipMap<T>::initializePrivate()
6244 {
6245  myNumLevels = 0;
6246  myBaseLevel = 0;
6247  myOwnBase = false;
6248 }
6249 
6250 template <typename T>
6251 void
6252 UT_VoxelMipMap<T>::destroyPrivate()
6253 {
6254  for (exint i = 0; i < myLevels.entries(); i++)
6255  {
6256  for (exint level = 0; level < myNumLevels; level++)
6257  delete myLevels(i)[level];
6258  delete [] myLevels(i);
6259  }
6260  myLevels.entries(0);
6261 
6262  if (myOwnBase)
6263  delete myBaseLevel;
6264 
6265  initializePrivate();
6266 }
6267 
6268 //
6269 // UT_VoxelArrayIterator implementation
6270 //
6271 template <typename T>
6272 UT_VoxelArrayIterator<T>::UT_VoxelArrayIterator()
6273 {
6274  myArray = 0;
6275  myHandle.resetHandle();
6276  myCurTile = -1;
6277  myShouldCompressOnExit = false;
6278  myUseTileList = false;
6279  myJobInfo = 0;
6280  myInterrupt = 0;
6281 }
6282 
6283 template <typename T>
6284 UT_VoxelArrayIterator<T>::UT_VoxelArrayIterator(UT_VoxelArray<T> *vox)
6285 {
6286  myShouldCompressOnExit = false;
6287  myUseTileList = false;
6288  myJobInfo = 0;
6289  setArray(vox);
6290  myInterrupt = 0;
6291 }
6292 
6293 template <typename T>
6294 UT_VoxelArrayIterator<T>::UT_VoxelArrayIterator(UT_COWReadHandle<UT_VoxelArray<T> > handle)
6295 {
6296  myShouldCompressOnExit = false;
6297  myUseTileList = false;
6298  myJobInfo = 0;
6299  setHandle(handle);
6300  myInterrupt = 0;
6301 }
6302 
6303 template <typename T>
6304 UT_VoxelArrayIterator<T>::~UT_VoxelArrayIterator()
6305 {
6306 }
6307 
6308 template <typename T>
6309 void
6310 UT_VoxelArrayIterator<T>::setPartialRange(int idx, int numranges)
6311 {
6312  int numtiles;
6313  fpreal tileperrange;
6314 
6315  // Must already have been set.
6316  UT_ASSERT(myArray);
6317  if (!myArray)
6318  return;
6319 
6320  // Divide our tiles into numranges groups.
6321  numtiles = myArray->numTiles();
6322 
6323  // If we are using a tile list, use it instead
6324  if (myUseTileList)
6325  numtiles = myTileList.entries();
6326 
6327  // Trivial case.
6328  if (!numtiles)
6329  {
6330  myTileStart = 0;
6331  myTileEnd = 0;
6332  return;
6333  }
6334 
6335  // Sanity check idx.
6336  if (idx < 0 || idx >= numranges)
6337  {
6338  UT_ASSERT(!"Idx out of bounds");
6339  myTileStart = -1;
6340  myTileEnd = -1;
6341  return;
6342  }
6343 
6344  // Sanity test numranges
6345  if (numranges < 1)
6346  {
6347  UT_ASSERT(!"Invalid range count!");
6348  numranges = 1;
6349  }
6350 
6351  // Compute tile per range.
6352  // We use floating point so, if you have 15 tiles and
6353  // 16 processors, you don't decide to do all 15
6354  // tiles on one processor.
6355  tileperrange = (fpreal)numtiles / (fpreal)numranges;
6356 
6357  myTileStart = (int) SYSfloor(idx * tileperrange);
6358  myTileEnd = (int) SYSfloor((idx+1) * tileperrange);
6359 
6360  // Special case to ensure we always get the last tile,
6361  // despite the evils of floating point.
6362  if (idx == numranges-1)
6363  myTileEnd = numtiles;
6364 
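 // Worked example (illustrative): 15 tiles over 16 ranges gives
 // tileperrange = 0.9375, so range 0 gets the empty span [0,0) and each of
 // ranges 1..15 gets exactly one tile, instead of one range taking all 15.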
6365  UT_ASSERT(myTileStart >= 0);
6366  UT_ASSERT(myTileEnd <= numtiles);
6367 }
6368 
6369 template <typename T>
6370 void
6371 UT_VoxelArrayIterator<T>::splitByTile(const UT_JobInfo &info)
6372 {
6373  // Must already have been set.
6374  UT_ASSERT(myArray);
6375  if (!myArray)
6376  return;
6377 
6378  // If there is one thread, don't bother
6379  if (info.numJobs() == 1)
6380  myJobInfo = 0;
6381  else
6382  myJobInfo = &info;
6383 }
6384 
6385 template <typename T>
6386 void
6387 UT_VoxelArrayIterator<T>::restrictToBBox(const UT_BoundingBox &bbox)
6388 {
6389  UT_Vector3 pmin, pmax, vmin, vmax;
6390 
6391  pmin = bbox.minvec();
6392  pmax = bbox.maxvec();
6393 
6394  pmin.x() = SYSmax(pmin.x(), 0.0f);
6395  pmin.y() = SYSmax(pmin.y(), 0.0f);
6396  pmin.z() = SYSmax(pmin.z(), 0.0f);
6397  pmax.x() = SYSmin(pmax.x(), 1.0f);
<