UT_VoxelArray.C
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.C ( UT Library, C++)
7  *
8  * COMMENTS:
9  * Tiled Voxel Array Implementation.
10  */
11 
12 #include "UT_VoxelArray.h"
13 
14 #include "UT_Array.h"
15 #include "UT_Assert.h"
16 #include "UT_BoundingBox.h"
17 #include "UT_COW.h"
18 #include "UT_Debug.h"
19 #include "UT_Filter.h"
20 #include "UT_FilterType.h"
21 #include "UT_Interrupt.h"
22 #include "UT_IStream.h"
23 #include "UT_JSONDefines.h"
24 #include "UT_JSONParser.h"
25 #include "UT_JSONWriter.h"
26 #include "UT_NTStreamUtil.h"
27 #include "UT_ParallelUtil.h"
28 #include "UT_SharedMemoryManager.h"
29 #include "UT_StackBuffer.h"
30 #include "UT_String.h"
31 #include "UT_ThreadedAlgorithm.h"
32 #include "UT_ValArray.h"
33 #include "UT_Vector2.h"
34 #include "UT_Vector3.h"
35 #include "UT_Vector4.h"
36 #include "UT_VectorTypes.h"
37 #include "UT_VoxelArrayJSON.h"
38 #include "UT_WorkBuffer.h"
39 #include <SYS/SYS_Compiler.h>
40 #include <SYS/SYS_Floor.h>
41 #include <SYS/SYS_Math.h>
42 #include <SYS/SYS_SharedMemory.h>
43 #include <SYS/SYS_Types.h>
44 
45 #include <algorithm>
46 #include <iostream>
47 
48 #include <string.h>
49 
50 ///
51 /// fpreal16 conversion functions
52 ///
53 inline fpreal16
54 UTvoxelConvertFP16(fpreal16 a) { return a; }
55 inline fpreal16
56 UTvoxelConvertFP16(fpreal32 a) { return a; }
57 inline fpreal16
58 UTvoxelConvertFP16(fpreal64 a) { return a; }
59 inline fpreal16
60 UTvoxelConvertFP16(int8 a) { return a; }
61 inline fpreal16
62 UTvoxelConvertFP16(int16 a) { return a; }
63 inline fpreal16
64 UTvoxelConvertFP16(int32 a) { return a; }
65 inline fpreal16
66 UTvoxelConvertFP16(int64 a) { return a; }
67 
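// The overload set above exists so templated code (e.g. makeFpreal16()
// below) can half-convert any supported scalar type with one spelling.
// A minimal sketch of what that buys us; the function and values here are
// hypothetical, not part of this file:
#if 0
static void
UTvoxelConvertFP16Sketch()
{
    fpreal16 a = UTvoxelConvertFP16(1.5F);        // fpreal32 overload
    fpreal16 b = UTvoxelConvertFP16(int64(42));   // int64 overload
}
#endif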
68 ///
69 /// VoxelTileCompress definitions
70 ///
71 template <typename T>
72 void
73 UT_VoxelTileCompress<T>::findMinMax(const UT_VoxelTile<T> &tile,
74  T &min, T &max) const
75 {
76  int x, y, z;
77 
78  min = getValue(tile, 0, 0, 0);
79  max = min;
80 
81  // Generic approach is to just use the getValue() ability.
82  for (z = 0; z < tile.zres(); z++)
83  {
84  for (y = 0; y < tile.yres(); y++)
85  {
86  for (x = 0; x < tile.xres(); x++)
87  {
88  tile.expandMinMax(getValue(tile, x, y, z), min, max);
89  }
90  }
91  }
92  return;
93 }
94 
95 //
96 // VoxelTile definitions.
97 //
98 
99 template <typename T>
100 UT_VoxelTile<T>::UT_VoxelTile()
101 {
102  myRes[0] = 0;
103  myRes[1] = 0;
104  myRes[2] = 0;
105 
106  myCompressionType = COMPRESS_CONSTANT;
107  myForeignData = false;
108 
109  if (sizeof(T) <= sizeof(T*))
110  {
111  myData = 0;
112  }
113  else
114  {
115  myData = UT_VOXEL_ALLOC(sizeof(T));
116 
117  // It is not accidental that this is not a static_cast!
118  // There isn't a UT_Vector3(fpreal) constructor (as we have a foolish
119  // UT_Vector3(fpreal *) constructor that would cause massive problems)
120  // but there is a UT_Vector3 operator=(fpreal)!
121  ((T *)myData)[0] = 0;
122  }
123 }
124 
125 template <typename T>
126 UT_VoxelTile<T>::~UT_VoxelTile()
127 {
128  freeData();
129 }
130 
131 template <typename T>
132 UT_VoxelTile<T>::UT_VoxelTile(const UT_VoxelTile<T> &src)
133 {
134  myData = 0;
135 
136  // Use assignment operator.
137  *this = src;
138 }
139 
140 template <typename T>
141 const UT_VoxelTile<T> &
142 UT_VoxelTile<T>::operator=(const UT_VoxelTile<T> &src)
143 {
144  if (&src == this)
145  return *this;
146 
147  freeData();
148 
149  myRes[0] = src.myRes[0];
150  myRes[1] = src.myRes[1];
151  myRes[2] = src.myRes[2];
152 
153  myCompressionType = src.myCompressionType;
154  switch (myCompressionType)
155  {
156  case COMPRESS_RAW:
157  myData = UT_VOXEL_ALLOC(
158  sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
159  memcpy(myData, src.myData, sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
160  break;
161  case COMPRESS_CONSTANT:
162  if (inlineConstant())
163  {
164  myData = src.myData;
165  }
166  else
167  {
168  myData = UT_VOXEL_ALLOC(sizeof(T));
169  memcpy(myData, src.myData, sizeof(T));
170  }
171  break;
172  case COMPRESS_RAWFULL:
173  myData = UT_VOXEL_ALLOC(
174  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
175  memcpy(myData, src.myData,
176  sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
177  break;
178  case COMPRESS_FPREAL16:
179  {
180  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
181  myData = UT_VOXEL_ALLOC(
182  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2] * tuple_size);
183  memcpy(myData, src.myData,
184  sizeof(fpreal16) * myRes[0] * myRes[1] * myRes[2] * tuple_size);
185  break;
186  }
187  default:
188  {
189  UT_VoxelTileCompress<T> *engine;
190 
191  engine = getCompressionEngine(myCompressionType);
192  myData = UT_VOXEL_ALLOC(engine->getDataLength(src));
193  memcpy(myData, src.myData, engine->getDataLength(src));
194  break;
195  }
196  }
197 
198  return *this;
199 }
200 
201 template <typename T>
202 bool
203 UT_VoxelTile<T>::writeThrough(int x, int y, int z, T t)
204 {
205  switch (myCompressionType)
206  {
207  case COMPRESS_RAW:
208  // Do the assignment.
209  ((T *)myData)[ ((z * myRes[1]) + y) * myRes[0] + x ] = t;
210  return true;
211 
212  case COMPRESS_CONSTANT:
213  if (rawConstVal() == t)
214  {
215  // Trivially true.
216  return true;
217  }
218  return false;
219 
220  case COMPRESS_RAWFULL:
221  ((T *)myData)[ ((z * TILESIZE) + y) * TILESIZE + x ] = t;
222  return true;
223 
224  case COMPRESS_FPREAL16:
225  return false;
226  }
227 
228  // Use the compression engine.
229  UT_VoxelTileCompress<T> *engine;
230 
231  engine = getCompressionEngine(myCompressionType);
232  return engine->writeThrough(*this, x, y, z, t);
233 }
234 
235 template <typename T>
236 T
237 UT_VoxelTile<T>::operator()(int x, int y, int z) const
238 {
239  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
240  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
241 
242  switch (myCompressionType)
243  {
244  case COMPRESS_RAW:
245  return ((T *)myData)[
246  ((z * myRes[1]) + y) * myRes[0] + x ];
247 
248  case COMPRESS_CONSTANT:
249  return rawConstVal();
250 
251  case COMPRESS_RAWFULL:
252  return ((T *)myData)[
253  ((z * TILESIZE) + y) * TILESIZE + x ];
254 
255  case COMPRESS_FPREAL16:
256  {
257  static constexpr UT_FromUnbounded<T> convertFromFP16{};
258 
259  fpreal16* data = (fpreal16*) myData;
260  int offset = ((z * myRes[1] + y) * myRes[0] + x)
261  * UT_FixedVectorTraits<T>::TupleSize;
262  T result = convertFromFP16(data + offset);
263  return result;
264  }
265  }
266 
267  // By default use the compression engine.
268  UT_VoxelTileCompress<T> *engine;
269 
270  engine = getCompressionEngine(myCompressionType);
271  return engine->getValue(*this, x, y, z);
272 }
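// Addressing recap for the cases above: COMPRESS_RAW stores voxel (x, y, z)
// at flat index ((z * yres + y) * xres + x); for a hypothetical 4x4x4 tile,
// (1, 2, 3) lands at ((3*4 + 2)*4 + 1) == 57. COMPRESS_RAWFULL fixes the
// x/y strides at TILESIZE, so lookups need not consult myRes at all.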
273 
274 template <typename T>
275 T
276 UT_VoxelTile<T>::lerp(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
277 {
278  T vx, vx1, vy, vy1, vz;
279 
280  switch (myCompressionType)
281  {
282  case COMPRESS_RAW:
283  {
284  T *data = (T *) myData;
285  int offset = (z * myRes[1] + y) * myRes[0] + x;
286  int yinc = myRes[0];
287  int zinc = myRes[0] * myRes[1];
288 
289  // Lerp x:x+1, y, z
290  vx = lerpValues(data[offset], data[offset+1], fx);
291  // Lerp x:x+1, y+1, z
292  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
293 
294  // Lerp x:x+1, y:y+1, z
295  vy = lerpValues(vx, vx1, fy);
296 
297  // Lerp x:x+1, y, z+1
298  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
299  // Lerp x:x+1, y+1, z+1
300  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
301 
302  // Lerp x:x+1, y:y+1, z+1
303  vy1 = lerpValues(vx, vx1, fy);
304 
305  // Lerp x:x+1, y:y+1, z:z+1
306  vz = lerpValues(vy, vy1, fz);
307  break;
308  }
309  case COMPRESS_RAWFULL:
310  {
311  T *data = (T *) myData;
312  int offset = (z * TILESIZE + y) * TILESIZE + x;
313  int yinc = TILESIZE;
314  int zinc = TILESIZE * TILESIZE;
315 
316  // Lerp x:x+1, y, z
317  vx = lerpValues(data[offset], data[offset+1], fx);
318  // Lerp x:x+1, y+1, z
319  vx1 = lerpValues(data[offset+yinc], data[offset+yinc+1], fx);
320 
321  // Lerp x:x+1, y:y+1, z
322  vy = lerpValues(vx, vx1, fy);
323 
324  // Lerp x:x+1, y, z+1
325  vx = lerpValues(data[offset+zinc], data[offset+zinc+1], fx);
326  // Lerp x:x+1, y+1, z+1
327  vx1 = lerpValues(data[offset+zinc+yinc], data[offset+zinc+yinc+1], fx);
328 
329  // Lerp x:x+1, y:y+1, z+1
330  vy1 = lerpValues(vx, vx1, fy);
331 
332  // Lerp x:x+1, y:y+1, z:z+1
333  vz = lerpValues(vy, vy1, fz);
334  break;
335  }
336  case COMPRESS_FPREAL16:
337  {
338  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
339  static constexpr UT_FromUnbounded<T> convertFromFP16{};
340 
341  fpreal16 *data = (fpreal16 *) myData;
342  int xinc = tuple_size;
343  int yinc = myRes[0] * xinc;
344  int zinc = myRes[1] * yinc;
345  int offset = z * zinc + y * yinc + x * xinc;
346  fpreal16 vx, vx1, vy, vy1;
347  fpreal16 result[tuple_size];
348 
349  for (int j = 0; j < tuple_size; j++, offset++)
350  {
351  // Lerp x:x+1, y, z
352  vx = SYSlerp(data[offset], data[offset+xinc], fx);
353  // Lerp x:x+1, y+1, z
354  vx1 = SYSlerp(data[offset+yinc], data[offset+yinc+xinc], fx);
355 
356  // Lerp x:x+1, y:y+1, z
357  vy = SYSlerp(vx, vx1, fy);
358 
359  // Lerp x:x+1, y, z+1
360  vx = SYSlerp(data[offset+zinc], data[offset+zinc+xinc], fx);
361  // Lerp x:x+1, y+1, z+1
362  vx1 = SYSlerp(data[offset+zinc+yinc], data[offset+zinc+yinc+xinc], fx);
363 
364  // Lerp x:x+1, y:y+1, z+1
365  vy1 = SYSlerp(vx, vx1, fy);
366 
367  // Lerp x:x+1, y:y+1, z:z+1
368  result[j] = SYSlerp(vy, vy1, fz);
369  }
370 
371  return convertFromFP16(result);
372  }
373  case COMPRESS_CONSTANT:
374  {
375  // This is quite trivial to do a trilerp on.
376  vz = rawConstVal();
377  break;
378  }
379 
380  default:
381  {
382  UT_VoxelTileCompress<T> *engine;
383 
384  engine = getCompressionEngine(myCompressionType);
385  // Lerp x:x+1, y, z
386  vx = lerpValues(engine->getValue(*this, x, y, z),
387  engine->getValue(*this, x+1, y, z),
388  fx);
389  // Lerp x:x+1, y+1, z
390  vx1 = lerpValues(engine->getValue(*this, x, y+1, z),
391  engine->getValue(*this, x+1, y+1, z),
392  fx);
393 
394  // Lerp x:x+1, y:y+1, z
395  vy = lerpValues(vx, vx1, fy);
396 
397  // Lerp x:x+1, y, z+1
398  vx = lerpValues(engine->getValue(*this, x, y, z+1),
399  engine->getValue(*this, x+1, y, z+1),
400  fx);
401  // Lerp x:x+1, y+1, z+1
402  vx1 = lerpValues(engine->getValue(*this, x, y+1, z+1),
403  engine->getValue(*this, x+1, y+1, z+1),
404  fx);
405 
406  // Lerp x:x+1, y:y+1, z+1
407  vy1 = lerpValues(vx, vx1, fy);
408 
409  // Lerp x:x+1, y:y+1, z:z+1
410  vz = lerpValues(vy, vy1, fz);
411  break;
412  }
413  }
414 
415  return vz;
416 }
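// The raw paths above are the standard trilinear decomposition: four lerps
// along x, two along y, one along z, i.e.
//   v = lerp(lerp(lerp(v000, v100, fx), lerp(v010, v110, fx), fy),
//            lerp(lerp(v001, v101, fx), lerp(v011, v111, fx), fy), fz).
// Note the tile does no clamping, so (x+1, y+1, z+1) must be valid indices;
// boundary samples are presumably routed elsewhere by the caller.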
417 
418 template <typename T>
419 template <int AXIS2D>
420 T
421 UT_VoxelTile<T>::lerpAxis(int x, int y, int z, fpreal32 fx, fpreal32 fy, fpreal32 fz) const
422 {
423  T vx, vx1, vy, vy1, vz;
424 
425  switch (myCompressionType)
426  {
427  case COMPRESS_RAW:
428  {
429  T *data = (T *) myData;
430  int offset = (z * myRes[1] + y) * myRes[0] + x;
431  int yinc = myRes[0];
432  int zinc = myRes[0] * myRes[1];
433 
434  // Lerp x:x+1, y, z
435  if (AXIS2D != 0)
436  vx = lerpValues(data[offset],
437  data[offset+1],
438  fx);
439  else
440  vx = data[offset];
441 
442  if (AXIS2D != 1)
443  {
444  // Lerp x:x+1, y+1, z
445  if (AXIS2D != 0)
446  vx1= lerpValues(data[offset+yinc],
447  data[offset+yinc+1],
448  fx);
449  else
450  vx1 = data[offset+yinc];
451  // Lerp x:x+1, y:y+1, z
452  vy = lerpValues(vx, vx1, fy);
453  }
454  else
455  vy = vx;
456 
457  if (AXIS2D != 2)
458  {
459  // Lerp x:x+1, y, z+1
460  if (AXIS2D != 0)
461  vx = lerpValues(data[offset+zinc],
462  data[offset+zinc+1],
463  fx);
464  else
465  vx = data[offset+zinc];
466 
467  if (AXIS2D != 1)
468  {
469  // Lerp x:x+1, y+1, z+1
470  if (AXIS2D != 0)
471  vx1= lerpValues(data[offset+zinc+yinc],
472  data[offset+zinc+yinc+1],
473  fx);
474  else
475  vx1 = data[offset+zinc+yinc];
476  // Lerp x:x+1, y:y+1, z+1
477  vy1 = lerpValues(vx, vx1, fy);
478  }
479  else
480  vy1 = vx;
481 
482  // Lerp x:x+1, y:y+1, z:z+1
483  vz = lerpValues(vy, vy1, fz);
484  }
485  else
486  vz = vy;
487  break;
488  }
489  case COMPRESS_RAWFULL:
490  {
491  T *data = (T *) myData;
492  int offset = (z * TILESIZE + y) * TILESIZE + x;
493  int yinc = TILESIZE;
494  int zinc = TILESIZE * TILESIZE;
495 
496  // Lerp x:x+1, y, z
497  if (AXIS2D != 0)
498  vx = lerpValues(data[offset],
499  data[offset+1],
500  fx);
501  else
502  vx = data[offset];
503 
504  if (AXIS2D != 1)
505  {
506  // Lerp x:x+1, y+1, z
507  if (AXIS2D != 0)
508  vx1= lerpValues(data[offset+yinc],
509  data[offset+yinc+1],
510  fx);
511  else
512  vx1 = data[offset+yinc];
513  // Lerp x:x+1, y:y+1, z
514  vy = lerpValues(vx, vx1, fy);
515  }
516  else
517  vy = vx;
518 
519  if (AXIS2D != 2)
520  {
521  // Lerp x:x+1, y, z+1
522  if (AXIS2D != 0)
523  vx = lerpValues(data[offset+zinc],
524  data[offset+zinc+1],
525  fx);
526  else
527  vx = data[offset+zinc];
528 
529  if (AXIS2D != 1)
530  {
531  // Lerp x:x+1, y+1, z+1
532  if (AXIS2D != 0)
533  vx1= lerpValues(data[offset+zinc+yinc],
534  data[offset+zinc+yinc+1],
535  fx);
536  else
537  vx1 = data[offset+zinc+yinc];
538  // Lerp x:x+1, y:y+1, z+1
539  vy1 = lerpValues(vx, vx1, fy);
540  }
541  else
542  vy1 = vx;
543 
544  // Lerp x:x+1, y:y+1, z:z+1
545  vz = lerpValues(vy, vy1, fz);
546  }
547  else
548  vz = vy;
549  break;
550  }
551  case COMPRESS_FPREAL16:
552  {
553  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
554  static constexpr UT_FromUnbounded<T> convertFromFP16{};
555 
556  fpreal16 *data = (fpreal16 *) myData;
557  int xinc = tuple_size;
558  int yinc = myRes[0] * xinc;
559  int zinc = myRes[1] * yinc;
560  int offset = z * zinc + y * yinc + x * xinc;
561  fpreal16 vx, vx1, vy, vy1;
562  fpreal16 result[tuple_size];
563 
564  for (int j = 0; j < tuple_size; j++, offset++)
565  {
566  // Lerp x:x+1, y, z
567  if (AXIS2D != 0)
568  vx = SYSlerp(data[offset],
569  data[offset+xinc],
570  fx);
571  else
572  vx = data[offset];
573 
574  if (AXIS2D != 1)
575  {
576  // Lerp x:x+1, y+1, z
577  if (AXIS2D != 0)
578  vx1= SYSlerp(data[offset+yinc],
579  data[offset+yinc+xinc],
580  fx);
581  else
582  vx1 = data[offset+yinc];
583  // Lerp x:x+1, y:y+1, z
584  vy = SYSlerp(vx, vx1, fy);
585  }
586  else
587  vy = vx;
588 
589  if (AXIS2D != 2)
590  {
591  // Lerp x:x+1, y, z+1
592  if (AXIS2D != 0)
593  vx = SYSlerp(data[offset+zinc],
594  data[offset+zinc+xinc],
595  fx);
596  else
597  vx = data[offset+zinc];
598 
599  if (AXIS2D != 1)
600  {
601  // Lerp x:x+1, y+1, z+1
602  if (AXIS2D != 0)
603  vx1= SYSlerp(data[offset+zinc+yinc],
604  data[offset+zinc+yinc+xinc],
605  fx);
606  else
607  vx1 = data[offset+zinc+yinc];
608  // Lerp x:x+1, y:y+1, z+1
609  vy1 = SYSlerp(vx, vx1, fy);
610  }
611  else
612  vy1 = vx;
613 
614  // Lerp x:x+1, y:y+1, z:z+1
615  result[j] = SYSlerp(vy, vy1, fz);
616  }
617  else
618  result[j] = vy;
619  }
620 
621  return convertFromFP16(result);
622  }
623  case COMPRESS_CONSTANT:
624  {
625  // This is quite trivial to do a bilerp on.
626  vz = rawConstVal();
627  break;
628  }
629 
630  default:
631  {
632  UT_VoxelTileCompress<T> *engine;
633 
634  engine = getCompressionEngine(myCompressionType);
635  // Lerp x:x+1, y, z
636  if (AXIS2D != 0)
637  vx = lerpValues(engine->getValue(*this, x, y, z),
638  engine->getValue(*this, x+1, y, z),
639  fx);
640  else
641  vx = engine->getValue(*this, x, y, z);
642 
643  if (AXIS2D != 1)
644  {
645  // Lerp x:x+1, y+1, z
646  if (AXIS2D != 0)
647  vx1= lerpValues(engine->getValue(*this, x, y+1, z),
648  engine->getValue(*this, x+1, y+1, z),
649  fx);
650  else
651  vx1 = engine->getValue(*this, x, y+1, z);
652  // Lerp x:x+1, y:y+1, z
653  vy = lerpValues(vx, vx1, fy);
654  }
655  else
656  vy = vx;
657 
658  if (AXIS2D != 2)
659  {
660  // Lerp x:x+1, y, z+1
661  if (AXIS2D != 0)
662  vx = lerpValues(engine->getValue(*this, x, y, z+1),
663  engine->getValue(*this, x+1, y, z+1),
664  fx);
665  else
666  vx = engine->getValue(*this, x, y, z+1);
667 
668  if (AXIS2D != 1)
669  {
670  // Lerp x:x+1, y+1, z+1
671  if (AXIS2D != 0)
672  vx1= lerpValues(engine->getValue(*this, x, y+1, z+1),
673  engine->getValue(*this, x+1, y+1, z+1),
674  fx);
675  else
676  vx1 = engine->getValue(*this, x, y+1, z+1);
677  // Lerp x:x+1, y:y+1, z+1
678  vy1 = lerpValues(vx, vx1, fy);
679  }
680  else
681  vy1 = vx;
682 
683  // Lerp x:x+1, y:y+1, z:z+1
684  vz = lerpValues(vy, vy1, fz);
685  }
686  else
687  vz = vy;
688  break;
689  }
690  }
691 
692  return vz;
693 }
694 
695 template <typename T>
696 bool
697 UT_VoxelTile<T>::extractSample(int x, int y, int z, T *sample) const
698 {
699  switch (myCompressionType)
700  {
701  case COMPRESS_RAW:
702  {
703  T *data = (T *) myData;
704  int offset = (z * myRes[1] + y) * myRes[0] + x;
705  int yinc = myRes[0];
706  int zinc = myRes[0] * myRes[1];
707 
708  sample[0] = data[offset];
709  sample[1] = data[offset+1];
710  sample[2+0] = data[offset+yinc];
711  sample[2+1] = data[offset+yinc+1];
712  sample[4+0] = data[offset+zinc];
713  sample[4+1] = data[offset+zinc+1];
714  sample[4+2+0] = data[offset+zinc+yinc];
715  sample[4+2+1] = data[offset+zinc+yinc+1];
716  break;
717  }
718  case COMPRESS_RAWFULL:
719  {
720  T *data = (T *) myData;
721  int offset = (z * TILESIZE + y) * TILESIZE + x;
722  int yinc = TILESIZE;
723  int zinc = TILESIZE * TILESIZE;
724 
725  sample[0] = data[offset];
726  sample[1] = data[offset+1];
727  sample[2+0] = data[offset+yinc];
728  sample[2+1] = data[offset+yinc+1];
729  sample[4+0] = data[offset+zinc];
730  sample[4+1] = data[offset+zinc+1];
731  sample[4+2+0] = data[offset+zinc+yinc];
732  sample[4+2+1] = data[offset+zinc+yinc+1];
733  break;
734  }
735  case COMPRESS_FPREAL16:
736  {
737  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
738  static constexpr UT_FromUnbounded<T> convertFromFP16{};
739 
740  fpreal16 *data = (fpreal16 *) myData;
741  int xinc = tuple_size;
742  int yinc = myRes[0] * xinc;
743  int zinc = myRes[1] * yinc;
744  int offset = z * zinc + y * yinc + x * xinc;
745 
746  data += offset;
747  sample[0] = convertFromFP16(data);
748  sample[1] = convertFromFP16(data + xinc);
749  sample[2+0] = convertFromFP16(data + yinc);
750  sample[2+1] = convertFromFP16(data + yinc + xinc);
751  sample[4+0] = convertFromFP16(data + zinc);
752  sample[4+1] = convertFromFP16(data + zinc + xinc);
753  sample[4+2+0] = convertFromFP16(data + zinc + yinc);
754  sample[4+2+1] = convertFromFP16(data + zinc + yinc + xinc);
755  break;
756  }
757  case COMPRESS_CONSTANT:
758  {
759  sample[0] = rawConstVal();
760  return true;
761  }
762 
763  default:
764  {
765  UT_VoxelTileCompress<T> *engine;
766 
767  engine = getCompressionEngine(myCompressionType);
768  // Lerp x:x+1, y, z
769  sample[0] = engine->getValue(*this, x, y, z);
770  sample[1] = engine->getValue(*this, x+1, y, z);
771  sample[2+0] = engine->getValue(*this, x, y+1, z);
772  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
773  sample[4+0] = engine->getValue(*this, x, y, z+1);
774  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
775  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
776  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
777  break;
778  }
779  }
780  return false;
781 }
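// All extractSample* variants share the return convention used above:
// true means the tile is constant and only sample[0] was written, letting
// the caller skip the blend entirely. A hedged usage sketch ("tile" and
// the trilerp() helper are hypothetical):
#if 0
fpreal32 corners[8];
fpreal32 value;
if (tile.extractSample(x, y, z, corners))
    value = corners[0];                     // constant tile: one value
else
    value = trilerp(corners, fx, fy, fz);   // blend the 8 corners
#endif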
782 
783 template <typename T>
784 bool
785 UT_VoxelTile<T>::extractSamplePlus(int x, int y, int z, T *sample) const
786 {
787  switch (myCompressionType)
788  {
789  case COMPRESS_RAW:
790  {
791  T *data = (T *) myData;
792  int offset = (z * myRes[1] + y) * myRes[0] + x;
793  int yinc = myRes[0];
794  int zinc = myRes[0] * myRes[1];
795 
796  sample[0] = data[offset-1];
797  sample[1] = data[offset+1];
798  sample[2+0] = data[offset-yinc];
799  sample[2+1] = data[offset+yinc];
800  sample[4+0] = data[offset-zinc];
801  sample[4+1] = data[offset+zinc];
802  sample[6] = data[offset];
803  break;
804  }
805  case COMPRESS_RAWFULL:
806  {
807  T *data = (T *) myData;
808  int offset = (z * TILESIZE + y) * TILESIZE + x;
809  int yinc = TILESIZE;
810  int zinc = TILESIZE * TILESIZE;
811 
812  sample[0] = data[offset-1];
813  sample[1] = data[offset+1];
814  sample[2+0] = data[offset-yinc];
815  sample[2+1] = data[offset+yinc];
816  sample[4+0] = data[offset-zinc];
817  sample[4+1] = data[offset+zinc];
818  sample[6] = data[offset];
819  break;
820  }
821  case COMPRESS_FPREAL16:
822  {
823  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
824  static constexpr UT_FromUnbounded<T> convertFromFP16{};
825 
826  fpreal16 *data = (fpreal16 *) myData;
827  int xinc = tuple_size;
828  int yinc = myRes[0] * xinc;
829  int zinc = myRes[1] * yinc;
830  int offset = z * zinc + y * yinc + x * xinc;
831 
832  data += offset;
833  sample[0] = convertFromFP16(data - xinc);
834  sample[1] = convertFromFP16(data + xinc);
835  sample[2+0] = convertFromFP16(data - yinc);
836  sample[2+1] = convertFromFP16(data + yinc);
837  sample[4+0] = convertFromFP16(data - zinc);
838  sample[4+1] = convertFromFP16(data + zinc);
839  sample[6] = convertFromFP16(data);
840  break;
841  }
842  case COMPRESS_CONSTANT:
843  {
844  sample[0] = rawConstVal();
845  return true;
846  }
847 
848  default:
849  {
850  UT_VoxelTileCompress<T> *engine;
851 
852  engine = getCompressionEngine(myCompressionType);
853  // Lerp x:x+1, y, z
854  sample[0] = engine->getValue(*this, x-1, y, z);
855  sample[1] = engine->getValue(*this, x+1, y, z);
856  sample[2+0] = engine->getValue(*this, x, y-1, z);
857  sample[2+1] = engine->getValue(*this, x, y+1, z);
858  sample[4+0] = engine->getValue(*this, x, y, z-1);
859  sample[4+1] = engine->getValue(*this, x, y, z+1);
860  sample[6] = engine->getValue(*this, x, y, z);
861  break;
862  }
863  }
864  return false;
865 }
866 
867 #if 0
868 /// Implementation of this function has an error. The outer dz loop assumes that
869 /// the incoming offset is for y=0, as it immediately subtracts yinc to get to
870 /// y=-1; it then loops over the 3 y layers, adding yinc every time. After this
871 /// dz loop, yinc is subtracted thrice, bringing us back to y=-1. On the next
872 /// iteration of the dz loop, this violates the y=0 assumption.
873 /// There aren't any users of these methods in our own code.
874 template <typename T>
875 bool
876 UT_VoxelTile<T>::extractSampleCube(int x, int y, int z, T *sample) const
877 {
878  switch (myCompressionType)
879  {
880  case COMPRESS_RAW:
881  {
882  T *data = (T *) myData;
883  int offset = (z * myRes[1] + y) * myRes[0] + x;
884  int yinc = myRes[0];
885  int zinc = myRes[0] * myRes[1];
886  int sampidx = 0;
887 
888  offset -= zinc;
889  for (int dz = -1; dz <= 1; dz++)
890  {
891  offset -= yinc;
892  for (int dy = -1; dy <= 1; dy++)
893  {
894  sample[sampidx] = data[offset-1];
895  sample[sampidx+1] = data[offset];
896  sample[sampidx+2] = data[offset+1];
897  sampidx += 3;
898  offset += yinc;
899  }
900  offset -= yinc * 3;
901  offset += zinc;
902  }
903  break;
904  }
905  case COMPRESS_RAWFULL:
906  {
907  T *data = (T *) myData;
908  int offset = (z * TILESIZE + y) * TILESIZE + x;
909  int yinc = TILESIZE;
910  int zinc = TILESIZE * TILESIZE;
911  int sampidx = 0;
912 
913  offset -= zinc;
914  for (int dz = -1; dz <= 1; dz++)
915  {
916  offset -= yinc;
917  for (int dy = -1; dy <= 1; dy++)
918  {
919  sample[sampidx] = data[offset-1];
920  sample[sampidx+1] = data[offset];
921  sample[sampidx+2] = data[offset+1];
922  sampidx += 3;
923  offset += yinc;
924  }
925  offset -= yinc * 3;
926  offset += zinc;
927  }
928  break;
929  }
930  case COMPRESS_FPREAL16:
931  {
932  fpreal16 *data = (fpreal16 *) myData;
933  int offset = (z * myRes[1] + y) * myRes[0] + x;
934  int yinc = myRes[0];
935  int zinc = myRes[0] * myRes[1];
936  int sampidx = 0;
937 
938  offset -= zinc;
939  for (int dz = -1; dz <= 1; dz++)
940  {
941  offset -= yinc;
942  for (int dy = -1; dy <= 1; dy++)
943  {
944  sample[sampidx] = data[offset-1];
945  sample[sampidx+1] = data[offset];
946  sample[sampidx+2] = data[offset+1];
947  sampidx += 3;
948  offset += yinc;
949  }
950  offset -= yinc * 3;
951  offset += zinc;
952  }
953  break;
954  }
955  case COMPRESS_CONSTANT:
956  {
957  sample[0] = rawConstVal();
958  return true;
959  }
960 
961  default:
962  {
963  UT_VoxelTileCompress<T> *engine;
964 
965  engine = getCompressionEngine(myCompressionType);
966  int sampidx = 0;
967  // Lerp x:x+1, y, z
968  for (int dz = -1; dz <= 1; dz++)
969  {
970  for (int dy = -1; dy <= 1; dy++)
971  {
972  for (int dx = -1; dx <= 1; dx++)
973  {
974  sample[sampidx++] = engine->getValue(*this, x+dx, y+dy, z+dz);
975  }
976  }
977  }
978  break;
979  }
980  }
981  return false;
982 }
983 #endif
984 
985 template <typename T>
986 template <int AXIS2D>
987 bool
988 UT_VoxelTile<T>::extractSampleAxis(int x, int y, int z, T *sample) const
989 {
990  switch (myCompressionType)
991  {
992  case COMPRESS_RAW:
993  {
994  T *data = (T *) myData;
995  int offset = (z * myRes[1] + y) * myRes[0] + x;
996  int yinc = myRes[0];
997  int zinc = myRes[0] * myRes[1];
998 
999  sample[0] = data[offset];
1000  if (AXIS2D != 0)
1001  sample[1] = data[offset+1];
1002  if (AXIS2D != 1)
1003  {
1004  sample[2+0] = data[offset+yinc];
1005  if (AXIS2D != 0)
1006  sample[2+1] = data[offset+yinc+1];
1007  }
1008  if (AXIS2D != 2)
1009  {
1010  sample[4+0] = data[offset+zinc];
1011  if (AXIS2D != 0)
1012  sample[4+1] = data[offset+zinc+1];
1013  if (AXIS2D != 1)
1014  {
1015  sample[4+2+0] = data[offset+zinc+yinc];
1016  if (AXIS2D != 0)
1017  sample[4+2+1] = data[offset+zinc+yinc+1];
1018  }
1019  }
1020  break;
1021  }
1022  case COMPRESS_RAWFULL:
1023  {
1024  T *data = (T *) myData;
1025  int offset = (z * TILESIZE + y) * TILESIZE + x;
1026  int yinc = TILESIZE;
1027  int zinc = TILESIZE * TILESIZE;
1028 
1029  sample[0] = data[offset];
1030  if (AXIS2D != 0)
1031  sample[1] = data[offset+1];
1032  if (AXIS2D != 1)
1033  {
1034  sample[2+0] = data[offset+yinc];
1035  if (AXIS2D != 0)
1036  sample[2+1] = data[offset+yinc+1];
1037  }
1038  if (AXIS2D != 2)
1039  {
1040  sample[4+0] = data[offset+zinc];
1041  if (AXIS2D != 0)
1042  sample[4+1] = data[offset+zinc+1];
1043  if (AXIS2D != 1)
1044  {
1045  sample[4+2+0] = data[offset+zinc+yinc];
1046  if (AXIS2D != 0)
1047  sample[4+2+1] = data[offset+zinc+yinc+1];
1048  }
1049  }
1050  break;
1051  }
1052  case COMPRESS_FPREAL16:
1053  {
1054  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
1055  static constexpr UT_FromUnbounded<T> convertFromFP16{};
1056 
1057  fpreal16 *data = (fpreal16 *) myData;
1058  int xinc = tuple_size;
1059  int yinc = myRes[0] * xinc;
1060  int zinc = myRes[1] * yinc;
1061  int offset = z * zinc + y * yinc + x * xinc;
1062 
1063  data += offset;
1064  sample[0] = convertFromFP16(data);
1065  if (AXIS2D != 0)
1066  sample[1] = convertFromFP16(data + xinc);
1067  if (AXIS2D != 1)
1068  {
1069  sample[2+0] = convertFromFP16(data + yinc);
1070  if (AXIS2D != 0)
1071  sample[2+1] = convertFromFP16(data + yinc + xinc);
1072  }
1073  if (AXIS2D != 2)
1074  {
1075  sample[4+0] = convertFromFP16(data + zinc);
1076  if (AXIS2D != 0)
1077  sample[4+1] = convertFromFP16(data + zinc + xinc);
1078  if (AXIS2D != 1)
1079  {
1080  sample[4+2+0] = convertFromFP16(data + zinc + yinc);
1081  if (AXIS2D != 0)
1082  sample[4+2+1] = convertFromFP16(data + zinc + yinc + xinc);
1083  }
1084  }
1085  break;
1086  }
1087  case COMPRESS_CONSTANT:
1088  {
1089  sample[0] = rawConstVal();
1090  return true;
1091  }
1092 
1093  default:
1094  {
1095  UT_VoxelTileCompress<T> *engine;
1096 
1097  engine = getCompressionEngine(myCompressionType);
1098  // Lerp x:x+1, y, z
1099  sample[0] = engine->getValue(*this, x, y, z);
1100  if (AXIS2D != 0)
1101  sample[1] = engine->getValue(*this, x+1, y, z);
1102  if (AXIS2D != 1)
1103  {
1104  sample[2+0] = engine->getValue(*this, x, y+1, z);
1105  if (AXIS2D != 0)
1106  sample[2+1] = engine->getValue(*this, x+1, y+1, z);
1107  }
1108  if (AXIS2D != 2)
1109  {
1110  sample[4+0] = engine->getValue(*this, x, y, z+1);
1111  if (AXIS2D != 0)
1112  sample[4+1] = engine->getValue(*this, x+1, y, z+1);
1113  if (AXIS2D != 1)
1114  {
1115  sample[4+2+0] = engine->getValue(*this, x, y+1, z+1);
1116  if (AXIS2D != 0)
1117  sample[4+2+1] = engine->getValue(*this, x+1, y+1, z+1);
1118  }
1119  }
1120  break;
1121  }
1122  }
1123  return false;
1124 }
1125 
1126 #if 0
1127 template <typename T>
1128 T
1129 UT_VoxelTile<T>::lerp(v4uf frac, int x, int y, int z) const
1130 {
1131  v4uf a, b;
1132 
1133  switch (myCompressionType)
1134  {
1135  case COMPRESS_RAW:
1136  case COMPRESS_RAWFULL:
1137  {
1138  T *data = (T *) myData;
1139  int offset = (z * myRes[1] + y) * myRes[0] + x;
1140  int yinc = myRes[0];
1141  int zinc = myRes[0] * myRes[1];
1142 
1143  a = v4uf( data[offset],
1144  data[offset+zinc],
1145  data[offset+yinc],
1146  data[offset+yinc+zinc] );
1147  b = v4uf( data[offset+1],
1148  data[offset+zinc+1],
1149  data[offset+yinc+1],
1150  data[offset+yinc+zinc+1] );
1151  break;
1152  }
1153 
1154  case COMPRESS_CONSTANT:
1155  {
1156  // This is quite trivial to do a trilerp on.
1157  return rawConstVal();
1158  }
1159 
1160  default:
1161  {
1162  UT_VoxelTileCompress<T> *engine;
1163 
1164  engine = getCompressionEngine(myCompressionType);
1165  // Lerp x:x+1, y, z
1166  a = v4uf( engine->getValue(*this, x, y, z),
1167  engine->getValue(*this, x, y, z+1),
1168  engine->getValue(*this, x, y+1, z),
1169  engine->getValue(*this, x, y+1, z+1) );
1170  b = v4uf( engine->getValue(*this, x+1, y, z),
1171  engine->getValue(*this, x+1, y, z+1),
1172  engine->getValue(*this, x+1, y+1, z),
1173  engine->getValue(*this, x+1, y+1, z+1) );
1174  break;
1175  }
1176  }
1177 
1178  v4uf fx, fy, fz;
1179 
1180  fx = frac.swizzle<0, 0, 0, 0>();
1181  fy = frac.swizzle<1, 1, 1, 1>();
1182  fz = frac.swizzle<2, 2, 2, 2>();
1183 
1184  b -= a;
1185  a = madd(b, fx, a);
1186 
1187  b = a.swizzle<2, 3, 0, 1>();
1188  b -= a;
1189  a = madd(b, fy, a);
1190 
1191  b = a.swizzle<1, 2, 3, 0>();
1192  b -= a;
1193  a = madd(b, fz, a);
1194 
1195  return a[0];
1196 }
1197 #endif
1198 
1199 template <typename T>
1200 T *
1201 UT_VoxelTile<T>::fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
1202 {
1203  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1204  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1205 
1206  T *src;
1207  int i, xres = myRes[0];
1208 
1209  // All the directly handled types exit directly from this switch.
1210  switch (myCompressionType)
1211  {
1212  case COMPRESS_RAW:
1213  stride = 1;
1214  src = (T *)myData + (z * myRes[1] + y) * xres;
1215  if (!forcecopy)
1216  return &src[x];
1217 
1218  for (i = 0; i < xres; i++)
1219  cacheline[i] = src[i];
1220 
1221  return &cacheline[x];
1222 
1223  case COMPRESS_FPREAL16:
1224  {
1225  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
1226  static constexpr UT_FromUnbounded<T> convertFromFP16{};
1227 
1228  fpreal16 *src = (fpreal16 *) myData;
1229  int xinc = tuple_size;
1230  int yinc = myRes[0] * xinc;
1231  int zinc = myRes[1] * yinc;
1232  int offset = z * zinc + y * yinc + x * xinc;
1233 
1234  stride = 1;
1235  src += offset;
1236 
1237  for (i = 0; i < xres; i++, src += xinc)
1238  cacheline[i] = convertFromFP16(src);
1239 
1240  return &cacheline[x];
1241  }
1242 
1243 
1244  case COMPRESS_CONSTANT:
1245  src = rawConstData();
1246  if (!forcecopy && !strideofone)
1247  {
1248  stride = 0;
1249  return src;
1250  }
1251  stride = 1;
1252  // Fill out a constant value
1253  for (i = 0; i < xres; i++)
1254  cacheline[i] = *src;
1255 
1256  return &cacheline[x];
1257 
1258 
1259  case COMPRESS_RAWFULL:
1260  stride = 1;
1261  src = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1262  if (!forcecopy)
1263  return &src[x];
1264 
1265  for (i = 0; i < xres; i++)
1266  cacheline[i] = src[i];
1267 
1268  return &cacheline[x];
1269  }
1270 
1271  // By default use the compression engine.
1272  UT_VoxelTileCompress<T> *engine;
1273 
1274  engine = getCompressionEngine(myCompressionType);
1275 
1276  // We could add support for a direct cacheline fill to accelerate
1277  // this case as well.
1278  stride = 1;
1279  for (i = 0; i < xres; i++)
1280  cacheline[i] = engine->getValue(*this, i, y, z);
1281 
1282  return &cacheline[x];
1283 }
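// A hedged sketch of consuming fillCacheLine(): with forcecopy false a
// constant tile comes back with stride 0, so one loop handles every
// encoding. The scratch size assumes xres() <= TILESIZE; "tile", y and z
// are hypothetical:
#if 0
fpreal32 scratch[TILESIZE];
int stride;
fpreal32 *row = tile.fillCacheLine(scratch, stride, 0, y, z,
                                   /*forcecopy*/ false,
                                   /*strideofone*/ false);
fpreal32 sum = 0;
for (int x = 0; x < tile.xres(); x++)
    sum += row[x * stride];   // stride == 0 keeps re-reading the constant
#endif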
1284 
1285 template <typename T>
1286 void
1287 UT_VoxelTile<T>::writeCacheLine(T *cacheline, int y, int z)
1288 {
1289  UT_ASSERT_P(y >= 0 && z >= 0);
1290  UT_ASSERT_P(y < myRes[1] && z < myRes[2]);
1291 
1292  T *dst, value;
1293  int i, xres = myRes[0];
1294 
1295  // All the directly handled types exit directly from this switch.
1296  switch (myCompressionType)
1297  {
1298  case COMPRESS_RAW:
1299  dst = (T *)myData + (z * myRes[1] + y) * xres;
1300  for (i = 0; i < xres; i++)
1301  *dst++ = *cacheline++;
1302  return;
1303 
1304  case COMPRESS_CONSTANT:
1305  value = rawConstVal();
1306  for (i = 0; i < xres; i++)
1307  if (cacheline[i] != value)
1308  break;
1309  // If everything was equal, our write is trivial.
1310  if (i == xres)
1311  return;
1312 
1313  break;
1314 
1315  case COMPRESS_RAWFULL:
1316  dst = (T *)myData + (z * TILESIZE + y) * TILESIZE;
1317  for (i = 0; i < TILESIZE; i++)
1318  *dst++ = *cacheline++;
1319 
1320  return;
1321  }
1322 
1323  // Switch back to invoking writeThrough. Ideally we'd have
1324  // a version that can handle a whole cache line at once
1325  for (i = 0; i < xres; i++)
1326  if (!writeThrough(i, y, z, cacheline[i]))
1327  break;
1328 
1329  // Determine if we failed to write everything through
1330  if (i != xres)
1331  {
1332  // Uncompress and reinvoke ourselves.
1333  uncompress();
1334  writeCacheLine(cacheline, y, z);
1335  }
1336 }
1337 
1338 template <typename T>
1339 void
1340 UT_VoxelTile<T>::copyFragment(int dstx, int dsty, int dstz,
1341  const UT_VoxelTile<T> &srctile,
1342  int srcx, int srcy, int srcz)
1343 {
1344  int w = SYSmin(xres() - dstx, srctile.xres() - srcx);
1345  int h = SYSmin(yres() - dsty, srctile.yres() - srcy);
1346  int d = SYSmin(zres() - dstz, srctile.zres() - srcz);
1347 
1348 #if 1
1349  if (srctile.isSimpleCompression())
1350  {
1351  T *dst;
1352  const T *src;
1353  int srcinc;
1354 
1355  src = srctile.rawData();
1356  srcinc = srctile.isConstant() ? 0 : 1;
1357 
1358  // Ensure we are easy to write to.
1359  uncompress();
1360 
1361  dst = rawData();
1362  dst += dstx + (dsty + dstz*yres())*xres();
1363 
1364  if (srcinc)
1365  src += srcx + (srcy + srcz*srctile.yres())*srctile.xres();
1366 
1367  for (int z = 0; z < d; z++)
1368  {
1369  for (int y = 0; y < h; y++)
1370  {
1371  if (srcinc)
1372  {
1373  for (int x = 0; x < w; x++)
1374  dst[x] = src[x];
1375  }
1376  else
1377  {
1378  for (int x = 0; x < w; x++)
1379  dst[x] = *src;
1380  }
1381  dst += xres();
1382  if (srcinc)
1383  src += srctile.xres();
1384  }
1385  dst += (yres()-h) * xres();
1386  if (srcinc)
1387  src += (srctile.yres() - h) * srctile.xres();
1388  }
1389 
1390  return;
1391  }
1392 #endif
1393 
1394  // Fail safe approach.
1395  for (int z = 0; z < d; z++)
1396  for (int y = 0; y < h; y++)
1397  for (int x = 0; x < w; x++)
1398  {
1399  setValue(dstx+x, dsty+y, dstz+z,
1400  srctile(srcx+x, srcy+y, srcz+z));
1401  }
1402 }
1403 
1404 template <typename T>
1405 template <typename S>
1406 void
1408 {
1409  int w = xres();
1410  int h = yres();
1411  int d = zres();
1412 
1413  if (isSimpleCompression())
1414  {
1415  const T *src;
1416  int srcinc;
1417 
1418  src = rawData();
1419  srcinc = isConstant() ? 0 : 1;
1420 
1421  if (stride == 1)
1422  {
1423  if (srcinc == 1)
1424  {
1425  // Super trivial!
1426  for (int i = 0; i < w * h * d; i++)
1427  {
1428  *dst++ = T(*src++);
1429  }
1430  }
1431  else
1432  {
1433  // Constant, also trivial!
1434  T cval = T(*src);
1435 
1436  for (int i = 0; i < w * h * d; i++)
1437  {
1438  *dst++ = cval;
1439  }
1440  }
1441  }
1442  else
1443  {
1444  for (int z = 0; z < d; z++)
1445  {
1446  for (int y = 0; y < h; y++)
1447  {
1448  if (srcinc)
1449  {
1450  for (int x = 0; x < w; x++)
1451  {
1452  *dst = S(src[x]);
1453  dst += stride;
1454  }
1455  }
1456  else
1457  {
1458  for (int x = 0; x < w; x++)
1459  {
1460  *dst = S(*src);
1461  dst += stride;
1462  }
1463  }
1464  if (srcinc)
1465  src += w;
1466  }
1467  }
1468  }
1469 
1470  return;
1471  }
1472 
1473  // Fail safe approach.
1474  for (int z = 0; z < d; z++)
1475  for (int y = 0; y < h; y++)
1476  for (int x = 0; x < w; x++)
1477  {
1478  *dst = S((*this)(x, y, z));
1479  dst += stride;
1480  }
1481 }
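// A hedged sketch of flatten(): pull a whole tile into a dense buffer,
// converting to the destination scalar type on the way ("tile" is a
// hypothetical UT_VoxelTile<fpreal32>):
#if 0
fpreal32 *flat = new fpreal32[tile.numVoxels()];
tile.flatten(flat, /*stride*/ 1);   // stride 1 packs the output tightly
// ... consume flat ...
delete [] flat;
#endif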
1482 
1483 template <typename T>
1484 template <typename S>
1485 void
1486 UT_VoxelTile<T>::writeData(const S *srcdata, int srcstride)
1487 {
1488  int w = xres();
1489  int h = yres();
1490  int d = zres();
1491  int i, n = w * h * d;
1492 
1493  // Check if src is constant
1494  S compare = srcdata[0];
1495  int srcoff = srcstride;
1496 
1497  if (srcstride == 0)
1498  {
1499  // Trivially constant
1500  makeConstant(T(compare));
1501  return;
1502  }
1503 
1504  for (i = 1; i < n; i++)
1505  {
1506  if (srcdata[srcoff] != compare)
1507  break;
1508  srcoff += srcstride;
1509  }
1510 
1511  if (i == n)
1512  {
1513  // Constant source!
1514  makeConstant(compare);
1515  return;
1516  }
1517 
1518  // Create a raw tile, expanding constants
1519  uncompress();
1520 
1521  if (srcstride == 1)
1522  {
1523  T *dst = rawData();
1524  for (i = 0; i < n; i++)
1525  {
1526  *dst++ = T(*srcdata++);
1527  }
1528  }
1529  else
1530  {
1531  T *dst = rawData();
1532 
1533  srcoff = 0;
1534  for (i = 0; i < n; i++)
1535  {
1536  dst[i] = T(srcdata[srcoff]);
1537  srcoff += srcstride;
1538  }
1539  }
1540 }
1541 
1542 template <typename T>
1543 void
1544 UT_VoxelTile<T>::setValue(int x, int y, int z, T t)
1545 {
1546  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
1547  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
1548 
1549  // Determine if assignment is compatible with current
1550  // compression technique.
1551  if (writeThrough(x, y, z, t))
1552  {
1553  return;
1554  }
1555  // Can't write to our current type of tile with the
1556  // given value, so abandon.
1557  uncompress();
1558 
1559  // Attempt to write through again. This must succeed
1560  // as we should now be uncompressed.
1561  UT_VERIFY_P(writeThrough(x, y, z, t));
1562 }
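// setValue() is the canonical write path: attempt writeThrough() against
// the current encoding and decompress only on a miss. A sketch of the
// observable behaviour, assuming a hypothetical tile that is currently
// constant zero:
#if 0
tile.setValue(0, 0, 0, 0.0F);   // matches the constant; stays compressed
tile.setValue(0, 0, 0, 1.0F);   // mismatch: uncompress(), then a raw write
#endif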
1563 
1564 template <typename T>
1565 void
1566 UT_VoxelTile<T>::uncompress()
1567 {
1568  switch (myCompressionType)
1569  {
1570  case COMPRESS_RAW:
1571  // Trivial.
1572  return;
1573 
1574  case COMPRESS_CONSTANT:
1575  {
1576  // Must expand the tile!
1577  T cval;
1578  int i, n;
1579 
1580  cval = rawConstVal();
1581  freeData();
1582 
1583  myCompressionType = COMPRESS_RAW;
1584 
1585  if (myRes[0] == TILESIZE &&
1586  myRes[1] == TILESIZE)
1587  {
1588  // Flag that we can do fast lookups on this tile.
1589  myCompressionType = COMPRESS_RAWFULL;
1590  }
1591 
1592  n = myRes[0] * myRes[1] * myRes[2];
1593  myData = UT_VOXEL_ALLOC(sizeof(T) * n);
1594 
1595  for (i = 0; i < n; i++)
1596  {
1597  ((T *)myData)[i] = cval;
1598  }
1599  return;
1600  }
1601  case COMPRESS_RAWFULL:
1602  {
1603  T *raw;
1604  int x, y, z, i, n;
1605 
1606  if (myRes[0] == TILESIZE &&
1607  myRes[1] == TILESIZE)
1608  {
1609  // Trivial
1610  return;
1611  }
1612 
1613  // Need to contract to the actual compact size.
1614  n = myRes[0] * myRes[1] * myRes[2];
1615  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1616  i = 0;
1617  for (z = 0; z < myRes[2]; z++)
1618  {
1619  for (y = 0; y < myRes[1]; y++)
1620  {
1621  for (x = 0; x < myRes[0]; x++)
1622  {
1623  raw[i++] = ((T *)myData)[x+(y+z*TILESIZE)*TILESIZE];
1624  }
1625  }
1626  }
1627 
1628  freeData();
1629  myCompressionType = COMPRESS_RAW;
1630  myData = raw;
1631 
1632  return;
1633  }
1634 
1635  case COMPRESS_FPREAL16:
1636  {
1637  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
1638  static constexpr UT_FromUnbounded<T> convertFromFP16{};
1639 
1640  T *raw;
1641  int x, y, z, i, n;
1642  fpreal16 *src = (fpreal16 *) myData;
1643 
1644  n = myRes[0] * myRes[1] * myRes[2];
1645  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * n);
1646  i = 0;
1647  for (z = 0; z < myRes[2]; z++)
1648  {
1649  for (y = 0; y < myRes[1]; y++)
1650  {
1651  for (x = 0; x < myRes[0]; x++, src += tuple_size)
1652  {
1653  raw[i] = convertFromFP16(src);
1654  i++;
1655  }
1656  }
1657  }
1658  freeData();
1659  myCompressionType = COMPRESS_RAW;
1660  myData = raw;
1661  return;
1662  }
1663  }
1664 
1665  // Use the compression engine.
1666  UT_VoxelTileCompress<T> *engine;
1667 
1668  engine = getCompressionEngine(myCompressionType);
1669 
1670  // We use the read ability to set our values.
1671  int x, y, z, i;
1672  T *raw;
1673 
1674  raw = (T *) UT_VOXEL_ALLOC(sizeof(T) * myRes[0] * myRes[1] * myRes[2]);
1675  i = 0;
1676  for (z = 0; z < myRes[2]; z++)
1677  {
1678  for (y = 0; y < myRes[1]; y++)
1679  {
1680  for (x = 0; x < myRes[0]; x++)
1681  {
1682  raw[i++] = engine->getValue(*this, x, y, z);
1683  }
1684  }
1685  }
1686 
1687  freeData();
1688 
1689  // Now copy in the result
1690  myCompressionType = COMPRESS_RAW;
1691  if (myRes[0] == TILESIZE &&
1692  myRes[1] == TILESIZE)
1693  {
1694  // Flag that we can do fast lookups on this tile.
1695  myCompressionType = COMPRESS_RAWFULL;
1696  }
1697 
1698  myData = raw;
1699 }
1700 
1701 template <typename T>
1702 void
1703 UT_VoxelTile<T>::uncompressFull()
1704 {
1705  T *raw;
1706  int x, y, z, i;
1707 
1708  if (myCompressionType == COMPRESS_RAWFULL)
1709  return;
1710 
1711  uncompress();
1712 
1713  UT_ASSERT(myCompressionType == COMPRESS_RAW);
1714 
1715  // The RAWFULL format only differs from RAW when the tile dimensions
1716  // are smaller than the maximum tile size.
1717  if (myRes[0] < TILESIZE || myRes[1] < TILESIZE)
1718  {
1719  raw = (T *)UT_VOXEL_ALLOC(sizeof(T) * TILESIZE * TILESIZE * myRes[2]);
1720  i = 0;
1721  for (z = 0; z < myRes[2]; z++)
1722  {
1723  for (y = 0; y < myRes[1]; y++)
1724  {
1725  for (x = 0; x < myRes[0]; x++)
1726  {
1727  raw[x+(y+z*TILESIZE)*TILESIZE] = ((T *)myData)[i++];
1728  }
1729  }
1730  }
1731  freeData();
1732  myData = raw;
1733  }
1734  myCompressionType = COMPRESS_RAWFULL;
1735 }
1736 
1737 template <typename T>
1738 void
1739 UT_VoxelTile<T>::makeRawUninitialized()
1740 {
1741  if (isRaw() || isRawFull())
1742  return;
1743 
1744  freeData();
1745 
1746  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
1747  myCompressionType = COMPRESS_RAWFULL;
1748  else
1749  myCompressionType = COMPRESS_RAW;
1750 
1751  myData = UT_VOXEL_ALLOC(sizeof(T) * numVoxels());
1752 }
1753 
1754 template <typename T>
1755 void
1756 UT_VoxelTile<T>::findAverage(T &avg) const
1757 {
1758  float irx, iry, irz;
1759 
1760  irx = 1.0 / myRes[0];
1761  iry = 1.0 / myRes[1];
1762  irz = 1.0 / myRes[2];
1763  switch (myCompressionType)
1764  {
1765  case COMPRESS_RAW:
1766  {
1767  int i;
1768  const T *data = (const T*) myData;
1769 
1770  i = 0;
1771  T zavg;
1772  zavg = 0;
1773  for (int z = 0; z < myRes[2]; z++)
1774  {
1775  T yavg;
1776  yavg = 0;
1777  for (int y = 0; y < myRes[1]; y++)
1778  {
1779  T xavg;
1780  xavg = 0;
1781  for (int x = 0; x < myRes[0]; x++)
1782  {
1783  xavg += data[i++];
1784  }
1785  xavg *= irx;
1786  yavg += xavg;
1787  }
1788  yavg *= iry;
1789  zavg += yavg;
1790  }
1791  zavg *= irz;
1792 
1793  avg = zavg;
1794  return;
1795  }
1796 
1797  case COMPRESS_FPREAL16:
1798  {
1799  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
1800  static constexpr UT_FromUnbounded<T> convertFromFP16{};
1801 
1802  int i;
1803  const fpreal16 *data = (fpreal16 *) myData;
1804 
1805  i = 0;
1806  // The code below uses 4-wide vectors for averaging (though only the
1807  // needed components are actually used). This needs to be increased
1808  // in size if we need to start working with voxel arrays of vectors
1809  // with more components.
1810  static_assert(tuple_size <= 4);
1811  fpreal16 zavg[] = { 0, 0, 0, 0 };
1812  for (int z = 0; z < myRes[2]; z++)
1813  {
1814  fpreal16 yavg[] = { 0, 0, 0, 0 };
1815  for (int y = 0; y < myRes[1]; y++)
1816  {
1817  fpreal16 xavg[] = { 0, 0, 0, 0 };
1818  for (int x = 0; x < myRes[0]; x++)
1819  {
1820  for (int j = 0; j < tuple_size; j++)
1821  {
1822  xavg[j] += data[i++];
1823  }
1824  }
1825  for (int j = 0; j < tuple_size; j++)
1826  {
1827  xavg[j] *= irx;
1828  yavg[j] += xavg[j];
1829  }
1830  }
1831  for (int j = 0; j < tuple_size; j++)
1832  {
1833  yavg[j] *= iry;
1834  zavg[j] += yavg[j];
1835  }
1836  }
1837  for (int j = 0; j < tuple_size; j++)
1838  {
1839  zavg[j] *= irz;
1840  }
1841 
1842  avg = convertFromFP16(zavg);
1843  return;
1844  }
1845 
1846  case COMPRESS_CONSTANT:
1847  avg = rawConstVal();
1848  return;
1849 
1850  case COMPRESS_RAWFULL:
1851  {
1852  int offset;
1853  const T *data = (const T*) myData;
1854 
1855  offset = 0;
1856  T zavg;
1857  zavg = 0;
1858  for (int z = 0; z < myRes[2]; z++)
1859  {
1860  T yavg;
1861  yavg = 0;
1862  for (int y = 0; y < myRes[1]; y++)
1863  {
1864  T xavg;
1865  xavg = 0;
1866  for (int x = 0; x < myRes[0]; x++)
1867  {
1868  xavg += data[x+offset];
1869  }
1870  xavg *= irx;
1871  yavg += xavg;
1872  offset += TILESIZE;
1873  }
1874  yavg *= iry;
1875  zavg += yavg;
1876  }
1877  zavg *= irz;
1878 
1879  avg = zavg;
1880  return;
1881  }
1882 
1883  default:
1884  {
1885  T zavg;
1886  zavg = 0;
1887  for (int z = 0; z < myRes[2]; z++)
1888  {
1889  T yavg;
1890  yavg = 0;
1891  for (int y = 0; y < myRes[1]; y++)
1892  {
1893  T xavg;
1894  xavg = 0;
1895  for (int x = 0; x < myRes[0]; x++)
1896  {
1897  xavg += (*this)(x, y, z);
1898  }
1899  xavg *= irx;
1900  yavg += xavg;
1901  }
1902  yavg *= iry;
1903  zavg += yavg;
1904  }
1905  zavg *= irz;
1906 
1907  avg = zavg;
1908  return;
1909  }
1910  }
1911 }
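// Every findAverage() path above computes nested per-axis means,
//   avg = (1/rz) * sum_z [ (1/ry) * sum_y [ (1/rx) * sum_x v(x,y,z) ] ],
// rather than one grand sum divided by the voxel count. Normalizing as
// each axis completes keeps the partial sums small, which plausibly
// matters most for the fpreal16 case, where a straight sum over a full
// tile could exceed half-precision range.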
1912 
1913 template <typename T>
1914 void
1915 UT_VoxelTile<T>::findMinMax(T &min, T &max) const
1916 {
1917  switch (myCompressionType)
1918  {
1919  case COMPRESS_RAW:
1920  {
1921  int n = myRes[0] * myRes[1] * myRes[2];
1922  int i;
1923 
1924  min = max = *(T*)myData;
1925  for (i = 1; i < n; i++)
1926  {
1927  expandMinMax( ((T*)myData)[i], min, max );
1928  }
1929  return;
1930  }
1931 
1932  case COMPRESS_FPREAL16:
1933  {
1934  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
1935  static constexpr UT_FromUnbounded<T> convertFromFP16{};
1936 
1937  int n = myRes[0] * myRes[1] * myRes[2];
1938  int i;
1939  fpreal16 *src = (fpreal16 *)myData;
1940 
1941  min = max = convertFromFP16(src);
1942  for (i = 1; i < n; i++)
1943  {
1944  T val = convertFromFP16(src += tuple_size);
1945  expandMinMax( val, min, max );
1946  }
1947  return;
1948  }
1949 
1950  case COMPRESS_CONSTANT:
1951  min = max = rawConstVal();
1952  return;
1953 
1954  case COMPRESS_RAWFULL:
1955  {
1956  int x, y, z, offset;
1957 
1958  min = max = *(T*)myData;
1959  offset = 0;
1960  for (z = 0; z < myRes[2]; z++)
1961  {
1962  for (y = 0; y < myRes[1]; y++)
1963  {
1964  for (x = 0; x < myRes[0]; x++)
1965  {
1966  expandMinMax(
1967  ((T*)myData)[x+offset],
1968  min, max );
1969  }
1970  offset += TILESIZE;
1971  }
1972  }
1973  return;
1974  }
1975 
1976  default:
1977  {
1978  // Use the compression engine.
1979  UT_VoxelTileCompress<T> *engine;
1980 
1981  engine = getCompressionEngine(myCompressionType);
1982 
1983  engine->findMinMax(*this, min, max);
1984  return;
1985  }
1986  }
1987 }
1988 
1989 template <typename T>
1990 bool
1991 UT_VoxelTile<T>::hasNan() const
1992 {
1993  switch (myCompressionType)
1994  {
1995  case COMPRESS_RAW:
1996  case COMPRESS_RAWFULL:
1997  {
1998  int n = myRes[0] * myRes[1] * myRes[2];
1999  int i;
2000 
2001  for (i = 0; i < n; i++)
2002  {
2003  if (SYSisNan( ((T*)myData)[i] ))
2004  return true;
2005  }
2006  return false;
2007  }
2008 
2009  case COMPRESS_FPREAL16:
2010  {
2011  return false;
2012  }
2013 
2014  case COMPRESS_CONSTANT:
2015  if (SYSisNan(rawConstVal()))
2016  return true;
2017  return false;
2018 
2019  default:
2020  {
2021  // Use the compression engine.
2022  UT_VoxelTileCompress<T> *engine;
2023  int x, y, z;
2024 
2025  engine = getCompressionEngine(myCompressionType);
2026 
2027  for (z = 0; z < myRes[2]; z++)
2028  for (y = 0; y < myRes[1]; y++)
2029  for (x = 0; x < myRes[0]; x++)
2030  if (SYSisNan(engine->getValue(*this, x, y, z)))
2031  {
2032  return true;
2033  }
2034 
2035  return false;
2036  }
2037  }
2038 }
2039 
2040 template <typename T>
2041 bool
2042 UT_VoxelTile<T>::tryCompress(const UT_VoxelCompressOptions &options)
2043 {
2044  T min, max;
2045  bool losslessonly = (options.myQuantizeTol == 0.0);
2046  int i;
2047  UT_VoxelTileCompress<T> *engine;
2048 
2049  // This is as easy as it gets.
2050  if (myCompressionType == COMPRESS_CONSTANT)
2051  return false;
2052 
2053  findMinMax(min, max);
2054 
2055  // See if we can be made into a constant tile.
2056  if (dist(min, max) <= options.myConstantTol)
2057  {
2058  // Ignore if we are already constant.
2059  if (myCompressionType == COMPRESS_CONSTANT)
2060  return false;
2061 
2062  // We are fully constant!
2063  if (min != max)
2064  {
2065  T avg;
2066  findAverage(avg);
2067  makeConstant(avg);
2068  }
2069  else
2070  {
2071  makeConstant(min);
2072  }
2073  return true;
2074  }
2075 
2076  bool result = false;
2077 
2078  if (options.myAllowFP16)
2079  {
2080  if (myCompressionType == COMPRESS_RAW ||
2081  myCompressionType == COMPRESS_RAWFULL)
2082  {
2083  makeFpreal16();
2084  result = true;
2085  }
2086  }
2087 
2088  for (i = 0; i < getCompressionEngines().entries(); i++)
2089  {
2090  engine = getCompressionEngines()(i);
2091 
2092  // Ignore possibly expensive lossy compressions.
2093  if (losslessonly && !engine->isLossless())
2094  continue;
2095 
2096  if (engine->tryCompress(*this, options, min, max))
2097  {
2098  myCompressionType = i + COMPRESS_ENGINE;
2099  result = true;
2100  // We keep testing in case another compression engine
2101  // can get a better number...
2102  }
2103  }
2104 
2105  // If we are RAW compress, check to see if we could become
2106  // RAWFULL for faster access.
2107  if (myCompressionType == COMPRESS_RAW)
2108  {
2109  if (myRes[0] == TILESIZE && myRes[1] == TILESIZE)
2110  {
2111  myCompressionType = COMPRESS_RAWFULL;
2112  }
2113  }
2114 
2115  // Return whether any compression was applied.
2116  return result;
2117 }
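// A hedged sketch of driving tryCompress() by hand; the option fields are
// the ones consulted above, but the tolerance values and public access to
// them are assumptions:
#if 0
UT_VoxelCompressOptions options;
options.myConstantTol = 1e-5;   // collapse near-constant tiles
options.myQuantizeTol = 0.0;    // zero tolerance: lossless engines only
options.myAllowFP16 = true;     // permit half-float storage
bool modified = tile.tryCompress(options);
#endif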
2118 
2119 template <typename T>
2120 void
2121 UT_VoxelTile<T>::makeConstant(T t)
2122 {
2123  if (!isConstant())
2124  {
2125  freeData();
2126 
2127  if (!inlineConstant())
2128  myData = UT_VOXEL_ALLOC(sizeof(T));
2129  }
2130 
2131  myCompressionType = COMPRESS_CONSTANT;
2132  *rawConstData() = t;
2133 }
2134 
2135 template <typename T>
2136 void
2137 UT_VoxelTile<T>::makeFpreal16()
2138 {
2139  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
2140  using ScalarType = typename UT_FixedVectorTraits<T>::DataType;
2141 
2142  if (isConstant())
2143  {
2144  return;
2145  }
2146 
2147  if (myCompressionType == COMPRESS_FPREAL16)
2148  return;
2149 
2150  // Get our new data.
2151  int len = myRes[2] * myRes[1] * myRes[0] * tuple_size;
2152  fpreal16 *data = (fpreal16 *)UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2153 
2154  if (myCompressionType == COMPRESS_RAW ||
2155  myCompressionType == COMPRESS_RAWFULL)
2156  {
2157  for (int i = 0; i < len; i++)
2158  {
2159  data[i] = UTvoxelConvertFP16(((ScalarType*) myData)[i]);
2160  }
2161  }
2162  else
2163  {
2164  // Apply any converters.
2165  int i = 0;
2166 
2167  for (int z = 0; z < myRes[2]; z++)
2168  {
2169  for (int y = 0; y < myRes[1]; y++)
2170  {
2171  for (int x = 0; x < myRes[0]; x++)
2172  {
2173  if constexpr (tuple_size == 1)
2174  data[i++] = UTvoxelConvertFP16((*this)(x, y, z));
2175  else
2176  {
2177  T value = (*this)(x, y, z);
2178  for (int j = 0; j < tuple_size; j++)
2179  {
2180  data[i++] = UTvoxelConvertFP16(value(j));
2181  }
2182  }
2183  }
2184  }
2185  }
2186  }
2187 
2188  freeData();
2189  myData = data;
2190  myCompressionType = COMPRESS_FPREAL16;
2191 }
2192 
2193 template <typename T>
2194 int64
2195 UT_VoxelTile<T>::getMemoryUsage(bool inclusive) const
2196 {
2197  int64 mem = inclusive ? sizeof(*this) : 0;
2198  mem += getDataLength();
2199  return mem;
2200 }
2201 
2202 template <typename T>
2203 int64
2204 UT_VoxelTile<T>::getDataLength() const
2205 {
2206  exint usage;
2207 
2208  switch (myCompressionType)
2209  {
2210  case COMPRESS_RAW:
2211  usage = sizeof(T) * xres() * yres() * zres();
2212  break;
2213  case COMPRESS_FPREAL16:
2214  usage = sizeof(fpreal16) * xres() * yres() * zres()
2215  * UT_FixedVectorTraits<T>::TupleSize;
2216  break;
2217  case COMPRESS_CONSTANT:
2218  if (inlineConstant())
2219  usage = 0;
2220  else
2221  usage = sizeof(T);
2222  break;
2223  case COMPRESS_RAWFULL:
2224  usage = sizeof(T) * TILESIZE * TILESIZE * zres();
2225  break;
2226  default:
2227  {
2228  // Use the compression engine.
2229  UT_VoxelTileCompress<T> *engine;
2230  engine = getCompressionEngine(myCompressionType);
2231  usage = engine->getDataLength(*this);
2232  break;
2233  }
2234  }
2235  return usage;
2236 }
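// Worked sizes from the cases above, assuming TILESIZE == 16 and
// T == fpreal32: a full raw tile is 4 * 16^3 = 16384 bytes, the same tile
// stored as COMPRESS_FPREAL16 is 2 * 16^3 = 8192 bytes, and a constant
// tile is either sizeof(T) or zero bytes when the value is inlined into
// the data pointer (see inlineConstant()).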
2237 
2238 template <typename T>
2239 void
2240 UT_VoxelTile<T>::weightedSum(int pstart[3], int pend[3],
2241  const float *weights[3], int start[3],
2242  T &result)
2243 {
2244  int ix, iy, iz, i;
2245  int px, py, pz;
2246  int ixstart, ixend;
2247  int tstart[3];
2248  fpreal w, pw;
2249  T psumx, psumy;
2250 
2251  switch (myCompressionType)
2252  {
2253  case COMPRESS_CONSTANT:
2254  {
2255  w = 1;
2256  for (i = 0; i < 3; i++)
2257  {
2258  pw = 0;
2259  for (ix = 0; ix < pend[i]-pstart[i]; ix++)
2260  pw += weights[i][ix+pstart[i]-start[i]];
2261  w *= pw;
2262  }
2263  result += w * rawConstVal();
2264  break;
2265  }
2266 
2267  case COMPRESS_RAW:
2268  {
2269  tstart[0] = pstart[0] & TILEMASK;
2270  tstart[1] = pstart[1] & TILEMASK;
2271  tstart[2] = pstart[2] & TILEMASK;
2272  ixstart = pstart[0]-start[0];
2273  ixend = pend[0]-start[0];
2274  pz = tstart[2];
2275  UT_ASSERT(pz < myRes[2]);
2276  UT_ASSERT(ixend - ixstart <= myRes[0]);
2277  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2278  {
2279  psumy = 0;
2280  py = tstart[1];
2281  UT_ASSERT(py < myRes[1]);
2282  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2283  {
2284  psumx = 0;
2285  px = ((pz * myRes[1]) + py) * myRes[0] + tstart[0];
2286  for (ix = ixstart; ix < ixend; ix++, px++)
2287  {
2288  psumx += weights[0][ix]* ((T*)myData)[px];
2289  }
2290  psumy += weights[1][iy-start[1]] * psumx;
2291  }
2292  result += weights[2][iz-start[2]] * psumy;
2293  }
2294  break;
2295  }
2296 
2297  case COMPRESS_FPREAL16:
2298  {
2299  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
2300  static constexpr UT_FromUnbounded<T> convertFromFP16{};
2301 
2302  int xinc = tuple_size;
2303  int yinc = myRes[0] * xinc;
2304  int zinc = myRes[1] * yinc;
2305 
2306  fpreal16 *src = (fpreal16 *) myData;
2307 
2308  tstart[0] = pstart[0] & TILEMASK;
2309  tstart[1] = pstart[1] & TILEMASK;
2310  tstart[2] = pstart[2] & TILEMASK;
2311  ixstart = pstart[0]-start[0];
2312  ixend = pend[0]-start[0];
2313  pz = tstart[2];
2314  UT_ASSERT(pz < myRes[2]);
2315  UT_ASSERT(ixend - ixstart <= myRes[0]);
2316  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2317  {
2318  psumy = 0;
2319  py = tstart[1];
2320  UT_ASSERT(py < myRes[1]);
2321  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2322  {
2323  psumx = 0;
2324  px = pz * zinc + py * yinc + tstart[0] * xinc;
2325  for (ix = ixstart; ix < ixend; ix++, px += xinc)
2326  {
2327  T val = convertFromFP16(src + px);
2328  psumx += weights[0][ix]* val;
2329  }
2330  psumy += weights[1][iy-start[1]] * psumx;
2331  }
2332  result += weights[2][iz-start[2]] * psumy;
2333  }
2334  break;
2335  }
2336  case COMPRESS_RAWFULL:
2337  {
2338  tstart[0] = pstart[0] & TILEMASK;
2339  tstart[1] = pstart[1] & TILEMASK;
2340  tstart[2] = pstart[2] & TILEMASK;
2341  ixstart = pstart[0]-start[0];
2342  ixend = pend[0]-start[0];
2343  pz = tstart[2];
2344  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2345  {
2346  psumy = 0;
2347  py = tstart[1];
2348  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2349  {
2350  psumx = 0;
2351  px = ((pz * TILESIZE) + py) * TILESIZE + tstart[0];
2352  for (ix = ixstart; ix < ixend; ix++, px++)
2353  {
2354  psumx += weights[0][ix]* ((T*)myData)[px];
2355  }
2356  psumy += weights[1][iy-start[1]] * psumx;
2357  }
2358  result += weights[2][iz-start[2]] * psumy;
2359  }
2360  break;
2361  }
2362 
2363  default:
2364  {
2365  // For all other compression types, we use our
2366  // getValue() accessor rather than trying to
2367  // do anything fancy.
2368  tstart[0] = pstart[0] & TILEMASK;
2369  tstart[1] = pstart[1] & TILEMASK;
2370  tstart[2] = pstart[2] & TILEMASK;
2371  ixstart = pstart[0]-start[0];
2372  ixend = pend[0]-start[0];
2373  pz = tstart[2];
2374  for (iz = pstart[2]; iz < pend[2]; iz++, pz++)
2375  {
2376  psumy = 0;
2377  py = tstart[1];
2378  for (iy = pstart[1]; iy < pend[1]; iy++, py++)
2379  {
2380  psumx = 0;
2381  px = tstart[0];
2382  for (ix = ixstart; ix < ixend; ix++, px++)
2383  {
2384  psumx += weights[0][ix] *
2385  (*this)(px, py, pz);
2386  }
2387  psumy += weights[1][iy-start[1]] * psumx;
2388  }
2389  result += weights[2][iz-start[2]] * psumy;
2390  }
2391  break;
2392  }
2393  }
2394 }
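// weightedSum() above accumulates a separable filter restricted to this
// tile: result += sum_z w2[z] * sum_y w1[y] * sum_x w0[x] * v(x,y,z),
// with pstart/pend already clipped to the tile by the caller. Summing x
// innermost keeps the walk contiguous for the raw layouts, and the
// constant case collapses to (product of weight sums) * value.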
2395 
2396 template <typename T>
2397 void
2398 UT_VoxelTile<T>::registerCompressionEngine(UT_VoxelTileCompress<T> *engine)
2399 {
2400  int i;
2401 
2402  // Replace old copy, if any.
2403  for (i = 0; i < getCompressionEngines().entries(); i++)
2404  {
2405  if (!strcmp(engine->getName(), getCompressionEngines()(i)->getName()))
2406  {
2407  getCompressionEngines()(i) = engine;
2408  return;
2409  }
2410  }
2411 
2412  getCompressionEngines().append(engine);
2413 }
2414 
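A minimal sketch of how a custom engine might be hooked in through the two functions above. MyRLECompress and the name "myrle" are hypothetical stand-ins for a user-defined UT_VoxelTileCompress subclass; only registerCompressionEngine() and lookupCompressionEngine() are taken from this file.

 // Hypothetical engine: a user-defined UT_VoxelTileCompress<fpreal32>
 // subclass whose getName() returns "myrle".
 static MyRLECompress theRLEEngine;

 // Replaces any engine already registered under the same name,
 // otherwise appends it to the global engine table.
 UT_VoxelTile<fpreal32>::registerCompressionEngine(&theRLEEngine);

 // Returns COMPRESS_ENGINE + index on success, -1 for unknown names.
 int id = UT_VoxelTile<fpreal32>::lookupCompressionEngine("myrle");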
2415 template <typename T>
2416 int
2417 UT_VoxelTile<T>::lookupCompressionEngine(const char *name)
2418 {
2419  int i;
2420 
2421  if (!name)
2422  return -1;
2423 
2424  for (i = 0; i < getCompressionEngines().entries(); i++)
2425  {
2426  if (!strcmp(name, getCompressionEngines()(i)->getName()))
2427  {
2428  return i + COMPRESS_ENGINE;
2429  }
2430  }
2431 
2432  return -1;
2433 }
2434 
2435 template <typename T>
2436 UT_VoxelTileCompress<T> *
2437 UT_VoxelTile<T>::getCompressionEngine(int index)
2438 {
2439  index -= COMPRESS_ENGINE;
2440 
2441  return getCompressionEngines()(index);
2442 }
2443 
2444 template <typename T>
2445 void
2446 UT_VoxelTile<T>::save(std::ostream &os) const
2447 {
2448  // Always save in our native format...
2449  if (myCompressionType >= COMPRESS_ENGINE)
2450  {
2451  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2452 
2453  if (engine->canSave())
2454  {
2455  char type = myCompressionType;
2456 
2457  UTwrite(os, &type, 1);
2458  engine->save(os, *this);
2459  return;
2460  }
2461 
2462  // Can't save, must save as raw.
2463  char type = COMPRESS_RAW;
2464  T value;
2465 
2466  UTwrite(os, &type, 1);
2467 
2468  for (int z = 0; z < zres(); z++)
2469  for (int y = 0; y < yres(); y++)
2470  for (int x = 0; x < xres(); x++)
2471  {
2472  value = (*this)(x, y, z);
2473  UTwrite<T>(os, &value, 1);
2474  }
2475  return;
2476  }
2477 
2478  int len;
2479  char type = myCompressionType;
2480 
2481  if (type == COMPRESS_RAWFULL &&
2482  myRes[0] == TILESIZE &&
2483  myRes[1] == TILESIZE &&
2484  myRes[2] != TILESIZE)
2485  {
2486  // Forbid saving this as a raw full as that will confuse
2487  // older versions of Houdini.
2488  type = COMPRESS_RAW;
2489  }
2490 
2491  UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);
2492 
2493  UTwrite(os, &type, 1);
2494 
2495  switch ((CompressionType) type)
2496  {
2497  case COMPRESS_RAW:
2498  len = myRes[2] * myRes[1] * myRes[0];
2499  UTwrite<T>(os, (T *) myData, len);
2500  break;
2501 
2502  case COMPRESS_FPREAL16:
2503  len = myRes[2] * myRes[1] * myRes[0]
2504  * UT_FixedVectorTraits<T>::TupleSize;
2505  UTwrite(os, (int16 *) myData, len);
2506  break;
2507 
2508  case COMPRESS_RAWFULL:
2509  len = TILESIZE * TILESIZE * TILESIZE;
2510  UTwrite<T>(os, (T *) myData, len);
2511  break;
2512 
2513  case COMPRESS_CONSTANT:
2514  UTwrite<T>(os, rawConstData(), 1);
2515  break;
2516 
2517  case COMPRESS_ENGINE:
2518  UT_ASSERT(!"Invalid compression type");
2519  break;
2520  }
2521 }
2522 
2523 template <typename T>
2524 void
2525 UT_VoxelTile<T>::load(UT_IStream &is, const UT_IntArray &compress)
2526 {
2527  char type, otype = -1; // initialize: printed below on corrupt input
2528  int len;
2529 
2530  is.readChar(type);
2531 
2532  // Perform the indirection to find out our native type.
2533  if (type >= 0 && type < compress.entries())
2534  {
2535  otype = type;
2536  type = compress(type);
2537  if (type == -1)
2538  {
2539  std::cerr << "Missing compression engine " << (int) otype << "\n";
2540  }
2541  }
2542 
2543  if (type >= COMPRESS_ENGINE)
2544  {
2545  freeData();
2546  myCompressionType = type;
2547 
2548  if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
2549  {
2550  // Invalid type!
2551  std::cerr << "Invalid compression engine " << (int) otype << "\n";
2552  return;
2553  }
2554 
2555  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2556 
2557  engine->load(is, *this);
2558 
2559  return;
2560  }
2561 
2562  UT_ASSERT(type >= 0);
2563  if (type < 0)
2564  {
2565  return;
2566  }
2567 
2568  freeData();
2569 
2570  myCompressionType = type;
2571 
2572  switch ((CompressionType) myCompressionType)
2573  {
2574  case COMPRESS_RAW:
2575  len = myRes[2] * myRes[1] * myRes[0];
2576  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2577  is.read<T>((T *) myData, len);
2578  break;
2579 
2580  case COMPRESS_FPREAL16:
2581  len = myRes[2] * myRes[1] * myRes[0]
2582  * UT_FixedVectorTraits<T>::TupleSize;
2583  myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2584  is.read((int16 *) myData, len);
2585  break;
2586 
2587  case COMPRESS_RAWFULL:
2588  len = TILESIZE * TILESIZE * TILESIZE;
2589  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2590  is.read<T>((T *) myData, len);
2591  break;
2592 
2593  case COMPRESS_CONSTANT:
2594  if (!inlineConstant())
2595  myData = UT_VOXEL_ALLOC(sizeof(T));
2596  is.read<T>(rawConstData(), 1);
2597  break;
2598 
2599  case COMPRESS_ENGINE:
2600  UT_ASSERT(!"Invalid compression type");
2601  break;
2602  }
2603 
2604  // Recompress raw full that are not complete in z
2605  if (myCompressionType == COMPRESS_RAW &&
2606  myRes[0] == TILESIZE &&
2607  myRes[1] == TILESIZE)
2608  myCompressionType = COMPRESS_RAWFULL;
2609 }
2610 
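A minimal sketch of how the save/load pair above is used; the tile, the streams, and the compress table (the name-to-type indirection built by the compression-type loader further below) are assumed to already exist:

 // Writing: one byte of compression type, then the payload.
 tile.save(os);            // os: std::ostream

 // Reading: the stored type byte is remapped through `compress`, the
 // indirection table loaded alongside the array.
 tile.load(is, compress);  // is: UT_IStream, compress: UT_IntArray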
2611 template <typename T>
2612 bool
2613 UT_VoxelTile<T>::save(UT_JSONWriter &w) const
2614 {
2615  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
2616  using ScalarType = typename UT_FixedVectorTraits<T>::DataType;
2617 
2618  bool ok = true;
2619 
2620  // Always save in our native format...
2621  if (myCompressionType >= COMPRESS_ENGINE)
2622  {
2623  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2624 
2625  if (engine->canSave())
2626  {
2627  char type = myCompressionType;
2628 
2629  ok = ok && w.jsonBeginArray();
2630  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2632  ok = ok && w.jsonInt(type);
2633  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2635  ok = ok && engine->save(w, *this);
2636  ok = ok && w.jsonEndArray();
2637  return ok;
2638  }
2639 
2640  // Can't save, must save as raw.
2641  char type = COMPRESS_RAW;
2642  T value;
2643  ScalarType dummy;
2644 
2645  ok = ok && w.jsonBeginArray();
2646  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2648  ok = ok && w.jsonInt(type);
2649  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2651 
2652  ok = ok && w.beginUniformArray(xres()*yres()*zres()*tuple_size,
2653  w.jidFromValue(&dummy));
2654  for (int z = 0; z < zres(); z++)
2655  for (int y = 0; y < yres(); y++)
2656  for (int x = 0; x < xres(); x++)
2657  {
2658  value = (*this)(x, y, z);
2659  if constexpr (tuple_size == 1)
2660  {
2661  ok = ok && w.uniformWrite(value);
2662  }
2663  else
2664  {
2665  for (int i = 0; i < tuple_size; i++)
2666  {
2667  ok = ok && w.uniformWrite(value(i));
2668  }
2669  }
2670  }
2671  ok = ok && w.endUniformArray();
2672  ok = ok && w.jsonEndArray();
2673  return ok;
2674  }
2675 
2676  int len;
2677  char type = myCompressionType;
2678 
2679  UT_ASSERT(type >= 0 && type < COMPRESS_ENGINE);
2680  ok = ok && w.jsonBeginArray();
2681  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2683  ok = ok && w.jsonInt(type);
2684  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
2686 
2687  switch (myCompressionType)
2688  {
2689  case COMPRESS_RAW:
2690  len = myRes[2] * myRes[1] * myRes[0];
2691  ok = ok && w.jsonUniformArray(len * tuple_size, (ScalarType *)myData);
2692  break;
2693 
2694  case COMPRESS_FPREAL16:
2695  len = myRes[2] * myRes[1] * myRes[0] * UT_FixedVectorTraits<T>::TupleSize;
2696  ok = ok && w.jsonUniformArray(len, (fpreal16 *)myData);
2697  break;
2698 
2699  case COMPRESS_RAWFULL:
2700  len = TILESIZE * TILESIZE * myRes[2];
2701  ok = ok && w.jsonUniformArray(len * tuple_size, (ScalarType *)myData);
2702  break;
2703 
2704  case COMPRESS_CONSTANT:
2705  if constexpr (tuple_size == 1)
2706  ok = ok && w.jsonValue(rawConstVal());
2707  else
2708  ok = ok && w.jsonUniformArray(tuple_size, rawConstVal().data());
2709  break;
2710  }
2711 
2712  ok = ok && w.jsonEndArray();
2713  return ok;
2714 }
2715 
2716 template <typename T>
2717 bool
2718 UT_VoxelTile<T>::load(UT_JSONParser &p, const UT_IntArray &compress)
2719 {
2720  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
2721  using ScalarType = typename UT_FixedVectorTraits<T>::DataType;
2722 
2723  UT_JSONParser::iterator it;
2724  UT_WorkBuffer key;
2725  int8 type, otype = -1; // initialize: printed below on corrupt input
2726  int len;
2727 
2728  it = p.beginArray();
2729  if (it.atEnd() || !it.getLowerKey(key))
2730  return false;
2731  if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
2733  return false;
2734  if (!p.parseNumber(type))
2735  return false;
2736 
2737  ++it;
2738 
2739  if (it.atEnd() || !it.getLowerKey(key))
2740  return false;
2741  if (UT_VoxelArrayJSON::getTileID(key.buffer()) !=
2743  return false;
2744 
2745  // Perform the indirection to find out our native type.
2746  if (type >= 0 && type < compress.entries())
2747  {
2748  otype = type;
2749  type = compress(type);
2750  if (type == -1)
2751  {
2752  std::cerr << "Missing compression engine " << (int) otype << "\n";
2753  }
2754  }
2755 
2756  if (type >= COMPRESS_ENGINE)
2757  {
2758  freeData();
2759  myCompressionType = type;
2760 
2761  if (type - COMPRESS_ENGINE >= getCompressionEngines().entries())
2762  {
2763  // Invalid type!
2764  std::cerr << "Invalid compression engine " << (int) otype << "\n";
2765  return false;
2766  }
2767 
2768  UT_VoxelTileCompress<T> *engine = getCompressionEngine(myCompressionType);
2769 
2770  bool engine_ok = engine->load(p, *this);
2771  ++it;
2772  return it.atEnd() && engine_ok;
2773  }
2774 
2775  UT_ASSERT(type >= 0);
2776  if (type < 0)
2777  {
2778  return false;
2779  }
2780 
2781  freeData();
2782 
2783  myCompressionType = type;
2784 
2785  switch (myCompressionType)
2786  {
2787  case COMPRESS_RAW:
2788  len = myRes[2] * myRes[1] * myRes[0];
2789  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2790  len *= tuple_size;
2791  if (p.parseUniformArray((ScalarType *)myData, len) != len)
2792  return false;
2793  break;
2794 
2795  case COMPRESS_FPREAL16:
2796  len = myRes[2] * myRes[1] * myRes[0] * UT_FixedVectorTraits<T>::TupleSize;
2797  myData = UT_VOXEL_ALLOC(sizeof(fpreal16) * len);
2798  if (p.parseUniformArray((fpreal16 *)myData, len) != len)
2799  return false;
2800  break;
2801 
2802  case COMPRESS_RAWFULL:
2803  len = TILESIZE * TILESIZE * myRes[2];
2804  myData = UT_VOXEL_ALLOC(sizeof(T) * len);
2805  len *= tuple_size;
2806  if (p.parseUniformArray((ScalarType *)myData, len) != len)
2807  return false;
2808  break;
2809 
2810  case COMPRESS_CONSTANT:
2811  if (!inlineConstant())
2812  myData = UT_VOXEL_ALLOC(sizeof(T));
2813  if constexpr (tuple_size == 1)
2814  {
2815  if (!p.parseNumber(*rawConstData()))
2816  return false;
2817  }
2818  else
2819  {
2820  if (!p.parseUniformArray((ScalarType *) rawConstData()->data(), tuple_size))
2821  return false;
2822  }
2823  break;
2824  }
2825 
2826  ++it;
2827  return it.atEnd();
2828 }
2829 
2830 template <typename T>
2831 void
2833 {
2834  int16 ntype = getCompressionEngines().entries();
2835  int i;
2836 
2837  ntype += COMPRESS_ENGINE;
2838 
2839  UTwrite(os, &ntype);
2840 
2841  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2842  UTsaveStringBinary(os, "raw", UT_STRING_8BIT_IO);
2843  UTsaveStringBinary(os, "rawfull", UT_STRING_8BIT_IO);
2844  UTsaveStringBinary(os, "constant", UT_STRING_8BIT_IO);
2845  UTsaveStringBinary(os, "fpreal16", UT_STRING_8BIT_IO);
2846 
2847  ntype -= COMPRESS_ENGINE;
2848  for (i = 0; i < ntype; i++)
2849  {
2850  UTsaveStringBinary(os, getCompressionEngines()(i)->getName(), UT_STRING_8BIT_IO);
2851  }
2852 }
2853 
2854 template <typename T>
2855 void
2857 {
2858  int16 ntype;
2859  int i, idx;
2860 
2861  compress.entries(0);
2862 
2863  is.read(&ntype);
2864 
2865  for (i = 0; i < ntype; i++)
2866  {
2867  UT_String name;
2868 
2870  if (name == "raw")
2871  compress.append(COMPRESS_RAW);
2872  else if (name == "rawfull")
2873  compress.append(COMPRESS_RAWFULL);
2874  else if (name == "constant")
2875  compress.append(COMPRESS_CONSTANT);
2876  else if (name == "fpreal16")
2877  compress.append(COMPRESS_FPREAL16);
2878  else
2879  {
2880  idx = lookupCompressionEngine(name);
2881 
2882  // -1 means a failure to find it in our set..
2883  // this is only a bad thing if a tile actually uses this engine.
2884 
2885  compress.append(idx);
2886  }
2887  }
2888 }
2889 
2890 template <typename T>
2891 bool
2893 {
2894  int16 ntype = getCompressionEngines().entries();
2895  int i;
2896  bool ok = true;
2897 
2898  ntype += COMPRESS_ENGINE;
2899 
2900  UT_ASSERT(COMPRESS_ENGINE == 4); // Change lower lines!
2901  ok = ok && w.beginUniformArray(ntype, UT_JID_STRING);
2902  ok = ok && w.uniformWrite("raw");
2903  ok = ok && w.uniformWrite("rawfull");
2904  ok = ok && w.uniformWrite("constant");
2905  ok = ok && w.uniformWrite("fpreal16");
2906 
2907  ntype -= COMPRESS_ENGINE;
2908  for (i = 0; i < ntype; i++)
2909  {
2910  ok = ok && w.uniformWrite(getCompressionEngines()(i)->getName());
2911  }
2912 
2913  ok = ok && w.endUniformArray();
2914  return ok;
2915 }
2916 
2917 template <typename T>
2918 bool
2920 {
2921  UT_JSONParser::iterator it;
2922  UT_WorkBuffer buffer;
2923  int idx;
2924 
2925  compress.entries(0);
2926  for (it = p.beginArray(); !it.atEnd(); ++it)
2927  {
2928  if (!p.parseString(buffer))
2929  return false;
2930 
2931  if (!buffer.strcmp("raw"))
2932  compress.append(COMPRESS_RAW);
2933  else if (!buffer.strcmp("rawfull"))
2934  compress.append(COMPRESS_RAWFULL);
2935  else if (!buffer.strcmp("constant"))
2936  compress.append(COMPRESS_CONSTANT);
2937  else if (!buffer.strcmp("fpreal16"))
2938  compress.append(COMPRESS_FPREAL16);
2939  else
2940  {
2941  idx = lookupCompressionEngine(buffer.buffer());
2942 
2943  // -1 means a failure to find it in our set..
2944  // this is only a bad thing if a tile actually uses this engine.
2945 
2946  compress.append(idx);
2947  }
2948  }
2949  return true;
2950 }
2951 
2952 
2953 
2954 //
2955 // VoxelArray definitions.
2956 //
2957 
2958 template <typename T>
2960 {
2961  myRes[0] = 0;
2962  myRes[1] = 0;
2963  myRes[2] = 0;
2964  myTileRes[0] = 0;
2965  myTileRes[1] = 0;
2966  myTileRes[2] = 0;
2967 
2968  myTiles = 0;
2969 
2970  myInvRes = 1;
2971 
2972  myBorderValue = 0;
2973  myBorderScale[0] = 0;
2974  myBorderScale[1] = 0;
2975  myBorderScale[2] = 0;
2976  myBorderType = UT_VOXELBORDER_STREAK;
2977 
2978  mySharedMem = 0;
2979  mySharedMemView = 0;
2980 }
2981 
2982 template <typename T>
2984 {
2985  deleteVoxels();
2986 }
2987 
2988 template <typename T>
2990 {
2991  myRes[0] = 0;
2992  myRes[1] = 0;
2993  myRes[2] = 0;
2994  myInvRes = 1;
2995  myTileRes[0] = 0;
2996  myTileRes[1] = 0;
2997  myTileRes[2] = 0;
2998 
2999  myTiles = 0;
3000 
3001  mySharedMem = 0;
3002  mySharedMemView = 0;
3003 
3004  *this = src;
3005 }
3006 
3007 template <typename T>
3008 void
3010  const UT_JobInfo &info)
3011 {
3012  UT_ASSERT(isMatching(src));
3013  UT_VoxelArrayIterator<T> vit(this);
3014  vit.splitByTile(info);
3015  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
3016  {
3017  int i = vit.getLinearTileNum();
3018  myTiles[i] = src.myTiles[i];
3019  }
3020 }
3021 
3022 template <typename T>
3023 const UT_VoxelArray<T> &
3025 {
3026  // Paranoid code:
3027  if (&src == this)
3028  return *this;
3029 
3030  myBorderScale[0] = src.myBorderScale[0];
3031  myBorderScale[1] = src.myBorderScale[1];
3032  myBorderScale[2] = src.myBorderScale[2];
3033  myBorderValue = src.myBorderValue;
3034  myBorderType = src.myBorderType;
3035 
3036  myCompressionOptions = src.myCompressionOptions;
3037 
3038  // Allocate proper size; don't bother setting to zero, since we'll copy the
3039  // data immediately after.
3040  size(src.myRes[0], src.myRes[1], src.myRes[2], false);
3041 
3042  copyData(src);
3043 
3044  return *this;
3045 }
3046 
3047 // If defined, construction and destruction of tiles will be multithreaded.
3048 // Otherwise, new[] and delete[] take care of calling these methods.
3049 // Unfortunately, this doesn't work at the moment...
3050 #define __MULTITHREADED_STRUCTORS__
3051 template <typename T>
3052 void
3054 {
3055 #ifdef __MULTITHREADED_STRUCTORS__
3056  // Free up each tile's heap data, then bulk-deallocate the array.
3057  UTparallelForLightItems(UT_BlockedRange<int>(0, numTiles()), // assumed parallel-for variant
3058  [&](const UT_BlockedRange<int>& range)
3059  {
3060  for(int i = range.begin(); i < range.end(); i++)
3061  {
3062  myTiles[i].~UT_VoxelTile<T>();
3063  }
3064  });
3065  operator delete(myTiles, std::nothrow);
3066 #else
3067  delete [] myTiles;
3068 #endif
3069  myTiles = 0;
3070 
3071  myTileRes[0] = myTileRes[1] = myTileRes[2] = 0;
3072  myRes[0] = myRes[1] = myRes[2] = 0;
3073 
3074  delete mySharedMemView;
3075  mySharedMemView = 0;
3076 
3077  delete mySharedMem;
3078  mySharedMem = 0;
3079 }
3080 
3081 template <typename T>
3082 void
3083 UT_VoxelArray<T>::size(int xres, int yres, int zres, bool zero)
3084 {
3085  // Check if the sizes are already right.
3086  if (myRes[0] == xres && myRes[1] == yres && myRes[2] == zres)
3087  {
3088  if (zero)
3089  {
3090  T tzero;
3091  tzero = 0;
3092  constant(tzero);
3093  }
3094  return;
3095  }
3096 
3097  deleteVoxels();
3098 
3099  // Initialize our tiles.
3100  int tile_res[3];
3101  tile_res[0] = (xres + TILEMASK) >> TILEBITS;
3102  tile_res[1] = (yres + TILEMASK) >> TILEBITS;
3103  tile_res[2] = (zres + TILEMASK) >> TILEBITS;
3104 
3105  exint ntiles = ((exint)tile_res[0]) * tile_res[1] * tile_res[2];
3106  if (ntiles)
3107  {
3108 #ifdef __MULTITHREADED_STRUCTORS__
3109  // Allocate the big array of the right size, then use placement new to
3110  // construct each tile.
3111  // Note that it is not guaranteed that the returned pointer is properly
3112  // aligned with raw operator new. However, we only practically need 8-byte
3113  // alignment given the current implementation, which should be respected by
3114  // operator new.
3115  // We have to wait for C++17 for a version of operator new that accepts
3116  // alignment requirements...
3117  myTiles = (UT_VoxelTile<T>*) operator new(sizeof(UT_VoxelTile<T>) * ntiles,
3118  std::nothrow);
3119  UTparallelForLightItems(UT_BlockedRange<int>(0, ntiles), // assumed parallel-for variant
3120  [&](const UT_BlockedRange<int>& range)
3121  {
3122  for(int k = range.begin(); k < range.end(); k++)
3123  {
3124  new(myTiles + k) UT_VoxelTile<T>();
3125 
3126  // Set the resolution of this tile.
3127  int tilex = k % tile_res[0];
3128  int k2 = k / tile_res[0];
3129  int tiley = k2 % tile_res[1];
3130  int tilez = k2 / tile_res[1];
3131  myTiles[k].setRes(SYSmin(TILESIZE, xres - tilex * TILESIZE),
3132  SYSmin(TILESIZE, yres - tiley * TILESIZE),
3133  SYSmin(TILESIZE, zres - tilez * TILESIZE));
3134  }
3135  });
3136 #else
3137  myTiles = new UT_VoxelTile<T>[ntiles];
3138 #endif
3139 
3140  // Only set resolutions *AFTER* we successfully allocate
3141  myRes[0] = xres;
3142  myRes[1] = yres;
3143  myRes[2] = zres;
3144 
3145  myInvRes = 1;
3146  if (xres)
3147  myInvRes.x() = 1.0f / myRes[0];
3148  if (yres)
3149  myInvRes.y() = 1.0f / myRes[1];
3150  if (zres)
3151  myInvRes.z() = 1.0f / myRes[2];
3152 
3153  myTileRes[0] = tile_res[0];
3154  myTileRes[1] = tile_res[1];
3155  myTileRes[2] = tile_res[2];
3156 
3157 #ifndef __MULTITHREADED_STRUCTORS__
3158  int i = 0;
3159  for (int tz = 0; tz < myTileRes[2]; tz++)
3160  {
3161  int zr;
3162  if (tz < myTileRes[2]-1)
3163  zr = TILESIZE;
3164  else
3165  zr = zres - tz * TILESIZE;
3166 
3167  for (int ty = 0; ty < myTileRes[1]; ty++)
3168  {
3169  int yr;
3170  if (ty < myTileRes[1]-1)
3171  yr = TILESIZE;
3172  else
3173  yr = yres - ty * TILESIZE;
3174 
3175  int tx, xr = TILESIZE;
3176  for (tx = 0; tx < myTileRes[0]-1; tx++)
3177  {
3178  myTiles[i].setRes(xr, yr, zr);
3179 
3180  i++;
3181  }
3182  xr = xres - tx * TILESIZE;
3183  myTiles[i].setRes(xr, yr, zr);
3184  i++;
3185  }
3186  }
3187 #endif
3188  }
3189  else
3190  myTiles = 0;
3191 }
3192 
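A minimal usage sketch of size(); the 16^3 tile dimensions are an assumption based on how TILEBITS/TILEMASK are used above:

 UT_VoxelArray<fpreal32> vol;

 // Allocates ceil(100/16) x ceil(64/16) x ceil(20/16) = 7 x 4 x 2 tiles.
 vol.size(100, 64, 20, /*zero=*/true);

 // Same resolution again: no reallocation happens; the contents are
 // cleared only because zero is true.
 vol.size(100, 64, 20, /*zero=*/true);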
3193 template <typename T>
3194 void
3196 {
3197  // This call will only actually resize if the sizes are different. If it
3198  // does not resize, contents will be left alone.
3199  size(src.getXRes(), src.getYRes(), src.getZRes(), false);
3200 
3201  // Update border conditions.
3202  myBorderType = src.myBorderType;
3203  myBorderScale[0] = src.myBorderScale[0];
3204  myBorderScale[1] = src.myBorderScale[1];
3205  myBorderScale[2] = src.myBorderScale[2];
3206  myBorderValue = src.myBorderValue;
3207 
3208  // Match our compression tolerances
3209  myCompressionOptions = src.myCompressionOptions;
3210 }
3211 
3212 template <typename T>
3213 int64
3215 {
3216  int64 mem = inclusive ? sizeof(*this) : 0;
3217 
3218  int ntiles = numTiles();
3219  for (int i = 0; i < ntiles; i++)
3220  mem += myTiles[i].getMemoryUsage(true);
3221 
3222  if (mySharedMem)
3223  mem += mySharedMem->getMemoryUsage(true);
3224 
3225  if (mySharedMemView)
3226  mem += mySharedMemView->getMemoryUsage(true);
3227 
3228  return mem;
3229 }
3230 
3231 template <typename T>
3232 void
3234 {
3235  UT_VoxelArrayIterator<T> vit(this);
3236  // We don't want splitByTile here since we update the actual tiles,
3237  // so we would get false sharing if we interleaved.
3238  vit.setPartialRange(info.job(), info.numJobs());
3239  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
3240  {
3241  int i = vit.getLinearTileNum();
3242  myTiles[i].makeConstant(t);
3243  }
3244 }
3245 
3246 template <typename T>
3247 bool
3249 {
3250  int i, ntiles;
3251  T cval;
3252  cval = 0;
3253  const fpreal tol = SYS_FTOLERANCE_R;
3254 
3255  ntiles = numTiles();
3256  for (i = 0; i < ntiles; i++)
3257  {
3258  if (!myTiles[i].isConstant())
3259  {
3260  return false;
3261  }
3262 
3263  if (!i)
3264  {
3265  // First tile, get the constant value.
3266  cval = myTiles[i].rawConstVal();
3267  }
3268  else
3269  {
3270  // See if we have deviated too much.
3271  if (UT_VoxelTile<T>::dist(cval, myTiles[i].rawConstVal()) > tol)
3272  {
3273  return false;
3274  }
3275  }
3276  }
3277 
3278  // All tiles are both constant and within tolerance of each
3279  // other. Write out our constant value and return true.
3280  if (t)
3281  *t = cval;
3282 
3283  return true;
3284 }
3285 
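A small sketch of the intended calling pattern for isConstant(); the output pointer is optional and written only when the array really is constant:

 fpreal32 cval;
 if (vol.isConstant(&cval))
 {
     // Every tile is constant and all tiles agree within
     // SYS_FTOLERANCE_R; cval now holds the shared value.
 }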
3286 template <typename T>
3287 bool
3289 {
3290  int i, ntiles;
3291 
3292  ntiles = numTiles();
3293  for (i = 0; i < ntiles; i++)
3294  {
3295  if (myTiles[i].hasNan())
3296  {
3297  return true;
3298  }
3299  }
3300 
3301  return false;
3302 }
3303 
3304 template <typename T>
3305 T
3307 {
3308  // We go from the position in the unit cube into the index.
3309  // The center of cells must map to the exact integer indices.
3310  pos.x() *= myRes[0];
3311  pos.y() *= myRes[1];
3312  pos.z() *= myRes[2];
3313  pos.x() -= 0.5;
3314  pos.y() -= 0.5;
3315  pos.z() -= 0.5;
3316 
3317  return lerpVoxelCoord(pos);
3318 }
3319 
3320 template <typename T>
3321 T
3322 UT_VoxelArray<T>::lerpVoxelCoord(UT_Vector3F pos) const
3323 {
3324  int x, y, z;
3325  // Yes, these have to be 32 because split float requires 32!
3326  fpreal32 fx, fy, fz;
3327 
3328  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3329 
3330  return lerpVoxel(x, y, z, fx, fy, fz);
3331 }
3332 
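A sketch contrasting the two coordinate conventions used by the samplers above (vol and the scalar type are illustrative):

 // Unit-cube convention: (0,0,0)-(1,1,1) spans the whole array; the
 // wrapper above rescales by the resolution and shifts by -0.5 so
 // that voxel centers land on integer coordinates.
 // Voxel-coordinate convention: integers are voxel centers.
 UT_Vector3F vpos(10.25f, 3.5f, 7.0f);
 fpreal32 v = vol.lerpVoxelCoord(vpos);  // trilinear, voxel coords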
3333 template <typename T>
3334 template <int AXIS2D>
3335 T
3337 {
3338  int x, y, z;
3339  // Yes, these have to be 32 because split float requires 32!
3340  fpreal32 fx, fy, fz;
3341 
3342  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3343 
3344  return lerpVoxelAxis<AXIS2D>(x, y, z, fx, fy, fz);
3345 }
3346 
3347 template <typename T>
3348 T
3349 UT_VoxelArray<T>::lerpVoxel(int x, int y, int z,
3350  float fx, float fy, float fz) const
3351 {
3352  // Do trilinear interpolation.
3353  T vx, vx1, vy, vy1, vz;
3354 
3355  // Optimization for common cases (values are within the voxel range and
3356  // are all within the same tile)
3357  if ( !((x | y | z) < 0) &&
3358  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3359 
3360  // (x > 0) && (y > 0) && (z > 0) &&
3361  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3362  // report in bounds!
3363  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3364  {
3365  int xm, ym, zm;
3366 
3367  xm = x & TILEMASK;
3368  ym = y & TILEMASK;
3369  zm = z & TILEMASK;
3370 
3371  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3372  {
3373  const UT_VoxelTile<T> *tile =
3374  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3375 
3376  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
3377  }
3378  else
3379  {
3380  // We cross tile boundaries but we remain within
3381  // the voxel grid. We can thus avoid any bounds checking
3382  // and use operator() rather than getValue.
3383 
3384  // Lerp x:x+1, y, z
3385  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3386  (*this)(x+1, y, z),
3387  fx);
3388  // Lerp x:x+1, y+1, z
3389  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3390  (*this)(x+1, y+1, z),
3391  fx);
3392  // Lerp x:x+1, y:y+1, z
3393  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3394 
3395  // Lerp x:x+1, y, z+1
3396  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3397  (*this)(x+1, y, z+1),
3398  fx);
3399  // Lerp x:x+1, y+1, z+1
3400  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3401  (*this)(x+1, y+1, z+1),
3402  fx);
3403 
3404  // Lerp x:x+1, y:y+1, z+1
3405  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3406 
3407  // Lerp x:x+1, y:y+1, z:z+1
3408  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3409  }
3410  }
3411  else
3412  {
3413  // Lerp x:x+1, y, z
3414  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3415  getValue(x+1, y, z),
3416  fx);
3417  // Lerp x:x+1, y+1, z
3418  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3419  getValue(x+1, y+1, z),
3420  fx);
3421 
3422  // Lerp x:x+1, y:y+1, z
3423  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3424 
3425  // Lerp x:x+1, y, z+1
3426  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3427  getValue(x+1, y, z+1),
3428  fx);
3429  // Lerp x:x+1, y+1, z+1
3430  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3431  getValue(x+1, y+1, z+1),
3432  fx);
3433 
3434  // Lerp x:x+1, y:y+1, z+1
3435  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3436 
3437  // Lerp x:x+1, y:y+1, z:z+1
3438  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3439  }
3440 
3441  return vz;
3442 }
3443 
3444 template <typename T>
3445 template <int AXIS2D>
3446 T
3447 UT_VoxelArray<T>::lerpVoxelAxis(int x, int y, int z,
3448  float fx, float fy, float fz) const
3449 {
3450  // Do bilinear interpolation.
3451  T vx, vx1, vy, vy1, vz;
3452 
3453  int lesscomp = 0, greatercomp = -1;
3454 
3455  if (AXIS2D != 0)
3456  {
3457  lesscomp |= x;
3458  greatercomp &= (x - myRes[0]+1);
3459  }
3460  if (AXIS2D != 1)
3461  {
3462  lesscomp |= y;
3463  greatercomp &= (y - myRes[1]+1);
3464  }
3465  if (AXIS2D != 2)
3466  {
3467  lesscomp |= z;
3468  greatercomp &= (z - myRes[2]+1);
3469  }
3470 
3471  // Optimization for common cases (values are within the voxel range and
3472  // are all within the same tile)
3473  if ( !(lesscomp < 0) && (greatercomp < 0) )
3474  {
3475  int xm, ym, zm;
3476 
3477  xm = x & TILEMASK;
3478  ym = y & TILEMASK;
3479  zm = z & TILEMASK;
3480 
3481  if ((AXIS2D == 0 || xm != TILEMASK) &&
3482  (AXIS2D == 1 || ym != TILEMASK) &&
3483  (AXIS2D == 2 || zm != TILEMASK))
3484  {
3485  const UT_VoxelTile<T> *tile =
3486  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3487  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3488  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3489 
3490  vz = tile->template lerpAxis<AXIS2D>(xm, ym, zm, fx, fy, fz);
3491  }
3492  else
3493  {
3494  // We cross tile boundaries but we remain within
3495  // the voxel grid. We can thus avoid any bounds checking
3496  // and use operator() rather than getValue.
3497 
3498  // Lerp x:x+1, y, z
3499  if (AXIS2D != 0)
3500  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
3501  (*this)(x+1, y, z),
3502  fx);
3503  else
3504  vx = (*this)(x, y, z);
3505 
3506  if (AXIS2D != 1)
3507  {
3508  // Lerp x:x+1, y+1, z
3509  if (AXIS2D != 0)
3510  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
3511  (*this)(x+1, y+1, z),
3512  fx);
3513  else
3514  vx1 = (*this)(x, y+1, z);
3515  // Lerp x:x+1, y:y+1, z
3516  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3517  }
3518  else
3519  vy = vx;
3520 
3521  if (AXIS2D != 2)
3522  {
3523  // Lerp x:x+1, y, z+1
3524  if (AXIS2D != 0)
3525  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
3526  (*this)(x+1, y, z+1),
3527  fx);
3528  else
3529  vx = (*this)(x, y, z+1);
3530 
3531  if (AXIS2D != 1)
3532  {
3533  // Lerp x:x+1, y+1, z+1
3534  if (AXIS2D != 0)
3535  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
3536  (*this)(x+1, y+1, z+1),
3537  fx);
3538  else
3539  vx1 = (*this)(x, y+1, z+1);
3540  // Lerp x:x+1, y:y+1, z+1
3541  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3542  }
3543  else
3544  vy1 = vx;
3545 
3546  // Lerp x:x+1, y:y+1, z:z+1
3547  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3548  }
3549  else
3550  vz = vy;
3551  }
3552  }
3553  else
3554  {
3555  // Lerp x:x+1, y, z
3556  if (AXIS2D != 0)
3557  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
3558  getValue(x+1, y, z),
3559  fx);
3560  else
3561  vx = getValue(x, y, z);
3562 
3563  if (AXIS2D != 1)
3564  {
3565  // Lerp x:x+1, y+1, z
3566  if (AXIS2D != 0)
3567  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
3568  getValue(x+1, y+1, z),
3569  fx);
3570  else
3571  vx1 = getValue(x, y+1, z);
3572  // Lerp x:x+1, y:y+1, z
3573  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3574  }
3575  else
3576  vy = vx;
3577 
3578  if (AXIS2D != 2)
3579  {
3580  // Lerp x:x+1, y, z+1
3581  if (AXIS2D != 0)
3582  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
3583  getValue(x+1, y, z+1),
3584  fx);
3585  else
3586  vx = getValue(x, y, z+1);
3587 
3588  if (AXIS2D != 1)
3589  {
3590  // Lerp x:x+1, y+1, z+1
3591  if (AXIS2D != 0)
3592  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
3593  getValue(x+1, y+1, z+1),
3594  fx);
3595  else
3596  vx1 = getValue(x, y+1, z+1);
3597  // Lerp x:x+1, y:y+1, z+1
3598  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
3599  }
3600  else
3601  vy1 = vx;
3602 
3603  // Lerp x:x+1, y:y+1, z:z+1
3604  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
3605  }
3606  else
3607  vz = vy;
3608  }
3609 
3610  return vz;
3611 }
3612 
3613 template <typename T>
3614 void
3616  UT_Vector3F pos) const
3617 {
3618  // We go from the position in the unit cube into the index.
3619  // The center of cells must map to the exact integer indices.
3620  pos.x() *= myRes[0];
3621  pos.y() *= myRes[1];
3622  pos.z() *= myRes[2];
3623  pos.x() -= 0.5;
3624  pos.y() -= 0.5;
3625  pos.z() -= 0.5;
3626 
3627  lerpVoxelCoordMinMax(lerp, lmin, lmax, pos);
3628 }
3629 
3630 template <typename T>
3631 void
3632 UT_VoxelArray<T>::lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
3633  UT_Vector3F pos) const
3634 {
3635  int x, y, z;
3636  // Yes, these have to be 32 because split float requires 32!
3637  fpreal32 fx, fy, fz;
3638 
3639  splitVoxelCoord(pos, x, y, z, fx, fy, fz);
3640 
3641  lerpVoxelMinMax(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3642 }
3643 
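A sketch of the classic use of the min/max variants: limiting a higher-order reconstruction to the trilinear bounds (a common clamp in advection schemes). myCubicSample() is a hypothetical helper:

 UT_Vector3F vpos(10.25f, 3.5f, 7.0f);
 fpreal32 val, vmin, vmax;
 vol.lerpVoxelCoordMinMax(val, vmin, vmax, vpos);

 fpreal32 sharp = myCubicSample(vol, vpos);  // hypothetical helper
 sharp = SYSclamp(sharp, vmin, vmax);        // no over/undershoot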
3644 
3645 template <typename T>
3646 template <int AXIS2D>
3647 void
3649  UT_Vector3F pos) const
3650 {
3651  int x, y, z;
3652  // Yes, these have to be 32 because split float requires 32!
3653  fpreal32 fx, fy, fz;
3654 
3655  splitVoxelCoordAxis<AXIS2D>(pos, x, y, z, fx, fy, fz);
3656 
3657  lerpVoxelMinMaxAxis<AXIS2D>(lerp, lmin, lmax, x, y, z, fx, fy, fz);
3658 }
3659 
3660 
3661 template <typename T>
3662 void
3663 UT_VoxelArray<T>::lerpVoxelMinMax(
3664  T &lerp, T &smin, T &smax,
3665  int x, int y, int z,
3666  float fx, float fy, float fz) const
3667 {
3668  T samples[8];
3669 
3670  if (extractSample(x, y, z, samples))
3671  {
3672  lerp = smin = smax = samples[0];
3673  return;
3674  }
3675 
3676  lerp = lerpSample(samples, fx, fy, fz);
3677 
3678  T smin1, smax1;
3679 
3680  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3681  smin, smax);
3682  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3683  smin1, smax1);
3684 
3685  smin = SYSmin(smin, smin1);
3686  smax = SYSmax(smax, smax1);
3687 }
3688 
3689 template <typename T>
3690 template <int AXIS2D>
3691 void
3692 UT_VoxelArray<T>::lerpVoxelMinMaxAxis(
3693  T &lerp, T &smin, T &smax,
3694  int x, int y, int z,
3695  float fx, float fy, float fz) const
3696 {
3697  T samples[8];
3698 
3699  if (extractSampleAxis<AXIS2D>(x, y, z, samples))
3700  {
3701  lerp = smin = smax = samples[0];
3702  return;
3703  }
3704 
3705  lerp = lerpSampleAxis<AXIS2D>(samples, fx, fy, fz);
3706 
3707  if (AXIS2D == 0)
3708  SYSminmax(samples[0], samples[2], samples[4], samples[6],
3709  smin, smax);
3710  else if (AXIS2D == 1)
3711  SYSminmax(samples[0], samples[1], samples[4], samples[5],
3712  smin, smax);
3713  else if (AXIS2D == 2)
3714  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3715  smin, smax);
3716  else
3717  {
3718  T smin1, smax1;
3719 
3720  SYSminmax(samples[0], samples[1], samples[2], samples[3],
3721  smin, smax);
3722  SYSminmax(samples[4+0], samples[4+1], samples[4+2], samples[4+3],
3723  smin1, smax1);
3724 
3725  smin = SYSmin(smin, smin1);
3726  smax = SYSmax(smax, smax1);
3727  }
3728 }
3729 
3730 template <typename T>
3731 bool
3732 UT_VoxelArray<T>::extractSample(int x, int y, int z,
3733  T *samples) const
3734 {
3735  // Optimization for common cases (values are within the voxel range and
3736  // are all within the same tile)
3737  if ( !((x | y | z) < 0) &&
3738  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3739 
3740  // (x > 0) && (y > 0) && (z > 0) &&
3741  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3742  // report in bounds!
3743  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3744  {
3745  int xm, ym, zm;
3746 
3747  xm = x & TILEMASK;
3748  ym = y & TILEMASK;
3749  zm = z & TILEMASK;
3750 
3751  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3752  {
3753  const UT_VoxelTile<T> *tile =
3754  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3755 
3756  return tile->extractSample(xm, ym, zm, samples);
3757  }
3758  else
3759  {
3760  // We cross tile boundaries but we remain within
3761  // the voxel grid. We can thus avoid any bounds checking
3762  // and use operator() rather than getValue.
3763  samples[0] = (*this)(x, y, z);
3764  samples[1] = (*this)(x+1, y, z);
3765  samples[2+0] = (*this)(x, y+1, z);
3766  samples[2+1] = (*this)(x+1, y+1, z);
3767  samples[4+0] = (*this)(x, y, z+1);
3768  samples[4+1] = (*this)(x+1, y, z+1);
3769  samples[4+2+0] = (*this)(x, y+1, z+1);
3770  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3771  }
3772  }
3773  else
3774  {
3775  samples[0] = getValue(x, y, z);
3776  samples[1] = getValue(x+1, y, z);
3777  samples[2+0] = getValue(x, y+1, z);
3778  samples[2+1] = getValue(x+1, y+1, z);
3779  samples[4+0] = getValue(x, y, z+1);
3780  samples[4+1] = getValue(x+1, y, z+1);
3781  samples[4+2+0] = getValue(x, y+1, z+1);
3782  samples[4+2+1] = getValue(x+1, y+1, z+1);
3783  }
3784 
3785  return false;
3786 }
3787 
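A sketch of the extract/lerp split, which lets callers reuse the eight corner samples; a true return means the sample set collapsed to a single constant-tile value. The cell index x,y,z and fractions fx,fy,fz are assumed to come from a split such as splitVoxelCoord():

 fpreal32 s[8], v;
 if (vol.extractSample(x, y, z, s))
     v = s[0];                           // constant: corners identical
 else
     v = vol.lerpSample(s, fx, fy, fz);  // trilinear blend of corners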
3788 
3789 template <typename T>
3790 template <int AXIS2D>
3791 bool
3792 UT_VoxelArray<T>::extractSampleAxis(int x, int y, int z,
3793  T *samples) const
3794 {
3795  // Optimization for common cases (values are within the voxel range and
3796  // are all within the same tile)
3797  int lesscomp = 0, greatercomp = -1;
3798 
3799  if (AXIS2D != 0)
3800  {
3801  lesscomp |= x;
3802  greatercomp &= (x - myRes[0]+1);
3803  }
3804  if (AXIS2D != 1)
3805  {
3806  lesscomp |= y;
3807  greatercomp &= (y - myRes[1]+1);
3808  }
3809  if (AXIS2D != 2)
3810  {
3811  lesscomp |= z;
3812  greatercomp &= (z - myRes[2]+1);
3813  }
3814 
3815  // Optimization for common cases (values are within the voxel range and
3816  // are all within the same tile)
3817  if ( !(lesscomp < 0) && (greatercomp < 0) )
3818  {
3819  int xm, ym, zm;
3820 
3821  xm = x & TILEMASK;
3822  ym = y & TILEMASK;
3823  zm = z & TILEMASK;
3824 
3825  if ((AXIS2D == 0 || xm != TILEMASK) &&
3826  (AXIS2D == 1 || ym != TILEMASK) &&
3827  (AXIS2D == 2 || zm != TILEMASK))
3828  {
3829  const UT_VoxelTile<T> *tile =
3830  getTile( (AXIS2D == 0) ? 0 : (x >> TILEBITS),
3831  (AXIS2D == 1) ? 0 : (y >> TILEBITS),
3832  (AXIS2D == 2) ? 0 : (z >> TILEBITS) );
3833 
3834  return tile->template extractSampleAxis<AXIS2D>(xm, ym, zm, samples);
3835  }
3836  else
3837  {
3838  // We cross tile boundaries but we remain within
3839  // the voxel grid. We can thus avoid any bounds checking
3840  // and use operator() rather than getValue.
3841  samples[0] = (*this)(x, y, z);
3842  if (AXIS2D != 0)
3843  samples[1] = (*this)(x+1, y, z);
3844  if (AXIS2D != 1)
3845  {
3846  samples[2+0] = (*this)(x, y+1, z);
3847  if (AXIS2D != 0)
3848  samples[2+1] = (*this)(x+1, y+1, z);
3849  }
3850  if (AXIS2D != 2)
3851  {
3852  samples[4+0] = (*this)(x, y, z+1);
3853  if (AXIS2D != 0)
3854  samples[4+1] = (*this)(x+1, y, z+1);
3855  if (AXIS2D != 1)
3856  {
3857  samples[4+2+0] = (*this)(x, y+1, z+1);
3858  if (AXIS2D != 0)
3859  samples[4+2+1] = (*this)(x+1, y+1, z+1);
3860  }
3861  }
3862  }
3863  }
3864  else
3865  {
3866  samples[0] = getValue(x, y, z);
3867  if (AXIS2D != 0)
3868  samples[1] = getValue(x+1, y, z);
3869  if (AXIS2D != 1)
3870  {
3871  samples[2+0] = getValue(x, y+1, z);
3872  if (AXIS2D != 0)
3873  samples[2+1] = getValue(x+1, y+1, z);
3874  }
3875  if (AXIS2D != 2)
3876  {
3877  samples[4+0] = getValue(x, y, z+1);
3878  if (AXIS2D != 0)
3879  samples[4+1] = getValue(x+1, y, z+1);
3880  if (AXIS2D != 1)
3881  {
3882  samples[4+2+0] = getValue(x, y+1, z+1);
3883  if (AXIS2D != 0)
3884  samples[4+2+1] = getValue(x+1, y+1, z+1);
3885  }
3886  }
3887  }
3888 
3889  return false;
3890 }
3891 
3892 template <typename T>
3893 bool
3894 UT_VoxelArray<T>::extractSamplePlus(int x, int y, int z,
3895  T *samples) const
3896 {
3897  // Optimization for common cases (values are within the voxel range and
3898  // are all within the same tile)
3899  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3900  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3901 
3902  // (x > 0) && (y > 0) && (z > 0) &&
3903  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3904  // report in bounds!
3905  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3906  {
3907  int xm, ym, zm;
3908 
3909  xm = x & TILEMASK;
3910  ym = y & TILEMASK;
3911  zm = z & TILEMASK;
3912 
3913  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3914  {
3915  const UT_VoxelTile<T> *tile =
3916  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3917 
3918  return tile->extractSamplePlus(xm, ym, zm, samples);
3919  }
3920  else
3921  {
3922  // We cross tile boundaries but we remain within
3923  // the voxel grid. We can thus avoid any bounds checking
3924  // and use operator() rather than getValue.
3925  samples[0] = (*this)(x-1, y, z);
3926  samples[1] = (*this)(x+1, y, z);
3927  samples[2+0] = (*this)(x, y-1, z);
3928  samples[2+1] = (*this)(x, y+1, z);
3929  samples[4+0] = (*this)(x, y, z-1);
3930  samples[4+1] = (*this)(x, y, z+1);
3931  samples[6] = (*this)(x, y, z);
3932  }
3933  }
3934  else
3935  {
3936  samples[0] = getValue(x-1, y, z);
3937  samples[1] = getValue(x+1, y, z);
3938  samples[2+0] = getValue(x, y-1, z);
3939  samples[2+1] = getValue(x, y+1, z);
3940  samples[4+0] = getValue(x, y, z-1);
3941  samples[4+1] = getValue(x, y, z+1);
3942  samples[6] = getValue(x, y, z);
3943  }
3944 
3945  return false;
3946 }
3947 
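A sketch of one natural consumer of the plus stencil: central differences. The sample ordering (-x, +x, -y, +y, -z, +z, center) is taken from the body above; unit voxel spacing is assumed:

 fpreal32 s[7];
 vol.extractSamplePlus(x, y, z, s);

 // Central differences; divide by the actual voxel size if non-unit.
 UT_Vector3F grad(0.5f * (s[1] - s[0]),
                  0.5f * (s[3] - s[2]),
                  0.5f * (s[5] - s[4]));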
3948 #if 0
3949 /// Implementation of UT_VoxelTile::extractSampleCube() has an error (see the
3950 /// comments for it), which cascaded to here. Simply removing this function for
3951 /// now, as there isn't any need for it--at least in our own code.
3952 template <typename T>
3953 bool
3954 UT_VoxelArray<T>::extractSampleCube(int x, int y, int z,
3955  T *samples) const
3956 {
3957  // Optimization for common cases (values are within the voxel range and
3958  // are all within the same tile)
3959  if ( !(((x-1) | (y-1) | (z-1)) < 0) &&
3960  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
3961 
3962  // (x > 0) && (y > 0) && (z > 0) &&
3963  // Do not use x+1 < foo as if x is MAX_INT that will falsely
3964  // report in bounds!
3965  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
3966  {
3967  int xm, ym, zm;
3968 
3969  xm = x & TILEMASK;
3970  ym = y & TILEMASK;
3971  zm = z & TILEMASK;
3972 
3973  if (xm && ym && zm && (xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
3974  {
3975  const UT_VoxelTile<T> *tile =
3976  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
3977 
3978  return tile->extractSampleCube(xm, ym, zm, samples);
3979  }
3980  else
3981  {
3982  // We cross tile boundaries but we remain within
3983  // the voxel grid. We can thus avoid any bounds checking
3984  // and use operator() rather than getValue.
3985  int sampidx = 0;
3986  for (int dz = -1; dz <= 1; dz++)
3987  {
3988  for (int dy = -1; dy <= 1; dy++)
3989  {
3990  for (int dx = -1; dx <= 1; dx++)
3991  {
3992  samples[sampidx++] = (*this)(x+dx, y+dy, z+dz);
3993  }
3994  }
3995  }
3996  }
3997  }
3998  else
3999  {
4000  int sampidx = 0;
4001  for (int dz = -1; dz <= 1; dz++)
4002  {
4003  for (int dy = -1; dy <= 1; dy++)
4004  {
4005  for (int dx = -1; dx <= 1; dx++)
4006  {
4007  samples[sampidx++] = getValue(x+dx, y+dy, z+dz);
4008  }
4009  }
4010  }
4011  }
4012 
4013  return false;
4014 }
4015 #endif
4016 
4017 template <typename T>
4018 T
4019 UT_VoxelArray<T>::lerpSample(T *samples,
4020  float fx, float fy, float fz) const
4021 {
4022 #if 1
4023  // Do trilinear interpolation.
4024  T vx, vx1, vy, vy1, vz;
4025 
4026  // Lerp x:x+1, y, z
4027  vx = UT_VoxelTile<T>::lerpValues(samples[0],
4028  samples[1],
4029  fx);
4030  // Lerp x:x+1, y+1, z
4031  vx1= UT_VoxelTile<T>::lerpValues(samples[2],
4032  samples[2+1],
4033  fx);
4034  // Lerp x:x+1, y:y+1, z
4035  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4036 
4037  // Lerp x:x+1, y, z+1
4038  vx = UT_VoxelTile<T>::lerpValues(samples[4],
4039  samples[4+1],
4040  fx);
4041  // Lerp x:x+1, y+1, z+1
4042  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2],
4043  samples[4+2+1],
4044  fx);
4045 
4046  // Lerp x:x+1, y:y+1, z+1
4047  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4048 
4049  // Lerp x:x+1, y:y+1, z:z+1
4050  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4051 
4052  return vz;
4053 #else
4054  v4uf a, b, vfx, vfy, vfz;
4055 
4056  a = v4uf(&samples[0]);
4057  b = v4uf(&samples[4]);
4058 
4059  vfx = v4uf(fx);
4060  vfy = v4uf(fy);
4061  vfz = v4uf(fz);
4062 
4063  b -= a;
4064  a = madd(b, fz, a);
4065 
4066  b = a.swizzle<2, 3, 0, 1>();
4067  b -= a;
4068  a = madd(b, fy, a);
4069 
4070  b = a.swizzle<1, 2, 3, 0>();
4071  b -= a;
4072  a = madd(b, fx, a);
4073 
4074  return a[0];
4075 #endif
4076 }
4077 
4078 template <typename T>
4079 template <int AXIS2D>
4080 T
4081 UT_VoxelArray<T>::lerpSampleAxis(T *samples,
4082  float fx, float fy, float fz) const
4083 {
4084  // Do trilinear interpolation.
4085  T vx, vx1, vy, vy1, vz;
4086 
4087  // Lerp x:x+1, y, z
4088  if (AXIS2D != 0)
4089  vx = UT_VoxelTile<T>::lerpValues(samples[0], samples[1], fx);
4090  else
4091  vx = samples[0];
4092 
4093  if (AXIS2D != 1)
4094  {
4095  // Lerp x:x+1, y+1, z
4096  if (AXIS2D != 0)
4097  vx1= UT_VoxelTile<T>::lerpValues(samples[2], samples[2+1], fx);
4098  else
4099  vx1= samples[2];
4100 
4101  // Lerp x:x+1, y:y+1, z
4102  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4103  }
4104  else
4105  vy = vx;
4106 
4107  if (AXIS2D != 2)
4108  {
4109  // Lerp x:x+1, y, z+1
4110  if (AXIS2D != 0)
4111  vx = UT_VoxelTile<T>::lerpValues(samples[4], samples[4+1], fx);
4112  else
4113  vx = samples[4];
4114 
4115  if (AXIS2D != 1)
4116  {
4117  // Lerp x:x+1, y+1, z+1
4118  if (AXIS2D != 0)
4119  vx1= UT_VoxelTile<T>::lerpValues(samples[4+2], samples[4+2+1], fx);
4120  else
4121  vx1= samples[4+2];
4122 
4123  // Lerp x:x+1, y:y+1, z+1
4124  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4125  }
4126  else
4127  vy1 = vx;
4128 
4129  // Lerp x:x+1, y:y+1, z:z+1
4130  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4131  }
4132  else
4133  vz = vy;
4134 
4135  return vz;
4136 }
4137 
4138 template <typename T>
4139 T
4141 {
4142  int x, y, z;
4143  // Yes, these have to be 32 because split float requires 32!
4144  fpreal32 fx, fy, fz;
4145 
4146  // We go from the position in the unit cube into the index.
4147  // The center of cells must map to the exact integer indices.
4148  pos.x() *= myRes[0];
4149  pos.y() *= myRes[1];
4150  pos.z() *= myRes[2];
4151  pos.x() -= 0.5;
4152  pos.y() -= 0.5;
4153  pos.z() -= 0.5;
4154 
4155  // Determine integer & fractional components.
4156  fx = pos.x();
4157  SYSfastSplitFloat(fx, x);
4158  fy = pos.y();
4159  SYSfastSplitFloat(fy, y);
4160  fz = pos.z();
4161  SYSfastSplitFloat(fz, z);
4162 
4163  // Do trilinear interpolation.
4164  T vx, vx1, vy, vy1, vz;
4165 
4166  // NOTE:
4167  // If you get a crash reading one of these getValues, note
4168  // that if you are reading from the same voxel array that you
4169  // are writing to, and doing so in a multi-threaded, tile-by-tile
4170  // approach, just because your positions match doesn't mean you
4171  // won't access neighbouring tiles. To avoid this, perform
4172  // the obvious optimization of checking for aligned tiles
4173  // and using direct indices.
4174  // (I have avoided using if (!fx && !fy && !fz) as the test
4175  // as that is both potentially inaccurate (round off in conversion
4176  // may mess us up) and said optimization is a useful one anyway)
4177 
4178  // Optimization for common cases (values are within the voxel range and
4179  // are all within the same tile)
4180  if ( !((x | y | z) < 0) &&
4181  (((x - myRes[0]+1) & (y - myRes[1]+1) & (z - myRes[2]+1)) < 0))
4182  // if ( (x > 0) && (y > 0) && (z > 0) &&
4183  // Do not use x+1 < foo as if x is MAX_INT that will falsely
4184  // report in bounds!
4185  // (x < myRes[0]-1) && (y < myRes[1]-1) && (z < myRes[2]-1) )
4186  {
4187  int xm, ym, zm;
4188 
4189  xm = x & TILEMASK;
4190  ym = y & TILEMASK;
4191  zm = z & TILEMASK;
4192 
4193  if ((xm != TILEMASK) && (ym != TILEMASK) && (zm != TILEMASK))
4194  {
4195  const UT_VoxelTile<T> *tile =
4196  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
4197 
4198  vz = tile->lerp(xm, ym, zm, fx, fy, fz);
4199  }
4200  else
4201  {
4202  // We cross tile boundaries but we remain within
4203  // the voxel grid. We can thus avoid any bounds checking
4204  // and use operator() rather than getValue.
4205 
4206  // Lerp x:x+1, y, z
4207  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z),
4208  (*this)(x+1, y, z),
4209  fx);
4210  // Lerp x:x+1, y+1, z
4211  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z),
4212  (*this)(x+1, y+1, z),
4213  fx);
4214  // Lerp x:x+1, y:y+1, z
4215  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4216 
4217  // Lerp x:x+1, y, z+1
4218  vx = UT_VoxelTile<T>::lerpValues((*this)(x, y, z+1),
4219  (*this)(x+1, y, z+1),
4220  fx);
4221  // Lerp x:x+1, y+1, z+1
4222  vx1= UT_VoxelTile<T>::lerpValues((*this)(x, y+1, z+1),
4223  (*this)(x+1, y+1, z+1),
4224  fx);
4225 
4226  // Lerp x:x+1, y:y+1, z+1
4227  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4228 
4229  // Lerp x:x+1, y:y+1, z:z+1
4230  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4231  }
4232  }
4233  else
4234  {
4235  // Lerp x:x+1, y, z
4236  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z),
4237  getValue(x+1, y, z),
4238  fx);
4239  // Lerp x:x+1, y+1, z
4240  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z),
4241  getValue(x+1, y+1, z),
4242  fx);
4243 
4244  // Lerp x:x+1, y:y+1, z
4245  vy = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4246 
4247  // Lerp x:x+1, y, z+1
4248  vx = UT_VoxelTile<T>::lerpValues(getValue(x, y, z+1),
4249  getValue(x+1, y, z+1),
4250  fx);
4251  // Lerp x:x+1, y+1, z+1
4252  vx1= UT_VoxelTile<T>::lerpValues(getValue(x, y+1, z+1),
4253  getValue(x+1, y+1, z+1),
4254  fx);
4255 
4256  // Lerp x:x+1, y:y+1, z+1
4257  vy1 = UT_VoxelTile<T>::lerpValues(vx, vx1, fy);
4258 
4259  // Lerp x:x+1, y:y+1, z:z+1
4260  vz = UT_VoxelTile<T>::lerpValues(vy, vy1, fz);
4261  }
4262 
4263  return vz;
4264 }
4265 
4266 #if 0
4267 template <typename T>
4268 T
4270 {
4271  int x, y, z;
4272 
4273  v4ui idx;
4274 
4275  // We go from the position in the unit cube into the index.
4276  // The center of cells must map to the exact integer indices.
4277  pos *= v4uf((float) myRes[0], (float) myRes[1], (float) myRes[2], 0.0f);
4278 
4279  // Because we use truncation we'll get inaccurate results
4280  // at the zero boundary, we thus bump up by two
4281  pos += 1.5;
4282 
4283  idx = pos.splitFloat();
4284 
4285  // And counteract that bump
4286  idx -= 2;
4287 
4288  x = idx[0];
4289  y = idx[1];
4290  z = idx[2];
4291 
4292  // Do trilinear interpolation.
4293  // A | B
4294  // 0 +z +y +yz | +x +xz +xy +xyz
4295 
4296  v4uf a, b, fx, fy, fz;
4297 
4298  int xm, ym, zm;
4299 
4300  xm = x & TILEMASK;
4301  ym = y & TILEMASK;
4302  zm = z & TILEMASK;
4303 
4304  // Optimization for common case (values are within the voxel range and
4305  // are all within the same tile)
4306  if ( (x > 0) & (y > 0) & (z > 0) &
4307  (x < myRes[0]-1) & (y < myRes[1]-1) & (z < myRes[2]-1) &
4308  (xm != TILEMASK) & (ym != TILEMASK) & (zm != TILEMASK) )
4309 /*
4310  if (isValidIndex(x, y, z) && isValidIndex(x+1, y+1, z+1) &&
4311  (x & TILEMASK) != TILEMASK &&
4312  (y & TILEMASK) != TILEMASK &&
4313  (z & TILEMASK) != TILEMASK)
4314 */
4315  {
4316  const UT_VoxelTile<T> *tile =
4317  getTile(x >> TILEBITS, y >> TILEBITS, z >> TILEBITS);
4318 
4319  return tile->lerp(pos, xm, ym, zm);
4320  }
4321  else
4322  {
4323  a = v4uf( getValue(x, y, z),
4324  getValue(x, y, z+1),
4325  getValue(x, y+1, z),
4326  getValue(x, y+1, z+1) );
4327  b = v4uf( getValue(x+1, y, z),
4328  getValue(x+1, y, z+1),
4329  getValue(x+1, y+1, z),
4330  getValue(x+1, y+1, z+1) );
4331  }
4332 
4333  fx = pos.swizzle<0, 0, 0, 0>();
4334  fy = pos.swizzle<1, 1, 1, 1>();
4335  fz = pos.swizzle<2, 2, 2, 2>();
4336 
4337  b -= a;
4338  a = madd(b, fx, a);
4339 
4340  b = a.swizzle<2, 3, 0, 1>();
4341  b -= a;
4342  a = madd(b, fy, a);
4343 
4344  b = a.swizzle<1, 2, 3, 0>();
4345  b -= a;
4346  a = madd(b, fz, a);
4347 
4348  return a[0];
4349 }
4350 #endif
4351 
4352 static inline int
4353 firstTile(int &start, int &end, int res)
4354 {
4355  if (start < 0)
4356  {
4357  start += res;
4358  end += res;
4359  }
4360  return start;
4361 }
4362 
4363 static inline int
4364 nextTile(int &tile, int &pstart, int &start, int &end, int res)
4365 {
4366  int pend;
4367 
4368  if (pstart >= res)
4369  {
4370  pstart -= res;
4371  start -= res;
4372  end -= res;
4373  }
4374  tile = pstart >> TILEBITS;
4375  pend = SYSmin((tile+1) * TILESIZE, end, res);
4376 
4377  UT_ASSERT(pstart >= 0 && pstart < res);
4378  UT_ASSERT(pend > 0 && pstart <= res);
4379  UT_ASSERT(pend-pstart > 0);
4380  UT_ASSERT(pend-pstart <= TILESIZE);
4381 
4382  return pend;
4383 }
4384 
4385 template <typename T>
4386 T
4387 UT_VoxelArray<T>::evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
4388  fpreal radius, int clampaxis) const
4389 {
4390  UT_Vector3 tpos;
4391  UT_FilterWindow win[3];
4392  UT_FilterWrap wrap[3];
4393  UT_VoxelTile<T> *tile;
4394  const float *weights[3];
4395  fpreal visible;
4396  int start[3], end[3], size[3];
4397  int pstart[3], pend[3];
4398  int tx, ty, tz, i;
4399  T result;
4400 
4401  if (!myTiles)
4402  {
4403  // If the array is empty, just use the border value.
4404  return myBorderValue;
4405  }
4406 
4407  UT_FilterWrap basewrap;
4408  switch (myBorderType)
4409  {
4410  case UT_VOXELBORDER_CONSTANT: basewrap = UT_WRAP_BORDER; break;
4411  case UT_VOXELBORDER_REPEAT: basewrap = UT_WRAP_REPEAT; break;
4412  case UT_VOXELBORDER_STREAK: basewrap = UT_WRAP_CLAMP; break;
4413 
4414  // NOTE: These are incorrect!
4415  case UT_VOXELBORDER_MIRROR: basewrap = UT_WRAP_CLAMP; break;
4416  case UT_VOXELBORDER_EXTRAP: basewrap = UT_WRAP_CLAMP; break;
4417  default: basewrap = UT_WRAP_CLAMP; break;
4418  }
4419  for (i = 0; i < 3; i++)
4420  {
4421  if (i == clampaxis)
4422  wrap[i] = UT_WRAP_CLAMP;
4423  else
4424  wrap[i] = basewrap;
4425  }
4426 
4427  memset(&result, 0, sizeof(result));
4428 
4429  radius *= filter.getSupport();
4430 
4431  // Make a local copy of the position so that we can modify it.
4432  tpos = pos;
4433  visible = 1;
4434  for (i = 0; i < 3; i++)
4435  {
4436  tpos[i] = tpos[i]*myRes[i];
4437  if (!win[i].setWeights(filter, tpos[i], radius, myRes[i], wrap[i]))
4438  return result;
4439 
4440  weights[i] = win[i].getWeights();
4441  start[i] = win[i].getStart() % myRes[i];
4442  size[i] = win[i].getSize();
4443 
4444  UT_ASSERT(start[i] >= 0);
4445  UT_ASSERT(size[i] <= myRes[i]);
4446 
4447  end[i] = start[i] + size[i];
4448  visible *= win[i].getVisible();
4449  }
4450 
4451  // Accumulate filtered results
4452  pstart[2] = firstTile(start[2], end[2], myRes[2]);
4453  while (pstart[2] < end[2])
4454  {
4455  pend[2] = nextTile(tz, pstart[2], start[2], end[2], myRes[2]);
4456  pstart[1] = firstTile(start[1], end[1], myRes[1]);
4457  while (pstart[1] < end[1])
4458  {
4459  pend[1] = nextTile(ty, pstart[1], start[1], end[1], myRes[1]);
4460  pstart[0] = firstTile(start[0], end[0], myRes[0]);
4461  while (pstart[0] < end[0])
4462  {
4463  pend[0] = nextTile(tx, pstart[0], start[0], end[0], myRes[0]);
4464  tile = getTile(tx, ty, tz);
4465  UT_ASSERT(tile);
4466  tile->weightedSum(pstart, pend, weights, start, result);
4467  pstart[0] = pend[0];
4468  }
4469  pstart[1] = pend[1];
4470  }
4471  pstart[2] = pend[2];
4472  }
4473 
4474  if (visible < 1)
4475  {
4476  result += (1-visible)*myBorderValue;
4477  }
4478 
4479  return result;
4480 }
4481 
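A usage sketch of evaluate(), assuming the getFilter()/releaseFilter() pattern used by resample() below; UT_FILTER_BOX is one of the stock UT_FilterType values:

 UT_Filter *filt = UT_Filter::getFilter(UT_FILTER_BOX);

 // pos is in unit-cube coordinates; radius is in voxels and is scaled
 // internally by the filter support. clampaxis == -1 clamps no axis.
 fpreal32 v = vol.evaluate(UT_Vector3(0.5, 0.5, 0.5), *filt,
                           /*radius=*/1.0, /*clampaxis=*/-1);
 UT_Filter::releaseFilter(filt);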
4482 template <typename T>
4483 void
4484 UT_VoxelArray<T>::resample(const UT_VoxelArray<T> &src,
4485  UT_FilterType filtertype,
4486  float filterwidthscale,
4487  int clampaxis)
4488 {
4489  fpreal radius;
4490  UT_Filter *filter;
4491 
4492  filter = UT_Filter::getFilter(filtertype);
4493 
4494  radius = SYSmax( src.getXRes() / (fpreal)getXRes(),
4495  src.getYRes() / (fpreal)getYRes(),
4496  src.getZRes() / (fpreal)getZRes(),
4497  1.0f );
4498  radius *= 0.5 * filterwidthscale;
4499 
4500  resamplethread(src, filter, radius, clampaxis);
4501 
4502  UT_Filter::releaseFilter(filter);
4503 }
4504 
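A sketch of downsampling one array into another; the destination must be sized first, since the kernel radius above is derived from the source/destination resolution ratio:

 UT_VoxelArray<fpreal32> dst;
 dst.size(src.getXRes() / 2, src.getYRes() / 2, src.getZRes() / 2,
          /*zero=*/false);
 dst.resample(src, UT_FILTER_BOX, /*filterwidthscale=*/1.0f,
              /*clampaxis=*/-1);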
4505 template <typename T>
4506 template <typename OP>
4507 void
4508 UT_VoxelArray<T>::forEachTile(const OP &op, bool shouldthread)
4509 {
4510  auto blockop = [&op, this](const UT_BlockedRange<int> &range)
4511  {
4512  UT_VoxelTileIterator<T> vit;
4513  for (int tileidx = range.begin(); tileidx != range.end(); tileidx++)
4514  {
4515  vit.setLinearTile(tileidx, this);
4516  op(vit);
4517  }
4518  };
4519 
4520  if (!shouldthread)
4521  {
4522  UTserialForEachNumber(numTiles(), blockop);
4523  }
4524  else
4525  {
4526  UTparallelForEachNumber(numTiles(), blockop);
4527  }
4528 }
4529 
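A sketch of forEachTile() with a lambda. The functor receives the UT_VoxelTileIterator set up by setLinearTile() above, so a per-tile voxel loop looks like this (safe to thread, since tiles are disjoint):

 vol.forEachTile([](UT_VoxelTileIterator<fpreal32> &vit)
 {
     for (vit.rewind(); !vit.atEnd(); vit.advance())
         vit.setValue(vit.getValue() * 2.0f);
 }, /*shouldthread=*/true);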
4530 template <typename T>
4531 void
4532 UT_VoxelArray<T>::flattenPartial(T *flatarray, exint ystride, exint zstride,
4533  const UT_JobInfo &info) const
4534 {
4535  // Check to see if we are 2D; if so, we wish to use the
4536  // axis flatten.
4537  if (getXRes() == 1 && ystride == 1)
4538  {
4539  flattenPartialAxis<0>(flatarray, zstride, info);
4540  }
4541  else if (getYRes() == 1 && zstride == ystride)
4542  {
4543  flattenPartialAxis<1>(flatarray, zstride, info);
4544  }
4545  else if (getZRes() == 1)
4546  {
4547  flattenPartialAxis<2>(flatarray, ystride, info);
4548  }
4549  else
4550  {
4551  UT_VoxelArrayIterator<T> vit;
4552 
4553  // Const cast.
4554  vit.setArray((UT_VoxelArray<T> *)this);
4555  // We can't use splitByTile as then our writes will fight
4556  // over 4k pages.
4557  vit.setPartialRange(info.job(), info.numJobs());
4558 
4559  for (vit.rewind(); !vit.atEnd(); vit.advance())
4560  {
4561  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4562  }
4563  }
4564 }
4565 
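A sketch of the dense export path; flatten() is assumed to be the THREADED_METHOD wrapper that the header generates around flattenPartial():

 std::vector<fpreal32> flat((size_t)vol.getXRes() * vol.getYRes()
                            * vol.getZRes());

 // ystride: one x row; zstride: one xy slice.
 vol.flatten(flat.data(), vol.getXRes(),
             (exint)vol.getXRes() * vol.getYRes());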
4566 
4567 template <typename T>
4568 template <int AXIS2D>
4569 void
4570 UT_VoxelArray<T>::flattenPartialAxis(T *flatarray, exint ystride,
4571  const UT_JobInfo &info) const
4572 {
4573  // The x/y refer to the destination x/y.
4574  // The actual source index depends on AXIS2D.
4575  const int ax = (AXIS2D == 0) ? 1 : 0;
4576  const int ay = (AXIS2D == 2) ? 1 : 2;
4577  int tileidx[3] = { 0, 0, 0 };
4578 
4579  while (1)
4580  {
4581  exint tiley = info.nextTask();
4582  exint ystart = tiley * TILESIZE;
4583  if (ystart >= getRes(ay))
4584  break;
4585 
4586  T *stripe = &flatarray[ystart * ystride];
4587 
4588  int yres = SYSmin(getRes(ay) - ystart, TILESIZE);
4589 
4590  for (int tilex = 0, ntilex = getTileRes(ax); tilex < ntilex; tilex++)
4591  {
4592  tileidx[ax] = tilex;
4593  tileidx[ay] = tiley;
4594  auto tile = getTile(tileidx[0], tileidx[1], tileidx[2]);
4595 
4596  int xres = tile->getRes(ax);
4597  T *stripey = stripe;
4598  if (tile->isConstant())
4599  {
4600  const T *srcdata = tile->rawData();
4601 
4602  for (int y = 0; y < yres; y++)
4603  {
4604  for (int x = 0; x < xres; x++)
4605  {
4606  memcpy(&stripey[x], srcdata, sizeof(T));
4607  }
4608  stripey += ystride;
4609  }
4610  }
4611  else if (tile->isSimpleCompression())
4612  {
4613  const T *srcdata = tile->rawData();
4614 
4615  if (xres != TILESIZE)
4616  {
4617  for (int y = 0; y < yres; y++)
4618  {
4619  memcpy(stripey, srcdata, sizeof(T) * xres);
4620  srcdata += xres;
4621  stripey += ystride;
4622  }
4623  }
4624  else
4625  {
4626  for (int y = 0; y < yres; y++)
4627  {
4628  memcpy(stripey, srcdata, sizeof(T) * TILESIZE);
4629  srcdata += TILESIZE;
4630  stripey += ystride;
4631  }
4632  }
4633  }
4634  else
4635  {
4636  for (int y = 0; y < yres; y++)
4637  {
4638  int idx[3] = { 0, 0, 0 };
4639  idx[ay] = y;
4640  for (int x = 0; x < xres; x++)
4641  {
4642  idx[ax] = x;
4643  stripey[x] = (*tile)(idx[0], idx[1], idx[2]);
4644  }
4645  stripey += ystride;
4646  }
4647  }
4648 
4649  stripe += TILESIZE;
4650  }
4651  }
4652 }
4653 
4654 
4655 template <>
4656 inline void
4658  exint ystride, exint zstride,
4659  UT_Vector4F,
4660  const UT_JobInfo &info) const
4661 {
4662  UT_VoxelArrayIterator<UT_Vector4F> vit;
4663 
4664  // Const cast.
4665  vit.setArray(SYSconst_cast(this));
4666  // We can't use splitByTile as then our writes will fight
4667  // over 4k pages.
4668  vit.setPartialRange(info.job(), info.numJobs());
4669 
4670  for (vit.rewind(); !vit.atEnd(); vit.advance())
4671  {
4672  UT_Vector4F v = vit.getValue();
4673  int idx = (vit.x() + vit.y()*ystride + vit.z()*zstride) * 4;
4674 
4675  flatarray[idx] = (uint8) SYSclamp(v.x() * 255.0f, 0.0f, 255.0f);
4676  flatarray[idx+1] = (uint8) SYSclamp(v.y() * 255.0f, 0.0f, 255.0f);
4677  flatarray[idx+2] = (uint8) SYSclamp(v.z() * 255.0f, 0.0f, 255.0f);
4678  flatarray[idx+3] = (uint8) SYSclamp(v.w() * 255.0f, 0.0f, 255.0f);
4679  }
4680 }
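// A worked example of the fixed-point conversion above (editorial): a
// component of 0.5f scales to 0.5f * 255.0f = 127.5f, which the uint8 cast
// truncates to 127. Out-of-range components are clamped first, so 1.2f
// stores as 255 and -0.1f as 0.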
4681 
4682 template <typename T>
4683 void
4684 UT_VoxelArray<T>::flattenGLFixed8Partial(uint8 *flatarray,
4685  exint ystride, exint zstride,
4686  T, const UT_JobInfo &info) const
4687 {
4688  UT_ASSERT(!"This template requires specific instantiations.");
4689 }
4690 
4691 template <>
4692 inline void
4693 UT_VoxelArray<UT_Vector4F>::flattenGL16FPartial(UT_Vector4H *flatarray,
4694  exint ystride, exint zstride,
4695  UT_Vector4F,
4696  const UT_JobInfo &info) const
4697 {
4698  UT_VoxelArrayIterator<UT_Vector4F> vit;
4699 
4700  // Const cast.
4701  vit.setArray(SYSconst_cast(this));
4702  // We can't use splitByTile as then our writes will fight
4703  // over 4k pages.
4704  vit.setPartialRange(info.job(), info.numJobs());
4705 
4706  for (vit.rewind(); !vit.atEnd(); vit.advance())
4707  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = vit.getValue();
4708 }
4709 
4710 template <typename T>
4711 void
4712 UT_VoxelArray<T>::flattenGL16FPartial(UT_Vector4H *flatarray,
4713  exint ystride, exint zstride,
4714  T, const UT_JobInfo &info) const
4715 {
4716  UT_ASSERT(!"This template requires specific instantiations.");
4717 }
4718 
4719 template <>
4720 inline void
4721 UT_VoxelArray<UT_Vector4F>::flattenGL32FPartial(UT_Vector4F *flatarray,
4722  exint ystride, exint zstride,
4723  UT_Vector4F,
4724  const UT_JobInfo &info) const
4725 {
4726  UT_VoxelArrayIterator<UT_Vector4F> vit;
4727 
4728  // Const cast.
4729  vit.setArray(SYSconst_cast(this));
4730  // We can't use splitByTile as then our writes will fight
4731  // over 4k pages.
4732  vit.setPartialRange(info.job(), info.numJobs());
4733 
4734  for (vit.rewind(); !vit.atEnd(); vit.advance())
4735  {
4736  UT_Vector4F v = vit.getValue();
4737 
4738  // NOTE: This works around an Nvidia driver bug on OSX, and older
4739  // Nvidia drivers on other platforms. The problem was that very
4740  // small FP values were causing huge random values to appear in
4741  // the 3D Texture.
4742  if(SYSabs(v.x()) < 1e-9)
4743  v.x() = 0.0;
4744  if(SYSabs(v.y()) < 1e-9)
4745  v.y() = 0.0;
4746  if(SYSabs(v.z()) < 1e-9)
4747  v.z() = 0.0;
4748  if(SYSabs(v.w()) < 1e-9)
4749  v.w() = 0.0;
4750 
4751  flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride] = v;
4752  }
4753 }
4754 
4755 template <typename T>
4756 void
4757 UT_VoxelArray<T>::flattenGL32FPartial(UT_Vector4F *flatarray,
4758  exint ystride, exint zstride,
4759  T,const UT_JobInfo &info) const
4760 {
4761  UT_ASSERT(!"This template requires specific instantiations.");
4762 }
4763 
4764 
4765 template <typename T>
4766 void
4767 UT_VoxelArray<T>::extractFromFlattenedPartial(const T *flatarray,
4768  exint ystride, exint zstride,
4769  const UT_JobInfo &info)
4770 {
4771  UT_VoxelArrayIterator<T> vit;
4772 
4773  vit.setArray(this);
4774  vit.splitByTile(info);
4775  vit.setCompressOnExit(true);
4776 
4777  for (vit.rewind(); !vit.atEnd(); vit.advance())
4778  {
4779  vit.setValue(flatarray[vit.x() + vit.y()*ystride + vit.z()*zstride]);
4780  }
4781 }
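// An editorial round-trip sketch (hypothetical names; assumes the generated
// public wrappers flatten() and extractFromFlattened() matching the Partial
// methods above): voxels copied to a flat scratch buffer can be edited in
// place and written back with the same strides.
#if 0
static void
UTexampleFlatRoundTrip(UT_VoxelArray<fpreal32> &vol)
{
    exint ystride = vol.getXRes();
    exint zstride = ystride * vol.getYRes();
    UT_StackBuffer<fpreal32> flat(zstride * vol.getZRes());
    vol.flatten(flat, ystride, zstride);
    // ... edit the flat buffer here ...
    vol.extractFromFlattened(flat, ystride, zstride);
}
#endif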
4782 
4783 template <typename T>
4784 template <int SLICE, typename S>
4785 S *
4786 UT_VoxelArray<T>::extractSlice(S *dstdata, int slice, bool half_slice) const
4787 {
4788  int slice_res = getRes(SLICE);
4789  // Exit out early if the slice isn't inside the array proper.
4790  if (slice < 0 || slice >= slice_res ||
4791  (half_slice && slice == slice_res - 1))
4792  return nullptr;
4793 
4794  constexpr int AXIS1 = (SLICE == 0 ? 1 : 0);
4795  constexpr int AXIS2 = (SLICE == 1 ? 2 : 1);
4796 
4797  // Voxel resolution along the major axis of the voxel array.
4798  int a1_res = getRes(AXIS1);
4799  // Tile resolutions of the voxel array.
4800  int a1_tiles = getTileRes(AXIS1);
4801  int a2_tiles = getTileRes(AXIS2);
4802  int ntiles = a1_tiles * a2_tiles;
4803  // Strides used to figure out the starting index of each tile's voxels in
4804  // the destination array.
4805  int tile_stride2 = (a1_res << TILEBITS);
4806  int tile_stride1 = TILESIZE;
4807  // Index of the tile for the slice and the local coordinate within it.
4808  int tile_slice = (slice >> TILEBITS);
4809  int local_slice = (slice & TILEMASK);
4810  // If half_slice_st is true, we are averaging two voxel slices that lie
4811  // within the same slice of tiles.
4812  bool half_slice_st = half_slice && (local_slice != TILEMASK);
4813  // If half_slice_tb is true, we are averaging the current value of dstdata
4814  // with the tile's values.
4815  bool half_slice_tb = false;
4816 
4817  // This functor sets the destination array value. idx might get changed
4818  // internally, but it comes out the way it went in.
4819  // Behaviour of this function is controlled by the following booleans, in
4820  // this order.
4821  // * half_slice_st: sets the value to the average of the tile values at the
4822  // local idx and at that idx offset along SLICE by 1;
4823  // * half_slice_tb: sets the value to the average between the current value
4824  // in the destination array and the tile's value at the local idx;
4825  // * when neither of the above is true, sets to tile's value at the local
4826  // idx.
4827  auto set_functor = [&](const UT_VoxelTile<T>* tile, int idx[3], int vidx)
4828  {
4829  if (half_slice_st)
4830  {
4831  dstdata[vidx] = (*tile)(idx[0], idx[1], idx[2]);
4832  idx[SLICE]++;
4833  dstdata[vidx] = 0.5f * (dstdata[vidx] +
4834  (*tile)(idx[0], idx[1], idx[2]));
4835  idx[SLICE]--;
4836  }
4837  else if (half_slice_tb)
4838  dstdata[vidx] = 0.5f * (dstdata[vidx] +
4839  (*tile)(idx[0], idx[1], idx[2]));
4840  else
4841  dstdata[vidx] = (*tile)(idx[0], idx[1], idx[2]);
4842  };
4843  // This functor will be executed by the multithreaded loop below. Uses
4844  // set_functor to write the correct value into dstdata.
4845  auto loop_functor = [&](const UT_BlockedRange<int>& range)
4846  {
4847  // Tile index array.
4848  int is[3];
4849  is[SLICE] = tile_slice;
4850  // Array of indices within the tile.
4851  int is_local[3];
4852  is_local[SLICE] = local_slice;
4853 
4854  for (int i = range.begin(); i < range.end(); i++)
4855  {
4856  // Figure out coordinates of the current tile.
4857  is[AXIS1] = i % a1_tiles;
4858  is[AXIS2] = i / a1_tiles;
4859 
4860  // Get the current tile and its size.
4861  const UT_VoxelTile<T>* tile = getTile(is[0], is[1], is[2]);
4862  int a1_tile_res = tile->getRes(AXIS1);
4863  int a2_tile_res = tile->getRes(AXIS2);
4864  // Get the index we should write this tile to.
4865  int vcounter = is[AXIS2] * tile_stride2 + is[AXIS1] * tile_stride1;
4866 
4867  for (is_local[AXIS2] = 0; is_local[AXIS2] < a2_tile_res;
4868  is_local[AXIS2]++)
4869  {
4870  for (is_local[AXIS1] = 0; is_local[AXIS1] < a1_tile_res;
4871  is_local[AXIS1]++)
4872  {
4873  set_functor(tile, is_local, vcounter);
4874  vcounter++;
4875  }
4876  // Wrap the counter to the next row.
4877  vcounter += a1_res - a1_tile_res;
4878  }
4879  }
4880  };
4881 
4882  // Run the loop.
4883  UTparallelFor(UT_BlockedRange<int>(0, ntiles), loop_functor);
4884 
4885  // If we're halfway between two voxel slices that lie in different tiles, we
4886  // need to do another sweep and modify the results.
4887  if (half_slice && !half_slice_st)
4888  {
4889  half_slice_tb = true;
4890  tile_slice++;
4891  local_slice = 0;
4892  UTparallelFor(UT_BlockedRange<int>(0, ntiles), loop_functor);
4893  }
4894 
4895  return dstdata;
4896 }
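// An editorial usage sketch (hypothetical names): pull the XY plane at Z
// index k into a dense buffer. Passing half_slice == true instead averages
// planes k and k+1, which is why k must then stay below the last slice; the
// call returns nullptr when the requested slice is out of range.
#if 0
static fpreal32 *
UTexampleExtractSlice(const UT_VoxelArray<fpreal32> &vol, int k)
{
    fpreal32 *buf = new fpreal32[(exint)vol.getXRes() * vol.getYRes()];
    if (!vol.extractSlice<2>(buf, k, /*half_slice=*/false))
    {
        delete [] buf;
        return nullptr;
    }
    return buf;
}
#endif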
4897 
4898 template <typename T>
4899 template <typename S>
4900 S *
4901 UT_VoxelArray<T>::extractTiles(S *dstdata, int stride,
4902  const UT_IntArray &tilelist) const
4903 {
4904  for (int i = 0; i < tilelist.entries(); i++)
4905  {
4906  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
4907  const UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
4908 
4909  tile->flatten(dstdata, stride);
4910  dstdata += tile->numVoxels() * stride;
4911  }
4912  return dstdata;
4913 }
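// An editorial usage sketch (hypothetical names): gather the voxels of a
// list of tiles into one dense buffer. Each listed tile contributes
// numVoxels() values in tile-local order, so the destination is sized by
// summing over the list.
#if 0
static void
UTexampleExtractTiles(const UT_VoxelArray<fpreal32> &vol)
{
    UT_IntArray tiles;
    for (int i = 0; i < SYSmin(vol.numTiles(), 2); i++)
        tiles.append(i);
    exint total = 0;
    for (int i = 0; i < tiles.entries(); i++)
        total += vol.getLinearTile(tiles(i))->numVoxels();
    UT_StackBuffer<fpreal32> buf(total);
    fpreal32 *bufp = buf;
    vol.extractTiles(bufp, /*stride=*/1, tiles);
}
#endif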
4914 
4915 template <typename T>
4916 template <typename S, typename IDX>
4917 S *
4918 UT_VoxelArray<T>::extractTilesMixed(S *dstdata, int stride,
4919  const IDX *ix, const IDX *iy, const IDX *iz,
4920  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const
4921 {
4922  int srcidx = 0;
4923 
4924  for (auto && tiledata : tilelist)
4925  {
4926  int tileidx = tiledata.tileidx;
4927  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
4928 
4929  const UT_VoxelTile<T> *tile = getLinearTile(tileidx);
4930  int tilevoxel = tile->numVoxels();
4931 
4932  if (tilevoxel == tiledata.numvoxel)
4933  {
4934  // Full tile.
4935  tile->flatten(dstdata, stride);
4936  dstdata += tiledata.numvoxel * stride;
4937  srcidx += tiledata.numvoxel;
4938  }
4939  else
4940  {
4941  // Partial tile...
4942  int basex, basey, basez;
4943  linearTileToXYZ(tileidx, basex, basey, basez);
4944 
4945  basex <<= TILEBITS;
4946  basey <<= TILEBITS;
4947  basez <<= TILEBITS;
4948 
4949  if (tile->isSimpleCompression())
4950  {
4951  const T *src = tile->rawData();
4952  if (tile->isConstant())
4953  {
4954  S cval = *src;
4955  for (int i = 0; i < tiledata.numvoxel; i++)
4956  {
4957  *dstdata = cval;
4958  dstdata += stride;
4959  srcidx++;
4960  }
4961  }
4962  else
4963  {
4964  int w = tile->xres();
4965  int h = tile->yres();
4966  for (int i = 0; i < tiledata.numvoxel; i++)
4967  {
4968  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4969  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4970  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4971  *dstdata = src[ (ix[srcidx] - basex)
4972  + (iy[srcidx] - basey) * w
4973  + (iz[srcidx] - basez) * w * h];
4974  dstdata += stride;
4975  srcidx++;
4976  }
4977  }
4978  }
4979  else
4980  {
4981  for (int i = 0; i < tiledata.numvoxel; i++)
4982  {
4983  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
4984  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
4985  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
4986  *dstdata = (*tile)(ix[srcidx] - basex,
4987  iy[srcidx] - basey,
4988  iz[srcidx] - basez);
4989  dstdata += stride;
4990  srcidx++;
4991  }
4992  }
4993  }
4994  }
4995  return dstdata;
4996 }
4997 
4998 template <typename T>
4999 template <typename S>
5000 const S *
5001 UT_VoxelArray<T>::writeTiles(const S *srcdata, int stride,
5002  const UT_IntArray &tilelist)
5003 {
5004  bool docompress = getCompressionOptions().compressionEnabled();
5005  for (int i = 0; i < tilelist.entries(); i++)
5006  {
5007  UT_ASSERT(tilelist(i) >= 0 && tilelist(i) < numTiles());
5008  UT_VoxelTile<T> *tile = getLinearTile(tilelist(i));
5009 
5010  tile->writeData(srcdata, stride);
5011  if (docompress)
5012  tile->tryCompress(getCompressionOptions());
5013  srcdata += tile->numVoxels() * stride;
5014  }
5015  return srcdata;
5016 }
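// An editorial sketch pairing the two methods (hypothetical names): over the
// same tile list, writeTiles() is the inverse of extractTiles(), and it
// recompresses each tile afterwards when compression is enabled.
#if 0
static void
UTexampleTileRoundTrip(UT_VoxelArray<fpreal32> &vol, const UT_IntArray &tiles,
                       fpreal32 *scratch)
{
    vol.extractTiles(scratch, /*stride=*/1, tiles);
    // ... filter the scratch values in place here ...
    vol.writeTiles(scratch, /*stride=*/1, tiles);
}
#endif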
5017 
5018 template <typename T>
5019 template <typename S, typename IDX>
5020 const S *
5021 UT_VoxelArray<T>::writeTilesMixed(const S *srcdata, int stride,
5022  const IDX *ix, const IDX *iy, const IDX *iz,
5023  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist)
5024 {
5025  bool docompress = getCompressionOptions().compressionEnabled();
5026  int srcidx = 0;
5027 
5028  for (auto && tiledata : tilelist)
5029  {
5030  int tileidx = tiledata.tileidx;
5031  UT_ASSERT(tileidx >= 0 && tileidx < numTiles());
5032 
5033  UT_VoxelTile<T> *tile = getLinearTile(tileidx);
5034  int tilevoxel = tile->numVoxels();
5035 
5036  if (tilevoxel == tiledata.numvoxel)
5037  {
5038  tile->writeData(srcdata, stride);
5039  srcdata += tiledata.numvoxel * stride;
5040  srcidx += tiledata.numvoxel;
5041  }
5042  else
5043  {
5044  // Partial tile.
5045  int basex, basey, basez;
5046  linearTileToXYZ(tileidx, basex, basey, basez);
5047 
5048  basex <<= TILEBITS;
5049  basey <<= TILEBITS;
5050  basez <<= TILEBITS;
5051 
5052  for (int i = 0; i < tiledata.numvoxel; i++)
5053  {
5054  UT_ASSERT_P(ix[srcidx] >= basex && ix[srcidx] < basex+TILESIZE);
5055  UT_ASSERT_P(iy[srcidx] >= basey && iy[srcidx] < basey+TILESIZE);
5056  UT_ASSERT_P(iz[srcidx] >= basez && iz[srcidx] < basez+TILESIZE);
5057  tile->setValue(ix[srcidx] - basex,
5058  iy[srcidx] - basey,
5059  iz[srcidx] - basez,
5060  *srcdata);
5061  srcdata += stride;
5062  srcidx++;
5063  }
5064  }
5065 
5066  if (docompress)
5067  tile->tryCompress(getCompressionOptions());
5068  }
5069  return srcdata;
5070 }
5071 
5072 ///
5073 /// This helper class can be used for iterating over a voxel array in relation to another
5074 /// one. In particular, dst is the destination voxel array that is to be iterated over; its
5075 /// tile at [X, Y, Z] is taken to be coincident with tile [X + xoff, Y + yoff, Z + zoff] of
5076 /// src. Tiles of dst are partitioned into two sets:
5077 /// - BP (boundary pass): these are tiles of dst that are coincident with tiles of src that
5078 /// are at the boundary (so they affect extrapolation);
5079 /// - IP (internal pass): these tiles are coincident with internal tiles of src (those that
5080 /// are irrelevant for extrapolation).
5081 /// Once created, an object of this class can return the number of tiles in each set and
5082 /// convert from a linear 0-N index to [X, Y, Z] coordinates that identify the tile within
5083 /// dst.
5084 ///
5085 /// This class can be roughly used as follows... Main function:
5086 ///
5087 /// __linearTileIndexConverter index(dst, src, xoff, yoff, zoff);
5088 /// customFunctionBP(..., index);
5089 /// customFunctionIP(..., index);
5090 ///
5091 /// BPPartial(..., const __linearTileIndexConverter& index, const UT_JobInfo& i) function:
5092 ///
5093 /// int t0, t1;
5094 /// i.divideWork(index.getNTilesBP(), t0, t1);
5095 /// for (int j = t0; j < t1; j++)
5096 /// {
5097 /// int x, y, z;
5098 /// index.toLinearBP(j, x, y, z);
5099 /// UT_VoxelTile<T>* dtile = dst.getTile(x, y, z);
5100 /// ...
5101 /// }
5102 ///
5103 /// With a similar implementation for IP. See moveTilesWithOffset() for an example.
5104 ///
5105 class __linearTileIndexConverter
5106 {
5107 public:
5108  /// Creates an index converter from the given voxel arrays. This object partitions tiles
5109  /// of dst into those that are affected by extrapolation (BP) and those that are not (IP).
5110  template <typename T>
5111  __linearTileIndexConverter(const UT_VoxelArray<T>* dst,
5112  const UT_VoxelArray<T>* src,
5113  int xoff, int yoff, int zoff)
5114  {
5115  myXMax = dst->getTileRes(0);
5116  myYMax = dst->getTileRes(1);
5117  myZMax = dst->getTileRes(2);
5118 
5119  // If the source has a constant border, no tile affects extrapolation, so all
5120  // tiles can be put into IP. Otherwise, the outer layer of src's tiles is part of BP.
5121  int m = (src->getBorder() == UT_VOXELBORDER_CONSTANT) ? 0 : 1;
5122  myXSkipMin = SYSmax(m - xoff, 0);
5123  myXSkipLen =
5124  SYSmax(SYSmin(myXMax, src->getTileRes(0) - xoff - m) - myXSkipMin, 0);
5125  myYSkipMin = SYSmax(m - yoff, 0);
5126  myYSkipLen =
5127  SYSmax(SYSmin(myYMax, src->getTileRes(1) - yoff - m) - myYSkipMin, 0);
5128  myZSkipMin = SYSmax(m - zoff, 0);
5129  myZSkipLen =
5130  SYSmax(SYSmin(myZMax, src->getTileRes(2) - zoff - m) - myZSkipMin, 0);
5131  }
5132 
5133  /// Returns the number of tiles in each part.
5134  int getNTilesBP() const
5135  {
5136  return myXMax * myYMax * myZMax - getNTilesIP();
5137  }
5138  int getNTilesIP() const
5139  {
5140  return myXSkipLen * myYSkipLen * myZSkipLen;
5141  }
5142 
5143  /// Converts a linear index (between 0 and getNTiles*P()-1) to the [X, Y, Z] tile
5144  /// coordinate with respect to dst.
5145  void toLinearBP(int k, int& x, int& y, int& z) const
5146  {
5147  // Number of tiles before the first Z slice with a hole.
5148  const int check_0 = myXMax * myYMax * myZSkipMin;
5149  // Check if we are before the first Z slice of the hole or if there is no hole.
5150  if (myXSkipLen == 0 || myYSkipLen == 0 || myZSkipLen == 0 || k < check_0)
5151  {
5152  _toRegularLinear(k, myXMax, myYMax, x, y, z);
5153  return;
5154  }
5155  k -= check_0;
5156 
5157  // Number of tiles per Z-slice with a hole.
5158  const int check_1a = myXMax * myYMax - myXSkipLen * myYSkipLen;
5159  // Total number of tiles in Z-slices with a hole.
5160  const int check_1b = check_1a * myZSkipLen;
5161  // Check if we are past the hole...
5162  if (k >= check_1b)
5163  {
5164  _toRegularLinear(k - check_1b, myXMax, myYMax, x, y, z);
5165  z += myZSkipMin + myZSkipLen;
5166  return;
5167  }
5168 
5169  // We are in the holed slices... First, determine the Z-slice.
5170  z = k / check_1a + myZSkipMin;
5171  // The remainder (index within the slice).
5172  k = k % check_1a;
5173  // Check if we are before the first Y slice with a hole.
5174  const int check_2 = myXMax * myYSkipMin;
5175  if (k < check_2)
5176  {
5177  y = k / myXMax;
5178  x = k % myXMax;
5179  return;
5180  }
5181  k -= check_2;
5182  // Check if we are past the last Y slice with a hole.
5183  const int check_3 = (myXMax - myXSkipLen) * myYSkipLen;
5184  if (k >= check_3)
5185  {
5186  k -= check_3;
5187  y = k / myXMax + myYSkipMin + myYSkipLen;
5188  x = k % myXMax;
5189  return;
5190  }
5191 
5192  // We are in the holed slices. Find the y coordinate.
5193  y = k / (myXMax - myXSkipLen) + myYSkipMin;
5194  x = k % (myXMax - myXSkipLen);
5195  if (x >= myXSkipMin)
5196  x += myXSkipLen;
5197  }
5198  void toLinearIP(int k, int& x, int& y, int& z) const
5199  {
5200  _toRegularLinear(k, myXSkipLen, myYSkipLen, x, y, z);
5201  x += myXSkipMin;
5202  y += myYSkipMin;
5203  z += myZSkipMin;
5204  }
5205 
5206 protected:
5207  static void _toRegularLinear(int k, int xdim, int ydim, int& x, int& y, int& z)
5208  {
5209  x = k % xdim;
5210  k = k / xdim;
5211  y = k % ydim;
5212  z = k / ydim;
5213  }
5214 
5215 protected:
5216  int myXMax, myYMax, myZMax;
5217  int myXSkipMin, myYSkipMin, myZSkipMin;
5218  int myXSkipLen, myYSkipLen, myZSkipLen;
5219 };
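// A worked example of the partition above (editorial): for a 4x4x4 tile grid
// with zero offsets and a non-constant border (m == 1), the skip region is
// the 2x2x2 interior, so getNTilesIP() == 8 and getNTilesBP() == 64 - 8 == 56.
// toLinearBP() walks those 56 boundary tiles in X-fastest order, hopping over
// the interior hole, while toLinearIP() maps 0..7 onto the hole itself.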
5220 
5221 template <typename T>
5222 void
5223 UT_VoxelArray<T>::moveTilesWithOffset(UT_VoxelArray<T> &src, int tileoffx,
5224  int tileoffy, int tileoffz)
5225 {
5226  __linearTileIndexConverter index(this, &src, tileoffx, tileoffy, tileoffz);
5227  UTparallelInvoke(true, [&]
5228  {
5229  // Divide up the work (only internal tiles). Source tiles for these guys can be
5230  // safely changed without affecting other threads.
5231  UTparallelFor(UT_BlockedRange<int>(0, index.getNTilesIP()),
5232  [&](const UT_BlockedRange<int>& range)
5233  {
5234  for (int i = range.begin(); i < range.end(); i++)
5235  {
5236  // Get the location of the current tile...
5237  int xyz[3];
5238  index.toLinearIP(i, xyz[0], xyz[1], xyz[2]);
5239 
5240  // Get the tiles and exchange data.
5241  UT_VoxelTile<T>* tile = getTile(xyz[0], xyz[1], xyz[2]);
5242  UT_VoxelTile<T>* srctile = src.getTile(xyz[0] + tileoffx,
5243  xyz[1] + tileoffy,
5244  xyz[2] + tileoffz);
5245 
5246  // If tiles are of the same size, simply exchange the data. We know these
5247  // tiles do not affect results outside of src.
5248  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
5249  && tile->zres() == srctile->zres())
5250  {
5251  // The tiles are of the same size, so we can just exchange the data
5252  // pointers.
5253  UTswap(tile->myData, srctile->myData);
5254  UTswap(tile->myCompressionType, srctile->myCompressionType);
5255  UTswap(tile->myForeignData, srctile->myForeignData);
5256  }
5257  else
5258  {
5259  // Otherwise, manually fill in the values.
5260  int offv[] = {(xyz[0] + tileoffx) * TILESIZE,
5261  (xyz[1] + tileoffy) * TILESIZE,
5262  (xyz[2] + tileoffz) * TILESIZE};
5263  for (int z = 0; z < tile->zres(); z++)
5264  {
5265  for (int y = 0; y < tile->yres(); y++)
5266  {
5267  for (int x = 0; x < tile->xres(); x++)
5268  {
5269  tile->setValue(x, y, z, src.getValue(x + offv[0],
5270  y + offv[1],
5271  z + offv[2]));
5272  }
5273  }
5274  }
5275  }
5276  }
5277  });
5278  }, [&]
5279  {
5280  // Is the border constant?
5281  const bool const_src_border = (src.getBorder() == UT_VOXELBORDER_CONSTANT);
5282  const T border_val = src.getBorderValue();
5283 
5284  // Offsets in terms of tiles.
5285  const int off[] = {tileoffx, tileoffy, tileoffz};
5286  // Tile resolution of the source.
5287  const int src_tileres[] = {src.getTileRes(0), src.getTileRes(1), src.getTileRes(2)};
5288 
5289  // Divide up the work (only boundary tiles). Source tiles for these guys cannot be
5290  // modified safely.
5291  UTparallelFor(UT_BlockedRange<int>(0, index.getNTilesBP()),
5292  [&](const UT_BlockedRange<int>& range)
5293  {
5294  for (int i = range.begin(); i < range.end(); i++)
5295  {
5296  // Get the location of the current tile...
5297  int xyz[3];
5298  index.toLinearBP(i, xyz[0], xyz[1], xyz[2]);
5299 
5300  // Get the current tile...
5301  UT_VoxelTile<T>* tile = getTile(xyz[0], xyz[1], xyz[2]);
5302  bool outside = false;
5303  for (int j = 0; j < 3; j++)
5304  {
5305  xyz[j] += off[j];
5306  outside = outside || (xyz[j] < 0 || xyz[j] >= src_tileres[j]);
5307  }
5308 
5309  // If we are completely outside, and borders are constant, we can go ahead and
5310  // make this tile constant.
5311  if (outside && const_src_border)
5312  {
5313  tile->makeConstant(border_val);
5314  }
5315  else
5316  {
5317  // Otherwise, manually fill in the values.
5318  int offv[] = {xyz[0] * TILESIZE, xyz[1] * TILESIZE, xyz[2] * TILESIZE};
5319  for (int z = 0; z < tile->zres(); z++)
5320  {
5321  for (int y = 0; y < tile->yres(); y++)
5322  {
5323  for (int x = 0; x < tile->xres(); x++)
5324  {
5325  tile->setValue(x, y, z, src.getValue(x + offv[0],
5326  y + offv[1],
5327  z + offv[2]));
5328  }
5329  }
5330  }
5331  }
5332  }
5333  });
5334  });
5335 }
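// An editorial usage sketch (hypothetical names): steal src's payload
// shifted by one tile along X. The offsets are measured in tiles, not
// voxels; swapped tiles leave src valid but with unspecified contents.
#if 0
static void
UTexampleMoveTiles(UT_VoxelArray<fpreal32> &dst, UT_VoxelArray<fpreal32> &src)
{
    dst.size(src.getXRes(), src.getYRes(), src.getZRes());
    dst.moveTilesWithOffset(src, /*tileoffx=*/1, /*tileoffy=*/0, /*tileoffz=*/0);
}
#endif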
5336 
5337 template <typename T>
5338 void
5339 UT_VoxelArray<T>::copyWithOffset(const UT_VoxelArray<T> &src,
5340  int offx, int offy, int offz)
5341 {
5342  UT_VoxelBorderType srcborder;
5343  T srcborderval;
5344 
5345  srcborder = src.getBorder();
5346  srcborderval = src.getBorderValue();
5347 
5348  if (srcborder != UT_VOXELBORDER_EXTRAP)
5349  {
5350  // These borders may be constant-friendly.
5351  T srcval;
5352 
5353  if (src.isConstant(&srcval))
5354  {
5355  if (srcborder != UT_VOXELBORDER_CONSTANT ||
5356  SYSisEqual(srcborderval, srcval))
5357  {
5358  // We can trivially make ourselves constant.
5359  constant(srcval);
5360  return;
5361  }
5362  }
5363  }
5364 
5365  copyWithOffsetInternal(src, offx, offy, offz);
5366 }
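// An editorial usage sketch (hypothetical names): dst(x, y, z) receives
// src(x + offx, y + offy, z + offz), with out-of-range reads resolved by
// src's border. The constant fast path above means a constant source with a
// matching border collapses to a single constant() call.
#if 0
static void
UTexampleCopyWithOffset(UT_VoxelArray<fpreal32> &dst,
                        const UT_VoxelArray<fpreal32> &src)
{
    dst.copyWithOffset(src, /*offx=*/5, /*offy=*/0, /*offz=*/-3);
}
#endif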
5367 
5368 template <typename T>
5369 void
5370 UT_VoxelArray<T>::copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
5371  int offx, int offy, int offz,
5372  const UT_JobInfo &info)
5373 {
5374  UT_VoxelArrayIterator<T> vit;
5375  UT_Vector3I off(offx, offy, offz);
5376  bool can_copy_tiles = ((offx & TILEMASK) == 0) && ((offy & TILEMASK) == 0)
5377  && ((offz & TILEMASK) == 0);
5378 
5379  vit.setArray(this);
5380  vit.splitByTile(info);
5381  vit.setCompressOnExit(true);
5382 
5383  // Iterate over all tiles
5384  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5385  {
5386  // Check out this tile
5387  UT_VoxelTile<T> *tile = vit.getTile();
5388  UT_VoxelTile<T> *srctile;
5389  int tx, ty, tz;
5390 
5391  UT_Vector3I start, end, srctileidx, srctileoff, srcend, srcendtile;
5392  vit.getTileVoxels(start, end);
5393 
5394  // If offsets are all multiples of TILESIZE, it is possible to do direct copies,
5395  // assuming source tile is inside and of the same size.
5396  if (can_copy_tiles)
5397  {
5398  // Start by getting the source tile index and ensuring it is inside the
5399  // source array.
5400  bool inside = true;
5401  for (int i = 0; i < 3; i++)
5402  {
5403  srctileidx(i) = vit.myTilePos[i] + (off(i) >> TILEBITS);
5404  inside = inside && srctileidx(i) >= 0
5405  && srctileidx(i) < src.getTileRes(i);
5406  }
5407  // If the tile is inside, we make sure the tile sizes are the same, in which
5408  // case we can do a copy and move on.
5409  if (inside)
5410  {
5411  srctile = src.getTile(srctileidx.x(), srctileidx.y(), srctileidx.z());
5412  if (tile->xres() == srctile->xres() && tile->yres() == srctile->yres()
5413  && tile->zres() == srctile->zres())
5414  {
5415  *tile = *srctile;
5416  continue;
5417  }
5418  }
5419  }
5420 
5421  srctileidx = start;
5422  srctileidx += off;
5423  srctileidx.x() >>= TILEBITS;
5424  srctileidx.y() >>= TILEBITS;
5425  srctileidx.z() >>= TILEBITS;
5426  srctileoff.x() = off.x() & TILEMASK;
5427  srctileoff.y() = off.y() & TILEMASK;
5428  srctileoff.z() = off.z() & TILEMASK;
5429 
5430  srcend = start;
5431  // We are very careful here to be inclusive, so we don't trigger
5432  // the next tile. This is the largest index we expect to index.
5433  srcend.x() += tile->xres() - 1;
5434  srcend.y() += tile->yres() - 1;
5435  srcend.z() += tile->zres() - 1;
5436  srcend += off;
5437  srcendtile = srcend;
5438  srcendtile.x() >>= TILEBITS;
5439  srcendtile.y() >>= TILEBITS;
5440  srcendtile.z() >>= TILEBITS;
5441 
5442  UT_ASSERT(srcendtile.x() == srctileidx.x() ||
5443  srcendtile.x() == srctileidx.x()+1);
5444  UT_ASSERT(srcendtile.y() == srctileidx.y() ||
5445  srcendtile.y() == srctileidx.y()+1);
5446  UT_ASSERT(srcendtile.z() == srctileidx.z() ||
5447  srcendtile.z() == srctileidx.z()+1);
5448 
5449  // Check if we are fully in bounds.
5450  if (srctileidx.x() >= 0 &&
5451  srctileidx.y() >= 0 &&
5452  srctileidx.z() >= 0 &&
5453  srcendtile.x() < src.getTileRes(0) &&
5454  srcendtile.y() < src.getTileRes(1) &&
5455  srcendtile.z() < src.getTileRes(2) &&
5456  srcend.x() < src.getXRes() &&
5457  srcend.y() < src.getYRes() &&
5458  srcend.z() < src.getZRes())
5459  {
5460  bool allconst = true, firsttile = true;
5461  T constval, cval;
5462  // Check if we are all constant...
5463  for (tz = srctileidx.z(); allconst && tz <= srcendtile.z(); tz++)
5464  for (ty = srctileidx.y(); allconst && ty <= srcendtile.y(); ty++)
5465  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
5466  {
5467  srctile = src.getTile(tx, ty, tz);
5468  if (!srctile->isConstant())
5469  {
5470  allconst = false;
5471  break;
5472  }
5473  cval = (*srctile)(0, 0, 0);
5474  if (firsttile)
5475  {
5476  firsttile = false;
5477  constval = cval;
5478  }
5479  if (!SYSisEqual(cval, constval))
5480  {
5481  allconst = false;
5482  break;
5483  }
5484  }
5485  if (allconst)
5486  {
5487  // Should have found at least one tile!
5488  UT_ASSERT(!firsttile);
5489  tile->makeConstant(constval);
5490 
5491  // Onto the next tile!
5492  continue;
5493  }
5494  }
5495 
5496  // Not all of the fragments are constant, or they aren't all inside
5497  // our range, so we have to work one fragment at a time.
5498  for (tz = srctileidx.z(); tz <= srcendtile.z(); tz++)
5499  for (ty = srctileidx.y(); ty <= srcendtile.y(); ty++)
5500  for (tx = srctileidx.x(); tx <= srcendtile.x(); tx++)
5501  {
5502  int destx, desty, destz;
5503  int srcx, srcy, srcz;
5504 
5505  destx = (tx == srctileidx.x()) ? 0 : (TILESIZE-srctileoff.x());
5506  desty = (ty == srctileidx.y()) ? 0 : (TILESIZE-srctileoff.y());
5507  destz = (tz == srctileidx.z()) ? 0 : (TILESIZE-srctileoff.z());
5508  srcx = (tx == srctileidx.x()) ? srctileoff.x() : 0;
5509  srcy = (ty == srctileidx.y()) ? srctileoff.y() : 0;
5510  srcz = (tz == srctileidx.z()) ? srctileoff.z() : 0;
5511 #if 1
5512  if (tx >= 0 &&
5513  ty >= 0 &&
5514  tz >= 0 &&
5515  tx < src.getTileRes(0) &&
5516  ty < src.getTileRes(1) &&
5517  tz < src.getTileRes(2) &&
5518  ((tx != src.getTileRes(0)-1) ||
5519  srcend.x() < src.getXRes()) &&
5520  ((ty != src.getTileRes(1)-1) ||
5521  srcend.y() < src.getYRes()) &&
5522  ((tz != src.getTileRes(2)-1) ||
5523  srcend.z() < src.getZRes())
5524  )
5525  {
5526  srctile = src.getTile(tx, ty, tz);
5527  // In bounds
5528  tile->copyFragment(
5529  destx, desty, destz,
5530  *srctile,
5531  srcx, srcy, srcz
5532  );
5533  }
5534  else
5535 #endif
5536  {
5537  // Out of bounds!
5538  int maxd = SYSmin(tile->zres(),
5539  TILESIZE-srcz);
5540  int maxh = SYSmin(tile->yres(),
5541  TILESIZE-srcy);
5542  int maxw = SYSmin(tile->xres(),
5543  TILESIZE-srcx);
5544  for (int z = destz; z < maxd; z++)
5545  {
5546  for (int y = desty; y < maxh; y++)
5547  {
5548  for (int x = destx; x < maxw; x++)
5549  {
5550  T val;
5551 
5552  val = src.getValue(x + vit.x() + offx,
5553  y + vit.y() + offy,
5554  z + vit.z() + offz);
5555  tile->setValue(x, y, z, val);
5556  }
5557  }
5558  }
5559  }
5560  }
5561  }
5562 }
5563 
5564 template <typename T>
5565 void
5566 UT_VoxelArray<T>::resamplethreadPartial(const UT_VoxelArray<T> &src,
5567  const UT_Filter *filter, float radius,
5568  int clampaxis,
5569  const UT_JobInfo &info)
5570 {
5571  UT_VoxelArrayIterator<T> vit;
5572  UT_Vector3 pos;
5573  UT_Vector3 ratio;
5574  UT_Interrupt *boss = UTgetInterrupt();
5575 
5576  vit.setArray(this);
5577  vit.splitByTile(info);
5578  vit.setCompressOnExit(true);
5579  vit.setInterrupt(boss);
5580 
5581  ratio.x() = 1.0f / getXRes();
5582  ratio.y() = 1.0f / getYRes();
5583  ratio.z() = 1.0f / getZRes();
5584 
5585  for (vit.rewind(); !vit.atEnd(); vit.advance())
5586  {
5587  pos.x() = vit.x()+0.5f;
5588  pos.y() = vit.y()+0.5f;
5589  pos.z() = vit.z()+0.5f;
5590  pos *= ratio;
5591 
5592  vit.setValue(src.evaluate(pos, *filter, radius, clampaxis));
5593  }
5594 }
5595 
5596 template <typename T>
5597 bool
5598 UT_VoxelArray<T>::posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
5599 {
5600  // We go from the position in the unit cube into the index.
5601  // The center of cells must map to the exact integer indices.
5602  pos.x() *= myRes[0];
5603  pos.y() *= myRes[1];
5604  pos.z() *= myRes[2];
5605 
5606  // The centers of cells are now mapped .5 too high. I.e., the true
5607  // center of cell index (0,0,0) would be (0.5,0.5,0.5). This, however,
5608  // is exactly what we want for rounding.
5609  x = (int) SYSfloor(pos.x());
5610  y = (int) SYSfloor(pos.y());
5611  z = (int) SYSfloor(pos.z());
5612 
5613  // Determine if out of bounds.
5614  return isValidIndex(x, y, z);
5615 }
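// A worked example of the mapping above (editorial): with myRes[0] == 10, a
// unit-cube position of x == 0.05 scales to 0.5 and floors to index 0, whose
// cell center indeed sits at 0.05; x == 0.999 scales to 9.99 and floors to
// index 9, the last valid cell. Anything at or beyond 1.0 fails the
// isValidIndex() test.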
5616 
5617 template <typename T>
5618 bool
5619 UT_VoxelArray<T>::posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const
5620 {
5621  // We go from the position in the unit cube into the index.
5622  // The center of cells must map to the exact integer indices.
5623  pos.x() *= myRes[0];
5624  pos.y() *= myRes[1];
5625  pos.z() *= myRes[2];
5626 
5627  // The centers of cells are now mapped .5 too high. I.e., the true
5628  // center of cell index (0,0,0) would be (0.5,0.5,0.5).
5629  pos.x() -= 0.5;
5630  pos.y() -= 0.5;
5631  pos.z() -= 0.5;
5632 
5633  ipos = pos;
5634 
5635  // Determine if out of bounds.
5636  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5637  pos.y() < 0 || pos.y() >= myRes[1] ||
5638  pos.z() < 0 || pos.z() >= myRes[2])
5639  return false;
5640 
5641  return true;
5642 }
5643 
5644 template <typename T>
5645 bool
5646 UT_VoxelArray<T>::posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const
5647 {
5648  // We go from the position in the unit cube into the index.
5649  // The center of cells must map to the exact integer indices.
5650  pos.x() *= myRes[0];
5651  pos.y() *= myRes[1];
5652  pos.z() *= myRes[2];
5653 
5654  // The centers of cells are now mapped .5 too high. I.e., the true
5655  // center of cell index (0,0,0) would be (0.5,0.5,0.5). This, however,
5656  // is exactly what we want for rounding.
5657  x = (exint) SYSfloor(pos.x());
5658  y = (exint) SYSfloor(pos.y());
5659  z = (exint) SYSfloor(pos.z());
5660 
5661  // Determine if out of bounds.
5662  return isValidIndex(x, y, z);
5663 }
5664 
5665 template <typename T>
5666 bool
5667 UT_VoxelArray<T>::posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const
5668 {
5669  // We go from the position in the unit cube into the index.
5670  // The center of cells must map to the exact integer indices.
5671  pos.x() *= myRes[0];
5672  pos.y() *= myRes[1];
5673  pos.z() *= myRes[2];
5674 
5675  // The centers of cells are now mapped .5 too high. I.e., the true
5676  // center of cell index (0,0,0) would be (0.5,0.5,0.5).
5677  pos.x() -= 0.5;
5678  pos.y() -= 0.5;
5679  pos.z() -= 0.5;
5680 
5681  ipos = pos;
5682 
5683  // Determine if out of bounds.
5684  if (pos.x() < 0 || pos.x() >= myRes[0] ||
5685  pos.y() < 0 || pos.y() >= myRes[1] ||
5686  pos.z() < 0 || pos.z() >= myRes[2])
5687  return false;
5688 
5689  return true;
5690 }
5691 
5692 template <typename T>
5693 bool
5694 UT_VoxelArray<T>::indexToPos(int x, int y, int z, UT_Vector3F &pos) const
5695 {
5696  pos.x() = x;
5697  pos.y() = y;
5698  pos.z() = z;
5699 
5700  // Move the indices to the centers of the cells.
5701  pos.x() += 0.5;
5702  pos.y() += 0.5;
5703  pos.z() += 0.5;
5704 
5705  // And scale into the unit cube.
5706  pos *= myInvRes;
5707 
5708  // Return true if the original coordinates were in range.
5709  return isValidIndex(x, y, z);
5710 }
5711 
5712 template <typename T>
5713 bool
5714 UT_VoxelArray<T>::indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const
5715 {
5716  pos.x() = x;
5717  pos.y() = y;
5718  pos.z() = z;
5719 
5720  // Move the indices to the centers of the cells.
5721  pos.x() += 0.5;
5722  pos.y() += 0.5;
5723  pos.z() += 0.5;
5724 
5725  // And scale into the unit cube.
5726  pos *= myInvRes;
5727 
5728  // Return true if the original coordinates were in range.
5729  return isValidIndex(x, y, z);
5730 }
5731 
5732 template <typename T>
5733 void
5734 UT_VoxelArray<T>::findexToPos(UT_Vector3F index, UT_Vector3F &pos) const
5735 {
5736  pos = index;
5737 
5738  // Move the indices to the centers of the cells.
5739  pos.x() += 0.5;
5740  pos.y() += 0.5;
5741  pos.z() += 0.5;
5742 
5743  // And scale into the unit cube.
5744  pos *= myInvRes;
5745 }
5746 
5747 template <typename T>
5748 void
5749 UT_VoxelArray<T>::findexToPos(UT_Vector3D index, UT_Vector3D &pos) const
5750 {
5751  pos = index;
5752 
5753  // Move the indices to the centers of the cells.
5754  pos.x() += 0.5;
5755  pos.y() += 0.5;
5756  pos.z() += 0.5;
5757 
5758  // And scale into the unit cube.
5759  pos *= myInvRes;
5760 }
5761 
5762 template <typename T>
5763 void
5764 UT_VoxelArray<T>::setBorder(UT_VoxelBorderType type, T t)
5765 {
5766  myBorderType = type;
5767  myBorderValue = t;
5768 }
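// An editorial usage sketch: with a constant zero border, out-of-range
// lookups return zero, and the offset-copy and tile-move routines above can
// turn fully-outside tiles into constant tiles instead of sampling them.
#if 0
static void
UTexampleSetBorder(UT_VoxelArray<fpreal32> &vol)
{
    vol.setBorder(UT_VOXELBORDER_CONSTANT, 0.0f);
}
#endif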
5769 
5770 template <typename T>
5771 void
5772 UT_VoxelArray<T>::setBorderScale(T sx, T sy, T sz)
5773 {
5774  myBorderScale[0] = sx;
5775  myBorderScale[1] = sy;
5776  myBorderScale[2] = sz;
5777 }
5778 
5779 template <typename T>
5780 void
5781 UT_VoxelArray<T>::compressTilesPartial(const UT_JobInfo &info)
5782 {
5783  UT_VoxelArrayIterator<T> vit(this);
5784  vit.splitByTile(info);
5785  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5786  {
5787  int i = vit.getLinearTileNum();
5788  myTiles[i].tryCompress(getCompressionOptions());
5789  }
5790 }
5791 
5792 template <typename T>
5793 void
5794 UT_VoxelArray<T>::expandAllTilesPartial(const UT_JobInfo &info)
5795 {
5796  UT_VoxelArrayIterator<T> vit(this);
5797  vit.splitByTile(info);
5798  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5799  {
5800  int i = vit.getLinearTileNum();
5801  myTiles[i].uncompress();
5802  }
5803 }
5804 
5805 template <typename T>
5806 void
5807 UT_VoxelArray<T>::expandAllNonConstTilesPartial(const UT_JobInfo &info)
5808 {
5809  UT_VoxelArrayIterator<T> vit(this);
5810  vit.splitByTile(info);
5811  for (vit.rewind(); !vit.atEnd(); vit.advanceTile())
5812  {
5813  int i = vit.getLinearTileNum();
5814  if (!myTiles[i].isConstant())
5815  myTiles[i].uncompress();
5816  }
5817 }
5818 
5819 template <typename T>
5820 void
5821 UT_VoxelArray<T>::saveData(std::ostream &os) const
5822 {
5823  T cval;
5824  char version;
5825 
5826  // First determine if we are fully constant.
5827  if (isConstant(&cval))
5828  {
5829  // Save a constant array.
5830  version = 0;
5831  UTwrite(os, &version, 1);
5832  UTwrite<T>(os, &cval);
5833  return;
5834  }
5835 
5836  // Compressed tiles.
5837  version = 1;
5838  UTwrite(os, &version, 1);
5839 
5840  // Store list of compression types
5841  UT_VoxelTile<T>::saveCompressionTypes(os);
5842 
5843  int i, ntiles;
5844 
5845  ntiles = numTiles();
5846  for (i = 0; i < ntiles; i++)
5847  {
5848  myTiles[i].save(os);
5849  }
5850 }
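// An editorial sketch of the stream round trip (the streams arrive as
// parameters to sidestep construction details, and the names are
// hypothetical): version 0 stores one constant value, version 1 stores the
// compression-type table followed by every tile. loadData() fills the tiles
// of an already-sized array, so dst must match src's resolution.
#if 0
static void
UTexampleSaveLoad(const UT_VoxelArray<fpreal32> &src,
                  UT_VoxelArray<fpreal32> &dst,
                  std::ostream &os, UT_IStream &is)
{
    src.saveData(os);
    dst.size(src.getXRes(), src.getYRes(), src.getZRes());
    dst.loadData(is);
}
#endif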
5851 
5852 
5853 template <typename T>
5854 void
5855 UT_VoxelArray<T>::loadData(UT_IStream &is)
5856 {
5857  T cval;
5858  char version;
5859 
5860  is.readChar(version);
5861 
5862  // First determine if we are fully constant.
5863  if (version == 0)
5864  {
5865  // Save a constant array.
5866  is.read<T>(&cval);
5867 
5868  constant(cval);
5869  return;
5870  }
5871 
5872  if (version == 1)
5873  {
5874  UT_IntArray compressions;
5875 
5876  // Store list of compression types
5877  UT_VoxelTile<T>::loadCompressionTypes(is, compressions);
5878 
5879  int i, ntiles;
5880 
5881  ntiles = numTiles();
5882  for (i = 0; i < ntiles; i++)
5883  {
5884  myTiles[i].load(is, compressions);
5885  }
5886  }
5887 }
5888 
5889 template <typename T>
5890 bool
5891 UT_VoxelArray<T>::saveData(UT_JSONWriter &w, const char *shared_mem_owner) const
5892 {
5893  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
5894 
5895  bool ok = true;
5896  T cval;
5897  int8 version;
5898 
5899  // First determine if we are fully constant.
5900  if (isConstant(&cval))
5901  {
5902  ok = ok && w.jsonBeginArray();
5903  // Save a constant array.
5904  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5905  UT_VoxelArrayJSON::ARRAY_CONSTANTARRAY));
5906  if constexpr (tuple_size == 1)
5907  ok = ok && w.jsonValue(cval);
5908  else
5909  ok = ok && w.jsonUniformArray(tuple_size, cval.data());
5910  ok = ok && w.jsonEndArray();
5911  return ok;
5912  }
5913 
5914  if (shared_mem_owner)
5915  {
5916  SYS_SharedMemory *shm;
5917  shm = copyToSharedMemory(shared_mem_owner);
5918  if (shm)
5919  {
5920  ok = ok && w.jsonBeginArray();
5921  // Save a shared memory array.
5922  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5923  UT_VoxelArrayJSON::ARRAY_SHAREDARRAY));
5924  ok = ok && w.jsonString(shm->id());
5925  ok = ok && w.jsonEndArray();
5926  return ok;
5927  }
5928  // Fall back on raw write.
5929  }
5930 
5931 
5932  // Compressed tiles.
5933  ok = ok && w.jsonBeginArray();
5934  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5935  UT_VoxelArrayJSON::ARRAY_TILEDARRAY));
5936  ok = ok && w.jsonBeginArray();
5937  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5938  UT_VoxelArrayJSON::ARRAY_VERSION));
5939  version = 1;
5940  ok = ok && w.jsonValue(version);
5941 
5942  // Store list of compression types
5943  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5944  UT_VoxelArrayJSON::ARRAY_COMPRESSIONTYPES));
5945  ok = ok && UT_VoxelTile<T>::saveCompressionTypes(w);
5946 
5947  ok = ok && w.jsonKey(UT_VoxelArrayJSON::getToken(
5948  UT_VoxelArrayJSON::ARRAY_TILES));
5949  ok = ok && w.jsonBeginArray();
5950  int i, ntiles;
5951 
5952  ntiles = numTiles();
5953  for (i = 0; i < ntiles; i++)
5954  {
5955  ok = ok && myTiles[i].save(w);
5956  }
5957  ok = ok && w.jsonEndArray();
5958 
5959  ok = ok && w.jsonEndArray();
5960 
5961  ok = ok && w.jsonEndArray();
5962  return ok;
5963 }
5964 
5965 template <typename T>
5966 bool
5967 UT_VoxelArray<T>::loadData(UT_JSONParser &p)
5968 {
5969  constexpr exint tuple_size = UT_FixedVectorTraits<T>::TupleSize;
5970 
5971  T cval;
5972  int8 version;
5973  UT_WorkBuffer key;
5974  int key_id;
5975  bool array_error = false;
5976 
5977  delete mySharedMemView;
5978  mySharedMemView = 0;
5979 
5980  delete mySharedMem;
5981  mySharedMem = 0;
5982 
5983  if (!p.parseBeginArray(array_error) || array_error)
5984  return false;
5985 
5986  if (!p.parseString(key))
5987  return false;
5988  key_id = UT_VoxelArrayJSON::getArrayID(key.buffer());
5989  if (key_id == UT_VoxelArrayJSON::ARRAY_CONSTANTARRAY)
5990  {
5991  if constexpr (tuple_size == 1)
5992  {
5993  if (!p.parseNumber(cval))
5994  return false;
5995  }
5996  else
5997  {
5998  if (!p.parseUniformArray(cval.data(), tuple_size))
5999  return false;
6000  }
6001 
6002  constant(cval);
6003  }
6004  else if (key_id == UT_VoxelArrayJSON::ARRAY_SHAREDARRAY)
6005  {
6006  UT_WorkBuffer shm_id;
6007  if (!p.parseString(shm_id))
6008  return false;
6009 
6010  if (!populateFromSharedMemory(shm_id.buffer()))
6011  {
6012  // If the shared memory disappears before we get a chance to read
6013  // it, don't abort, just set a constant value. That way Mantra has
6014  // a chance to recover. Not the most elegant solution but works for
6015  // now.
6016  T cval;
6017  cval = 0;
6018  constant(cval);
6019  }
6020  }
6021  else if (key_id == UT_VoxelArrayJSON::ARRAY_TILEDARRAY)
6022  {
6023  UT_JSONParser::traverser it, tile_it;
6024  UT_IntArray compressions;
6025  int i, ntiles;
6026 
6027 
6028  for (it = p.beginArray(); !it.atEnd(); ++it)
6029  {
6030  if (!it.getLowerKey(key))
6031  return false;
6032  switch (UT_VoxelArrayJSON::getArrayID(key.buffer()))
6033  {
6034  case UT_VoxelArrayJSON::ARRAY_VERSION:
6035  if (!p.parseNumber(version))
6036  return false;
6037  break;
6038  case UT_VoxelArrayJSON::ARRAY_COMPRESSIONTYPES:
6039  // Store list of compression types
6040  if (!UT_VoxelTile<T>::loadCompressionTypes(p, compressions))
6041  return false;
6042  break;
6043 
6044  case UT_VoxelArrayJSON::ARRAY_TILES:
6045  ntiles = numTiles();
6046  for (tile_it = p.beginArray(), i = 0; !tile_it.atEnd();
6047  ++tile_it, ++i)
6048  {
6049  if (i < ntiles)
6050  {
6051  if (!myTiles[i].load(p, compressions))
6052  return false;
6053  }
6054  else
6055  {
6056  UT_VoxelTile<T> dummy_tile;
6057  dummy_tile.setRes(TILESIZE, TILESIZE, TILESIZE);
6058  if (!dummy_tile.load(p, compressions))
6059  return false;
6060  }
6061  }
6062  if (i != ntiles)
6063  return false;
6064  break;
6065  default:
6066  p.addWarning("Unexpected key for voxel data: %s",
6067  key.buffer());
6068  if (!p.skipNextObject())
6069  return false;
6070  }
6071  }
6072  }
6073  else
6074  {
6075  p.addWarning("Unexpected voxel key: %s\n", key.buffer());
6076  p.skipNextObject();
6077  return false;
6078  }
6079 
6080  if (!p.parseEndArray(array_error) || array_error)
6081  return false;
6082 
6083  return true;
6084 }
6085 
6086 template<typename T>
6087 SYS_SharedMemory *
6088 UT_VoxelArray<T>::copyToSharedMemory(const char *shared_mem_owner) const
6089 {
6090  UT_SharedMemoryManager &shmgr = UT_SharedMemoryManager::get();
6091 
6092  int ntiles;
6093 
6094  ntiles = numTiles();
6095 
6096  // Tally up the size we need.
6097  exint total_mem_size;
6098 
6099  total_mem_size = sizeof(exint); // Tile count.
6100 
6101  UT_Array<exint> tile_sizes;
6102  UT_Array<exint> next_block_offsets;
6103 
6104  for (int i = 0; i < ntiles; i++)
6105  {
6106  exint tile_size, block_size;
6107  if (myTiles[i].isConstant())
6108  tile_size = sizeof(T);
6109  else
6110  tile_size = myTiles[i].getDataLength();
6111 
6112  tile_sizes.append(tile_size);
6113 
6114  block_size = SYSroundUpToMultipleOf<exint>(tile_size, sizeof(exint));
6115  block_size += sizeof(exint) * 2; // Offset + compression type
6116 
6117  if (i < (ntiles-1))
6118  next_block_offsets.append(block_size / sizeof(exint));
6119  else
6120  next_block_offsets.append(0);
6121 
6122  total_mem_size += block_size;
6123  }
6124 
6125  // Start with an empty shared memory.
6126  SYS_SharedMemory *shared_mem;
6127  UT_WorkBuffer shared_mem_key;
6128 
6129  shared_mem_key.sprintf("%s:%p", shared_mem_owner, this);
6130 
6131  shared_mem = shmgr.get(shared_mem_key.buffer());
6132 
6133  // Does the existing block have the same number of tiles and same
6134  // offsets? In that case we don't reset the shared memory, just write
6135  // the tiles out again. There's very little adverse effect in changing
6136  // the voxel tile data.
6137  bool same_meta_data = false;
6138  if (shared_mem->size() >= sizeof(exint))
6139  {
6140  same_meta_data = true;
6141  {
6142  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
6143 
6144  exint *ptr_ds = (exint *)sv_tc.data();
6145  if (*ptr_ds != ntiles)
6146  same_meta_data = false;
6147  }
6148 
6149  if (same_meta_data)
6150  {
6151  // Check if the offsets + compression type is the same.
6152  exint offset = sizeof(exint);
6153  for (int i = 0; i < ntiles; i++)
6154  {
6155  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
6156  sizeof(exint) * 2);
6157 
6158  exint *ptr_tile = (exint *)sv_tile.data();
6159  if (ptr_tile[0] != next_block_offsets(i) ||
6160  ptr_tile[1] != (exint)myTiles[i].myCompressionType)
6161  {
6162  same_meta_data = false;
6163  break;
6164  }
6165  offset += next_block_offsets(i) * sizeof(exint);
6166  }
6167  }
6168  }
6169 
6170  if (!same_meta_data)
6171  {
6172  if (!shared_mem->reset(total_mem_size) ||
6173  shared_mem->size() != total_mem_size)
6174  {
6175  return nullptr;
6176  }
6177  }
6178 
6179  {
6180  SYS_SharedMemoryView sv_tc(*shared_mem, 0, sizeof(exint));
6181 
6182  exint *ptr_ds = (exint *)sv_tc.data();
6183  *ptr_ds = ntiles;
6184  }
6185 
6186  exint offset = sizeof(exint);
6187  for (int i = 0; i < ntiles; i++)
6188  {
6189  SYS_SharedMemoryView sv_tile(*shared_mem, offset,
6190  sizeof(exint) * 2 + tile_sizes(i));
6191 
6192  exint *ptr_tile = (exint *)sv_tile.data();
6193 
6194  // Offset to next tile in exint units.
6195  ptr_tile[0] = next_block_offsets(i);
6196  ptr_tile[1] = myTiles[i].myCompressionType;
6197 
6198  ::memcpy(&ptr_tile[2], myTiles[i].rawData(), tile_sizes(i));
6199 
6200  offset += ptr_tile[0] * sizeof(exint);
6201  }
6202 
6203  return shared_mem;
6204 }
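// The shared block built above has this layout (editorial summary):
//
//   [exint ntiles]
//   [tile 0: exint next, exint ctype, data..., pad to exint]
//   [tile 1: exint next, exint ctype, data..., pad to exint]
//   ...
//
// where `next` is the hop to the following tile in exint units (0 for the
// last tile) and each tile's data is padded to an exint multiple, so a
// reader can walk tile to tile without decoding compression sizes.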
6205 
6206 template<typename T>
6207 bool
6208 UT_VoxelArray<T>::populateFromSharedMemory(const char *id)
6209 {
6210  mySharedMem = new SYS_SharedMemory(id, /*read_only=*/true);
6211  if (!mySharedMem->size())
6212  {
6213  delete mySharedMem;
6214  mySharedMem = 0;
6215  return false;
6216  }
6217 
6218  exint ntiles;
6219  {
6220  SYS_SharedMemoryView sv_tc(*mySharedMem, 0, sizeof(exint));
6221  ntiles = *(const exint *)sv_tc.data();
6222  }
6223  if (ntiles != numTiles())
6224  {
6225  delete mySharedMem;
6226  mySharedMem = 0;
6227  return false;
6228  }
6229 
6230  mySharedMemView = new SYS_SharedMemoryView(*mySharedMem, sizeof(exint));
6231 
6232  exint *data = (exint *)mySharedMemView->data();
6233 
6234  for (int i = 0; i < ntiles; i++)
6235  {
6236  exint offset = data[0];
6237  int8 ctype = (int8)data[1];
6238 
6239  myTiles[i].setForeignData(&data[2], ctype);
6240 
6241  data += offset;
6242  }
6243 
6244  return true;
6245 }
6246 
6247 
6248 
6249 
6250 //
6251 // UT_VoxelMipMap functions
6252 //
6253 template <typename T>
6254 UT_VoxelMipMap<T>::UT_VoxelMipMap()
6255 {
6256  initializePrivate();
6257 }
6258 
6259 template <typename T>
6260 UT_VoxelMipMap<T>::~UT_VoxelMipMap()
6261 {
6262  destroyPrivate();
6263 }
6264 
6265 template <typename T>
6266 UT_VoxelMipMap<T>::UT_VoxelMipMap(const UT_VoxelMipMap<T> &src)
6267 {
6268  initializePrivate();
6269 
6270  *this = src;
6271 }
6272 
6273 template <typename T>
6274 const UT_VoxelMipMap<T> &
6275 UT_VoxelMipMap<T>::operator=(const UT_VoxelMipMap<T> &src)
6276 {
6277  UT_VoxelArray<T> **levels;
6278  int level, i;
6279 
6280  if (&src == this)
6281  return *this;
6282 
6283  destroyPrivate();
6284 
6285  // We do a deep copy. We have learned from UT_String that
6286  // shallow copies will destroy us in the end.
6287  myOwnBase = true;
6288  myBaseLevel = new UT_VoxelArray<T>;
6289  *myBaseLevel = *src.myBaseLevel;
6290 
6291  myNumLevels = src.myNumLevels;
6292 
6293  for (i = 0; i < src.myLevels.entries(); i++)
6294  {
6295  levels = new UT_VoxelArray<T> *[myNumLevels];
6296  myLevels.append(levels);
6297 
6298  for (level = 0; level < myNumLevels; level++)
6299  {
6300  levels[level] = new UT_VoxelArray<T>;
6301  *levels[level] = *src.myLevels(i)[level];
6302  }
6303  }
6304  return *this;
6305 }
6306 
6307 template <typename T>
6308 void
6309 UT_VoxelMipMap<T>::build(UT_VoxelArray<T> *baselevel,
6310  mipmaptype function)
6311 {
6312  UT_Array<mipmaptype> functions;
6313 
6314  functions.append(function);
6315  build(baselevel, functions);
6316 }
6317 
6318 template <typename T>
6319 void