UT_VoxelArray.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_ValArray.h"
24 #include "UT_Array.h"
25 #include "UT_FilterType.h"
26 #include "UT_COW.h"
27 #include "UT_ThreadedAlgorithm.h"
28 #include "UT_Interrupt.h"
29 #include <SYS/SYS_Align.h>
30 #include <SYS/SYS_Floor.h>
31 #include <SYS/SYS_Inline.h>
32 #include <SYS/SYS_Math.h>
33 
34 #include <SYS/SYS_StaticAssert.h>
35 #include <SYS/SYS_Types.h>
36 
37 // In real-world tests, TBB alloc is 3-4% faster. Yay!
38 // But unfortunately it is less aggressive about fragmentation, so
39 // we effectively use 2x the memory. Boo.
40 
41 //#define VOXEL_USE_TBB_ALLOC
42 
43 #ifdef VOXEL_USE_TBB_ALLOC
44 
45 #include <tbb/scalable_allocator.h>
46 
47 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
48 #define UT_VOXEL_FREE(x) scalable_free(x)
49 
50 #else
51 
52 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
53 #define UT_VOXEL_FREE(x) SYSafree(x)
54 
55 #endif
56 
57 class UT_Filter;
58 class UT_JSONWriter;
59 class UT_JSONParser;
60 class SYS_SharedMemory;
61 class SYS_SharedMemoryView;
62 
63 static const int TILEBITS = 4;
64 static const int TILESIZE = 1 << TILEBITS;
65 static const int TILEMASK = TILESIZE-1;
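/// A small illustrative sketch (not part of the original header): a voxel
/// coordinate splits into a tile index and a tile-local offset with a
/// shift and a mask.
/// @code
/// int x  = 37;
/// int tx = x >> TILEBITS;   // tile index:   37 / 16 = 2
/// int lx = x & TILEMASK;    // local offset: 37 % 16 = 5
/// @endcode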
66 
67 ///
68 /// Behaviour of out of bound reads.
69 ///
70 enum UT_VoxelBorderType
71 {
72  UT_VOXELBORDER_CONSTANT,
73  UT_VOXELBORDER_REPEAT,
74  UT_VOXELBORDER_STREAK,
75  UT_VOXELBORDER_EXTRAP,
76  UT_VOXELBORDER_MIRROR
77 };
78 
79 template <typename T> class UT_VoxelTile;
80 template <typename T> class UT_VoxelArray;
81 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
82 template <typename T> class UT_VoxelProbeCube;
83 template <typename T> class UT_VoxelProbeFace;
84 
85 struct UT_VoxelArrayTileDataDescr
86 {
87  int tileidx;
88  int numvoxel;
89 };
90 
91 class UT_VoxelCompressOptions
92 {
93 public:
94  UT_VoxelCompressOptions()
95  {
96  myConstantTol = 0;
97  myQuantizeTol = 0;
98  myAllowFP16 = false;
99  }
100 
101  // Used for quantization.
102  enum DitherType
103  {
104  DITHER_NONE,
105  DITHER_ORDERED
106  };
107 
108  /// Determines if compressTile should be run on this grid for
109  /// things other than constant compression. Used by writeTiles
110  /// to limit compression attempts.
111  bool compressionEnabled() const
112  {
113  return myAllowFP16 || myConstantTol > 0 || myQuantizeTol > 0;
114  }
115 
116  /// Tiles will be constant if within this range. This may
117  /// need to be tighter than quantization tolerance as
118  /// dithering can't recover partial values.
119  fpreal myConstantTol;
120  /// Tolerance for quantizing to reduced bit depth
121  fpreal myQuantizeTol;
122 
123  DitherType myDitherType;
124 
125  /// Conversion to fpreal16, only valid for scalar data.
126  bool myAllowFP16;
127 };
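/// A minimal sketch of configuring these options (illustrative only; the
/// fields are the members declared above, and UT_VoxelArray keeps its own
/// copy in the myCompressionOptions member shown later in this file):
/// @code
/// UT_VoxelCompressOptions opts;
/// opts.myConstantTol = 1e-5;    // collapse nearly uniform tiles
/// opts.myAllowFP16 = true;      // permit 16-bit float storage
/// bool worth_trying = opts.compressionEnabled();   // true here
/// @endcode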
128 
129 ///
130 /// UT_VoxelTileCompress
131 ///
132 /// A compression engine for UT_VoxelTiles of a specific type. This
133 /// is a verb class which is invoked from the voxeltile class.
134 ///
135 template <typename T>
136 class UT_VoxelTileCompress
137 {
138 public:
141 
142  /// Attempts to write data directly to the compressed tile.
143  /// Returns false if not possible.
144  virtual bool writeThrough(UT_VoxelTile<T> &tile,
145  int x, int y, int z, T t) const = 0;
146 
147  /// Reads directly from the compressed data.
148  /// Cannot alter the tile in any way because it must be threadsafe.
149  virtual T getValue(const UT_VoxelTile<T> &tile,
150  int x, int y, int z) const = 0;
151 
152  /// Attempts to compress the data according to the given tolerance.
153  /// If successful, returns true.
154  virtual bool tryCompress(UT_VoxelTile<T> &tile,
155  const UT_VoxelCompressOptions &options,
156  T min, T max) const = 0;
157 
158  /// Returns the length in bytes of the data in the tile.
159  /// It must be at least one byte long.
160  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
161 
162  /// Returns true if the compression type is lossless
163  virtual bool isLossless() const { return false; }
164 
165  /// Determines the min & max values of the tile. A default
166  /// implementation uses getValue() on all voxels.
167  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
168 
169  /// Does this engine support saving and loading?
170  virtual bool canSave() const { return false; }
171  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
172  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
173  { return false; }
174  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
175  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
176  { return false; }
177 
178  /// Returns the unique name of this compression engine so
179  /// we can look up engines by name (the index of the compression
180  /// engine is assigned at load time so isn't constant)
181  virtual const char *getName() = 0;
182 };
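/// A skeletal sketch of a custom engine implementing the pure virtuals
/// declared above (illustrative only; the name MyRLECompress is
/// hypothetical, and registration plus a real codec are omitted):
/// @code
/// template <typename T>
/// class MyRLECompress : public UT_VoxelTileCompress<T>
/// {
/// public:
///     bool writeThrough(UT_VoxelTile<T> &tile,
///                       int x, int y, int z, T t) const override
///     { return false; }            // never write in place; force decompress
///     T getValue(const UT_VoxelTile<T> &tile,
///                int x, int y, int z) const override
///     { return T(); }              // would decode one voxel from the tile data
///     bool tryCompress(UT_VoxelTile<T> &tile,
///                      const UT_VoxelCompressOptions &options,
///                      T min, T max) const override
///     { return false; }            // decline until a real codec exists
///     int getDataLength(const UT_VoxelTile<T> &tile) const override
///     { return sizeof(T); }
///     const char *getName() override { return "myrle"; }
/// };
/// @endcode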
183 
195 
196 #define DEFINE_STD_FUNC(TYPE) \
197 inline void \
198 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
199 { \
200  if (v < min) \
201  min = v; \
202  else if (v > max) \
203  max = v; \
204 } \
205  \
206 inline fpreal \
207 UTvoxelTileDist(TYPE a, TYPE b) \
208 { \
209  return (fpreal) SYSabs(a - b); \
210 }
211 
220 
221 #undef DEFINE_STD_FUNC
222 
223 inline void
224 UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
225 {
226  min.x() = SYSmin(v.x(), min.x());
227  max.x() = SYSmax(v.x(), max.x());
228 
229  min.y() = SYSmin(v.y(), min.y());
230  max.y() = SYSmax(v.y(), max.y());
231 }
232 
233 inline void
234 UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min, UT_Vector3 &max)
235 {
236  min.x() = SYSmin(v.x(), min.x());
237  max.x() = SYSmax(v.x(), max.x());
238 
239  min.y() = SYSmin(v.y(), min.y());
240  max.y() = SYSmax(v.y(), max.y());
241 
242  min.z() = SYSmin(v.z(), min.z());
243  max.z() = SYSmax(v.z(), max.z());
244 }
245 
246 inline void
247 UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min, UT_Vector4 &max)
248 {
249  min.x() = SYSmin(v.x(), min.x());
250  max.x() = SYSmax(v.x(), max.x());
251 
252  min.y() = SYSmin(v.y(), min.y());
253  max.y() = SYSmax(v.y(), max.y());
254 
255  min.z() = SYSmin(v.z(), min.z());
256  max.z() = SYSmax(v.z(), max.z());
257 
258  min.w() = SYSmin(v.w(), min.w());
259  max.w() = SYSmax(v.w(), max.w());
260 }
261 
262 inline fpreal
263 UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b)
264 {
265  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
266 }
267 
268 inline fpreal
269 UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b)
270 {
271  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
272  + SYSabs(a.z() - b.z());
273 }
274 
275 inline fpreal
276 UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b)
277 {
278  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
279  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
280 }
281 
282 ///
283 /// UT_VoxelTile
284 ///
285 /// A UT_VoxelArray is composed of a number of these tiles. This is
286 /// done for two reasons:
287 /// 1) Increased memory locality when processing neighbouring points.
288 /// 2) Ability to compress or page out unneeded tiles.
289 /// Currently, the only special ability is the ability to create constant
290 /// tiles.
291 ///
292 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should
293 /// usually be transparent. The only exception may be if they want to do
294 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
295 ///
296 template <typename T>
297 class UT_VoxelTile
298 {
299 public:
300  UT_VoxelTile();
301  ~UT_VoxelTile();
302 
303  // Copy constructor:
304  UT_VoxelTile(const UT_VoxelTile<T> &src);
305 
306 
307  // Assignment operator:
308  const UT_VoxelTile<T> &operator=(const UT_VoxelTile<T> &src);
309 
311  {
317  };
318 
319  /// Fetch a given local value. (x,y,z) should be local to
320  /// this tile.
321  SYS_FORCE_INLINE T operator()(int x, int y, int z) const;
322 
323  /// Lerps two numbers, templated to work with T.
325  {
326  return v1 + (v2 - v1) * bias;
327  }
328 
329  /// Does a trilinear interpolation. x,y,z should be local to this
330  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
331  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
332 
333  template <int AXIS2D>
334  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
335 
336  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
337  /// array should have 8 elements, x minor, z major.
338  /// Requires it is in bounds.
339  /// Returns true if all constant, in which case only a single
340  /// sample is filled, [0]
342  bool extractSample(int x, int y, int z,
343  T *sample) const;
344  template <int AXIS2D>
346  bool extractSampleAxis(int x, int y, int z,
347  T *sample) const;
348 
349  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
350  /// 7 samples.
351  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
352  T *sample) const;
353 #if 0
354  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
355  /// 27 elements.
356  /// Previous implementation had an error and this method isn't used in
357  /// Houdini code.
358  bool extractSampleCube(int x, int y, int z,
359  T *sample) const;
360 #endif
361 
362 #if 0
363  /// MSVC can't handle aligned parameters after the third so
364  /// frac must come first.
365  T lerp(v4uf frac, int x, int y, int z) const;
366 #endif
367 
368  /// Returns a cached line to our internal data, at local address x,y,z.
369  /// cacheline is a caller allocated structure to fill out if we have
370  /// to decompress. If forcecopy isn't set and we can, the result may
371  /// be an internal pointer. stride is set to the update for moving one
372  /// x position in the cache.
373  /// strideofone should be set to true if you want to prevent 0 stride
374  /// results for constant tiles.
375  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
376 
377  /// Fills a cache line from an external buffer into our own data.
378  void writeCacheLine(T *cacheline, int y, int z);
379 
380  /// Copies between two tiles. The tiles' voxels match up, but don't
381  /// have the same offset. The maximal overlapping voxels are copied.
382  /// this->setValue(dst, dsty, dstz, src(srcx, srcy, srcz));
383  void copyFragment(int dstx, int dsty, int dstz,
384  const UT_VoxelTile<T> &srctile,
385  int srcx, int srcy, int srcz);
386 
387  /// Flattens ourself into the given destination buffer.
388  template <typename S>
389  void flatten(S *dst, int dststride) const;
390 
391  /// Fills our values from the given dense flat buffer. Will
392  /// create a constant tile if the source is constant.
393  template <typename S>
394  void writeData(const S *src, int srcstride);
395 
396  /// setValue() is intentionally separate so we can avoid
397  /// expanding constant data when we write the same value to it.
398  void setValue(int x, int y, int z, T t);
399 
400  /// Finds the minimum and maximum T values
401  void findMinMax(T &min, T &max) const;
402 
403  /// Determines the average value of the tile.
404  void findAverage(T &avg) const;
405 
406  /// Returns if this tile is constant.
407  bool isConstant() const
408  { return myCompressionType == COMPRESS_CONSTANT; }
409 
410  /// Returns true if any NANs are in this tile
411  bool hasNan() const;
412 
413  /// Returns if this tile is in raw format.
414  bool isRaw() const
415  { return myCompressionType == COMPRESS_RAW; }
416 
417  /// Returns if this tile is in raw full format.
418  bool isRawFull() const
419  { return myCompressionType == COMPRESS_RAWFULL; }
420 
421  /// Returns true if this is a simple form of compression, either
422  /// constant, raw, or a raw full that isn't padded
423  bool isSimpleCompression() const
424  {
425  if (isRaw()) return true;
426  if (isConstant()) return true;
427  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
428  return true;
429  return false;
430  }
431 
432  /// Attempts to compress this tile. Returns true if any
433  /// compression performed.
434  bool tryCompress(const UT_VoxelCompressOptions &options);
435 
436  /// Turns this tile into a constant tile of the given value.
437  void makeConstant(T t);
438 
439  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
440  void makeFpreal16();
441 
442  /// Turns a compressed tile into a raw tile.
443  void uncompress();
444 
445  /// Turns a tile into a raw full tile.
446  void uncompressFull();
447 
448  /// Like uncompress() except it leaves the data uninitialized. Result
449  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
450  /// @note USE WITH CAUTION!
451  void makeRawUninitialized();
452 
453  /// Returns the raw full data of the tile.
454  T *rawFullData()
455  {
456  uncompressFull();
457  return (T *)myData;
458  }
459 
460  /// This only makes sense for simple compression. Use with
461  /// extreme care.
462  T *rawData()
463  { if (inlineConstant() && isConstant())
464  { return (T *) &myData; }
465  return (T *)myData; }
466  const T *rawData() const
467  { if (inlineConstant() && isConstant())
468  { return (const T *) &myData; }
469  return (const T *)myData; }
470 
471  /// Read the current resolution.
472  int xres() const { return myRes[0]; }
473  int yres() const { return myRes[1]; }
474  int zres() const { return myRes[2]; }
475 
476  int getRes(int dim) const { return myRes[dim]; }
477 
478 
479  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
480 
481  /// Returns the amount of memory used by this tile.
482  int64 getMemoryUsage(bool inclusive) const;
483 
484  /// Returns the amount of data used by the tile myData pointer.
485  exint getDataLength() const;
486 
487  /// A routine used by filtered evaluation to accumulate a partial
488  /// filtered sum in this tile.
489  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
490  /// weights - weight array
491  /// start - UT_VoxelArray coordinates at [0] in the weight array
492  void weightedSum(int pstart[3], int pend[3],
493  const float *weights[3], int start[3],
494  T &result);
495 
496  void avgNonZero(int pstart[3], int pend[3], int start[3],
497  T &result);
498 
499  /// Designed to be specialized according to T
500 
501  /// Update min & max to encompass T itself.
502  static void expandMinMax(T v, T &min, T &max)
503  {
504  UTvoxelTileExpandMinMax(v, min, max);
505  }
506 
507  /// Return the "distance" between a & b. This is used for
508  /// tolerance checks on equality comparisons.
509  static fpreal dist(T a, T b)
510  {
511  return UTvoxelTileDist(a, b);
512  }
513 
515 
516  // Returns the index of the bound compression engine.
517  static int lookupCompressionEngine(const char *name);
518  // Given an index, gets the compression engine.
519  static UT_VoxelTileCompress<T> *getCompressionEngine(int index);
520 
521  /// Saves this tile's data, in compressed form.
522  /// May save in uncompressed form if the compression type does
523  /// not support saving.
524  void save(std::ostream &os) const;
525  bool save(UT_JSONWriter &w) const;
526 
527  /// Loads tile data. Uses the compression index to map the saved
528  /// compression types into the correct loading compression types.
529  void load(UT_IStream &is, const UT_IntArray &compression);
530  bool load(UT_JSONParser &p, const UT_IntArray &compression);
531 
532  /// Stores a list of compression engines to os.
533  static void saveCompressionTypes(std::ostream &os);
534  static bool saveCompressionTypes(UT_JSONWriter &w);
535 
536  /// Builds a translation table from the given stream's compression types
537  /// into our own valid compression types.
538  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
539  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
540 
541 protected:
542  // Attempts to set the value to the native compressed format
543  // Some compression types allow some values to be written
544  // without decompression. Eg, you can write to a constant tile
545  // the tile's own value without decompression.
546  // If this returns true, t has been written.
547  bool writeThrough(int x, int y, int z, T t);
548 
549  /// Sets the local res of the tile. Does *not* resize the allocated
550  /// memory.
551  void setRes(int xr, int yr, int zr)
552  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
553 
554  static bool inlineConstant()
555  {
556  return (sizeof(T) <= sizeof(T*));
557  }
558 
560  { if (inlineConstant()) { return *((const T *)&myData); }
561  return *((const T*)myData); }
563  { if (inlineConstant()) { return ((T *)&myData); }
564  return ((T*)myData); }
565 
566  void setForeignData(void *data, int8 compress_type)
567  {
568  freeData();
569  myCompressionType = compress_type;
570 
571  if (isConstant() && inlineConstant())
572  {
573  makeConstant(*(T *)data);
574  }
575  else
576  {
577  myData = data;
578  myForeignData = true;
579  }
580  }
581 
582 public:
583  /// Frees myData and sets it to zero. This is a bit tricky
584  /// as the constant tiles may be inlined.
585  /// This is only public for the compression engines.
586  void freeData()
587  {
588  if (inlineConstant() && isConstant())
589  {
590  // Do nothing!
591  }
592  else if (myData && !myForeignData)
593  {
594  UT_VOXEL_FREE(myData);
595  }
596  myData = 0;
597  myForeignData = false;
598  }
599 
600 public:
601  // This is only public so the compression engines can get to it.
602  // It is blind data, do not alter!
603  void *myData;
604 private:
605 
606  /// Resolutions.
607  int8 myRes[3];
608 
609  /// Am I a constant tile?
610  int8 myCompressionType;
611 
612  int8 myForeignData;
613 
614  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
615  {
616  return UTvoxelTileGetCompressionEngines((T *) 0);
617  }
618 
619  friend class UT_VoxelTileCompress<T>;
620  friend class UT_VoxelArray<T>;
621  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
622  friend class UT_VoxelProbe;
623 };
624 
625 ///
626 /// UT_VoxelArray
627 ///
628 /// This provides a data structure to hold a three-dimensional array
629 /// of data. The data should be some simple arithmetic type, such
630 /// as uint8, fpreal16, or UT_Vector3.
631 ///
632 /// Some operations, such as gradients, may make less sense with uint8.
633 ///
634 template <typename T>
635 class UT_VoxelArray
636 {
637 public:
638  using ScalarType = T;
639 
640  UT_VoxelArray();
641  ~UT_VoxelArray();
642 
643  /// Copy constructor:
644  UT_VoxelArray(const UT_VoxelArray<T> &src);
645 
646  /// Assignment operator:
647  const UT_VoxelArray<T> &operator=(const UT_VoxelArray<T> &src);
648 
649  /// This sets the voxelarray to have the given resolution. If resolution is
650  /// changed, all elements will be set to 0. If resolution is already equal
651  /// to the arguments, all elements will be set to 0 only if reset is true;
652  /// otherwise, the voxel array will be left untouched.
653  void size(int xres, int yres, int zres, bool reset = true);
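/// A minimal usage sketch (illustrative only; 'vol' is a hypothetical
/// array of fpreal32 voxels):
/// @code
/// UT_VoxelArray<fpreal32> vol;
/// vol.size(64, 64, 64);            // allocate & zero a 64^3 grid
/// vol.setValue(10, 20, 30, 1.0f);  // raw indexed write
/// fpreal32 a = vol(10, 20, 30);    // raw indexed read, no bounds check
/// fpreal32 b = vol(UT_Vector3F(0.5f, 0.5f, 0.5f)); // trilinear sample in [0..1]^3
/// @endcode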
654 
655  /// This will ensure this voxel array matches the given voxel array
656  /// in terms of dimensions & border conditions. It may invoke
657  /// a size() and hence reset the field to 0.
658  void match(const UT_VoxelArray<T> &src);
659 
660  template <typename S>
661  bool isMatching(const UT_VoxelArray<S> &src) const
662  {
663  return src.getXRes() == getXRes() &&
664  src.getYRes() == getYRes() &&
665  src.getZRes() == getZRes();
666  }
667 
668  int getXRes() const { return myRes[0]; }
669  int getYRes() const { return myRes[1]; }
670  int getZRes() const { return myRes[2]; }
671  int getRes(int axis) const { return myRes[axis]; }
672 
673  UT_Vector3I getVoxelRes() const
674  {
675  return UT_Vector3I(myRes[0], myRes[1], myRes[2]);
676 
677  }
678 
679  /// Return the amount of memory used by this array.
680  int64 getMemoryUsage(bool inclusive) const;
681 
682  /// Sets this voxel array to the given constant value. All tiles
683  /// are turned into constant tiles.
685  constant,
686  T, t)
687  void constantPartial(T t, const UT_JobInfo &info);
688 
689  /// If this voxel array is all constant tiles, returns true.
690  /// The optional pointer is initialized to the constant value iff
691  /// the array is constant. (Note by constant we mean made of constant
692  /// tiles of the same value - if some tiles are uncompressed but
693  /// constant, it will still return false)
694  bool isConstant(T *cval = 0) const;
695 
696  /// Returns true if any element of the voxel array is NAN
697  bool hasNan() const;
698 
699  /// This convenience function lets you sample the voxel array.
700  /// pos is in the range [0..1]^3.
701  /// The value is trilinearly interpolated. Edges are determined by the border
702  /// mode.
703  /// The cells are sampled at the center of the voxels.
704  T operator()(UT_Vector3D pos) const;
705  T operator()(UT_Vector3F pos) const;
706 
707  /// This convenience function lets you sample the voxel array.
708  /// pos is in the range [0..1]^3.
709  /// The min/max is the range of the sampled values.
710  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
711  UT_Vector3F pos) const;
712 
713  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
714  /// Allows out of range evaluation
715  SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const;
716  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
717  /// Allows out of range evaluation
718  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
719  float fx, float fy, float fz) const;
720  template <int AXIS2D>
721  SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const;
722  template <int AXIS2D>
723  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
724  float fx, float fy, float fz) const;
725 
726  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
727  /// Allows out of range evaluation. Also computes min/max of
728  /// interpolated samples.
729  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
730  UT_Vector3F pos) const;
731  template <int AXIS2D>
732  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
733  UT_Vector3F pos) const;
734  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
735  /// Allows out of range evaluation. Also computes min/max of
736  /// interpolated samples.
737  SYS_FORCE_INLINE void lerpVoxelMinMax(
738  T &lerp, T &lmin, T &lmax,
739  int x, int y, int z,
740  float fx, float fy, float fz) const;
741  template <int AXIS2D>
742  SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(
743  T &lerp, T &lmin, T &lmax,
744  int x, int y, int z,
745  float fx, float fy, float fz) const;
746 
747  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
748  /// array should have 8 elements, x minor, z major.
749  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
750  T *sample) const;
751  template <int AXIS2D>
752  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
753  T *sample) const;
754 
755  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
756  /// the center into 7 voxels.
757  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
758  T *sample) const;
759 #if 0
760  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
761  /// z major, xminor.
762  /// Previous implementation had an error and this method isn't used in
763  /// Houdini code.
764  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
765  T *sample) const;
766 #endif
767 
768  /// Lerps the given sample using trilinear interpolation
769  SYS_FORCE_INLINE T lerpSample(T *samples,
770  float fx, float fy, float fz) const;
771  template <int AXIS2D>
772  SYS_FORCE_INLINE T lerpSampleAxis(T *samples,
773  float fx, float fy, float fz) const;
774 
775  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
776  float &fx, float &fy, float &fz) const
777  {
778  // Determine integer & fractional components.
779  fx = pos.x();
780  SYSfastSplitFloat(fx, x);
781  fy = pos.y();
782  SYSfastSplitFloat(fy, y);
783  fz = pos.z();
784  SYSfastSplitFloat(fz, z);
785  }
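/// A sketch of combining the split with lerpVoxel() (illustrative only;
/// 'vol' as in the earlier sketch; any cell-centre half-voxel offset
/// handled by the lerpVoxelCoord() convenience is ignored here):
/// @code
/// int x, y, z;
/// float fx, fy, fz;
/// vol.splitVoxelCoord(UT_Vector3F(10.25f, 3.5f, 7.0f), x, y, z, fx, fy, fz);
/// fpreal32 v = vol.lerpVoxel(x, y, z, fx, fy, fz);
/// @endcode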
786  template <int AXIS2D>
787  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
788  float &fx, float &fy, float &fz) const
789  {
790  // Determine integer & fractional components.
791  if (AXIS2D != 0)
792  {
793  fx = pos.x();
794  SYSfastSplitFloat(fx, x);
795  }
796  else
797  {
798  fx = 0.0;
799  x = 0;
800  }
801  if (AXIS2D != 1)
802  {
803  fy = pos.y();
804  SYSfastSplitFloat(fy, y);
805  }
806  else
807  {
808  fy = 0.0;
809  y = 0;
810  }
811  if (AXIS2D != 2)
812  {
813  fz = pos.z();
814  SYSfastSplitFloat(fz, z);
815  }
816  else
817  {
818  fz = 0.0;
819  z = 0;
820  }
821  }
822 #if 0
823  T operator()(v4uf pos) const;
824 #endif
825 
826  /// Filtered evaluation of the voxel array. This operation should
827  /// exhibit the same behavior as IMG3D_Channel::evaluate.
828  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
829  fpreal radius, int clampaxis = -1) const;
830 
831  /// average of non-zero values of the voxel array.
832  T avgNonZero(const UT_Vector3 &pos, const UT_Filter &filter,
833  fpreal radius, int clampaxis = -1) const;
834 
835  /// Fills this by resampling the given voxel array.
836  void resample(const UT_VoxelArray<T> &src,
837  UT_FilterType filtertype = UT_FILTER_POINT,
838  float filterwidthscale = 1.0f,
839  int clampaxis = -1);
840 
841 
842  /// Calls [](UT_VoxelTileIterator<T> &vit) -> void
843  /// in parallel for each tile.
844  template <typename OP>
845  void forEachTile(const OP &op, bool shouldthread = true);
846 
847  /// Calls [](UT_VoxelTileIterator<T> &vit) -> void
848  /// in parallel for each tile. Since TileIterators don't understand
849  /// const correctness, it is important you do not use setValue
850  /// in the op.
851  template <typename OP>
852  void forEachTileConst(const OP &op, bool shouldthread = true) const
853  {
854  SYSconst_cast(this)->forEachTile(op, shouldthread);
855  }
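/// A sketch of a per-tile traversal (illustrative only; it assumes the
/// tile iterator exposes the same rewind()/atEnd()/advance()/getValue()
/// interface as the array iterator declared later in this file, and it
/// runs serially so the shared accumulator is safe):
/// @code
/// fpreal32 total = 0;
/// vol.forEachTile([&](UT_VoxelTileIterator<fpreal32> &vit)
/// {
///     for (vit.rewind(); !vit.atEnd(); vit.advance())
///         total += vit.getValue();
/// }, /*shouldthread*/ false);
/// @endcode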
856 
857  /// Flattens this into an array. Z major, then Y, then X.
858  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
859  THREADED_METHOD3_CONST(UT_VoxelArray<T>, numTiles() > 16,
860  flatten,
861  T *, flatarray,
862  exint, ystride,
863  exint, zstride)
864  void flattenPartial(T *flatarray, exint ystride, exint zstride,
865  const UT_JobInfo &info) const;
866 
867  /// Flattens this into an array. Z major, then Y, then X.
868  /// Flattens a 2d slice where AXIS2D is constant.
869  /// If AXIS2D == 2 (ie, z): flatarray[x + y * ystride] = getValue(x, y, 0);
870  /// Flattens by destination x-major stripes to avoid page collisions
871  /// on freshly allocated memory buffers.
872  template <int AXIS2D>
873  void flattenPartialAxis(T *flatarray, exint ystride,
874  const UT_JobInfo &info) const;
875 
876  /// Flattens this into an array suitable for a GL 8bit texture.
877  /// Z major, then Y, then X.
878  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
879  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
880  flattenGLFixed8,
881  uint8 *, flatarray,
882  exint, ystride,
883  exint, zstride,
884  T , dummy)
885  void flattenGLFixed8Partial(uint8 *flatarray,
886  exint ystride, exint zstride,
887  T dummy,
888  const UT_JobInfo &info) const;
889 
890  /// Flattens this into an array suitable for a GL 16bit FP texture.
891  /// Z major, then Y, then X.
892  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
893  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
894  flattenGL16F,
895  UT_Vector4H *, flatarray,
896  exint, ystride,
897  exint, zstride,
898  T , dummy)
899  void flattenGL16FPartial(UT_Vector4H *flatarray,
900  exint ystride, exint zstride,
901  T dummy,
902  const UT_JobInfo &info) const;
903 
904  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
905  /// this also works around an older Nvidia driver bug that caused very small
906  /// valued texels (<1e-9) to appear as huge random values in the texture.
907  /// Z major, then Y, then X.
908  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
909  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
910  flattenGL32F,
911  UT_Vector4F *, flatarray,
912  exint, ystride,
913  exint, zstride,
914  T , dummy)
915  void flattenGL32FPartial(UT_Vector4F *flatarray,
916  exint ystride, exint zstride,
917  T dummy,
918  const UT_JobInfo &info) const;
919 
920  /// Fills this from a flattened array. Z major, then Y, then X.
921  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride]);
922  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
923  extractFromFlattened,
924  const T *, flatarray,
925  exint, ystride,
926  exint, zstride)
927  void extractFromFlattenedPartial(const T *flatarray,
928  exint ystride, exint zstride,
929  const UT_JobInfo &info);
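/// A sketch of flattening into a dense buffer and back (illustrative
/// only; 'vol' as above, with std::vector used just as a staging buffer):
/// @code
/// std::vector<fpreal32> buf(vol.numVoxels());
/// exint ystride = vol.getXRes();
/// exint zstride = ystride * vol.getYRes();
/// vol.flatten(buf.data(), ystride, zstride);
/// vol.extractFromFlattened(buf.data(), ystride, zstride);
/// @endcode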
930 
931  /// Copies into this voxel array from the source array.
932  /// Conceptually,
933  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz));
934  void copyWithOffset(const UT_VoxelArray<T> &src,
935  int offx, int offy, int offz);
936  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
937  copyWithOffsetInternal,
938  const UT_VoxelArray<T> &, src,
939  int, offx,
940  int, offy,
941  int, offz)
942  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
943  int offx, int offy, int offz,
944  const UT_JobInfo &info);
945 
946  /// Moves data from the source voxel array into this array. The offsets should
947  /// be in terms of tiles. Source may be modified as this array steals its data
948  /// buffers in such a way that no dynamic memory will leak when these arrays
949  /// are freed.
950  /// Conceptually, this function performs the same operation as copyWithOffset,
951  /// but with offsets specified in terms of tiles:
952  /// this->setValue(x, y, z, src.getValue(x+off_v_x, y+off_v_y, z+off_v_z));
953  /// where off_v_A=tileoffA*TILESIZE for A in {x, y, z}.
954  void moveTilesWithOffset(UT_VoxelArray<T> &src, int tileoffx, int tileoffy,
955  int tileoffz);
956 
957  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
958  /// in T. Data order is in tile-order. So, sorted by tilelist, then
959  /// z, y, x within that tile.
960  /// The ix/iy/iz variant allows partial tiles. If the number of
961  /// voxels to write to a tile matches the tile size, however, the
962  /// ix/iy/iz is ignored and the tile is written in canonical order.
963  template <typename S>
964  S *extractTiles(S *dstdata, int stride,
965  const UT_IntArray &tilelist) const;
966  template <typename S, typename IDX>
967  S *extractTiles(S *dstdata, int stride,
968  const IDX *ix, const IDX *iy, const IDX *iz,
969  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const;
970 
971  /// Fills dstdata with the voxel data of the slice with the coordinate at
972  /// component SLICE_AXIS fixed at slice. Returns nullptr if slice is outside
973  /// the domain.
974  /// If half_slice is true, the extracted values lie halfway between slice
975  /// and slice+1.
976  template <int SLICE_AXIS, typename S>
977  S *extractSlice(S *dstdata, int slice, bool half_slice) const;
978 
979  /// Overwrites our tiles with the given data. Does checking
980  /// for constant tiles. Input srcdata stream should match
981  /// that of extractTiles.
982  template <typename S>
983  const S *writeTiles(const S *srcdata, int srcstride,
984  const UT_IntArray &tilelist);
985  template <typename S, typename IDX>
986  const S *writeTiles(const S *srcdata, int srcstride,
987  const IDX *ix, const IDX *iy, const IDX *iz,
988  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist);
989 
990  /// Converts a 3d position in range [0..1]^3 into the closest
991  /// index value.
992  /// Returns false if the resulting index was out of range. The index
993  /// will still be set.
994  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
995  bool posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const;
996  /// Converts a 3d position in [0..1]^3 into the equivalent in
997  /// the integer cell space. Does not clamp to the closest value.
998  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
999  bool posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const;
1000  /// Converts an index into a position.
1001  /// Returns false if the source index was out of range, in which case
1002  /// pos will be outside [0..1]^3
1003  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
1004  bool indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const;
1005  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
1006  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
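/// For example (sketch; 'vol' as above at 64^3 resolution), the centre of
/// the unit cube maps to voxel (32, 32, 32), and converting back gives the
/// centre of that voxel:
/// @code
/// int ix, iy, iz;
/// vol.posToIndex(UT_Vector3(0.5f, 0.5f, 0.5f), ix, iy, iz);
/// UT_Vector3F p;
/// vol.indexToPos(ix, iy, iz, p);
/// @endcode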
1007 
1008  /// Clamps the given x, y, and z values to lie inside the valid index
1009  /// range.
1010  void clampIndex(int &x, int &y, int &z) const
1011  {
1012  x = SYSclamp(x, 0, myRes[0]-1);
1013  y = SYSclamp(y, 0, myRes[1]-1);
1014  z = SYSclamp(z, 0, myRes[2]-1);
1015  }
1016 
1017  /// Returns true if the given x, y, z values lie inside the valid index range.
1018  bool isValidIndex(int x, int y, int z) const
1019  {
1020  return !((x | y | z) < 0) &&
1021  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
1022  }
1023 
1024  /// This allows you to read & write the raw data.
1025  /// Out of bound reads are illegal.
1026  T operator()(UT_Vector3I index) const
1027  {
1028  return (*this)(index[0], index[1], index[2]);
1029  }
1030  T operator()(int x, int y, int z) const
1031  {
1032  UT_ASSERT_P(isValidIndex(x, y, z));
1033  return (*getTile(x >> TILEBITS,
1034  y >> TILEBITS,
1035  z >> TILEBITS))
1036  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
1037  }
1038 
1039  void setValue(UT_Vector3I index, T value)
1040  {
1041  setValue(index[0], index[1], index[2], value);
1042  }
1043 
1044  void setValue(int x, int y, int z, T t)
1045  {
1046  UT_ASSERT_P(isValidIndex(x, y, z));
1047  getTile(x >> TILEBITS,
1048  y >> TILEBITS,
1049  z >> TILEBITS)->setValue(
1050  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
1051  }
1052 
1053  /// Mirrors the coordinate for the given resolution. This is effectively
1054  /// like using one reflection then repeating that with twice the resolution.
1055  static inline int mirrorCoordinates(int x, int res)
1056  {
1057  int res2 = res * 2;
1058  int y = x % res2;
1059  if (y < 0)
1060  y += res2;
1061  if (y >= res)
1062  y = res2 - y - 1;
1063  return y;
1064  }
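/// For example, with res = 10: mirrorCoordinates(-1, 10) == 0,
/// mirrorCoordinates(12, 10) == 7, and mirrorCoordinates(25, 10) == 5.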
1065 
1066  /// This will clamp the bounds to fit within the voxel array,
1067  /// using the border type to resolve out of range values.
1068  T getValue(int x, int y, int z) const
1069  {
1070  // First handle the most common case.
1071  if (isValidIndex(x, y, z))
1072  return (*this)(x, y, z);
1073 
1074  // Verify our voxel array is non-empty.
1075  if (!myTiles)
1076  return myBorderValue;
1077 
1078  // We now know we are out of range, adjust appropriately
1079  switch (myBorderType)
1080  {
1081  case UT_VOXELBORDER_CONSTANT:
1082  return myBorderValue;
1083 
1084  case UT_VOXELBORDER_REPEAT:
1085  if (x < 0 || x >= myRes[0])
1086  {
1087  x %= myRes[0];
1088  if (x < 0)
1089  x += myRes[0];
1090  }
1091  if (y < 0 || y >= myRes[1])
1092  {
1093  y %= myRes[1];
1094  if (y < 0)
1095  y += myRes[1];
1096  }
1097  if (z < 0 || z >= myRes[2])
1098  {
1099  z %= myRes[2];
1100  if (z < 0)
1101  z += myRes[2];
1102  }
1103  break;
1104 
1105  case UT_VOXELBORDER_MIRROR:
1106  if (x < 0 || x >= myRes[0])
1107  x = mirrorCoordinates(x, myRes[0]);
1108  if (y < 0 || y >= myRes[1])
1109  y = mirrorCoordinates(y, myRes[1]);
1110  if (z < 0 || z >= myRes[2])
1111  z = mirrorCoordinates(z, myRes[2]);
1112 
1113  case UT_VOXELBORDER_STREAK:
1114  clampIndex(x, y, z);
1115  break;
1116  case UT_VOXELBORDER_EXTRAP:
1117  {
1118  int cx, cy, cz;
1119  T result;
1120 
1121  cx = x; cy = y; cz = z;
1122  clampIndex(cx, cy, cz);
1123 
1124  result = (*this)(cx, cy, cz);
1125  result += (x - cx) * myBorderScale[0] +
1126  (y - cy) * myBorderScale[1] +
1127  (z - cz) * myBorderScale[2];
1128  return result;
1129  }
1130  }
1131 
1132  // It is now within bounds, do normal fetch.
1133  return (*this)(x, y, z);
1134  }
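/// For example, with a constant border the out-of-range read
/// getValue(-1, 0, 0) simply returns myBorderValue, while with
/// UT_VOXELBORDER_STREAK it returns the clamped in-range value (*this)(0, 0, 0).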
1135 
1136  /// Gets values in the box [bbox.minvec(), bbox.maxvec())
1137  /// Values are stored in the array `values` of size `size` that has to be at least `bbox.volume()`
1138  /// The order of values is given by: `i + bbox.xsize() * (j + bbox.ysize() * k)`
1139  ///
1140  /// If returns true, values in `bbox` are constant and only values[0] is guaranteed to be assigned.
1141  bool getValues(const UT_BoundingBoxI &bbox,
1142  T * values,
1143  const exint size) const
1144  {
1145  UT_ASSERT_P(bbox.volume() <= size);
1146 
1147  const UT_BoundingBoxI bounds = {0, 0, 0, getXRes(), getYRes(), getZRes()};
1148 
1149  const UT_BoundingBoxI tiles =
1150  {bbox.xmin() >> TILEBITS,
1151  bbox.ymin() >> TILEBITS,
1152  bbox.zmin() >> TILEBITS,
1153  ((bbox.xmax() - 1) >> TILEBITS) + 1,
1154  ((bbox.ymax() - 1) >> TILEBITS) + 1,
1155  ((bbox.zmax() - 1) >> TILEBITS) + 1};
1156 
1157  bool allconstant = true;
1158 
1159  UT_BoundingBoxI tilesamples;
1160 
1161  for (int kt = tiles.zmin(); kt < tiles.zmax(); kt++)
1162  {
1163  // zmin & zmax
1164  tilesamples.vals[2][0] = TILESIZE * kt;
1165  tilesamples.vals[2][1] = TILESIZE * (kt + 1);
1166  // clip bounds
1167  if (kt == tiles.zmin())
1168  tilesamples.vals[2][0] = bbox.zmin();
1169  if (kt == tiles.zmax() - 1)
1170  tilesamples.vals[2][1] = bbox.zmax();
1171 
1172  for (int jt = tiles.ymin(); jt < tiles.ymax(); jt++)
1173  {
1174  // ymin & ymax
1175  tilesamples.vals[1][0] = TILESIZE * jt;
1176  tilesamples.vals[1][1] = TILESIZE * (jt + 1);
1177  // clip bounds
1178  if (jt == tiles.ymin())
1179  tilesamples.vals[1][0] = bbox.ymin();
1180  if (jt == tiles.ymax() - 1)
1181  tilesamples.vals[1][1] = bbox.ymax();
1182 
1183  for (int it = tiles.xmin(); it < tiles.xmax(); it++)
1184  {
1185  // xmin & xmax
1186  tilesamples.vals[0][0] = TILESIZE * it;
1187  tilesamples.vals[0][1] = TILESIZE * (it + 1);
1188  // clip bounds
1189  if (it == tiles.xmin())
1190  tilesamples.vals[0][0] = bbox.xmin();
1191  if (it == tiles.xmax() - 1)
1192  tilesamples.vals[0][1] = bbox.xmax();
1193 
1194  const bool inbounds = tilesamples.isInside(bounds);
1195 
1196  if (inbounds)
1197  {
1198  const UT_VoxelTile<T> *tile = getTile(it, jt, kt);
1199 
1200  for (int k = tilesamples.zmin();
1201  k < tilesamples.zmax(); k++)
1202  {
1203  for (int j = tilesamples.ymin();
1204  j < tilesamples.ymax(); j++)
1205  {
1206  for (int i = tilesamples.xmin();
1207  i < tilesamples.xmax(); i++)
1208  {
1209  const UT_Vector3I localindex = {
1210  i - bbox.xmin(),
1211  j - bbox.ymin(),
1212  k - bbox.zmin()};
1213 
1214  const int locallinindex
1215  = localindex.x()
1216  + bbox.xsize() * (localindex.y()
1217  + bbox.ysize() * localindex.z());
1218 
1219  values[locallinindex] = (*tile)(
1220  i & TILEMASK,
1221  j & TILEMASK,
1222  k & TILEMASK);
1223 
1224  if (allconstant
1225  && (values[0] != values[locallinindex]))
1226  {
1227  allconstant = false;
1228  }
1229  }
1230  }
1231  }
1232  }
1233  else
1234  {
1235  for (int k = tilesamples.zmin(); k < tilesamples.zmax(); k++)
1236  {
1237  for (int j = tilesamples.ymin();
1238  j < tilesamples.ymax(); j++)
1239  {
1240  for (int i = tilesamples.xmin();
1241  i < tilesamples.xmax(); i++)
1242  {
1243  const UT_Vector3I localindex = {
1244  i - bbox.xmin(),
1245  j - bbox.ymin(),
1246  k - bbox.zmin()};
1247 
1248  const int locallinindex
1249  = localindex.x()
1250  + bbox.xsize() * (localindex.y()
1251  + bbox.ysize() * localindex.z());
1252 
1253  values[locallinindex] = getValue(i, j, k);
1254 
1255  if (allconstant
1256  && (values[0] != values[locallinindex]))
1257  {
1258  allconstant = false;
1259  }
1260  }
1261  }
1262  }
1263 
1264  }
1265  }
1266  }
1267  }
1268 
1269  return allconstant;
1270  }
1271 
1272  void setBorder(UT_VoxelBorderType type, T t);
1273  void setBorderScale(T scalex, T scaley, T scalez);
1274  UT_VoxelBorderType getBorder() const { return myBorderType; }
1275  T getBorderValue() const { return myBorderValue; }
1276  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1277 
1278  /// This tries to compress or collapse each tile. This can
1279  /// be expensive (ie, converting a tile to constant), so
1280  /// should be saved until modifications are complete.
1281  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1282  collapseAllTiles)
1283  void collapseAllTilesPartial(const UT_JobInfo &info);
1284 
1285  /// Uncompresses all tiles into non-constant tiles. Useful
1286  /// if you have a multithreaded algorithm that may need to
1287  /// both read and write, if you write to a collapsed tile
1288  /// while someone else reads from it, bad stuff happens.
1289  /// Instead, you can expandAllTiles. This may have serious
1290  /// consequences in memory use, however.
1291  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1292  expandAllTiles)
1293  void expandAllTilesPartial(const UT_JobInfo &info);
1294 
1295  /// Uncompresses all tiles, but leaves constant tiles alone.
1296  /// Useful for cleaning out any non-standard compression algorithm
1297  /// that some external program can't handle.
1298  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1299  expandAllNonConstTiles)
1300  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1301 
1302  /// The direct tile access methods are to make TBF writing a bit
1303  /// more efficient.
1304  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1305  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1306  UT_VoxelTile<T> *getLinearTile(int idx) const
1307  { return &myTiles[idx]; }
1308  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1309  {
1310  x = idx % myTileRes[0];
1311  idx -= x;
1312  idx /= myTileRes[0];
1313  y = idx % myTileRes[1];
1314  idx -= y;
1315  idx /= myTileRes[1];
1316  z = idx;
1317  }
1318  UT_Vector3I linearTileToXYZ(int idx) const
1319  {
1320  UT_Vector3I tileindex;
1321  tileindex[0] = idx % myTileRes[0];
1322  idx -= tileindex[0];
1323  idx /= myTileRes[0];
1324  tileindex[1] = idx % myTileRes[1];
1325  idx -= tileindex[1];
1326  idx /= myTileRes[1];
1327  tileindex[2] = idx;
1328 
1329  return tileindex;
1330  }
1331 
1332  int xyzTileToLinear(int x, int y, int z) const
1333  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1334 
1335  int indexToLinearTile(int x, int y, int z) const
1336  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1337 
1338  /// idxth tile represents the voxels indexed [start,end).
1339  void getTileVoxels(int idx,
1340  UT_Vector3I &start, UT_Vector3I &end) const
1341  {
1342  int x, y, z;
1343  linearTileToXYZ(idx, x, y, z);
1344 
1345  start.x() = x * TILESIZE;
1346  start.y() = y * TILESIZE;
1347  start.z() = z * TILESIZE;
1348  end = start;
1349  end.x() += myTiles[idx].xres();
1350  end.y() += myTiles[idx].yres();
1351  end.z() += myTiles[idx].zres();
1352  }
1353 
1354  UT_BoundingBoxI getTileBBox(int idx) const
1355  {
1356  UT_Vector3I start, end;
1357  getTileVoxels(idx, start, end);
1358  return UT_BoundingBoxI(start, end);
1359  }
1360 
1361  /// Number of tiles along that axis. Not to be confused with
1362  /// the resolution of the individual tiles.
1363  int getTileRes(int dim) const { return myTileRes[dim]; }
1364  int numTiles() const
1365  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1366  exint numVoxels() const
1367  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1368 
1370  { myCompressionOptions = options; }
1372  { return myCompressionOptions; }
1373 
1375  { myCompressionOptions.myConstantTol = tol; }
1377  { return myCompressionOptions.myConstantTol; }
1378 
1379  /// Saves only the data of this array to the given stream.
1380  /// To reload it you will have to have a matching array in tiles
1381  /// dimensions and size.
1382  void saveData(std::ostream &os) const;
1383  bool saveData(UT_JSONWriter &w,
1384  const char *shared_mem_owner = 0) const;
1385 
1386  /// Load an array, requires you have already size()d this array.
1387  void loadData(UT_IStream &is);
1388  bool loadData(UT_JSONParser &p);
1389 
1390  /// Copy only the data from the source array.
1391  /// Note that it is an error to call this unless isMatching(src).
1393  copyData,
1394  const UT_VoxelArray<T> &, src)
1395 
1396  void copyDataPartial(const UT_VoxelArray<T> &src,
1397  const UT_JobInfo &info);
1398 
1399 private:
1401  resamplethread,
1402  const UT_VoxelArray<T> &, src,
1403  const UT_Filter *, filter,
1404  float, radius,
1405  int, clampaxis)
1406  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1407  const UT_Filter *filter,
1408  float radius,
1409  int clampaxis,
1410  const UT_JobInfo &info);
1411 
1412 
1413  void deleteVoxels();
1414 
1415  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1416  bool populateFromSharedMemory(const char *id);
1417 
1418 
1419  /// Number of elements in each dimension.
1420  int myRes[3];
1421 
1422  /// Inverse voxel res, 1/myRes
1423  UT_Vector3 myInvRes;
1424 
1425  /// Number of tiles in each dimension.
1426  int myTileRes[3];
1427 
1428  /// Compression tolerance for lossy compression.
1429  UT_VoxelCompressOptions myCompressionOptions;
1430 
1431  /// Double dereferenced so we can theoretically resize easily.
1432  UT_VoxelTile<T> *myTiles;
1433 
1434  /// Outside values get this if constant borders are used
1435  T myBorderValue;
1436  /// Per axis scale factors for when extrapolating.
1437  T myBorderScale[3];
1438  UT_VoxelBorderType myBorderType;
1439 
1440  /// For initializing the tiles from shared memory.
1441  SYS_SharedMemory *mySharedMem;
1442  SYS_SharedMemoryView *mySharedMemView;
1443 };
1444 
1445 
1446 ///
1447 /// UT_VoxelMipMap
1448 ///
1449 /// This provides a mip-map type structure for a voxel array.
1450 /// It manages the different levels of voxels arrays that are needed.
1451 /// You can create different types of mip maps: average, maximum, etc,
1452 /// which can allow different tricks.
1453 /// Each level is one half the previous level, rounded up.
1454 /// Out of bound voxels are ignored from the lower levels.
1455 ///
1456 template <typename T>
1457 class UT_VoxelMipMap
1458 {
1459 public:
1460  /// The different types of functions that can be used for
1461  /// constructing a mip map.
1462  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1463 
1464  UT_VoxelMipMap();
1465  ~UT_VoxelMipMap();
1466 
1467  /// Copy constructor.
1468  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1469 
1470  /// Assignment operator:
1471  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1472 
1473  /// Builds from a given voxel array. The ownership flag determines
1474  /// if we gain ownership of the voxel array and should delete it.
1475  /// In any case, the new levels are owned by us.
1476  void build(UT_VoxelArray<T> *baselevel,
1477  mipmaptype function);
1478 
1479  /// Same as above but construct mipmaps simultaneously for more than
1480  /// one function. The order of the functions will correspond to the
1481  /// order of the data values passed to the traversal callback.
1482  void build(UT_VoxelArray<T> *baselevel,
1483  const UT_Array<mipmaptype> &functions);
1484 
1485  /// This does a top down traversal of the implicit octree defined
1486  /// by the voxel array. Returning false will abort that
1487  /// branch of the octree.
1488  /// The bounding box given is in cell space and is an exclusive
1489  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1490  /// Note that each bounding box will not be square, unless you
1491  /// have the good fortune of starting with a power of 2 cube.
1492  /// The boolean goes true when the callback is invoked on a
1493  /// base level.
1494  typedef bool (*Callback)(const T *funcs,
1495  const UT_BoundingBox &box,
1496  bool baselevel, void *data);
1497  void traverseTopDown(Callback function,
1498  void *data) const;
1499 
1500  /// Top down traversal on op. OP is invoked with
1501  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1502  ///
1503  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1504  /// level 0 means the base level.
1505  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1506  /// gives the index to extract the value from that level.
1507  template <typename OP>
1508  void traverseTopDown(OP&op) const;
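/// A sketch of a functor for the OP variant above (illustrative only;
/// CountLeaves is hypothetical and assumes the same return-false-to-prune
/// convention as the Callback variant):
/// @code
/// struct CountLeaves
/// {
///     exint myLeaves = 0;
///     bool operator()(const UT_BoundingBoxI &indexbox, int level)
///     {
///         if (level == 0)      // reached the base level
///         {
///             myLeaves++;
///             return false;    // stop descending this branch
///         }
///         return true;         // keep recursing
///     }
/// };
/// // CountLeaves op; mipmap.traverseTopDown(op);
/// @endcode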
1509 
1510 
1511  /// Top down traversal, but which quad tree is visited first
1512  /// is controlled by
1513  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1514  /// Lower values are visited first.
1515  template <typename OP>
1516  void traverseTopDownSorted(OP&op) const;
1517 
1518 
1519  /// Return the amount of memory used by this mipmap.
1520  int64 getMemoryUsage(bool inclusive) const;
1521 
1522  int numLevels() const { return myNumLevels+1; }
1523 
1524  /// level 0 is the original grid, each level higher is a power
1525  /// of two smaller.
1526  const UT_VoxelArray<T> *level(int level, int function) const
1527  {
1528  if (level == 0)
1529  return myBaseLevel;
1530 
1531  return myLevels(function)[numLevels() - 1 - level];
1532  }
1533 
1534 private:
1535  void doTraverse(int x, int y, int z, int level,
1536  Callback function,
1537  void *data) const;
1538 
1539  /// Note: This variant of doTraverse has the opposite sense of level!
1540  template <typename OP>
1541  void doTraverse(int x, int y, int z, int level,
1542  OP &op) const;
1543  template <typename OP>
1544  void doTraverseSorted(int x, int y, int z, int level,
1545  OP &op) const;
1546 
1547  void initializePrivate();
1548  void destroyPrivate();
1549 
1550  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1551  downsample,
1552  UT_VoxelArray<T> &, dst,
1553  const UT_VoxelArray<T> &, src,
1554  mipmaptype, function)
1555  void downsamplePartial(UT_VoxelArray<T> &dst,
1556  const UT_VoxelArray<T> &src,
1557  mipmaptype function,
1558  const UT_JobInfo &info);
1559 
1560 protected:
1561  T mixValues(T t1, T t2, mipmaptype function) const
1562  {
1563  switch (function)
1564  {
1565  case MIPMAP_MAXIMUM:
1566  return SYSmax(t1, t2);
1567 
1568  case MIPMAP_AVERAGE:
1569  return (t1 + t2) / 2;
1570 
1571  case MIPMAP_MINIMUM:
1572  return SYSmin(t1, t2);
1573  }
1574 
1575  return t1;
1576  }
1577 
1578 
1579  /// This stores the base most level that was provided
1580  /// externally.
1581  UT_VoxelArray<T> *myBaseLevel;
1582  /// If true, we will delete the base level when we are done.
1584 
1585  /// Tracks the number of levels which we used to represent
1586  /// this hierarchy.
1587  int myNumLevels;
1588  /// The array of VoxelArrays, one per level.
1589  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1590  /// as big in each dimension. However, every layer is clamped
1591  /// against the resolution of the base layer.
1592  /// We own all these layers.
1594 };
1595 
1596 
1597 /// Iterator for Voxel Arrays
1598 ///
1599 /// This class eliminates the need for having
1600 /// for (z = 0; z < zres; z++)
1601 /// ...
1602 /// for (x = 0; x < xres; x++)
1603 /// loops everywhere.
1604 ///
1605 /// Note that the order of iteration is undefined! (The actual order is
1606 /// to complete each tile in turn, thereby hopefully improving cache
1607 /// coherency)
1608 ///
1609 /// It is safe to write to the voxel array while this iterator is active.
1610 /// It is *not* safe to resize the voxel array (or destroy it)
1611 ///
1612 /// The iterator is similar in principle to an STL iterator, but somewhat
1613 /// simpler. The classic STL loop
1614 /// for ( it = begin(); it != end(); ++it )
1615 /// is done using
1616 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1617 ///
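/// A sketch of that pattern applied to a whole array (illustrative only;
/// 'vol' is a hypothetical UT_VoxelArray<fpreal32>):
/// @code
/// UT_VoxelArrayIterator<fpreal32> vit;
/// vit.setArray(&vol);
/// for (vit.rewind(); !vit.atEnd(); vit.advance())
///     vit.setValue(vit.getValue() * 2.0f);
/// @endcode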
1618 template <typename T>
1619 class UT_VoxelArrayIterator
1620 {
1621 public:
1626 
1627  void setArray(UT_VoxelArray<T> *vox)
1628  {
1629  myCurTile = -1;
1630  myHandle.resetHandle();
1631  myArray = vox;
1632  // Reset the range
1633  setPartialRange(0, 1);
1634  }
1635  void setConstArray(const UT_VoxelArray<T> *vox)
1636  {
1637  setArray((UT_VoxelArray<T> *) vox);
1638  }
1639 
1640  /// Iterates over the array pointed to by the handle. Only
1641  /// supports read access during the iteration as it does
1642  /// a read lock.
1643  void setConstArray(UT_COWReadHandle<UT_VoxelArray<T> > handle)
1644  {
1645  myHandle = handle;
1646  // Ideally we'd have a separate const iterator
1647  // from our non-const iterator so this would
1648  // only be exposed in the const version.
1649  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1650 
1651  // Reset our range.
1652  myCurTile = -1;
1653  setPartialRange(0, 1);
1654  }
1655 
1656 
1657  /// Restricts this iterator to only run over a subset
1658  /// of the tiles. The tiles will be divided into approximately
1659  /// numrange equal groups, this will be the idx'th.
1660  /// The resulting iterator may have zero tiles.
1661  void setPartialRange(int idx, int numranges);
1662 
1663  /// Ties this iterator to the given jobinfo so it will
1664  /// match the jobinfo's processing.
1665  void splitByTile(const UT_JobInfo &info);
1666 
1667  /// Sets this iterator to run over the tile specified by the referenced
1668  /// iterator.
1669  /// This assumes the underlying arrays are matching.
1670  template <typename S>
1671  void setTile(UT_VoxelArrayIterator<S> &vit,
1672  UT_VoxelArray<T> *array)
1673  {
1674  UT_ASSERT_P(vit.isStartOfTile());
1675  UT_ASSERT_P(getArray()->isMatching(*vit.getArray()));
1676  UT_ASSERT_P(!myJobInfo && !myUseTileList);
1677  myTileStart = vit.getLinearTileNum();
1678  myTileEnd = myTileStart+1;
1679  rewind();
1680  }
1681 
1683  {
1684  setTile(vit, vit.getArray());
1685  }
1686 
1687  /// Assigns an interrupt handler. This will be tested whenever
1688  /// it advances to a new tile. If it is interrupted, the iterator
1689  /// will jump forward to atEnd()
1690  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1691  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1692 
1693  /// Restricts this iterator to the tiles that intersect
1694  /// the given bounding box of voxel coordinates.
1695  /// Note that this will not be a precise restriction as
1696  /// each tile is either included or not.
1697  /// You should setPartialRange() after setting the bbox range
1698  /// The bounding box is on the [0..1]^3 range.
1699  void restrictToBBox(const UT_BoundingBox &bbox);
1700  /// The [xmin, xmax] are inclusive and measured in voxels.
1701  void restrictToBBox(int xmin, int xmax,
1702  int ymin, int ymax,
1703  int zmin, int zmax);
1704 
1705  /// Resets the iterator to point to the first voxel.
1706  void rewind();
1707 
1708  /// Returns true if we have iterated over all of the voxels.
1709  bool atEnd() const
1710  { return myCurTile < 0; }
1711 
1712  /// Advances the iterator to point to the next voxel.
1713  void advance()
1714  {
1715  // We try to advance each axis, rolling over to the next.
1716  // If we exhaust this tile, we call advanceTile.
1717  myPos[0]++;
1718  myTileLocalPos[0]++;
1719  if (myTileLocalPos[0] >= myTileSize[0])
1720  {
1721  // Wrapped in X.
1722  myPos[0] -= myTileLocalPos[0];
1723  myTileLocalPos[0] = 0;
1724 
1725  myPos[1]++;
1726  myTileLocalPos[1]++;
1727  if (myTileLocalPos[1] >= myTileSize[1])
1728  {
1729  // Wrapped in Y.
1730  myPos[1] -= myTileLocalPos[1];
1731  myTileLocalPos[1] = 0;
1732 
1733  myPos[2]++;
1734  myTileLocalPos[2]++;
1735  if (myTileLocalPos[2] >= myTileSize[2])
1736  {
1737  // Wrapped in Z! Finished this tile!
1738  advanceTile();
1739  }
1740  }
1741  }
1742  }
1743 
1744  /// Retrieve the current location of the iterator.
1745  int x() const { return myPos[0]; }
1746  int y() const { return myPos[1]; }
1747  int z() const { return myPos[2]; }
1748  int idx(int idx) const { return myPos[idx]; }
1749 
1750  /// Retrieves the value that we are currently pointing at.
1751  /// This is faster than an operator(x,y,z) as we already know
1752  /// our current tile and that bounds checking isn't needed.
1753  T getValue() const
1754  {
1755  UT_ASSERT_P(myCurTile >= 0);
1756 
1757  UT_VoxelTile<T> *tile;
1758 
1759  tile = myArray->getLinearTile(myCurTile);
1760  return (*tile)(myTileLocalPos[0],
1761  myTileLocalPos[1],
1762  myTileLocalPos[2]);
1763  }
1764 
1765  /// Sets the voxel we are currently pointing to the given value.
1766  void setValue(T t) const
1767  {
1768  UT_ASSERT_P(myCurTile >= 0);
1769 
1770  UT_VoxelTile<T> *tile;
1771 
1772  tile = myArray->getLinearTile(myCurTile);
1773 
1774  tile->setValue(myTileLocalPos[0],
1775  myTileLocalPos[1],
1776  myTileLocalPos[2], t);
1777  }
1778 
1779  /// Returns true if the tile we are currently in is a constant tile.
1780  bool isTileConstant() const
1781  {
1782  UT_ASSERT_P(myCurTile >= 0);
1783 
1784  UT_VoxelTile<T> *tile;
1785 
1786  tile = myArray->getLinearTile(myCurTile);
1787  return tile->isConstant();
1788  }
1789 
1790  /// This tile will iterate over the voxels indexed [start,end).
1791  void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
1792  {
1793  start.x() = myTilePos[0] * TILESIZE;
1794  start.y() = myTilePos[1] * TILESIZE;
1795  start.z() = myTilePos[2] * TILESIZE;
1796  end = start;
1797  end.x() += myTileSize[0];
1798  end.y() += myTileSize[1];
1799  end.z() += myTileSize[2];
1800  }
1801 
1802  /// This tile will iterate over the *inclusive* voxels indexed
1803  /// in the returned bounding box.
1804  UT_BoundingBoxI getTileBBox() const
1805  {
1806  UT_Vector3I start, end;
1807  getTileVoxels(start, end);
1808  return UT_BoundingBoxI(start, end);
1809  }
1810 
1811  /// Returns true if we are at the start of a new tile.
1812  bool isStartOfTile() const
1813  { return !(myTileLocalPos[0] ||
1814  myTileLocalPos[1] ||
1815  myTileLocalPos[2]); }
1816 
1817  /// Returns the VoxelTile we are currently processing
1818  UT_VoxelTile<T> *getTile() const
1819  {
1820  UT_ASSERT_P(myCurTile >= 0);
1821  return myArray->getLinearTile(myCurTile);
1822  }
1823  int getLinearTileNum() const
1824  {
1825  return myCurTile;
1826  }
1827 
1828  /// Advances the iterator to point to the next tile. Useful if the
1829  /// constant test showed that you didn't need to deal with this one.
1830  void advanceTile();
1831 
1832  /// Advances the iterator to point just before the next tile so
1833  /// the next advance() will be an advanceTile(). This is useful
1834  /// if you want to use continue; as your break, but the for loop
1835  /// is doing advance().
1836  /// Note the iterator is in a bad state until advance() is called.
1837  void skipToEndOfTile();
1838 
1839  /// Sets a flag which causes the iterator to tryCompress()
1840  /// tiles when it is done with them.
1841  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1842  void setCompressOnExit(bool shouldcompress)
1843  { myShouldCompressOnExit = shouldcompress; }
1844 
1845  /// These templated algorithms are designed to apply simple operations
1846  /// across all of the voxels with as little overhead as possible.
1847  /// The iterator should already point to a voxel array and, if multithreaded,
1848  /// have had its partial range set. The source arrays must be of matching size.
1849  /// The operator should support a () operator, and the result is
1850  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
1851  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1852  /// Note if both source and destination tiles are constant, only
1853  /// a single operation is invoked.
1854  template <typename OP>
1855  void applyOperation(const OP &op);
1856  template <typename OP, typename S>
1857  void applyOperation(const OP &op, const UT_VoxelArray<S> &a);
1858  template <typename OP>
1859  void applyOperation(const OP &op, T a);
1860  template <typename OP, typename S, typename R>
1861  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
1862  const UT_VoxelArray<R> &b);
1863  template <typename OP, typename S, typename R, typename Q>
1864  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
1865  const UT_VoxelArray<R> &b,
1866  const UT_VoxelArray<Q> &c);
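 // [Editor's example, not part of the header] A minimal sketch of an OP functor
 // usable with applyOperation(op, a) as documented above; the functor name and
 // the arrays "dst" and "src" are hypothetical:
 //
 //   struct ScaleAdd
 //   {
 //       ScaleAdd(float s) : myScale(s) {}
 //       // Called per voxel (or once per pair of constant tiles); the result
 //       // is written back via vit.setValue().
 //       float operator()(float d, float v) const { return d + myScale * v; }
 //       float myScale;
 //   };
 //
 //   UT_VoxelArrayIterator<fpreal32> vit;
 //   vit.setArray(&dst);
 //   vit.applyOperation(ScaleAdd(0.5f), src);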
1867  /// These variants will invoke op.isNoop(a, b, ...) which will return
1868  /// true if those values won't affect the destination. This allows
1869  /// constant source tiles to be skipped, for example when adding
1870  /// 0.
1871  template <typename OP, typename S>
1872  void applyOperationCheckNoop(const OP &op, const UT_VoxelArray<S> &a);
1873  template <typename OP>
1874  void applyOperationCheckNoop(const OP &op, T a);
1875 
1876  /// These variants of apply operation also accept a mask array. The
1877  /// operation is applied only where the mask is greater than 0.5.
1878  template <typename OP, typename M>
1879  void maskedApplyOperation(const OP &op,
1880  const UT_VoxelArray<M> &mask);
1881  template <typename OP, typename S, typename M>
1882  void maskedApplyOperation(const OP &op, const UT_VoxelArray<S> &a,
1883  const UT_VoxelArray<M> &mask);
1884  template <typename OP, typename S, typename R, typename M>
1885  void maskedApplyOperation(const OP &op, const UT_VoxelArray<S> &a,
1886  const UT_VoxelArray<R>& b,
1887  const UT_VoxelArray<M> &mask);
1888  template <typename OP, typename S, typename R, typename Q, typename M>
1889  void maskedApplyOperation(const OP& op, const UT_VoxelArray<S> &a,
1890  const UT_VoxelArray<R>& b,
1891  const UT_VoxelArray<Q>& c,
1892  const UT_VoxelArray<M> &mask);
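 // [Editor's example, not part of the header] The masked variants behave like
 // applyOperation but only touch voxels where mask > 0.5; reusing the
 // hypothetical ScaleAdd functor sketched above:
 //
 //   vit.maskedApplyOperation(ScaleAdd(0.5f), src, maskArray);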
1893 
1894  /// Assign operation works like apply operation, but *this is written
1895  /// to without reading, so there is one less parameter to the ()
1896  /// callback. This can optimize constant tile writes as the
1897  /// constant() status of the destination doesn't matter.
1898  template <typename OP, typename S>
1899  void assignOperation(const OP &op, const UT_VoxelArray<S> &a);
1900  template <typename OP, typename S, typename R>
1901  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
1902  const UT_VoxelArray<R> &b);
1903  template <typename OP, typename S, typename R, typename Q>
1904  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
1905  const UT_VoxelArray<R> &b,
1906  const UT_VoxelArray<Q> &c);
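 // [Editor's example, not part of the header] Sketch of an assignOperation
 // functor: unlike applyOperation, the destination value is not passed in, so
 // a one-source op takes a single argument. Names are hypothetical.
 //
 //   struct CopyScaled
 //   {
 //       float operator()(float v) const { return 2.0f * v; }
 //   };
 //   vit.assignOperation(CopyScaled(), src);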
1907 
1908  /// These variants of assign operation also accept a mask array. The
1909  /// assignment operation is performed only where the mask is greater
1910  /// than 0.5.
1911  template <typename OP, typename S, typename M>
1912  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1913  const UT_VoxelArray<M>& mask);
1914  template <typename OP, typename S, typename R, typename M>
1915  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1916  const UT_VoxelArray<R>& b,
1917  const UT_VoxelArray<M>& mask);
1918  template <typename OP, typename S, typename R, typename Q, typename M>
1919  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1920  const UT_VoxelArray<R>& b,
1921  const UT_VoxelArray<Q>& c,
1922  const UT_VoxelArray<M>& mask);
1923 
1924  /// Reduction operators.
1925  /// op.reduce(T a) called for each voxel, *but*,
1926  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1927  template <typename OP>
1928  void reduceOperation(OP &op);
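 // [Editor's example, not part of the header] Sketch of a reduction functor
 // following the reduce()/reduceMany() contract above; names are hypothetical.
 //
 //   struct SumOp
 //   {
 //       void reduce(float v)            { mySum += v; }
 //       void reduceMany(float v, int n) { mySum += v * (fpreal64)n; }
 //       fpreal64 mySum = 0;
 //   };
 //
 //   SumOp total;
 //   vit.reduceOperation(total);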
1929 
1930  UT_VoxelArray<T> *getArray() const { return myArray; }
1931 
1932 protected:
1933  /// The array we belong to.
1934  UT_VoxelArray<T> *myArray;
1935  /// The handle that we have locked to get our array. It is null
1936  /// by default which makes the lock/unlock nops.
1937  UT_COWReadHandle<UT_VoxelArray<T> > myHandle;
1938 
1939  /// Absolute index into voxel array.
1940  int myPos[3];
1941 
1942  /// Flag determining if we should compress tiles whenever we
1943  /// advance out of them.
1944  bool myShouldCompressOnExit;
1945 
1948 
1949 public:
1950  /// Our current linear tile idx. A value of -1 implies at end.
1951  int myCurTile;
1952 
1953  /// Our current index into the tile list
1954  int myCurTileListIdx;
1955 
1956  /// Our start & end tiles for processing a subrange.
1957  /// The tile range is half open [start, end)
1958  int myTileStart, myTileEnd;
1959 
1960  /// Which tile we are as per tx,ty,tz rather than linear index.
1961  int myTilePos[3];
1962 
1963  /// Our position within the current tile.
1964  int myTileLocalPos[3];
1965 
1966  /// The size of the current tile
1967  int myTileSize[3];
1968 
1969  /// The job info to use for tile fetching
1970  const UT_JobInfo *myJobInfo;
1971 
1973 };
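 // [Editor's example, not part of the header] Typical use of the iterator above
 // on a UT_VoxelArray<fpreal32>; "vol" is a hypothetical array.
 //
 //   UT_VoxelArrayIterator<fpreal32> vit;
 //   vit.setArray(&vol);
 //   vit.setCompressOnExit(true);      // try to re-compress tiles we wrote to
 //   for (vit.rewind(); !vit.atEnd(); vit.advance())
 //       vit.setValue(vit.getValue() * 2.0f);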
1974 
1975 /// Iterator for tiles inside Voxel Arrays
1976 ///
1977 /// This class eliminates the need for having
1978 /// for (z = 0; z < zres; z++)
1979 /// ...
1980 /// for (x = 0; x < xres; x++)
1981 /// loops everywhere.
1982 ///
1983 /// The iterator is similar in principle to an STL iterator, but somewhat
1984 /// simpler. The classic STL loop
1985 /// for ( it = begin(); it != end(); ++it )
1986 /// is done using
1987 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1988 ///
1989 template <typename T>
1990 class UT_VoxelTileIterator
1991 {
1992 public:
1995  template <typename S>
1996  UT_VoxelTileIterator(const UT_VoxelArrayIterator<S> &vit,
1997  UT_VoxelArray<T> *array);
1999 
2000  template <typename S>
2001  void setTile(const UT_VoxelArrayIterator<S> &vit,
2002  UT_VoxelArray<T> *array)
2003  {
2004  UT_ASSERT_P(vit.isStartOfTile());
2005  myCurTile = array->getLinearTile(vit.getLinearTileNum());
2006  myLinearTileNum = vit.getLinearTileNum();
2007  myArray = array;
2008  myTileStart[0] = vit.x();
2009  myTileStart[1] = vit.y();
2010  myTileStart[2] = vit.z();
2011  }
2012 
2013  void setTile(const UT_VoxelArrayIterator<T> &vit)
2014  {
2015  setTile(vit, vit.getArray());
2016  }
2017 
2018  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
2019  {
2020  myCurTile = array->getLinearTile(lineartilenum);
2021  myLinearTileNum = lineartilenum;
2022  myArray = array;
2023 
2024  array->linearTileToXYZ(lineartilenum,
2025  myTileStart[0], myTileStart[1], myTileStart[2]);
2026  myTileStart[0] <<= TILEBITS;
2027  myTileStart[1] <<= TILEBITS;
2028  myTileStart[2] <<= TILEBITS;
2029  }
2030 
2031  /// Resets the iterator to point to the first voxel.
2032  void rewind();
2033 
2034  /// Returns true if we have iterated over all of the voxels.
2035  bool atEnd() const
2036  { return myCurTile == 0 || myAtEnd; }
2037 
2038  /// Advances the iterator to point to the next voxel.
2039  void advance()
2040  {
2041  // We try to advance each axis, rolling over to the next.
2042  // If we exhaust this tile, we call advanceTile.
2043  myPos[0]++;
2044  myTileLocalPos[0]++;
2045  if (myTileLocalPos[0] >= myTileSize[0])
2046  {
2047  // Wrapped in X.
2048  myPos[0] -= myTileLocalPos[0];
2049  myTileLocalPos[0] = 0;
2050 
2051  myPos[1]++;
2052  myTileLocalPos[1]++;
2053  if (myTileLocalPos[1] >= myTileSize[1])
2054  {
2055  // Wrapped in Y.
2056  myPos[1] -= myTileLocalPos[1];
2057  myTileLocalPos[1] = 0;
2058 
2059  myPos[2]++;
2060  myTileLocalPos[2]++;
2061  if (myTileLocalPos[2] >= myTileSize[2])
2062  {
2063  // Wrapped in Z! Finished this tile!
2064  advanceTile();
2065  }
2066  }
2067  }
2068  }
2069 
2070  /// Retrieve the current location of the iterator, in the
2071  /// containing voxel array, not in the tile.
2072  int x() const { return myPos[0]; }
2073  int y() const { return myPos[1]; }
2074  int z() const { return myPos[2]; }
2075  int idx(int idx) const { return myPos[idx]; }
2076 
2077  /// Retrieves the value that we are currently pointing at.
2078  /// This is faster than an operator(x,y,z) as we already know
2079  /// our current tile and that bounds checking isn't needed.
2080  T getValue() const
2081  {
2082  UT_ASSERT_P(myCurTile);
2083 
2084  return (*myCurTile)(myTileLocalPos[0],
2085  myTileLocalPos[1],
2086  myTileLocalPos[2]);
2087  }
2088 
2089  /// Sets the voxel we are currently pointing to the given value.
2090  void setValue(T t) const
2091  {
2092  UT_ASSERT_P(myCurTile);
2093 
2094  myCurTile->setValue(myTileLocalPos[0],
2095  myTileLocalPos[1],
2096  myTileLocalPos[2], t);
2097  }
2098 
2099  /// Returns true if the tile we are currently in is a constant tile.
2100  bool isTileConstant() const
2101  {
2102  UT_ASSERT_P(myCurTile);
2103 
2104  return myCurTile->isConstant();
2105  }
2106 
2107  /// Returns true if we are at the start of a new tile.
2108  bool isStartOfTile() const
2109  { return !(myTileLocalPos[0] ||
2110  myTileLocalPos[1] ||
2111  myTileLocalPos[2]); }
2112 
2113  /// Returns the VoxelTile we are currently processing
2114  UT_VoxelTile<T> *getTile() const
2115  {
2116  return myCurTile;
2117  }
2118  int getLinearTileNum() const
2119  {
2120  return myLinearTileNum;
2121  }
2122 
2123 
2124  /// Advances the iterator to point to the next tile. Since
2125  /// we are restricted to one tile, effectively just ends the iterator.
2126  void advanceTile();
2127 
2128  /// Sets a flag which causes the iterator to tryCompress()
2129  /// tiles when it is done with them.
2130  bool getCompressOnExit() const { return myShouldCompressOnExit; }
2131  void setCompressOnExit(bool shouldcompress)
2132  { myShouldCompressOnExit = shouldcompress; }
2133 
2134  /// These templated algorithms are designed to apply simple operations
2135  /// across all of the voxels with as little overhead as possible.
2136  /// The iterator should already point to a voxel array and, if multithreaded,
2137  /// have had its partial range set. The source arrays must be of matching size.
2138  /// The operator should support a () operator, and the result is
2139  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
2140  /// Passing T instead of UT_VoxelArray will treat it as a constant source
2141  /// Note if both source and destination tiles are constant, only
2142  /// a single operation is invoked.
2143  template <typename OP>
2144  void applyOperation(const OP &op);
2145  template <typename OP, typename S>
2146  void applyOperation(const OP &op, const UT_VoxelArray<S> &a);
2147  template <typename OP>
2148  void applyOperation(const OP &op, T a);
2149  template <typename OP, typename S, typename R>
2150  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
2151  const UT_VoxelArray<R> &b);
2152  template <typename OP, typename S, typename R, typename Q>
2153  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
2154  const UT_VoxelArray<R> &b,
2155  const UT_VoxelArray<Q> &c);
2156 
2157  /// Assign operation works like apply operation, but *this is written
2158  /// to without reading, so there is one less parameter to the ()
2159  /// callback. This can optimize constant tile writes as the
2160  /// constant() status of the destination doesn't matter.
2161  template <typename OP, typename S>
2162  void assignOperation(const OP &op, const UT_VoxelArray<S> &a);
2163  template <typename OP, typename S, typename R>
2164  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
2165  const UT_VoxelArray<R> &b);
2166  template <typename OP, typename S, typename R, typename Q>
2167  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
2168  const UT_VoxelArray<R> &b,
2169  const UT_VoxelArray<Q> &c);
2170 
2171 
2172  /// Reduction operators.
2173  /// op.reduce(T a) called for each voxel, *but*,
2174  /// op.reduceMany(T a, int n) called to reduce constant blocks.
2175  /// Early exits if op.reduce() returns false.
2176  template <typename OP>
2177  bool reduceOperation(OP &op);
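 // [Editor's example, not part of the header] Because this reduction can exit
 // early when reduce() returns false, a hypothetical "contains a negative
 // value" functor might look like this (reduceMany's return is assumed to
 // follow the same convention):
 //
 //   struct HasNegative
 //   {
 //       bool reduce(float v)            { myFound = (v < 0); return !myFound; }
 //       bool reduceMany(float v, int n) { return reduce(v); }
 //       bool myFound = false;
 //   };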
2178 
2179 protected:
2180  /// Current processing tile
2181  UT_VoxelTile<T> *myCurTile;
2182  UT_VoxelArray<T> *myArray;
2183 
2184  /// The current tile's linear number.
2185  int myLinearTileNum;
2186 
2187  /// Absolute index into voxel array.
2188  int myPos[3];
2189  /// Absolute index of start of tile
2190  int myTileStart[3];
2191 
2192  /// Flag determining if we should compress tiles whenever we
2193  /// advance out of them.
2194  bool myShouldCompressOnExit;
2195 
2196  /// Since we want to allow multiple passes, we can't
2197  /// clear out myCurTile when we hit the end.
2198  bool myAtEnd;
2199 
2200 public:
2201  /// Our position within the current tile.
2202  int myTileLocalPos[3];
2203 
2204  /// The size of the current tile
2205  int myTileSize[3];
2206 };
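 // [Editor's example, not part of the header] Sketch of walking a single tile
 // with the tile iterator above; "vol" and "tilenum" are hypothetical.
 //
 //   UT_VoxelTileIterator<fpreal32> tit;
 //   tit.setLinearTile(tilenum, &vol);
 //   fpreal64 sum = 0;
 //   for (tit.rewind(); !tit.atEnd(); tit.advance())
 //       sum += tit.getValue();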
2207 
2208 /// Probe for Voxel Arrays
2209 ///
2210 /// This class is designed to allow for efficient evaluation
2211 /// of aligned indices of a voxel array, provided the voxels are iterated
2212 /// in a tile-by-tile, x-inner most, manner.
2213 ///
2214 /// This class will create a local copy of the voxel data where needed,
2215 /// uncompressing the information once for every 16 queries. It will
2216 /// also create an aligned buffer so you can safely use v4uf on fpreal32
2217 /// data.
2218 ///
2219  /// For queries where you need surrounding values, the prex and postx can
2220  /// specify padding on the probe. prex should be -1 to allow reading a
2221  /// -1 offset, and postx should be 1 to allow reading a +1 offset.
2222 ///
2223 
2224 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2225 class UT_VoxelProbe
2226 {
2227 public:
2228  UT_VoxelProbe();
2229  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2230  ~UT_VoxelProbe();
2231 
2232  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2233  void setConstArray(const UT_VoxelArray<T> *vox,
2234  int prex = 0, int postx = 0)
2235  {
2236  SYS_STATIC_ASSERT(DoWrite == false);
2237  setArray((UT_VoxelArray<T> *)vox, prex, postx);
2238  }
2239 
2240  UT_VoxelArray<T> *getArray() const { return myArray; }
2241 
2242  bool isValid() const { return myArray != 0; }
2243 
2244  inline T getValue() const
2245  {
2246  return *myCurLine;
2247  }
2248  inline T getValue(int offset) const
2249  {
2250  return myCurLine[myStride*offset];
2251  }
2252 
2253  inline void setValue(T value)
2254  {
2255  UT_ASSERT_P(DoWrite);
2256  *myCurLine = value;
2257  if (TestForWrites)
2258  myDirty = true;
2259  }
2260 
2261 
2262  /// Resets where we currently point to.
2263  /// Returns true if we had to reset our cache line. If we didn't,
2264  /// and you have multiple probes acting in-step, you can just
2265  /// advanceX() the other probes
2266  template <typename S>
2267  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2268  { return setIndex(vit.x(), vit.y(), vit.z()); }
2269  template <typename S>
2270  bool setIndex(UT_VoxelTileIterator<S> &vit)
2271  { return setIndex(vit.x(), vit.y(), vit.z()); }
2272 
2273  bool setIndex(int x, int y, int z);
2274 
2275  /// Blindly advances our current pointer.
2276  inline void advanceX()
2277  {
2278  myCurLine += myStride;
2279  myX++;
2280  UT_ASSERT_P(myX < myMaxValidX);
2281  }
2282 
2283  /// Adjusts our current pointer to the given absolute location,
2284  /// assumes the new value is inside our valid range.
2285  inline void resetX(int x)
2286  {
2287  myCurLine += myStride * (x - myX);
2288  myX = x;
2289  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
2290  }
2291 
2292 protected:
2293  void reloadCache(int x, int y, int z);
2294 
2295  void writeCacheLine();
2296 
2297  void buildConstantCache(T value);
2298 
2300  /// myCacheLine[0] is the start of the cache line, so -1 would be
2301  /// the first pre-rolled value
2303  /// Where we actually allocated our cache line, aligned to 4x multiple
2304  /// to ensure SSE compatible.
2306 
2307  int myX, myY, myZ;
2308  int myPreX, myPostX;
2311  /// Half inclusive [,) range of valid x queries for current cache.
2312  int myMinValidX, myMaxValidX;
2313 
2314  /// Determines if we have anything to write back, only
2315  /// valid if TestForWrites is enabled.
2316  bool myDirty;
2317 
2318  UT_VoxelArray<T> *myArray;
2319 
2320  friend class UT_VoxelProbeCube<T>;
2321  friend class UT_VoxelProbeFace<T>;
2322 };
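 // [Editor's example, not part of the header] Sketch of a read-only probe with
 // +/-1 x padding used to average neighbours along x; "src" and "dst" are
 // hypothetical, matching-size arrays.
 //
 //   UT_VoxelProbe<fpreal32, true, false, false> prb;
 //   prb.setConstArray(&src, -1, 1);
 //   UT_VoxelArrayIterator<fpreal32> vit;
 //   vit.setArray(&dst);
 //   for (vit.rewind(); !vit.atEnd(); vit.advance())
 //   {
 //       prb.setIndex(vit);
 //       vit.setValue(0.5f * (prb.getValue(-1) + prb.getValue(1)));
 //   }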
2323 
2324 ///
2325 /// The vector probe is three normal probes into separate voxel arrays
2326 /// making it easier to read and write to aligned vector fields.
2327 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
2328 ///
2329 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2330 class UT_VoxelVectorProbe
2331 {
2332 public:
2333  UT_VoxelVectorProbe()
2334  { }
2335  UT_VoxelVectorProbe(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2336  { setArray(vx, vy, vz); }
2337  ~UT_VoxelVectorProbe()
2338  {}
2339 
2340  void setArray(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2341  {
2342  myLines[0].setArray(vx);
2343  myLines[1].setArray(vy);
2344  myLines[2].setArray(vz);
2345  }
2346  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
2347  {
2348  SYS_STATIC_ASSERT(DoWrite == false);
2349  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
2350  }
2351 
2352  inline UT_Vector3 getValue() const
2353  {
2354  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2355  }
2356  inline T getValue(int axis) const
2357  {
2358  return myLines[axis].getValue();
2359  }
2360 
2361  inline void setValue(const UT_Vector3 &v)
2362  {
2363  myLines[0].setValue(v.x());
2364  myLines[1].setValue(v.y());
2365  myLines[2].setValue(v.z());
2366  }
2367 
2368  inline void setComponent(int axis, T val)
2369  {
2370  myLines[axis].setValue(val);
2371  }
2372 
2373  /// Resets where we currently point to.
2374  /// Returns true if we had to reset our cache line. If we didn't,
2375  /// and you have multiple probes acting in-step, you can just
2376  /// advanceX() the other probes
2377  template <typename S>
2378  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2379  { return setIndex(vit.x(), vit.y(), vit.z()); }
2380  template <typename S>
2381  bool setIndex(UT_VoxelTileIterator<S> &vit)
2382  { return setIndex(vit.x(), vit.y(), vit.z()); }
2383 
2384  bool setIndex(int x, int y, int z)
2385  {
2386  if (myLines[0].setIndex(x, y, z))
2387  {
2388  myLines[1].setIndex(x, y, z);
2389  myLines[2].setIndex(x, y, z);
2390  return true;
2391  }
2392  myLines[1].advanceX();
2393  myLines[2].advanceX();
2394  return false;
2395  }
2396 
2397  void advanceX()
2398  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2399 
2400 protected:
2401  UT_VoxelProbe<T, DoRead, DoWrite, TestForWrites> myLines[3];
2402 };
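 // [Editor's example, not part of the header] Sketch of reading a vector field
 // stored as three aligned scalar arrays; "vx", "vy", "vz" and the iterator
 // "vit" are hypothetical.
 //
 //   UT_VoxelVectorProbe<fpreal32, true, false, false> vel;
 //   vel.setConstArray(&vx, &vy, &vz);
 //   ...
 //   vel.setIndex(vit);
 //   UT_Vector3 v = vel.getValue();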
2403 
2404 template <typename T>
2405 class
2406 UT_VoxelProbeCube
2407 {
2408 public:
2409  UT_VoxelProbeCube();
2410  ~UT_VoxelProbeCube();
2411 
2412  void setConstCubeArray(const UT_VoxelArray<T> *vox);
2413  void setConstPlusArray(const UT_VoxelArray<T> *vox);
2414 
2415  /// Allows you to query +/-1 in each direction. In cube update,
2416  /// all are valid. In plus update, only one of x, y, and z may be
2417  /// non-zero.
2418  SYS_FORCE_INLINE
2419  T
2420  getValue(int x, int y, int z) const
2421  {
2422  UT_ASSERT_P(x >= -1 && x <= 1 &&
2423  y >= -1 && y <= 1 &&
2424  z >= -1 && z <= 1);
2425 
2426  return myLines[y+1][z+1].getValue(x);
2427  }
2428 
2429  SYS_FORCE_INLINE
2430  T
2431  getValue(const UT_Vector3I &offset) const
2432  {
2433  return getValue(offset[0], offset[1], offset[2]);
2434  }
2435 
2436  template <typename S>
2437  bool setIndexCube(UT_VoxelArrayIterator<S> &vit)
2438  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2439  template <typename S>
2440  bool setIndexCube(UT_VoxelTileIterator<S> &vit)
2441  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2442  bool setIndexCube(int x, int y, int z);
2443 
2444  template <typename S>
2445  bool setIndexPlus(UT_VoxelArrayIterator<S> &vit)
2446  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2447  template <typename S>
2448  bool setIndexPlus(UT_VoxelTileIterator<S> &vit)
2449  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2450  bool setIndexPlus(int x, int y, int z);
2451 
2452  /// Computes central difference gradient, does not scale
2453  /// by the step size (which is twice voxelsize)
2454  /// Requires PlusArray
2455  UT_Vector3 gradient() const
2456  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2457  getValue(0,1,0) - getValue(0,-1,0),
2458  getValue(0,0,1) - getValue(0,0,-1)); }
2459 
2460  /// Computes the central difference curvature using the given
2461  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2462  /// Requires CubeArray.
2463  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2464 
2465  /// Computes the laplacian, again with a given 1/voxelsize.
2466  /// Requires PlusArray
2467  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2468 
2469 protected:
2470  /// Does a rotation of our cache lines: ym becomes y0 and y0 becomes yp,
2471  /// so further queries with y+1 will be cache hits for 2 out of 3.
2472  static void rotateLines(UT_VoxelProbe<T, true, false, false> &ym,
2473  UT_VoxelProbe<T, true, false, false> &y0,
2474  UT_VoxelProbe<T, true, false, false> &yp);
2475 
2476  UT_VoxelProbe<T, true, false, false> myLines[3][3];
2477  /// Cached look up position. myValid stores if they are
2478  /// valid values or not
2479  bool myValid;
2480  int myX, myY, myZ;
2481  /// Half inclusive [,) range of valid x queries for current cache.
2482  int myMinValidX, myMaxValidX;
2483 };
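 // [Editor's example, not part of the header] Sketch of using the plus-shaped
 // cache to take a central-difference gradient; "sdf", "voxelsize" and the
 // iterator "vit" are hypothetical, and the 0.5/voxelsize factor accounts for
 // the doubled step noted in the gradient() comment.
 //
 //   UT_VoxelProbeCube<fpreal32> prb;
 //   prb.setConstPlusArray(&sdf);
 //   ...
 //   prb.setIndexPlus(vit);
 //   UT_Vector3 grad = prb.gradient() * (0.5f / voxelsize);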
2484 
2485 ///
2486 /// UT_VoxelProbeConstant
2487 ///
2488 /// Looks like a voxel probe but only returns a constant value.
2489 ///
2490 template <typename T>
2491 class
2492 UT_VoxelProbeConstant
2493 {
2494 public:
2497 
2498  template <typename S>
2499  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2500  { return true; }
2501  template <typename S>
2502  bool setIndex(UT_VoxelTileIterator<S> &vit)
2503  { return true; }
2504  bool setIndex(int x, int y, int z)
2505  { return true; }
2506 
2507  void setValue(T val) { myValue = val; }
2508  T getValue() const { return myValue; }
2509 protected:
2510  T myValue;
2511 };
2512 
2513 ///
2514 /// UT_VoxelProbeAverage
2515 ///
2516  /// When working with MAC grids one often has slightly misaligned
2517  /// fields. Ie, one field is at the half-grid spacing of another field.
2518  /// The step values are 0 if the dimension is aligned, -1 for half a step
2519 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2520 /// (ie, (val(0)+val(1))/2)
2521 ///
2522 template <typename T, int XStep, int YStep, int ZStep>
2523 class
2524 UT_VoxelProbeAverage
2525 {
2526 public:
2529 
2530  void setArray(const UT_VoxelArray<T> *vox);
2531 
2532  template <typename S>
2533  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2534  { return setIndex(vit.x(), vit.y(), vit.z()); }
2535  template <typename S>
2536  bool setIndex(UT_VoxelTileIterator<S> &vit)
2537  { return setIndex(vit.x(), vit.y(), vit.z()); }
2538  bool setIndex(int x, int y, int z);
2539 
2540  /// Returns the velocity centered at this index, thus an average
2541  /// of the values in each of our internal probes.
2542  inline T getValue() const
2543  {
2544  if (ZStep)
2545  return (valueZ(1) + valueZ(0)) * 0.5;
2546  return valueZ(0);
2547  }
2548 
2549 protected:
2550  inline T valueZ(int z) const
2551  {
2552  if (YStep)
2553  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2554  return valueYZ(0, z);
2555  }
2556 
2557  inline T valueYZ(int y, int z) const
2558  {
2559  if (XStep > 0)
2560  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2561  if (XStep < 0)
2562  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2563  return myLines[y][z].getValue();
2564  }
2565 
2566  // Stores [Y][Z] lines.
2567  UT_VoxelProbe<T, true, false, false> myLines[2][2];
2568 };
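 // [Editor's example, not part of the header] Sketch of averaging a
 // face-centered x-velocity onto voxel centers, i.e. (val(-1)+val(0))/2 along
 // x only; "velx" and the iterator "vit" are hypothetical, and the -1 step
 // assumes face i is the lower face of cell i.
 //
 //   UT_VoxelProbeAverage<fpreal32, -1, 0, 0> avgx;
 //   avgx.setArray(&velx);
 //   ...
 //   avgx.setIndex(vit);
 //   float vx_center = avgx.getValue();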
2569 
2570 
2571 ///
2572 /// UT_VoxelProbeFace is designed to walk over three velocity
2573 /// fields that store face-centered values. The indices refer
2574 /// to the centers of the voxels.
2575 ///
2576 template <typename T>
2577 class
2578 UT_VoxelProbeFace
2579 {
2580 public:
2581  UT_VoxelProbeFace();
2582  ~UT_VoxelProbeFace();
2583 
2584  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2585  void setVoxelSize(const UT_Vector3 &voxelsize);
2586 
2587  template <typename S>
2588  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2589  { return setIndex(vit.x(), vit.y(), vit.z()); }
2590  template <typename S>
2591  bool setIndex(UT_VoxelTileIterator<S> &vit)
2592  { return setIndex(vit.x(), vit.y(), vit.z()); }
2593  bool setIndex(int x, int y, int z);
2594 
2595  /// Get the face values on each face component.
2596  /// Parameters are axis then side.
2597  /// 0 is the lower face, 1 the higher face.
2598  inline T face(int axis, int side) const
2599  {
2600  if (axis == 0)
2601  return myLines[0][0].getValue(side);
2602  else
2603  return myLines[axis][side].getValue();
2604  }
2605 
2606  /// Returns the velocity centered at this index, thus an average
2607  /// of the values in each of our internal probes.
2608  inline UT_Vector3 value() const
2609  {
2610  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2611  0.5f * (face(1, 0) + face(1, 1)),
2612  0.5f * (face(2, 0) + face(2, 1)));
2613  }
2614 
2615  /// Returns the divergence of this cell.
2616  inline T divergence() const
2617  {
2618  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2619  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2620  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2621 
2622  }
2623 
2624 protected:
2625 
2626  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2627  UT_VoxelProbe<T, true, false, false> &yp);
2628 
2629 
2630  UT_VoxelProbe<T, true, false, false> myLines[3][2];
2631 
2632  /// Cached look up position. myValid stores if they are
2633  /// valid values or not
2634  bool myValid;
2635  int myX, myY, myZ;
2636  /// Half inclusive [,) range of valid x queries for current cache.
2637  int myMinValidX, myMaxValidX;
2638 
2639  UT_Vector3 myVoxelSize, myInvVoxelSize;
2640 };
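 // [Editor's example, not part of the header] Sketch of reading cell-centered
 // velocity from three face-centered arrays; "vx", "vy", "vz", "voxelsize" and
 // the iterator "vit" are hypothetical.
 //
 //   UT_VoxelProbeFace<fpreal32> face;
 //   face.setArray(&vx, &vy, &vz);
 //   face.setVoxelSize(voxelsize);
 //   ...
 //   face.setIndex(vit);
 //   UT_Vector3 vel = face.value();       // average of the opposing faces
 //   fpreal32 div = face.divergence();    // per-cell divergence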
2641 
2642 
2643 #include "UT_VoxelArray.C"
2644 
2645 
2646 // Typedefs for common voxel array types
2652 
2664 // Read only probe
2670 // Write only
2676 // Read/Write always writeback.
2682 // Read/Write with testing
2688 
2689 // TODO: add support for read-write probe cube
2691 
2695 
2699 
2703 
2707 
2711 
2715 
2716 #endif
2717 