HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_VoxelArray.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_ValArray.h"
24 #include "UT_Array.h"
25 #include "UT_FilterType.h"
26 #include "UT_COW.h"
27 #include "UT_ThreadedAlgorithm.h"
28 #include "UT_Interrupt.h"
29 #include <SYS/SYS_Align.h>
30 #include <SYS/SYS_Floor.h>
31 #include <SYS/SYS_Inline.h>
32 #include <SYS/SYS_Math.h>
33 
34 #include <SYS/SYS_StaticAssert.h>
35 #include <SYS/SYS_Types.h>
36 
37 // TBB alloc results in real-world tests that are 3-4% faster. Yay!
38 // But unfortunately it is less aggressive with fragmentation, so
39 // we use effectively 2x the memory. Boo.
40 
// Uncomment to route tile allocations through TBB's scalable allocator
// instead of the default SYS allocator (see the speed/memory tradeoff
// described above).
41 //#define VOXEL_USE_TBB_ALLOC
42 
43 #ifdef VOXEL_USE_TBB_ALLOC
44 
45 #include <tbb/scalable_allocator.h>
46 
47 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
48 #define UT_VOXEL_FREE(x) scalable_free(x)
49 
50 #else
51 
// Default path: SYSamalloc with a second argument of 128 -- presumably a
// 128-byte alignment for SIMD/cache-line friendliness (TODO confirm against
// SYS_Align.h, included above).
52 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
53 #define UT_VOXEL_FREE(x) SYSafree(x)
54 
55 #endif
56 
57 class UT_Filter;
58 class UT_JSONWriter;
59 class UT_JSONParser;
60 class SYS_SharedMemory;
62 
// Tile geometry: tiles are TILESIZE (1 << 4 = 16) voxels on a side.
// Callers split a global voxel coordinate into a tile index (x >> TILEBITS)
// and a within-tile coordinate (x & TILEMASK) -- see the UT_VoxelArray
// accessors later in this file.
63 static const int TILEBITS = 4;
64 static const int TILESIZE = 1 << TILEBITS;
65 static const int TILEMASK = TILESIZE-1;
66 
67 ///
68 /// Behaviour of out of bound reads.
69 ///
/// NOTE(review): the enum declaration line and its enumerators are missing
/// from this extracted view. Code later in this file references
/// UT_VOXELBORDER_REPEAT, so this is presumably the UT_VoxelBorderType
/// enum -- confirm against the original UT_VoxelArray.h.
71 {
77 };
78 
79 template <typename T> class UT_VoxelTile;
80 template <typename T> class UT_VoxelArray;
81 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
82 template <typename T> class UT_VoxelProbeCube;
83 template <typename T> class UT_VoxelProbeFace;
84 
/// NOTE(review): the struct name line is missing from this extracted view.
/// extractTiles()/writeTiles() below take a
/// UT_Array<UT_VoxelArrayTileDataDescr>, so this is presumably that struct;
/// confirm against the original header.
86 {
 // Index identifying a tile (presumably the linear tile index within the
 // owning UT_VoxelArray -- confirm).
87  int tileidx;
 // Number of voxels contributed by that tile.
88  int numvoxel;
89 };
90 
/// NOTE(review): the class name line is missing from this extracted view.
/// tryCompress() below takes a "const UT_VoxelCompressOptions &", so this is
/// presumably class UT_VoxelCompressOptions -- confirm against the original
/// header. The member declaration lines (myConstantTol, myQuantizeTol,
/// myAllowFP16, and the quantization enum's name/values) are also missing;
/// only their doc comments survive below.
92 {
93 public:
 // Default-constructs with all compression disabled: zero tolerances and
 // no fpreal16 conversion (compressionEnabled() then returns false).
95  {
96  myConstantTol = 0;
97  myQuantizeTol = 0;
98  myAllowFP16 = false;
99  }
100 
101  // Used for quantization.
 // NOTE(review): the enum's name and enumerator lines are missing from
 // this view.
103  {
106  };
107 
108  /// Determines if compressTile should be run on this grid for
109  /// things other than constant compression. Used by writeTiles
110  /// to limit compression attempts.
111  bool compressionEnabled() const
112  {
113  return myAllowFP16 || myConstantTol > 0 || myQuantizeTol > 0;
114  }
115 
116  /// Tiles will be constant if within this range. This may
117  /// need to be tighter than quantization tolerance as
118  /// dithering can't recover partial values.
120  /// Tolerance for quantizing to reduced bit depth
122 
124 
125  /// Conversion to fpreal16, only valid for scalar data.
127 };
128 
129 ///
130 /// UT_VoxelTileCompress
131 ///
132 /// A compression engine for UT_VoxelTiles of a specific type. This
133 /// is a verb class which is invoked from the voxeltile class.
134 ///
135 template <typename T>
/// NOTE(review): the class name line is missing from this extracted view;
/// per the comment block above, this is the abstract interface
/// UT_VoxelTileCompress<T> -- confirm against the original header. The
/// constructor/destructor declaration lines (orig. 139-140) are missing too.
137 {
138 public:
141 
142  /// Attempts to write data directly to the compressed tile.
143  /// Returns false if not possible.
144  virtual bool writeThrough(UT_VoxelTile<T> &tile,
145  int x, int y, int z, T t) const = 0;
146 
147  /// Reads directly from the compressed data.
148  /// Cannot alter the tile in any way because it must be threadsafe.
149  virtual T getValue(const UT_VoxelTile<T> &tile,
150  int x, int y, int z) const = 0;
151 
152  /// Attempts to compress the data according to the given tolerance.
153  /// If successful, returns true.
154  virtual bool tryCompress(UT_VoxelTile<T> &tile,
155  const UT_VoxelCompressOptions &options,
156  T min, T max) const = 0;
157 
158  /// Returns the length in bytes of the data in the tile.
159  /// It must be at least one byte long.
160  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
161 
162  /// Returns true if the compression type is lossless
163  virtual bool isLossless() const { return false; }
164 
165  /// Determines the min & max values of the tile. A default
166  /// implementation uses getValue() on all voxels.
167  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
168 
169  /// Does this engine support saving and loading?
 /// Default engines can't save: the save() overloads are no-ops (the JSON
 /// variant returns false) and load() overloads likewise do nothing.
170  virtual bool canSave() const { return false; }
171  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
172  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
173  { return false; }
174  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
175  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
176  { return false; }
177 
178  /// Returns the unique name of this compression engine so
179  /// we can look up engines by name (the index of the compression
180  /// engine is assigned at load time so isn't constant)
181  virtual const char *getName() = 0;
182 };
183 
195 
// Generates the two per-type helpers UT_VoxelTile relies on for scalar TYPEs:
//  - UTvoxelTileExpandMinMax: grow [min,max] to include v (note the else-if:
//    v updates at most one bound per call).
//  - UTvoxelTileDist: absolute difference, used as the tolerance metric for
//    equality/compression checks (see UT_VoxelTile::dist below).
// NOTE(review): the invocation lines (orig. 212-219, presumably one
// DEFINE_STD_FUNC per scalar type) are missing from this extracted view.
196 #define DEFINE_STD_FUNC(TYPE) \
197 inline void \
198 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
199 { \
200  if (v < min) \
201  min = v; \
202  else if (v > max) \
203  max = v; \
204 } \
205  \
206 inline fpreal \
207 UTvoxelTileDist(TYPE a, TYPE b) \
208 { \
209  return (fpreal) SYSabs(a - b); \
210 }
211 
220 
221 #undef DEFINE_STD_FUNC
222 
// Componentwise expansion of [min,max] to include v, for 2-component vectors.
// NOTE(review): the signature line (orig. 224) is missing from this view;
// based on the body and the includes above it is presumably
// UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max) --
// confirm against the original header.
223 inline void
225 {
226  min.x() = SYSmin(v.x(), min.x());
227  max.x() = SYSmax(v.x(), max.x());
228 
229  min.y() = SYSmin(v.y(), min.y());
230  max.y() = SYSmax(v.y(), max.y());
231 }
232 
// Componentwise expansion of [min,max] to include v, for 3-component vectors.
// NOTE(review): the signature line (orig. 234) is missing from this view;
// presumably UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min,
// UT_Vector3 &max) -- confirm against the original header.
233 inline void
235 {
236  min.x() = SYSmin(v.x(), min.x());
237  max.x() = SYSmax(v.x(), max.x());
238 
239  min.y() = SYSmin(v.y(), min.y());
240  max.y() = SYSmax(v.y(), max.y());
241 
242  min.z() = SYSmin(v.z(), min.z());
243  max.z() = SYSmax(v.z(), max.z());
244 }
245 
// Componentwise expansion of [min,max] to include v, for 4-component vectors.
// NOTE(review): the signature line (orig. 247) is missing from this view;
// presumably UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min,
// UT_Vector4 &max) -- confirm against the original header.
246 inline void
248 {
249  min.x() = SYSmin(v.x(), min.x());
250  max.x() = SYSmax(v.x(), max.x());
251 
252  min.y() = SYSmin(v.y(), min.y());
253  max.y() = SYSmax(v.y(), max.y());
254 
255  min.z() = SYSmin(v.z(), min.z());
256  max.z() = SYSmax(v.z(), max.z());
257 
258  min.w() = SYSmin(v.w(), min.w());
259  max.w() = SYSmax(v.w(), max.w());
260 }
261 
// Manhattan (L1) distance between two 2-component vectors, used as the
// tolerance metric for voxel equality checks.
// NOTE(review): the signature line (orig. 263) is missing from this view;
// presumably UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b) -- confirm.
262 inline fpreal
264 {
265  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
266 }
267 
// Manhattan (L1) distance between two 3-component vectors, used as the
// tolerance metric for voxel equality checks.
// NOTE(review): the signature line (orig. 269) is missing from this view;
// presumably UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b) -- confirm.
268 inline fpreal
270 {
271  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
272  + SYSabs(a.z() - b.z());
273 }
274 
// Manhattan (L1) distance between two 4-component vectors, used as the
// tolerance metric for voxel equality checks.
// NOTE(review): the signature line (orig. 276) is missing from this view;
// presumably UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b) -- confirm.
275 inline fpreal
277 {
278  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
279  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
280 }
281 
282 ///
283 /// UT_VoxelTile
284 ///
285 /// A UT_VoxelArray is composed of a number of these tiles. This is
286 /// done for two reasons:
287 /// 1) Increased memory locality when processing neighbouring points.
288 /// 2) Ability to compress or page out unneeded tiles.
289 /// Currently, the only special ability is the ability to create constant
290 /// tiles.
291 ///
292 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should be
293 /// usually transparent. The only exception may be if they want to do
294 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
295 ///
296 template <typename T>
297 class UT_VoxelTile
298 {
299 public:
300  UT_VoxelTile();
301  ~UT_VoxelTile();
302 
303  // Copy constructor:
 // NOTE(review): the copy-constructor declaration line (orig. 304) is
 // missing from this extracted view.
305 
306 
307  // Assignment operator:
 // NOTE(review): the assignment-operator declaration line (orig. 308) is
 // missing from this extracted view.
309 
 // NOTE(review): the enum name and enumerator lines (orig. 310, 312-316)
 // are missing. The methods below compare myCompressionType against
 // COMPRESS_CONSTANT, COMPRESS_RAW, and COMPRESS_RAWFULL, so this is
 // presumably the compression-type enum declaring those values -- confirm.
311  {
317  };
318 
319  /// Fetch a given local value. (x,y,z) should be local to
320  /// this tile.
321  SYS_FORCE_INLINE T operator()(int x, int y, int z) const;
322 
323  /// Lerps two numbers, templated to work with T.
 /// NOTE(review): the signature line (orig. 324) is missing; based on the
 /// body it presumably takes (T v1, T v2, float bias) or similar -- confirm.
325  {
326  return v1 + (v2 - v1) * bias;
327  }
328 
329  /// Does a trilinear interpolation. x,y,z should be local to this
330  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
331  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
332 
333  template <int AXIS2D>
334  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
335 
336  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
337  /// array should have 8 elements, x minor, z major.
338  /// Requires it is in bounds.
339  /// Returns true if all constant, in which case only a single
340  /// sample is filled, [0]
342  bool extractSample(int x, int y, int z,
343  T *sample) const;
344  template <int AXIS2D>
346  bool extractSampleAxis(int x, int y, int z,
347  T *sample) const;
348 
349  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
350  /// 7 samples.
351  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
352  T *sample) const;
353 #if 0
354  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
355  /// 27 elements.
356  /// Previous implementation had an error and this method isn't used in
357  /// Houdini code.
358  bool extractSampleCube(int x, int y, int z,
359  T *sample) const;
360 #endif
361 
362 #if 0
363  /// MSVC can't handle aligned parameters after the third so
364  /// frac must come first.
365  T lerp(v4uf frac, int x, int y, int z) const;
366 #endif
367 
368  /// Returns a cached line to our internal data, at local address x,y,z.
369  /// cacheline is a caller allocated structure to fill out if we have
370  /// to decompress. If forcecopy isn't set and we can, the result may
371  /// be an internal pointer. stride is set to the update for moving one
372  /// x position in the cache.
373  /// strideofone should be set to true if you want to prevent 0 stride
374  /// results for constant tiles.
375  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
376 
377  /// Fills a cache line from an external buffer into our own data.
378  void writeCacheLine(T *cacheline, int y, int z);
379 
380  /// Copies between two tiles. The tile's voxels match up, but don't
381  /// have the same offset. The maximal overlapping voxels are copied.
382  /// this->setValue(dst, dsty, dstz, src(srcx, srcy, srcz));
383  void copyFragment(int dstx, int dsty, int dstz,
384  const UT_VoxelTile<T> &srctile,
385  int srcx, int srcy, int srcz);
386 
387  /// Flattens ourself into the given destination buffer.
388  template <typename S>
389  void flatten(S *dst, int dststride) const;
390 
391  /// Fills our values from the given dense flat buffer. Will
392  /// create a constant tile if the source is constant.
393  template <typename S>
394  void writeData(const S *src, int srcstride);
395 
396  /// The setData is intentionally separate so we can avoid
397  /// expanding constant data when we write the same value to it.
398  void setValue(int x, int y, int z, T t);
399 
400  /// Finds the minimum and maximum T values
401  void findMinMax(T &min, T &max) const;
402 
403  /// Determines the average value of the tile.
404  void findAverage(T &avg) const;
405 
406  /// Returns if this tile is constant.
407  bool isConstant() const
408  { return myCompressionType == COMPRESS_CONSTANT; }
409 
410  /// Returns true if any NANs are in this tile
411  bool hasNan() const;
412 
413  /// Returns if this tile is in raw format.
414  bool isRaw() const
415  { return myCompressionType == COMPRESS_RAW; }
416 
417  /// Returns if this tile is in raw full format.
418  bool isRawFull() const
419  { return myCompressionType == COMPRESS_RAWFULL; }
420 
421  /// Returns true if this is a simple form of compression, either
422  /// constant, raw, or a raw full that isn't padded
423  bool isSimpleCompression() const
424  {
425  if (isRaw()) return true;
426  if (isConstant()) return true;
427  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
428  return true;
429  return false;
430  }
431 
432  /// Attempts to compress this tile. Returns true if any
433  /// compression performed.
434  bool tryCompress(const UT_VoxelCompressOptions &options);
435 
436  /// Turns this tile into a constant tile of the given value.
437  void makeConstant(T t);
438 
439  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
440  void makeFpreal16();
441 
442  /// Turns a compressed tile into a raw tile.
443  void uncompress();
444 
445  /// Turns a tile into a raw full tile.
446  void uncompressFull();
447 
448  /// Like uncompress() except it leaves the data uninitialized. Result
449  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
450  /// @note USE WITH CAUTION!
451  void makeRawUninitialized();
452 
453  /// Returns the raw full data of the tile.
 /// NOTE(review): the signature line (orig. 454) is missing from this
 /// view; the body decompresses to full form, then returns myData.
455  {
456  uncompressFull();
457  return (T *)myData;
458  }
459 
460  /// This only makes sense for simple compression. Use with
461  /// extreme care.
 /// NOTE(review): the non-const rawData() signature line (orig. 462) is
 /// missing from this view. For inlined constant tiles the value is
 /// stored in the myData pointer itself, hence the &myData case.
463  { if (inlineConstant() && isConstant())
464  { return (T *) &myData; }
465  return (T *)myData; }
466  const T *rawData() const
467  { if (inlineConstant() && isConstant())
468  { return (const T *) &myData; }
469  return (const T *)myData; }
470 
471  /// Read the current resolution.
472  int xres() const { return myRes[0]; }
473  int yres() const { return myRes[1]; }
474  int zres() const { return myRes[2]; }
475 
476  int getRes(int dim) const { return myRes[dim]; }
477 
478 
479  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
480 
481  /// Returns the amount of memory used by this tile.
482  int64 getMemoryUsage(bool inclusive) const;
483 
484  /// Returns the amount of data used by the tile myData pointer.
485  exint getDataLength() const;
486 
487  /// A routine used by filtered evaluation to accumulate a partial
488  /// filtered sum in this tile.
489  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
490  /// weights - weight array
491  /// start - UT_VoxelArray coordinates at [0] in the weight array
492  void weightedSum(int pstart[3], int pend[3],
493  const float *weights[3], int start[3],
494  T &result);
495 
496  /// Designed to be specialized according to T
497 
498  /// Update min & max to encompass T itself.
499  static void expandMinMax(T v, T &min, T &max)
500  {
501  UTvoxelTileExpandMinMax(v, min, max);
502  }
503 
504  /// Return the "distance" of a & b. This is used for
505  /// tolerance checks on equality comparisons.
506  static fpreal dist(T a, T b)
507  {
508  return UTvoxelTileDist(a, b);
509  }
510 
 // NOTE(review): a declaration line (orig. 511, presumably the engine
 // registration hook) is missing from this extracted view.
512 
513  // Returns the index of the bound compression engine.
514  static int lookupCompressionEngine(const char *name);
515  // Given an index, gets the compression engine.
 // NOTE(review): the accessor declaration line (orig. 516) is missing
 // from this extracted view.
517 
518  /// Saves this tile's data, in compressed form.
519  /// May save in uncompressed form if the compression type does
520  /// not support saving.
521  void save(std::ostream &os) const;
522  bool save(UT_JSONWriter &w) const;
523 
524  /// Loads tile data. Uses the compression index to map the saved
525  /// compression types into the correct loading compression types.
526  void load(UT_IStream &is, const UT_IntArray &compression);
527  bool load(UT_JSONParser &p, const UT_IntArray &compression);
528 
529  /// Stores a list of compression engines to os.
530  static void saveCompressionTypes(std::ostream &os);
531  static bool saveCompressionTypes(UT_JSONWriter &w);
532 
533  /// Builds a translation table from the given stream's compression types
534  /// into our own valid compression types.
535  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
536  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
537 
538 protected:
539  // Attempts to set the value to the native compressed format
540  // Some compression types allow some values to be written
541  // without decompression. Eg, you can write to a constant tile
542  // the tile's own value without decompression.
543  // If this returns true, t has been written.
544  bool writeThrough(int x, int y, int z, T t);
545 
546  /// Sets the local res of the tile. Does *not* resize the allocated
547  /// memory.
548  void setRes(int xr, int yr, int zr)
549  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
550 
 // True when a constant value of type T fits inside the myData pointer
 // itself, so constant tiles need no heap allocation.
 // NOTE(review): the signature line (orig. 551, presumably a static bool
 // inlineConstant()) is missing from this extracted view.
552  {
553  return (sizeof(T) <= sizeof(T*));
554  }
555 
 // NOTE(review): the signature lines for the constant-value accessors
 // (orig. 556 and 559, presumably rawConstVal()/rawConstData()) are
 // missing from this extracted view; the bodies read the value either
 // from within the myData pointer (inlined) or through it.
557  { if (inlineConstant()) { return *((const T *)&myData); }
558  return *((const T*)myData); }
560  { if (inlineConstant()) { return ((T *)&myData); }
561  return ((T*)myData); }
562 
 // Adopts an externally owned buffer as this tile's data. Constant tiles
 // that can be inlined copy the value instead of keeping the pointer;
 // otherwise myForeignData marks the buffer so freeData() won't free it.
563  void setForeignData(void *data, int8 compress_type)
564  {
565  freeData();
566  myCompressionType = compress_type;
567 
568  if (isConstant() && inlineConstant())
569  {
570  makeConstant(*(T *)data);
571  }
572  else
573  {
574  myData = data;
575  myForeignData = true;
576  }
577  }
578 
579 public:
580  /// Frees myData and sets it to zero. This is a bit tricky
581  /// as the constant tiles may be inlined.
582  /// This is only public for the compression engines.
 /// NOTE(review): the signature line (orig. 583, presumably void
 /// freeData()) is missing from this extracted view.
584  {
585  if (inlineConstant() && isConstant())
586  {
587  // Do nothing!
588  }
589  else if (myData && !myForeignData)
590  {
 // NOTE(review): the deallocation line (orig. 591, presumably
 // UT_VOXEL_FREE(myData)) is missing from this extracted view.
592  }
593  myData = 0;
594  myForeignData = false;
595  }
596 
597 public:
598  // This is only public so the compression engines can get to it.
599  // It is blind data, do not alter!
600  void *myData;
601 private:
602 
603  /// Resolutions.
604  int8 myRes[3];
605 
606  /// Am I a constant tile?
607  int8 myCompressionType;
608 
 // Flag: myData is an externally owned buffer (set by setForeignData);
 // freeData() will not free it.
609  int8 myForeignData;
610 
611  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
612  {
613  return UTvoxelTileGetCompressionEngines((T *) 0);
614  }
615 
616  friend class UT_VoxelTileCompress<T>;
617  friend class UT_VoxelArray<T>;
618  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
619  friend class UT_VoxelProbe;
620 };
621 
622 ///
623 /// UT_VoxelArray
624 ///
625 /// This provides a data structure to hold a three dimensional array
626 /// of data. The data should be some simple arithmetic type, such
627 /// as uint8, fpreal16, or UT_Vector3.
628 ///
629 /// Some operations, such as gradients, may make less sense with uint8.
630 ///
631 template <typename T>
632 class UT_VoxelArray
633 {
634 public:
635  using ScalarType = T;
636 
637  UT_VoxelArray();
638  ~UT_VoxelArray();
639 
640  /// Copy constructor:
642 
643  /// Assignment operator:
645 
646  /// This sets the voxelarray to have the given resolution. If resolution is
647  /// changed, all elements will be set to 0. If resolution is already equal
648  /// to the arguments, all elements will be set to 0 only if reset is true;
649  /// otherwise, the voxel array will be left untouched.
650  void size(int xres, int yres, int zres, bool reset = true);
651 
652  /// This will ensure this voxel array matches the given voxel array
653  /// in terms of dimensions & border conditions. It may invoke
654  /// a size() and hence reset the field to 0.
655  void match(const UT_VoxelArray<T> &src);
656 
657  template <typename S>
658  bool isMatching(const UT_VoxelArray<S> &src) const
659  {
660  return src.getXRes() == getXRes() &&
661  src.getYRes() == getYRes() &&
662  src.getZRes() == getZRes();
663  }
664 
665  int getXRes() const { return myRes[0]; }
666  int getYRes() const { return myRes[1]; }
667  int getZRes() const { return myRes[2]; }
668  int getRes(int axis) const { return myRes[axis]; }
669 
671  {
672  return UT_Vector3I(myRes[0], myRes[1], myRes[2]);
673 
674  }
675 
676  /// Return the amount of memory used by this array.
677  int64 getMemoryUsage(bool inclusive) const;
678 
679  /// Sets this voxel array to the given constant value. All tiles
680  /// are turned into constant tiles.
682  constant,
683  T, t)
684  void constantPartial(T t, const UT_JobInfo &info);
685 
686  /// If this voxel array is all constant tiles, returns true.
687  /// The optional pointer is initialized to the constant value iff
688  /// the array is constant. (Note by constant we mean made of constant
689  /// tiles of the same value - if some tiles are uncompressed but
690  /// constant, it will still return false)
691  bool isConstant(T *cval = 0) const;
692 
693  /// Returns true if any element of the voxel array is NAN
694  bool hasNan() const;
695 
696  /// This convience function lets you sample the voxel array.
697  /// pos is in the range [0..1]^3.
698  /// T value trilinearly interpolated. Edges are determined by the border
699  /// mode.
700  /// The cells are sampled at the center of the voxels.
701  T operator()(UT_Vector3D pos) const;
702  T operator()(UT_Vector3F pos) const;
703 
704  /// This convience function lets you sample the voxel array.
705  /// pos is in the range [0..1]^3.
706  /// The min/max is the range of the sampled values.
707  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
708  UT_Vector3F pos) const;
709 
710  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
711  /// Allows out of range evaluation
713  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
714  /// Allows out of range evaluation
715  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
716  float fx, float fy, float fz) const;
717  template <int AXIS2D>
719  template <int AXIS2D>
720  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
721  float fx, float fy, float fz) const;
722 
723  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
724  /// Allows out of range evaluation. Also computes min/max of
725  /// interpolated samples.
726  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
727  UT_Vector3F pos) const;
728  template <int AXIS2D>
729  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
730  UT_Vector3F pos) const;
731  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
732  /// Allows out of range evaluation. Also computes min/max of
733  /// interpolated samples.
735  T &lerp, T &lmin, T &lmax,
736  int x, int y, int z,
737  float fx, float fy, float fz) const;
738  template <int AXIS2D>
740  T &lerp, T &lmin, T &lmax,
741  int x, int y, int z,
742  float fx, float fy, float fz) const;
743 
744  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
745  /// array should have 8 elements, x minor, z major.
746  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
747  T *sample) const;
748  template <int AXIS2D>
749  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
750  T *sample) const;
751 
752  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
753  /// the center into 7 voxels.
754  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
755  T *sample) const;
756 #if 0
757  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
758  /// z major, xminor.
759  /// Previous implementation had an error and this method isn't used in
760  /// Houdini code.
761  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
762  T *sample) const;
763 #endif
764 
765  /// Lerps the given sample using trilinear interpolation
767  float fx, float fy, float fz) const;
768  template <int AXIS2D>
770  float fx, float fy, float fz) const;
771 
772  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
773  float &fx, float &fy, float &fz) const
774  {
775  // Determine integer & fractional components.
776  fx = pos.x();
777  SYSfastSplitFloat(fx, x);
778  fy = pos.y();
779  SYSfastSplitFloat(fy, y);
780  fz = pos.z();
781  SYSfastSplitFloat(fz, z);
782  }
783  template <int AXIS2D>
784  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
785  float &fx, float &fy, float &fz) const
786  {
787  // Determine integer & fractional components.
788  if (AXIS2D != 0)
789  {
790  fx = pos.x();
791  SYSfastSplitFloat(fx, x);
792  }
793  else
794  {
795  fx = 0.0;
796  x = 0;
797  }
798  if (AXIS2D != 1)
799  {
800  fy = pos.y();
801  SYSfastSplitFloat(fy, y);
802  }
803  else
804  {
805  fy = 0.0;
806  y = 0;
807  }
808  if (AXIS2D != 2)
809  {
810  fz = pos.z();
811  SYSfastSplitFloat(fz, z);
812  }
813  else
814  {
815  fz = 0.0;
816  z = 0;
817  }
818  }
819 #if 0
820  T operator()(v4uf pos) const;
821 #endif
822 
823  /// Filtered evaluation of the voxel array. This operation should
824  /// exhibit the same behavior as IMG3D_Channel::evaluate.
825  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
826  fpreal radius, int clampaxis = -1) const;
827 
828  /// Fills this by resampling the given voxel array.
829  void resample(const UT_VoxelArray<T> &src,
830  UT_FilterType filtertype = UT_FILTER_POINT,
831  float filterwidthscale = 1.0f,
832  int clampaxis = -1);
833 
834 
835  /// Calls [](UT_VoxelTileIterator<T> &vit) -> void
836  /// in parallel for each tile.
837  template <typename OP>
838  void forEachTile(const OP &op, bool shouldthread = true);
839 
840  /// Calls [](UT_VoxelTileIterator<T> &vit) -> void
841  /// in parallel for each tile. Since TileIterator don't understand
842  /// const correctness, it is important you do not use setValue
843  /// in the op.
844  template <typename OP>
845  void forEachTileConst(const OP &op, bool shouldthread = true) const
846  {
847  SYSconst_cast(this)->forEachTile(op, shouldthread);
848  }
849 
850  /// Flattens this into an array. Z major, then Y, then X.
851  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
853  flatten,
854  T *, flatarray,
855  exint, ystride,
856  exint, zstride)
857  void flattenPartial(T *flatarray, exint ystride, exint zstride,
858  const UT_JobInfo &info) const;
859 
860  /// Flattens this into an array. Z major, then Y, then X.
861  /// Flattens a 2d slice where AXIS2D is constant.
862  /// If AXIS2D == 2 (ie, z): flatarray[x + y * ystride] = getValue(x, y, 0);
863  /// Flattens by destination x-major stripes to avoid page collisions
864  /// on freshly allocated memory buffers.
865  template <int AXIS2D>
866  void flattenPartialAxis(T *flatarray, exint ystride,
867  const UT_JobInfo &info) const;
868 
869  /// Flattens this into an array suitable for a GL 8bit texture.
870  /// Z major, then Y, then X.
871  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
873  flattenGLFixed8,
874  uint8 *, flatarray,
875  exint, ystride,
876  exint, zstride,
877  T , dummy)
878  void flattenGLFixed8Partial(uint8 *flatarray,
879  exint ystride, exint zstride,
880  T dummy,
881  const UT_JobInfo &info) const;
882 
883  /// Flattens this into an array suitable for a GL 16bit FP texture.
884  /// Z major, then Y, then X.
885  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
886  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
887  flattenGL16F,
888  UT_Vector4H *, flatarray,
889  exint, ystride,
890  exint, zstride,
891  T , dummy)
892  void flattenGL16FPartial(UT_Vector4H *flatarray,
893  exint ystride, exint zstride,
894  T dummy,
895  const UT_JobInfo &info) const;
896 
897  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
898  /// this also works around an older Nvidia driver bug that caused very small
899  /// valued texels (<1e-9) to appear as huge random values in the texture.
900  /// Z major, then Y, then X.
901  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
902  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
903  flattenGL32F,
904  UT_Vector4F *, flatarray,
905  exint, ystride,
906  exint, zstride,
907  T , dummy)
908  void flattenGL32FPartial(UT_Vector4F *flatarray,
909  exint ystride, exint zstride,
910  T dummy,
911  const UT_JobInfo &info) const;
912 
913  /// Fills this from a flattened array. Z major, then Y, then X.
914  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride];
915  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
916  extractFromFlattened,
917  const T *, flatarray,
918  exint, ystride,
919  exint, zstride)
920  void extractFromFlattenedPartial(const T *flatarray,
921  exint ystride, exint zstride,
922  const UT_JobInfo &info);
923 
924  /// Copies into this voxel array from the source array.
925  /// Conceptually,
926  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz);
927  void copyWithOffset(const UT_VoxelArray<T> &src,
928  int offx, int offy, int offz);
929  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
930  copyWithOffsetInternal,
931  const UT_VoxelArray<T> &, src,
932  int, offx,
933  int, offy,
934  int, offz)
935  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
936  int offx, int offy, int offz,
937  const UT_JobInfo &info);
938 
939  /// Moves data from the source voxel array into this array. The offsets should
940  /// be in terms of tiles. Source may be modified as this array steals its data
941  /// buffers in such a way that no dynamic memory will leak when these arrays
942  /// are freed.
943  /// Conceptually, this function performs the same operation as copyWithOffset,
944  /// but with offsets specified in terms of tiles:
945  /// this->setValue(x, y, z, src.getValue(x+off_v_x, y+off_v_y, z+off_v_z)
946  /// where off_v_A=tileoffA*TILESIZE for A in {x, y, z}.
947  void moveTilesWithOffset(UT_VoxelArray<T> &src, int tileoffx, int tileoffy,
948  int tileoffz);
949 
950  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
951  /// in T. Data order is in tile-order. So, sorted by tilelist, then
952  /// z, y, x within that tile.
953  /// The ix/iy/iz variant allows partial tiles. If the number of
954  /// voxels to write to a tile matches the tile size, however, the
955  /// ix/iy/iz is ignored and the tile is written in canonical order.
956  template <typename S>
957  S *extractTiles(S *dstdata, int stride,
958  const UT_IntArray &tilelist) const;
959  template <typename S, typename IDX>
960  S *extractTiles(S *dstdata, int stride,
961  const IDX *ix, const IDX *iy, const IDX *iz,
962  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const;
963 
964  /// Fills dstdata with the voxel data of the slice with the coordinate at
965  /// component SLICE_AXIS fixed at slice. Returns nullptr if slice is outside
966  /// the domain.
967  /// If half_slice is true, the extracted values lie halfway between slice
968  /// and slice+1.
969  template <int SLICE_AXIS, typename S>
970  S *extractSlice(S *dstdata, int slice, bool half_slice) const;
971 
972  /// Overwrites our tiles with the given data. Does checking
973  /// for constant tiles. Input srcdata stream should match
974  /// that of extractTiles.
975  template <typename S>
976  const S *writeTiles(const S *srcdata, int srcstride,
977  const UT_IntArray &tilelist);
978  template <typename S, typename IDX>
979  const S *writeTiles(const S *srcdata, int srcstride,
980  const IDX *ix, const IDX *iy, const IDX *iz,
981  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist);
982 
983  /// Converts a 3d position in range [0..1]^3 into the closest
984  /// index value.
985  /// Returns false if the resulting index was out of range. The index
986  /// will still be set.
987  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
988  bool posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const;
989  /// Convertes a 3d position in [0..1]^3 into the equivalent in
990  /// the integer cell space. Does not clamp to the closest value.
991  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
992  bool posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const;
993  /// Converts an index into a position.
994  /// Returns false if the source index was out of range, in which case
995  /// pos will be outside [0..1]^3
996  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
997  bool indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const;
998  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
999  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
1000 
1001  /// Clamps the given x, y, and z values to lie inside the valid index
1002  /// range.
1003  void clampIndex(int &x, int &y, int &z) const
1004  {
1005  x = SYSclamp(x, 0, myRes[0]-1);
1006  y = SYSclamp(y, 0, myRes[1]-1);
1007  z = SYSclamp(z, 0, myRes[2]-1);
1008  }
1009 
/// Returns true if the given x, y, z values lie inside the valid index.
bool isValidIndex(int x, int y, int z) const
{
    // Branch-free range test:
    //   (x | y | z) < 0            -- sign bit set iff any coordinate
    //                                 is negative;
    //   (x - myRes[0]) & ... < 0   -- each difference is negative iff
    //                                 the coordinate is below its
    //                                 resolution; AND-ing the three
    //                                 keeps the sign bit only when all
    //                                 are in range.
    // Assumes resolutions are small enough that the subtraction
    // cannot overflow an int.
    return !((x | y | z) < 0) &&
            (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
}
1016 
1017  /// This allows you to read & write the raw data.
1018  /// Out of bound reads are illegal.
1020  {
1021  return (*this)(index[0], index[1], index[2]);
1022  }
1023  T operator()(int x, int y, int z) const
1024  {
1025  UT_ASSERT_P(isValidIndex(x, y, z));
1026  return (*getTile(x >> TILEBITS,
1027  y >> TILEBITS,
1028  z >> TILEBITS))
1029  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
1030  }
1031 
1033  {
1034  setValue(index[0], index[1], index[2], value);
1035  }
1036 
1037  void setValue(int x, int y, int z, T t)
1038  {
1039  UT_ASSERT_P(isValidIndex(x, y, z));
1040  getTile(x >> TILEBITS,
1041  y >> TILEBITS,
1042  z >> TILEBITS)->setValue(
1043  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
1044  }
1045 
/// Mirrors the coordinate for the given resolution. This is effectively
/// like using one reflection then repeating that with twice the resolution.
static inline int mirrorCoordinates(int x, int res)
{
    // The mirrored pattern repeats with period 2*res.
    const int period = 2 * res;

    // Reduce into [0, period), compensating for C++'s truncating
    // modulo which can yield negatives.
    int m = x % period;
    if (m < 0)
        m += period;

    // First half of the period maps through directly; the second
    // half reflects back toward zero.
    return (m < res) ? m : period - 1 - m;
}
1058 
/// This will clamp the bounds to fit within the voxel array,
/// using the border type to resolve out of range values.
T getValue(int x, int y, int z) const
{
    // First handle the most common case.
    if (isValidIndex(x, y, z))
        return (*this)(x, y, z);

    // Verify our voxel array is non-empty.
    if (!myTiles)
        return myBorderValue;

    // We now know we are out of range, adjust appropriately
    switch (myBorderType)
    {
        // Constant border: any out-of-range read yields the fixed
        // border value.  NOTE(review): the UT_VOXELBORDER_CONSTANT
        // case label appears to have been lost in extraction above
        // this return -- confirm against the original header.
        return myBorderValue;

    case UT_VOXELBORDER_REPEAT:
        // Periodic tiling: reduce each coordinate with a true
        // mathematical modulo (C++ '%' may return negatives).
        if (x < 0 || x >= myRes[0])
        {
            x %= myRes[0];
            if (x < 0)
                x += myRes[0];
        }
        if (y < 0 || y >= myRes[1])
        {
            y %= myRes[1];
            if (y < 0)
                y += myRes[1];
        }
        if (z < 0 || z >= myRes[2])
        {
            z %= myRes[2];
            if (z < 0)
                z += myRes[2];
        }
        break;

    case UT_VOXELBORDER_MIRROR:
        if (x < 0 || x >= myRes[0])
            x = mirrorCoordinates(x, myRes[0]);
        if (y < 0 || y >= myRes[1])
            y = mirrorCoordinates(y, myRes[1]);
        if (z < 0 || z >= myRes[2])
            z = mirrorCoordinates(z, myRes[2]);

        // Falls through to the STREAK clamp below.  mirrorCoordinates
        // already returns values in [0, res), so the clamp is a no-op
        // safety net here -- the fall-through appears intentional.
    case UT_VOXELBORDER_STREAK:
        // Streak (clamp-to-edge): snap to the nearest boundary voxel.
        clampIndex(x, y, z);
        break;
    case UT_VOXELBORDER_EXTRAP:
    {
        // Linear extrapolation: read the nearest boundary voxel and
        // extend it by the per-axis border scales times the distance
        // outside the array.
        int cx, cy, cz;
        T result;

        cx = x; cy = y; cz = z;
        clampIndex(cx, cy, cz);

        result = (*this)(cx, cy, cz);
        result += (x - cx) * myBorderScale[0] +
                  (y - cy) * myBorderScale[1] +
                  (z - cz) * myBorderScale[2];
        return result;
    }
    }

    // It is now within bounds, do normal fetch.
    return (*this)(x, y, z);
}
1128 
/// Gets values in the box [bbox.minvec(), bbox.maxvec())
/// Values are stored in the array `values` of size `size` that has to be at least `bbox.volume()`
/// The order of values is give by: `i + bbox.xsize() * (j + bbox.ysize() * k)`
///
/// If returns true, values in `bbox` are constant and only values[0] is guaranteed to be assigned.
bool getValues(const UT_BoundingBoxI &bbox,
        T * values,
        const exint size) const
{
    UT_ASSERT_P(bbox.volume() <= size);

    // The valid voxel index range of this array.
    const UT_BoundingBoxI bounds = {0, 0, 0, getXRes(), getYRes(), getZRes()};

    // Half-open range of tiles that may intersect bbox.  The max
    // corners use ((max-1) >> TILEBITS) + 1 so a bbox max that falls
    // exactly on a tile boundary does not drag in an extra tile.
    const UT_BoundingBoxI tiles =
        {bbox.xmin() >> TILEBITS,
        bbox.ymin() >> TILEBITS,
        bbox.zmin() >> TILEBITS,
        ((bbox.xmax() - 1) >> TILEBITS) + 1,
        ((bbox.ymax() - 1) >> TILEBITS) + 1,
        ((bbox.zmax() - 1) >> TILEBITS) + 1};

    // Assume constant until we observe two differing values.
    bool allconstant = true;

    // The voxel range covered by the current tile, clipped against
    // bbox on the boundary tiles of each axis.
    UT_BoundingBoxI tilesamples;

    for (int kt = tiles.zmin(); kt < tiles.zmax(); kt++)
    {
        // zmin & zmax
        tilesamples.vals[2][0] = TILESIZE * kt;
        tilesamples.vals[2][1] = TILESIZE * (kt + 1);
        // clip bounds
        if (kt == tiles.zmin())
            tilesamples.vals[2][0] = bbox.zmin();
        if (kt == tiles.zmax() - 1)
            tilesamples.vals[2][1] = bbox.zmax();

        for (int jt = tiles.ymin(); jt < tiles.ymax(); jt++)
        {
            // ymin & ymax
            tilesamples.vals[1][0] = TILESIZE * jt;
            tilesamples.vals[1][1] = TILESIZE * (jt + 1);
            // clip bounds
            if (jt == tiles.ymin())
                tilesamples.vals[1][0] = bbox.ymin();
            if (jt == tiles.ymax() - 1)
                tilesamples.vals[1][1] = bbox.ymax();

            for (int it = tiles.xmin(); it < tiles.xmax(); it++)
            {
                // xmin & xmax
                tilesamples.vals[0][0] = TILESIZE * it;
                tilesamples.vals[0][1] = TILESIZE * (it + 1);
                // clip bounds
                if (it == tiles.xmin())
                    tilesamples.vals[0][0] = bbox.xmin();
                if (it == tiles.xmax() - 1)
                    tilesamples.vals[0][1] = bbox.xmax();

                // Fast path only applies when every sample of this
                // tile's clipped range lies inside the array, so no
                // border resolution is needed.
                const bool inbounds = tilesamples.isInside(bounds);

                if (inbounds)
                {
                    // Read straight out of a single tile.
                    const UT_VoxelTile<T> *tile = getTile(it, jt, kt);

                    for (int k = tilesamples.zmin();
                            k < tilesamples.zmax(); k++)
                    {
                        for (int j = tilesamples.ymin();
                                j < tilesamples.ymax(); j++)
                        {
                            for (int i = tilesamples.xmin();
                                    i < tilesamples.xmax(); i++)
                            {
                                // Offset of this voxel inside bbox...
                                const UT_Vector3I localindex = {
                                    i - bbox.xmin(),
                                    j - bbox.ymin(),
                                    k - bbox.zmin()};

                                // ...flattened into the caller's
                                // x-fastest output layout.
                                const int locallinindex
                                    = localindex.x()
                                    + bbox.xsize() * (localindex.y()
                                    + bbox.ysize() * localindex.z());

                                values[locallinindex] = (*tile)(
                                    i & TILEMASK,
                                    j & TILEMASK,
                                    k & TILEMASK);

                                // Any mismatch against the first
                                // sample ends the constant claim.
                                if (allconstant
                                    && (values[0] != values[locallinindex]))
                                {
                                    allconstant = false;
                                }
                            }
                        }
                    }
                }
                else
                {
                    // Slow path: getValue() resolves out-of-range
                    // coordinates via the configured border type.
                    for (int k = tilesamples.zmin(); k < tilesamples.zmax(); k++)
                    {
                        for (int j = tilesamples.ymin();
                                j < tilesamples.ymax(); j++)
                        {
                            for (int i = tilesamples.xmin();
                                    i < tilesamples.xmax(); i++)
                            {
                                const UT_Vector3I localindex = {
                                    i - bbox.xmin(),
                                    j - bbox.ymin(),
                                    k - bbox.zmin()};

                                const int locallinindex
                                    = localindex.x()
                                    + bbox.xsize() * (localindex.y()
                                    + bbox.ysize() * localindex.z());

                                values[locallinindex] = getValue(i, j, k);

                                if (allconstant
                                    && (values[0] != values[locallinindex]))
                                {
                                    allconstant = false;
                                }
                            }
                        }
                    }

                }
            }
        }
    }

    return allconstant;
}
1264 
1266  void setBorderScale(T scalex, T scaley, T scalez);
/// Query the out-of-bounds read policy (see getValue()).
UT_VoxelBorderType getBorder() const { return myBorderType; }
/// Value returned for out-of-range reads under a constant border.
T getBorderValue() const { return myBorderValue; }
/// Per-axis slope applied by the extrapolating border type.
T getBorderScale(int axis) const { return myBorderScale[axis]; }
1270 
1271  /// This tries to compress or collapse each tile. This can
1272  /// be expensive (ie, converting a tile to constant), so
1273  /// should be saved until modifications are complete.
1275  collapseAllTiles)
1276  void collapseAllTilesPartial(const UT_JobInfo &info);
1277 
1278  /// Uncompresses all tiles into non-constant tiles. Useful
1279  /// if you have a multithreaded algorithm that may need to
1280  /// both read and write, if you write to a collapsed tile
1281  /// while someone else reads from it, bad stuff happens.
1282  /// Instead, you can expandAllTiles. This may have serious
1283  /// consequences in memory use, however.
1285  expandAllTiles)
1286  void expandAllTilesPartial(const UT_JobInfo &info);
1287 
1288  /// Uncompresses all tiles, but leaves constant tiles alone.
1289  /// Useful for cleaning out any non-standard compression algorithm
1290  /// that some external program can't handle.
1291  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1292  expandAllNonConstTiles)
1293  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1294 
/// The direct tile access methods are to make TBF writing a bit
/// more efficient.
/// Returns the tile at the given *tile* coordinates (not voxel
/// coordinates).  No bounds checking is performed.
UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
    { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1300  { return &myTiles[idx]; }
1301  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1302  {
1303  x = idx % myTileRes[0];
1304  idx -= x;
1305  idx /= myTileRes[0];
1306  y = idx % myTileRes[1];
1307  idx -= y;
1308  idx /= myTileRes[1];
1309  z = idx;
1310  }
1312  {
1313  UT_Vector3I tileindex;
1314  tileindex[0] = idx % myTileRes[0];
1315  idx -= tileindex[0];
1316  idx /= myTileRes[0];
1317  tileindex[1] = idx % myTileRes[1];
1318  idx -= tileindex[1];
1319  idx /= myTileRes[1];
1320  tileindex[2] = idx;
1321 
1322  return tileindex;
1323  }
1324 
/// Converts a tile address into the linear index into myTiles;
/// x varies fastest.
int xyzTileToLinear(int x, int y, int z) const
    { return (z * myTileRes[1] + y) * myTileRes[0] + x; }

/// Converts *voxel* coordinates into the linear index of the tile
/// containing them (shifts voxel coords down to tile coords first).
int indexToLinearTile(int x, int y, int z) const
    { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1330 
1331  /// idxth tile represents the voxels indexed [start,end).
1332  void getTileVoxels(int idx,
1333  UT_Vector3I &start, UT_Vector3I &end) const
1334  {
1335  int x, y, z;
1336  linearTileToXYZ(idx, x, y, z);
1337 
1338  start.x() = x * TILESIZE;
1339  start.y() = y * TILESIZE;
1340  start.z() = z * TILESIZE;
1341  end = start;
1342  end.x() += myTiles[idx].xres();
1343  end.y() += myTiles[idx].yres();
1344  end.z() += myTiles[idx].zres();
1345  }
1346 
1348  {
1350  getTileVoxels(idx, start, end);
1351  return UT_BoundingBoxI(start, end);
1352  }
1353 
/// Number of tiles along that axis. Not to be confused with
/// the resolution of the individual tiles.
int getTileRes(int dim) const { return myTileRes[dim]; }
/// Total number of tiles across all three axes.
int numTiles() const
    { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
/// Total voxel count; computed in exint since the product can
/// exceed 32 bits.
exint numVoxels() const
    { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1361 
1363  { myCompressionOptions = options; }
1365  { return myCompressionOptions; }
1366 
1368  { myCompressionOptions.myConstantTol = tol; }
1370  { return myCompressionOptions.myConstantTol; }
1371 
1372  /// Saves only the data of this array to the given stream.
1373  /// To reload it you will have to have a matching array in tiles
1374  /// dimensions and size.
1375  void saveData(std::ostream &os) const;
1376  bool saveData(UT_JSONWriter &w,
1377  const char *shared_mem_owner = 0) const;
1378 
1379  /// Load an array, requires you have already size()d this array.
1380  void loadData(UT_IStream &is);
1381  bool loadData(UT_JSONParser &p);
1382 
1383  /// Copy only the data from the source array.
1384  /// Note that it is an error to call this unless isMatching(src).
1386  copyData,
1387  const UT_VoxelArray<T> &, src)
1388 
1389  void copyDataPartial(const UT_VoxelArray<T> &src,
1390  const UT_JobInfo &info);
1391 
1392 private:
1394  resamplethread,
1395  const UT_VoxelArray<T> &, src,
1396  const UT_Filter *, filter,
1397  float, radius,
1398  int, clampaxis)
1399  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1400  const UT_Filter *filter,
1401  float radius,
1402  int clampaxis,
1403  const UT_JobInfo &info);
1404 
1405 
1406  void deleteVoxels();
1407 
1408  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1409  bool populateFromSharedMemory(const char *id);
1410 
1411 
1412  /// Number of elements in each dimension.
1413  int myRes[3];
1414 
1415  /// Inverse tile res, 1/myRes
1416  UT_Vector3 myInvRes;
1417 
1418  /// Number of tiles in each dimension.
1419  int myTileRes[3];
1420 
1421  /// Compression tolerance for lossy compression.
1422  UT_VoxelCompressOptions myCompressionOptions;
1423 
1424  /// Double dereferenced so we can theoretically resize easily.
1425  UT_VoxelTile<T> *myTiles;
1426 
1427  /// Outside values get this if constant borders are used
1428  T myBorderValue;
1429  /// Per axis scale factors for when extrapolating.
1430  T myBorderScale[3];
1431  UT_VoxelBorderType myBorderType;
1432 
1433  /// For initializing the tiles from shared memory.
1434  SYS_SharedMemory *mySharedMem;
1435  SYS_SharedMemoryView *mySharedMemView;
1436 };
1437 
1438 
1439 ///
1440 /// UT_VoxelMipMap
1441 ///
1442 /// This provides a mip-map type structure for a voxel array.
1443 /// It manages the different levels of voxels arrays that are needed.
1444 /// You can create different types of mip maps: average, maximum, etc,
1445 /// which can allow different tricks.
1446 /// Each level is one half the previous level, rounded up.
1447 /// Out of bound voxels are ignored from the lower levels.
1448 ///
1449 template <typename T>
1451 {
1452 public:
1453  /// The different types of functions that can be used for
1454  /// constructing a mip map.
1455  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1456 
1457  UT_VoxelMipMap();
1458  ~UT_VoxelMipMap();
1459 
1460  /// Copy constructor.
1461  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1462 
1463  /// Assignment operator:
1464  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1465 
1466  /// Builds from a given voxel array. The ownership flag determines
1467  /// if we gain ownership of the voxel array and should delete it.
1468  /// In any case, the new levels are owned by us.
1469  void build(UT_VoxelArray<T> *baselevel,
1470  mipmaptype function);
1471 
1472  /// Same as above but construct mipmaps simultaneously for more than
1473  /// one function. The order of the functions will correspond to the
1474  /// order of the data values passed to the traversal callback.
1475  void build(UT_VoxelArray<T> *baselevel,
1476  const UT_Array<mipmaptype> &functions);
1477 
1478  /// This does a top down traversal of the implicit octree defined
1479  /// by the voxel array. Returning false will abort that
1480  /// branch of the octree.
1481  /// The bounding box given is in cell space and is an exclusive
1482  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1483  /// Note that each bounding box will not be square, unless you
1484  /// have the good fortune of starting with a power of 2 cube.
1485  /// The boolean goes true when the the callback is invoked on a
1486  /// base level.
1487  typedef bool (*Callback)(const T *funcs,
1488  const UT_BoundingBox &box,
1489  bool baselevel, void *data);
1490  void traverseTopDown(Callback function,
1491  void *data) const;
1492 
1493  /// Top down traversal on op. OP is invoked with
1494  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1495  ///
1496  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1497  /// level 0 means the base level.
1498  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1499  /// gives the index to extract the value from level..
1500  template <typename OP>
1501  void traverseTopDown(OP&op) const;
1502 
1503 
1504  /// Top down traversal, but which quad tree is visited first
1505  /// is controlled by
1506  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1507  /// Lower values are visited first.
1508  template <typename OP>
1509  void traverseTopDownSorted(OP&op) const;
1510 
1511 
1512  /// Return the amount of memory used by this mipmap.
1513  int64 getMemoryUsage(bool inclusive) const;
1514 
1515  int numLevels() const { return myNumLevels+1; }
1516 
/// level 0 is the original grid, each level higher is a power
/// of two smaller.
const UT_VoxelArray<T> *level(int level, int function) const
{
    // Level 0 is the externally supplied base array itself, not one
    // of the internally built downsampled arrays.
    if (level == 0)
        return myBaseLevel;

    // Built levels are stored coarsest-first (per the myLevels member
    // docs, index 0 is the 1x1x1 top), so convert "levels above base"
    // into an index from the coarse end.
    return myLevels(function)[numLevels() - 1 - level];
}
1526 
1527 private:
1528  void doTraverse(int x, int y, int z, int level,
1529  Callback function,
1530  void *data) const;
1531 
1532  /// Note: This variant of doTraverse has the opposite sense of level!
1533  template <typename OP>
1534  void doTraverse(int x, int y, int z, int level,
1535  OP &op) const;
1536  template <typename OP>
1537  void doTraverseSorted(int x, int y, int z, int level,
1538  OP &op) const;
1539 
1540  void initializePrivate();
1541  void destroyPrivate();
1542 
1543  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1544  downsample,
1545  UT_VoxelArray<T> &, dst,
1546  const UT_VoxelArray<T> &, src,
1547  mipmaptype, function)
1548  void downsamplePartial(UT_VoxelArray<T> &dst,
1549  const UT_VoxelArray<T> &src,
1550  mipmaptype function,
1551  const UT_JobInfo &info);
1552 
1553 protected:
1554  T mixValues(T t1, T t2, mipmaptype function) const
1555  {
1556  switch (function)
1557  {
1558  case MIPMAP_MAXIMUM:
1559  return SYSmax(t1, t2);
1560 
1561  case MIPMAP_AVERAGE:
1562  return (t1 + t2) / 2;
1563 
1564  case MIPMAP_MINIMUM:
1565  return SYSmin(t1, t2);
1566  }
1567 
1568  return t1;
1569  }
1570 
1571 
1572  /// This stores the base most level that was provided
1573  /// externally.
1574  UT_VoxelArray<T> *myBaseLevel;
1575  /// If true, we will delete the base level when we are done.
1577 
1578  /// Tracks the number of levels which we used to represent
1579  /// this hierarchy.
1581  /// The array of VoxelArrays, one per level.
1582  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1583  /// as big in each each dimension. However, every layer is clamped
1584  /// against the resolution of the base layer.
1585  /// We own all these layers.
1587 };
1588 
1589 
1590 /// Iterator for Voxel Arrays
1591 ///
1592 /// This class eliminates the need for having
1593 /// for (z = 0; z < zres; z++)
1594 /// ...
1595 /// for (x = 0; x < xres; x++)
1596 /// loops everywhere.
1597 ///
1598 /// Note that the order of iteration is undefined! (The actual order is
1599 /// to complete each tile in turn, thereby hopefully improving cache
1600 /// coherency)
1601 ///
1602 /// It is safe to write to the voxel array while this iterator is active.
1603 /// It is *not* safe to resize the voxel array (or destroy it)
1604 ///
1605 /// The iterator is similar in principal to an STL iterator, but somewhat
1606 /// simpler. The classic STL loop
1607 /// for ( it = begin(); it != end(); ++it )
1608 /// is done using
1609 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1610 ///
1611 template <typename T>
1613 {
1614 public:
1619 
1621  {
1622  myCurTile = -1;
1623  myHandle.resetHandle();
1624  myArray = vox;
1625  // Reset the range
1626  setPartialRange(0, 1);
1627  }
1629  {
1630  setArray((UT_VoxelArray<T> *) vox);
1631  }
1632 
1633  /// Iterates over the array pointed to by the handle. Only
1634  /// supports read access during the iteration as it does
1635  /// a read lock.
1637  {
1638  myHandle = handle;
1639  // Ideally we'd have a separate const iterator
1640  // from our non-const iterator so this would
1641  // only be exposed in the const version.
1642  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1643 
1644  // Reset our range.
1645  myCurTile = -1;
1646  setPartialRange(0, 1);
1647  }
1648 
1649 
1650  /// Restricts this iterator to only run over a subset
1651  /// of the tiles. The tiles will be divided into approximately
1652  /// numrange equal groups, this will be the idx'th.
1653  /// The resulting iterator may have zero tiles.
1654  void setPartialRange(int idx, int numranges);
1655 
1656  /// Ties this iterator to the given jobinfo so it will
1657  /// match the jobinfo's processing.
1658  void splitByTile(const UT_JobInfo &info);
1659 
1660  /// Sets this iterator to run over the tile specified by the referenced
1661  /// iterator.
1662  /// This assumes the underlying arrays are matching.
1663  template <typename S>
1665  UT_VoxelArray<T> *array)
1666  {
1667  UT_ASSERT_P(vit.isStartOfTile());
1668  UT_ASSERT_P(getArray()->isMatching(*vit.getArray()));
1669  UT_ASSERT_P(!myJobInfo && !myUseTileList);
1670  myTileStart = vit.getLinearTileNum();
1671  myTileEnd = myTileStart+1;
1672  rewind();
1673  }
1674 
1676  {
1677  setTile(vit, vit.getArray());
1678  }
1679 
1680  /// Assigns an interrupt handler. This will be tested whenever
1681  /// it advances to a new tile. If it is interrupted, the iterator
1682  /// will jump forward to atEnd()
void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
/// Same as setInterrupt(), but grabs the current global UT
/// interrupt object as the handler.
void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1685 
1686  /// Restricts this iterator to the tiles that intersect
1687  /// the given bounding box of voxel coordinates.
1688  /// Note that this will not be a precise restriction as
1689  /// each tile is either included or not.
1690  /// You should setPartialRange() after setting the bbox range
1691  /// The bounding box is on the [0..1]^3 range.
1692  void restrictToBBox(const UT_BoundingBox &bbox);
1693  /// The [xmin, xmax] are inclusive and measured in voxels.
1694  void restrictToBBox(int xmin, int xmax,
1695  int ymin, int ymax,
1696  int zmin, int zmax);
1697 
1698  /// Resets the iterator to point to the first voxel.
1699  void rewind();
1700 
1701  /// Returns true if we have iterated over all of the voxels.
1702  bool atEnd() const
1703  { return myCurTile < 0; }
1704 
1705  /// Advances the iterator to point to the next voxel.
1706  void advance()
1707  {
1708  // We try to advance each axis, rolling over to the next.
1709  // If we exhaust this tile, we call advanceTile.
1710  myPos[0]++;
1711  myTileLocalPos[0]++;
1712  if (myTileLocalPos[0] >= myTileSize[0])
1713  {
1714  // Wrapped in X.
1715  myPos[0] -= myTileLocalPos[0];
1716  myTileLocalPos[0] = 0;
1717 
1718  myPos[1]++;
1719  myTileLocalPos[1]++;
1720  if (myTileLocalPos[1] >= myTileSize[1])
1721  {
1722  // Wrapped in Y.
1723  myPos[1] -= myTileLocalPos[1];
1724  myTileLocalPos[1] = 0;
1725 
1726  myPos[2]++;
1727  myTileLocalPos[2]++;
1728  if (myTileLocalPos[2] >= myTileSize[2])
1729  {
1730  // Wrapped in Z! Finished this tile!
1731  advanceTile();
1732  }
1733  }
1734  }
1735  }
1736 
1737  /// Retrieve the current location of the iterator.
1738  int x() const { return myPos[0]; }
1739  int y() const { return myPos[1]; }
1740  int z() const { return myPos[2]; }
1741  int idx(int idx) const { return myPos[idx]; }
1742 
1743  /// Retrieves the value that we are currently pointing at.
1744  /// This is faster than an operator(x,y,z) as we already know
1745  /// our current tile and that bounds checking isn't needed.
1746  T getValue() const
1747  {
1748  UT_ASSERT_P(myCurTile >= 0);
1749 
1750  UT_VoxelTile<T> *tile;
1751 
1752  tile = myArray->getLinearTile(myCurTile);
1753  return (*tile)(myTileLocalPos[0],
1754  myTileLocalPos[1],
1755  myTileLocalPos[2]);
1756  }
1757 
1758  /// Sets the voxel we are currently pointing to the given value.
1759  void setValue(T t) const
1760  {
1761  UT_ASSERT_P(myCurTile >= 0);
1762 
1763  UT_VoxelTile<T> *tile;
1764 
1765  tile = myArray->getLinearTile(myCurTile);
1766 
1767  tile->setValue(myTileLocalPos[0],
1768  myTileLocalPos[1],
1769  myTileLocalPos[2], t);
1770  }
1771 
1772  /// Returns true if the tile we are currently in is a constant tile.
1773  bool isTileConstant() const
1774  {
1775  UT_ASSERT_P(myCurTile >= 0);
1776 
1777  UT_VoxelTile<T> *tile;
1778 
1779  tile = myArray->getLinearTile(myCurTile);
1780  return tile->isConstant();
1781  }
1782 
1783  /// This tile will iterate over the voxels indexed [start,end).
1785  {
1786  start.x() = myTilePos[0] * TILESIZE;
1787  start.y() = myTilePos[1] * TILESIZE;
1788  start.z() = myTilePos[2] * TILESIZE;
1789  end = start;
1790  end.x() += myTileSize[0];
1791  end.y() += myTileSize[1];
1792  end.z() += myTileSize[2];
1793  }
1794 
1795  /// This tile will iterate over the *inclusive* voxels indexed
1796  /// in the returned boudning box.
1798  {
1800  getTileVoxels(start, end);
1801  return UT_BoundingBoxI(start, end);
1802  }
1803 
/// Returns true if we are at the start of a new tile.
// All three tile-local coordinates are zero exactly at the first
// voxel of a tile.
bool isStartOfTile() const
    { return !(myTileLocalPos[0] ||
               myTileLocalPos[1] ||
               myTileLocalPos[2]); }
1809 
1810  /// Returns the VoxelTile we are currently processing
1812  {
1813  UT_ASSERT_P(myCurTile >= 0);
1814  return myArray->getLinearTile(myCurTile);
1815  }
1816  int getLinearTileNum() const
1817  {
1818  return myCurTile;
1819  }
1820 
1821  /// Advances the iterator to point to the next tile. Useful if the
1822  /// constant test showed that you didn't need to deal with this one.
1823  void advanceTile();
1824 
1825  /// Advances the iterator to pointing just before the next tile so
1826  /// the next advance() will be an advanceTile(). This is useful
1827  /// if you want to do a continue; as your break but the forloop
1828  /// is doing advance()
1829  /// Note the iterator is in a bad state until advance() is called.
1830  void skipToEndOfTile();
1831 
1832  /// Sets a flag which causes the iterator to tryCompress()
1833  /// tiles when it is done with them.
1834  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1835  void setCompressOnExit(bool shouldcompress)
1836  { myShouldCompressOnExit = shouldcompress; }
1837 
1838  /// These templated algorithms are designed to apply simple operations
1839  /// across all of the voxels with as little overhead as possible.
1840  /// The iterator should already point to a voxel array and if multithreaded
1841  /// had its partial range set. The source arrays must be matching size.
1842  /// The operator should support a () operator, and the result is
1843  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...);
1844  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1845  /// Note if both source and destination tiles are constant, only
1846  /// a single operation is invoked.
1847  template <typename OP>
1848  void applyOperation(const OP &op);
1849  template <typename OP, typename S>
1850  void applyOperation(const OP &op, const UT_VoxelArray<S> &a);
1851  template <typename OP>
1852  void applyOperation(const OP &op, T a);
1853  template <typename OP, typename S, typename R>
1854  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
1855  const UT_VoxelArray<R> &b);
1856  template <typename OP, typename S, typename R, typename Q>
1857  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
1858  const UT_VoxelArray<R> &b,
1859  const UT_VoxelArray<Q> &c);
1860  /// These variants will invoke op.isNoop(a, b, ...) which will return
1861  /// true if those values won't affect the destination. This allows
1862  /// constant source tiles to be skipped, for example when adding
1863  /// 0.
1864  template <typename OP, typename S>
1865  void applyOperationCheckNoop(const OP &op, const UT_VoxelArray<S> &a);
1866  template <typename OP>
1867  void applyOperationCheckNoop(const OP &op, T a);
1868 
1869  /// These variants of apply operation also accept a mask array. The
1870  /// operation is applied only where the mask is greater than 0.5.
1871  template <typename OP, typename M>
1872  void maskedApplyOperation(const OP &op,
1873  const UT_VoxelArray<M> &mask);
1874  template <typename OP, typename S, typename M>
1875  void maskedApplyOperation(const OP &op, const UT_VoxelArray<S> &a,
1876  const UT_VoxelArray<M> &mask);
1877  template <typename OP, typename S, typename R, typename M>
1878  void maskedApplyOperation(const OP &op, const UT_VoxelArray<S> &a,
1879  const UT_VoxelArray<R>& b,
1880  const UT_VoxelArray<M> &mask);
1881  template <typename OP, typename S, typename R, typename Q, typename M>
1882  void maskedApplyOperation(const OP& op, const UT_VoxelArray<S> &a,
1883  const UT_VoxelArray<R>& b,
1884  const UT_VoxelArray<Q>& c,
1885  const UT_VoxelArray<M> &mask);
1886 
1887  /// Assign operation works like apply operation, but *this is written
1888  /// to without reading, so there is one less parameter to the ()
1889  /// callback. This can optimize constant tile writes as the
1890  /// constant() status of the destination doesn't matter.
1891  template <typename OP, typename S>
1892  void assignOperation(const OP &op, const UT_VoxelArray<S> &a);
1893  template <typename OP, typename S, typename R>
1894  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
1895  const UT_VoxelArray<R> &b);
1896  template <typename OP, typename S, typename R, typename Q>
1897  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
1898  const UT_VoxelArray<R> &b,
1899  const UT_VoxelArray<Q> &c);
1900 
1901  /// These variants of assign operation also accept a mask array. The
1902  /// assignment operation is performed only where the mask is greater
1903  /// than 0.5.
1904  template <typename OP, typename S, typename M>
1905  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1906  const UT_VoxelArray<M>& mask);
1907  template <typename OP, typename S, typename R, typename M>
1908  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1909  const UT_VoxelArray<R>& b,
1910  const UT_VoxelArray<M>& mask);
1911  template <typename OP, typename S, typename R, typename Q, typename M>
1912  void maskedAssignOperation(const OP& op, const UT_VoxelArray<S>& a,
1913  const UT_VoxelArray<R>& b,
1914  const UT_VoxelArray<Q>& c,
1915  const UT_VoxelArray<M>& mask);
1916 
1917  /// Reduction operators.
1918  /// op.reduce(T a) called for each voxel, *but*,
1919  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1920  template <typename OP>
1921  void reduceOperation(OP &op);
1922 
1923  UT_VoxelArray<T> *getArray() const { return myArray; }
1924 
1925 protected:
1926  /// The array we belong to.
1928  /// The handle that we have locked to get our array. It is null
1929  /// by default which makes the lock/unlock nops.
1931 
1932  /// Absolute index into voxel array.
1933  int myPos[3];
1934 
1935  /// Flag determining if we should compress tiles whenever we
1936  /// advance out of them.
1938 
1941 
1942 public:
1943  /// Our current linear tile idx. A value of -1 implies at end.
1945 
1946  /// Our current index into the tile list
1948 
1949  /// Our start & end tiles for processing a subrange.
1950  /// The tile range is half open [start, end)
1951  int myTileStart, myTileEnd;
1952 
1953  /// Which tile we are as per tx,ty,tz rather than linear index.
1954  int myTilePos[3];
1955 
1956  /// Our position within the current tile.
1957  int myTileLocalPos[3];
1958 
1959  /// The size of the current tile
1960  int myTileSize[3];
1961 
1962  /// The job info to use for tilefetching
1964 
1966 };
1967 
1968 /// Iterator for tiles inside Voxel Arrays
1969 ///
1970 /// This class eliminates the need for having
1971 /// for (z = 0; z < zres; z++)
1972 /// ...
1973 /// for (x = 0; x < xres; x++)
1974 /// loops everywhere.
1975 ///
1976 /// The iterator is similar in principal to an STL iterator, but somewhat
1977 /// simpler. The classic STL loop
1978 /// for ( it = begin(); it != end(); ++it )
1979 /// is done using
1980 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1981 ///
1982 template <typename T>
1984 {
1985 public:
1988  template <typename S>
1990  UT_VoxelArray<T> *array);
1992 
1993  template <typename S>
1995  UT_VoxelArray<T> *array)
1996  {
1997  UT_ASSERT_P(vit.isStartOfTile());
1998  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1999  myLinearTileNum = vit.getLinearTileNum();
2000  myArray = array;
2001  myTileStart[0] = vit.x();
2002  myTileStart[1] = vit.y();
2003  myTileStart[2] = vit.z();
2004  }
2005 
2007  {
2008  setTile(vit, vit.getArray());
2009  }
2010 
2011  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
2012  {
2013  myCurTile = array->getLinearTile(lineartilenum);
2014  myLinearTileNum = lineartilenum;
2015  myArray = array;
2016 
2017  array->linearTileToXYZ(lineartilenum,
2018  myTileStart[0], myTileStart[1], myTileStart[2]);
2019  myTileStart[0] <<= TILEBITS;
2020  myTileStart[1] <<= TILEBITS;
2021  myTileStart[2] <<= TILEBITS;
2022  }
2023 
2024  /// Resets the iterator to point to the first voxel.
2025  void rewind();
2026 
2027  /// Returns true if we have iterated over all of the voxels.
2028  bool atEnd() const
2029  { return myCurTile == 0 || myAtEnd; }
2030 
2031  /// Advances the iterator to point to the next voxel.
2032  void advance()
2033  {
2034  // We try to advance each axis, rolling over to the next.
2035  // If we exhaust this tile, we call advanceTile.
2036  myPos[0]++;
2037  myTileLocalPos[0]++;
2038  if (myTileLocalPos[0] >= myTileSize[0])
2039  {
2040  // Wrapped in X.
2041  myPos[0] -= myTileLocalPos[0];
2042  myTileLocalPos[0] = 0;
2043 
2044  myPos[1]++;
2045  myTileLocalPos[1]++;
2046  if (myTileLocalPos[1] >= myTileSize[1])
2047  {
2048  // Wrapped in Y.
2049  myPos[1] -= myTileLocalPos[1];
2050  myTileLocalPos[1] = 0;
2051 
2052  myPos[2]++;
2053  myTileLocalPos[2]++;
2054  if (myTileLocalPos[2] >= myTileSize[2])
2055  {
2056  // Wrapped in Z! Finished this tile!
2057  advanceTile();
2058  }
2059  }
2060  }
2061  }
2062 
2063  /// Retrieve the current location of the iterator, in the
2064  /// containing voxel array, not in the tile.
2065  int x() const { return myPos[0]; }
2066  int y() const { return myPos[1]; }
2067  int z() const { return myPos[2]; }
2068  int idx(int idx) const { return myPos[idx]; }
2069 
2070  /// Retrieves the value that we are currently pointing at.
2071  /// This is faster than an operator(x,y,z) as we already know
2072  /// our current tile and that bounds checking isn't needed.
2073  T getValue() const
2074  {
2075  UT_ASSERT_P(myCurTile);
2076 
2077  return (*myCurTile)(myTileLocalPos[0],
2078  myTileLocalPos[1],
2079  myTileLocalPos[2]);
2080  }
2081 
2082  /// Sets the voxel we are currently pointing to the given value.
2083  void setValue(T t) const
2084  {
2085  UT_ASSERT_P(myCurTile);
2086 
2087  myCurTile->setValue(myTileLocalPos[0],
2088  myTileLocalPos[1],
2089  myTileLocalPos[2], t);
2090  }
2091 
2092  /// Returns true if the tile we are currently in is a constant tile.
2093  bool isTileConstant() const
2094  {
2095  UT_ASSERT_P(myCurTile);
2096 
2097  return myCurTile->isConstant();
2098  }
2099 
2100  /// Returns true if we are at the start of a new tile.
2101  bool isStartOfTile() const
2102  { return !(myTileLocalPos[0] ||
2103  myTileLocalPos[1] ||
2104  myTileLocalPos[2]); }
2105 
2106  /// Returns the VoxelTile we are currently processing
2108  {
2109  return myCurTile;
2110  }
2111  int getLinearTileNum() const
2112  {
2113  return myLinearTileNum;
2114  }
2115 
2116 
2117  /// Advances the iterator to point to the next tile. Since
2118  /// we are restricted to one tile, effectively just ends the iterator.
2119  void advanceTile();
2120 
2121  /// Sets a flag which causes the iterator to tryCompress()
2122  /// tiles when it is done with them.
2123  bool getCompressOnExit() const { return myShouldCompressOnExit; }
2124  void setCompressOnExit(bool shouldcompress)
2125  { myShouldCompressOnExit = shouldcompress; }
2126 
2127  /// These templated algorithms are designed to apply simple operations
2128  /// across all of the voxels with as little overhead as possible.
2129  /// The iterator should already point to a voxel array and if multithreaded
2130  /// had its partial range set. The source arrays must be matching size.
2131  /// The operator should support a () operator, and the result is
2132  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...);
2133  /// Passing T instead of UT_VoxelArray will treat it as a constant source
2134  /// Note if both source and destination tiles are constant, only
2135  /// a single operation is invoked.
2136  template <typename OP>
2137  void applyOperation(const OP &op);
2138  template <typename OP, typename S>
2139  void applyOperation(const OP &op, const UT_VoxelArray<S> &a);
2140  template <typename OP>
2141  void applyOperation(const OP &op, T a);
2142  template <typename OP, typename S, typename R>
2143  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
2144  const UT_VoxelArray<R> &b);
2145  template <typename OP, typename S, typename R, typename Q>
2146  void applyOperation(const OP &op, const UT_VoxelArray<S> &a,
2147  const UT_VoxelArray<R> &b,
2148  const UT_VoxelArray<Q> &c);
2149 
2150  /// Assign operation works like apply operation, but *this is written
2151  /// to without reading, so there is one less parameter to the ()
2152  /// callback. This can optimize constant tile writes as the
2153  /// constant() status of the destination doesn't matter.
2154  template <typename OP, typename S>
2155  void assignOperation(const OP &op, const UT_VoxelArray<S> &a);
2156  template <typename OP, typename S, typename R>
2157  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
2158  const UT_VoxelArray<R> &b);
2159  template <typename OP, typename S, typename R, typename Q>
2160  void assignOperation(const OP &op, const UT_VoxelArray<S> &a,
2161  const UT_VoxelArray<R> &b,
2162  const UT_VoxelArray<Q> &c);
2163 
2164 
2165  /// Reduction operators.
2166  /// op.reduce(T a) called for each voxel, *but*,
2167  /// op.reduceMany(T a, int n) called to reduce constant blocks.
2168  /// Early exits if op.reduce() returns false.
2169  template <typename OP>
2170  bool reduceOperation(OP &op);
2171 
2172 protected:
2173  /// Current processing tile
2176 
2177  /// Current's tile linear number.
2179 
2180  /// Absolute index into voxel array.
2181  int myPos[3];
2182  /// Absolute index of start of tile
2183  int myTileStart[3];
2184 
2185  /// Flag determining if we should compress tiles whenever we
2186  /// advance out of them.
2188 
2189  /// Since we want to allow multiple passes, we can't
2190  /// clear out myCurTile when we hit the end.
2191  bool myAtEnd;
2192 
2193 public:
2194  /// Our position within the current tile.
2195  int myTileLocalPos[3];
2196 
2197  /// The size of the current tile
2198  int myTileSize[3];
2199 };
2200 
2201 /// Probe for Voxel Arrays
2202 ///
2203 /// This class is designed to allow for efficient evaluation
2204 /// of aligned indices of a voxel array, provided the voxels are iterated
2205  /// in a tile-by-tile, x-innermost, manner.
2206 ///
2207 /// This class will create a local copy of the voxel data where needed,
2208 /// uncompressing the information once for every 16 queries. It will
2209 /// also create an aligned buffer so you can safely use v4uf on fpreal32
2210 /// data.
2211 ///
2212 /// For queries where you need surrounding values, the prex and postx can
2213 /// specify padding on the probe. prex should be -1 to allow reading
2214 /// -1 offset, postx 1 to allow reading a 1 offset.
2215 ///
2216 
2217 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2218 class UT_VoxelProbe
2219 {
2220 public:
2221  UT_VoxelProbe();
2222  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2223  ~UT_VoxelProbe();
2224 
2225  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
 // NOTE(review): the setConstArray declaration line was dropped by the doc
 // extraction here; the surviving body below statically requires
 // DoWrite == false and forwards to setArray() after casting away const.
2227  int prex = 0, int postx = 0)
2228  {
2229  SYS_STATIC_ASSERT(DoWrite == false);
2230  setArray((UT_VoxelArray<T> *)vox, prex, postx);
2231  }
2232 
 /// Returns the voxel array this probe is bound to (null until set).
2233  UT_VoxelArray<T> *getArray() const { return myArray; }
2234 
 /// True once an array has been attached via setArray()/setConstArray().
2235  bool isValid() const { return myArray != 0; }
2236 
 /// Reads the value at the current position straight from the cached
 /// line; no bounds checking, the cache must already be current.
2237  inline T getValue() const
2238  {
2239  return *myCurLine;
2240  }
 /// Reads at an x offset from the current position along the cached
 /// line. The valid offset range is established by the prex/postx
 /// padding requested at setup (see class comment above).
2241  inline T getValue(int offset) const
2242  {
2243  return myCurLine[myStride*offset];
2244  }
2245 
 /// Writes the current voxel into the cache line. When TestForWrites
 /// is enabled the cache is flagged dirty so it can be written back.
2246  inline void setValue(T value)
2247  {
2248  UT_ASSERT_P(DoWrite);
2249  *myCurLine = value;
2250  if (TestForWrites)
2251  myDirty = true;
2252  }
2253 
2254 
2255  /// Resets where we currently point to.
2256  /// Returns true if we had to reset our cache line. If we didn't,
2257  /// and you have multiple probes acting in-step, you can just
2258  /// advanceX() the other probes
2259  template <typename S>
2261  { return setIndex(vit.x(), vit.y(), vit.z()); }
2262  template <typename S>
2264  { return setIndex(vit.x(), vit.y(), vit.z()); }
2265 
2266  bool setIndex(int x, int y, int z);
2267 
2268  /// Blindly advances our current pointer.
2269  inline void advanceX()
2270  {
2271  myCurLine += myStride;
2272  myX++;
2273  UT_ASSERT_P(myX < myMaxValidX);
2274  }
2275 
2276  /// Adjusts our current pointer to the given absolute location,
2277  /// assumes the new value is inside our valid range.
2278  inline void resetX(int x)
2279  {
2280  myCurLine += myStride * (x - myX);
2281  myX = x;
2282  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
2283  }
2284 
2285 protected:
 /// Rebuilds the cached line so that it covers (x, y, z).
2286  void reloadCache(int x, int y, int z);
2287 
 /// Writes the cached line back to the array — presumably only when
 /// DoWrite (and, with TestForWrites, myDirty) applies; confirm against
 /// the implementation in UT_VoxelArray.C.
2288  void writeCacheLine();
2289 
 /// Fills the cache from a single constant value (constant tiles).
2290  void buildConstantCache(T value);
2291 
 // NOTE(review): the pointer/stride member declarations (cache line
 // pointers and myStride) were dropped by the doc extraction between
 // the comments below.
2293  /// myCacheLine[0] is the start of the cache line, so -1 would be
2294  /// the first pre-rolled value
2296  /// Where we actually allocated our cache line, aligned to 4x multiple
2297  /// to ensure SSE compatible.
2299 
 /// Current probe position; myX advances with advanceX()/resetX().
2300  int myX, myY, myZ;
 /// Requested pre/post padding along x (see class comment above).
2301  int myPreX, myPostX;
2304  /// Half inclusive [,) range of valid x queries for current cache.
2305  int myMinValidX, myMaxValidX;
2306 
2307  /// Determines if we have anything to write back, only
2308  /// valid if TestForWrites is enabled.
2309  bool myDirty;
2310 
2312 
2313  friend class UT_VoxelProbeCube<T>;
2314  friend class UT_VoxelProbeFace<T>;
2315 };
2316 
2317 ///
2318 /// The vector probe is three normal probes into separate voxel arrays
2319 /// making it easier to read and write to aligned vector fields.
2320 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
2321 ///
2322 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2324 {
2325 public:
2327  { }
2329  { setArray(vx, vy, vz); }
2331  {}
2332 
2334  {
2335  myLines[0].setArray(vx);
2336  myLines[1].setArray(vy);
2337  myLines[2].setArray(vz);
2338  }
2339  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
2340  {
2341  SYS_STATIC_ASSERT(DoWrite == false);
2342  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
2343  }
2344 
2345  inline UT_Vector3 getValue() const
2346  {
2347  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2348  }
2349  inline T getValue(int axis) const
2350  {
2351  return myLines[axis].getValue();
2352  }
2353 
2354  inline void setValue(const UT_Vector3 &v)
2355  {
2356  myLines[0].setValue(v.x());
2357  myLines[1].setValue(v.y());
2358  myLines[2].setValue(v.z());
2359  }
2360 
2361  inline void setComponent(int axis, T val)
2362  {
2363  myLines[axis].setValue(val);
2364  }
2365 
2366  /// Resets where we currently point to.
2367  /// Returns true if we had to reset our cache line. If we didn't,
2368  /// and you have multiple probes acting in-step, you can just
2369  /// advanceX() the other probes
2370  template <typename S>
2372  { return setIndex(vit.x(), vit.y(), vit.z()); }
2373  template <typename S>
2375  { return setIndex(vit.x(), vit.y(), vit.z()); }
2376 
2377  bool setIndex(int x, int y, int z)
2378  {
2379  if (myLines[0].setIndex(x, y, z))
2380  {
2381  myLines[1].setIndex(x, y, z);
2382  myLines[2].setIndex(x, y, z);
2383  return true;
2384  }
2385  myLines[1].advanceX();
2386  myLines[2].advanceX();
2387  return false;
2388  }
2389 
2390  void advanceX()
2391  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2392 
2393 protected:
2395 };
2396 
2397 template <typename T>
2398 class
2400 {
2401 public:
2403  ~UT_VoxelProbeCube();
2404 
2405  void setConstCubeArray(const UT_VoxelArray<T> *vox);
2406  void setConstPlusArray(const UT_VoxelArray<T> *vox);
2407 
2408  /// Allows you to query +/-1 in each direction. In cube update,
2409  /// all are valid. In plus update, only one of x y and z may be
2410  /// non zero.
2412  T
2413  getValue(int x, int y, int z) const
2414  {
2415  UT_ASSERT_P(x >= -1 && x <= 1 &&
2416  y >= -1 && y <= 1 &&
2417  z >= -1 && z <= 1);
2418 
2419  return myLines[y+1][z+1].getValue(x);
2420  }
2421 
2423  T
2425  {
2426  return getValue(offset[0], offset[1], offset[2]);
2427  }
2428 
2429  template <typename S>
2431  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2432  template <typename S>
2434  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2435  bool setIndexCube(int x, int y, int z);
2436 
2437  template <typename S>
2439  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2440  template <typename S>
2442  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2443  bool setIndexPlus(int x, int y, int z);
2444 
2445  /// Computes central difference gradient, does not scale
2446  /// by the step size (which is twice voxelsize)
2447  /// Requires PlusArray
2449  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2450  getValue(0,1,0) - getValue(0,-1,0),
2451  getValue(0,0,1) - getValue(0,0,-1)); }
2452 
2453  /// Computes the central difference curvature using the given
2454  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2455  /// Requires CubeArray.
2456  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2457 
2458  /// Computes the laplacian, again with a given 1/voxelsize.
2459  /// Requires PlusArray
2460  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2461 
2462 protected:
2463  /// Does an rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2464  /// so further queries with y+1 will be cache hits for 2 out of 3.
2465  static void rotateLines(UT_VoxelProbe<T, true, false, false> &ym,
2468 
2470  /// Cached look up position. myValid stores if they are
2471  /// valid values or not
2472  bool myValid;
2473  int myX, myY, myZ;
2474  /// Half inclusive [,) range of valid x queries for current cache.
2475  int myMinValidX, myMaxValidX;
2476 };
2477 
2478 ///
2479 /// UT_VoxelProbeConstant
2480 ///
2481 /// Looks like a voxel probe but only returns a constant value.
2482 ///
2483 template <typename T>
2484 class
2486 {
2487 public:
2490 
2491  template <typename S>
2493  { return true; }
2494  template <typename S>
2496  { return true; }
2497  bool setIndex(int x, int y, int z)
2498  { return true; }
2499 
2500  void setValue(T val) { myValue = val; }
2501  T getValue() const { return myValue; }
2502 protected:
2504 };
2505 
2506 ///
2507 /// UT_VoxelProbeAverage
2508 ///
2509  /// When working with MAC grids one often has slightly misaligned
2510  /// fields. Ie, one field is at the half-grid spacing of another field.
2511  /// The step values are 0 if the dimension is aligned, -1 for half a step
2512 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2513 /// (ie, (val(0)+val(1))/2)
2514 ///
2515 template <typename T, int XStep, int YStep, int ZStep>
2516 class
2518 {
2519 public:
2522 
2523  void setArray(const UT_VoxelArray<T> *vox);
2524 
2525  template <typename S>
2527  { return setIndex(vit.x(), vit.y(), vit.z()); }
2528  template <typename S>
2530  { return setIndex(vit.x(), vit.y(), vit.z()); }
2531  bool setIndex(int x, int y, int z);
2532 
2533  /// Returns the velocity centered at this index, thus an average
2534  /// of the values in each of our internal probes.
2535  inline T getValue() const
2536  {
2537  if (ZStep)
2538  return (valueZ(1) + valueZ(0)) * 0.5;
2539  return valueZ(0);
2540  }
2541 
2542 protected:
2543  inline T valueZ(int z) const
2544  {
2545  if (YStep)
2546  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2547  return valueYZ(0, z);
2548  }
2549 
2550  inline T valueYZ(int y, int z) const
2551  {
2552  if (XStep > 0)
2553  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2554  if (XStep < 0)
2555  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2556  return myLines[y][z].getValue();
2557  }
2558 
2559  // Stores [Y][Z] lines.
2561 };
2562 
2563 
2564 ///
2565 /// UT_VoxelProbeFace is designed to walk over three velocity
2566 /// fields that store face-centered values. The indices refer
2567 /// to the centers of the voxels.
2568 ///
2569 template <typename T>
2570 class
2572 {
2573 public:
2575  ~UT_VoxelProbeFace();
2576 
2577  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2578  void setVoxelSize(const UT_Vector3 &voxelsize);
2579 
2580  template <typename S>
2582  { return setIndex(vit.x(), vit.y(), vit.z()); }
2583  template <typename S>
2585  { return setIndex(vit.x(), vit.y(), vit.z()); }
2586  bool setIndex(int x, int y, int z);
2587 
2588  /// Get the face values on each face component.
2589  /// Parameters are axis then side.
2590  /// 0 is the lower face, 1 the higher face.
2591  inline T face(int axis, int side) const
2592  {
2593  if (axis == 0)
2594  return myLines[0][0].getValue(side);
2595  else
2596  return myLines[axis][side].getValue();
2597  }
2598 
2599  /// Returns the velocity centered at this index, thus an average
2600  /// of the values in each of our internal probes.
2601  inline UT_Vector3 value() const
2602  {
2603  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2604  0.5f * (face(1, 0) + face(1, 1)),
2605  0.5f * (face(2, 0) + face(2, 1)));
2606  }
2607 
2608  /// Returns the divergence of this cell.
2609  inline T divergence() const
2610  {
2611  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2612  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2613  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2614 
2615  }
2616 
2617 protected:
2618 
2619  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2621 
2622 
2624 
2625  /// Cached look up position. myValid stores if they are
2626  /// valid values or not
2627  bool myValid;
2628  int myX, myY, myZ;
2629  /// Half inclusive [,) range of valid x queries for current cache.
2630  int myMinValidX, myMaxValidX;
2631 
2632  UT_Vector3 myVoxelSize, myInvVoxelSize;
2633 };
2634 
2635 
2636 #include "UT_VoxelArray.C"
2637 
2638 
2639 // Typedefs for common voxel array types
2645 
2657 // Read only probe
2663 // Write only
2669 // Read/Write always writeback.
2675 // Read/Write with testing
2681 
2682 // TODO: add support for read-write probe cube
2684 
2688 
2692 
2696 
2700 
2704 
2708 
2709 #endif
2710 
void setTile(const UT_VoxelArrayIterator< T > &vit)
exint exint const UT_JobInfo &info const
int x() const
Retrieve the current location of the iterator.
UT_Interrupt * myInterrupt
UT_Vector3I getVoxelRes() const
#define SYSmax(a, b)
Definition: SYS_Math.h:1570
int xyzTileToLinear(int x, int y, int z) const
void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const
SYS_FORCE_INLINE T lerpSample(T *samples, float fx, float fy, float fz) const
Lerps the given sample using trilinear interpolation.
const UT_VoxelTile< T > & operator=(const UT_VoxelTile< T > &src)
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
void findAverage(T &avg) const
Determines the average value of the tile.
UT_VoxelProbe< UT_Vector3, false, true, false > UT_VoxelWOProbeV3
#define SYS_STATIC_ASSERT(expr)
UT_Vector3I linearTileToXYZ(int idx) const
int int32
Definition: SYS_Types.h:39
UT_VoxelProbe< fpreal32, true, true, true > UT_VoxelRWTProbeF
void setInterrupt(UT_Interrupt *interrupt)
T valueZ(int z) const
int myLinearTileNum
Current's tile linear number.
void loadData(UT_IStream &is)
Load an array, requires you have already size()d this array.
UT_VoxelBorderType getBorder() const
exint getDataLength() const
Returns the amount of data used by the tile myData pointer.
bool atEnd() const
Returns true if we have iterated over all of the voxels.
void match(const UT_VoxelArray< T > &src)
UT_VoxelArray< UT_Vector3 > UT_VoxelArrayV3
SYS_FORCE_INLINE T getValue(const UT_Vector3I &offset) const
bool isMatching(const UT_VoxelArray< S > &src) const
Axis-aligned bounding box (AABB).
Definition: GEO_Detail.h:41
*get result *(waiting if necessary)*A common idiom is to fire a bunch of sub tasks at the and then *wait for them to all complete We provide a helper class
Definition: thread.h:623
void setValue(UT_Vector3I index, T value)
virtual const char * getName()=0
T valueYZ(int y, int z) const
void resample(const UT_VoxelArray< T > &src, UT_FilterType filtertype=UT_FILTER_POINT, float filterwidthscale=1.0f, int clampaxis=-1)
Fills this by resampling the given voxel array.
const UT_VoxelCompressOptions & getCompressionOptions() const
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
void
Definition: png.h:1083
int64 getMemoryUsage(bool inclusive) const
Return the amount of memory used by this array.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
GLboolean * data
Definition: glcorearb.h:131
constexpr SYS_FORCE_INLINE T & y() noexcept
Definition: UT_Vector4.h:493
bool setIndex(UT_VoxelTileIterator< S > &vit)
SYS_FORCE_INLINE void freeData()
const GLdouble * v
Definition: glcorearb.h:837
T operator()(UT_Vector3D pos) const
THREADED_METHOD3_CONST(UT_VoxelArray< T >, numTiles() > 16, flatten, T *, flatarray, exint, ystride, exint, zstride) void flattenPartial(T *flatarray
UT_Vector3 gradient() const
int numVoxels() const
GLuint start
Definition: glcorearb.h:475
GLsizei const GLfloat * value
Definition: glcorearb.h:824
UT_COWReadHandle< UT_VoxelArray< T > > myHandle
UT_VoxelArray< fpreal32 > UT_VoxelArrayF
void clampIndex(int &x, int &y, int &z) const
virtual T getValue(const UT_VoxelTile< T > &tile, int x, int y, int z) const =0
T * fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
static void registerCompressionEngine(UT_VoxelTileCompress< T > *engine)
fpreal myQuantizeTol
Tolerance for quantizing to reduced bit depth.
virtual bool lerp(GA_AttributeOperand &d, GA_AttributeOperand &a, GA_AttributeOperand &b, GA_AttributeOperand &t) const
d = SYSlerp(a, b, t);
UT_VoxelVectorProbe< fpreal32, true, true, true > UT_VoxelVectorRWTProbeF
UT_Vector3T< float > UT_Vector3
SYS_FORCE_INLINE T * SYSconst_cast(const T *foo)
Definition: SYS_Types.h:136
UT_VoxelTile< T > * myCurTile
Current processing tile.
GLdouble GLdouble GLdouble z
Definition: glcorearb.h:848
UT_VoxelVectorProbe(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
UT_VoxelArray< T > * myBaseLevel
constexpr SYS_FORCE_INLINE T & z() noexcept
Definition: UT_Vector3.h:667
int64 exint
Definition: SYS_Types.h:125
T divergence() const
Returns the divergence of this cell.
GLint level
Definition: glcorearb.h:108
SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1222
UT_VoxelBorderType
Definition: UT_VoxelArray.h:70
#define SYSabs(a)
Definition: SYS_Math.h:1572
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
bool myOwnBase
If true, we will delete the base level when we are done.
UT_VoxelArray< UT_Vector4 > UT_VoxelArrayV4
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:87
#define UT_API
Definition: UT_API.h:14
bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
ImageBuf OIIO_API min(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
UT_VoxelArray< T > * myArray
fpreal UTvoxelTileDist(const UT_Vector2 &a, const UT_Vector2 &b)
UT_VoxelTileIterator< UT_Vector2 > UT_VoxelTileIteratorV2
void setArray(UT_VoxelArray< T > *vox)
GLint y
Definition: glcorearb.h:103
T mixValues(T t1, T t2, mipmaptype function) const
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:37
T ysize() const
ImageBuf OIIO_API flatten(const ImageBuf &src, ROI roi={}, int nthreads=0)
void copyWithOffset(const UT_VoxelArray< T > &src, int offx, int offy, int offz)
int indexToLinearTile(int x, int y, int z) const
**But if you need a result
Definition: thread.h:613
UT_VoxelTileIterator< int64 > UT_VoxelTileIteratorI
bool isConstant(T *cval=0) const
UT_Vector3T< int64 > UT_Vector3I
void makeConstant(T t)
Turns this tile into a constant tile of the given value.
bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const
GLfloat GLfloat GLfloat v2
Definition: glcorearb.h:818
T face(int axis, int side) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
UT_VoxelVectorProbe< fpreal32, true, true, false > UT_VoxelVectorRWProbeF
SYS_FORCE_INLINE T rawConstVal() const
UT_FilterType
Definition: UT_FilterType.h:16
void UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
bool atEnd() const
Returns true if we have iterated over all of the voxels.
const UT_JobInfo & info
void setCompressionOptions(const UT_VoxelCompressOptions &options)
float fpreal32
Definition: SYS_Types.h:200
virtual bool load(UT_JSONParser &p, UT_VoxelTile< T > &tile) const
UT_VoxelArray< T > * getArray() const
int myCurTile
Our current linear tile idx. A value of -1 implies at end.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
void setCompressionTolerance(fpreal tol)
void setHandle(UT_COWReadHandle< UT_VoxelArray< T > > handle)
void flatten(S *dst, int dststride) const
Flattens ourself into the given destination buffer.
void makeFpreal16()
Explicit compress to fpreal16. Lossy. No-op if already constant.
void size(int xres, int yres, int zres, bool reset=true)
S * extractSlice(S *dstdata, int slice, bool half_slice) const
constexpr SYS_FORCE_INLINE T & x() noexcept
Definition: UT_Vector4.h:491
int zres() const
virtual bool writeThrough(UT_VoxelTile< T > &tile, int x, int y, int z, T t) const =0
UT_VoxelProbeCube< fpreal32 > UT_VoxelROProbeCubeF
UT_VoxelVectorProbe< fpreal32, true, false, false > UT_VoxelVectorProbeF
SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
UT_VoxelArrayIterator< UT_Vector3 > UT_VoxelArrayIteratorV3
virtual ~UT_VoxelTileCompress()
static fpreal dist(T a, T b)
bool setIndex(int x, int y, int z)
double fpreal64
Definition: SYS_Types.h:201
ImageBuf OIIO_API laplacian(const ImageBuf &src, ROI roi={}, int nthreads=0)
constexpr SYS_FORCE_INLINE T & x() noexcept
Definition: UT_Vector2.h:423
bool getCompressOnExit() const
SYS_NO_DISCARD_RESULT SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
unsigned char uint8
Definition: SYS_Types.h:36
bool writeThrough(int x, int y, int z, T t)
int yres() const
UT_VoxelTileIterator< fpreal32 > UT_VoxelTileIteratorF
UT_VoxelArray< UT_Vector2 > UT_VoxelArrayV2
void moveTilesWithOffset(UT_VoxelArray< T > &src, int tileoffx, int tileoffy, int tileoffz)
const T * rawData() const
T getBorderScale(int axis) const
const S * writeTiles(const S *srcdata, int srcstride, const UT_IntArray &tilelist)
GLfloat f
Definition: glcorearb.h:1926
bool hasNan() const
Returns true if any NANs are in this tile.
UT_VoxelProbe< UT_Vector2, true, true, false > UT_VoxelRWProbeV2
GLintptr offset
Definition: glcorearb.h:665
void setArray(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
void resetX(int x)
GLboolean reset
Definition: glad.h:5138
void setTile(const UT_VoxelArrayIterator< S > &vit, UT_VoxelArray< T > *array)
UT_VoxelArrayIterator< UT_Vector2 > UT_VoxelArrayIteratorV2
THREADED_METHOD4(UT_VoxelArray< T >, numTiles() > 4, copyWithOffsetInternal, const UT_VoxelArray< T > &, src, int, offx, int, offy, int, offz) void copyWithOffsetInternalPartial(const UT_VoxelArray< T > &src
exint exint zstride
int myCurTileListIdx
Our current index into the tile list.
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
virtual void load(UT_IStream &is, UT_VoxelTile< T > &tile) const
int64 getMemoryUsage(bool inclusive) const
Returns the amount of memory used by this tile.
THREADED_METHOD4_CONST(UT_VoxelArray< T >, numTiles() > 16, flattenGLFixed8, uint8 *, flatarray, exint, ystride, exint, zstride, T, dummy) void flattenGLFixed8Partial(uint8 *flatarray
int getYRes() const
void weightedSum(int pstart[3], int pend[3], const float *weights[3], int start[3], T &result)
SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const
int numTiles() const
UT_BoundingBoxI getTileBBox() const
int getLinearTileNum() const
constexpr SYS_FORCE_INLINE T & z() noexcept
Definition: UT_Vector4.h:495
bool getValues(const UT_BoundingBoxI &bbox, T *values, const exint size) const
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
static void saveCompressionTypes(std::ostream &os)
Stores a list of compression engines to os.
T getBorderValue() const
UT_VoxelArray< T > * myArray
The array we belong to.
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:155
static int mirrorCoordinates(int x, int res)
T getValue() const
GLuint GLuint end
Definition: glcorearb.h:475
UT_VoxelProbe< UT_Vector2, true, false, false > UT_VoxelProbeV2
virtual void save(std::ostream &os, const UT_VoxelTile< T > &tile) const
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
const UT_VoxelArray< T > & operator=(const UT_VoxelArray< T > &src)
Assignment operator:
UT_VoxelArray< T > * getArray() const
UT_Vector3T< T > SYSclamp(const UT_Vector3T< T > &v, const UT_Vector3T< T > &min, const UT_Vector3T< T > &max)
Definition: UT_Vector3.h:1057
bool isValid() const
GLint GLenum GLboolean GLsizei stride
Definition: glcorearb.h:872
void makeRawUninitialized()
Definition: VM_SIMD.h:188
void setTile(const UT_VoxelArrayIterator< S > &vit, UT_VoxelArray< T > *array)
GLint GLuint mask
Definition: glcorearb.h:124
void setValue(int x, int y, int z, T t)
UT_VoxelTile< T > * getTile(int tx, int ty, int tz) const
UT_VoxelProbe< UT_Vector2, false, true, false > UT_VoxelWOProbeV2
constexpr enabler dummy
An instance to use in EnableIf.
Definition: CLI11.h:985
UT_VoxelProbe< UT_Vector2, true, true, true > UT_VoxelRWTProbeV2
UT_VoxelProbe< UT_Vector4, true, true, true > UT_VoxelRWTProbeV4
void setCompressOnExit(bool shouldcompress)
T evaluate(const UT_Vector3 &pos, const UT_Filter &filter, fpreal radius, int clampaxis=-1) const
long long int64
Definition: SYS_Types.h:116
UT_VoxelMipMap< fpreal32 > UT_VoxelMipMapF
SYS_NO_DISCARD_RESULT SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
bool setIndex(UT_VoxelTileIterator< S > &vit)
UT_VoxelProbe< UT_Vector3, true, false, false > UT_VoxelProbeV3
#define SYS_NO_DISCARD_RESULT
Definition: SYS_Compiler.h:93
#define SYS_STATIC_FORCE_INLINE
Definition: SYS_Inline.h:48
UT_VoxelProbe< UT_Vector3, true, true, false > UT_VoxelRWProbeV3
bool tryCompress(const UT_VoxelCompressOptions &options)
virtual bool canSave() const
Does this engine support saving and loading?
int getRes(int dim) const
THREADED_METHOD(UT_VoxelArray< T >, numTiles() > 100, collapseAllTiles) void collapseAllTilesPartial(const UT_JobInfo &info)
int getXRes() const
void setRes(int xr, int yr, int zr)
UT_VoxelArray< int64 > UT_VoxelArrayI
THREADED_METHOD1(UT_VoxelArray< T >, numTiles() > 100, constant, T, t) void constantPartial(T t
bool setIndexPlus(UT_VoxelTileIterator< S > &vit)
UT_API UT_ValArray< UT_VoxelTileCompress< fpreal16 > * > & UTvoxelTileGetCompressionEngines(fpreal16 *dummy)
bool setIndex(UT_VoxelArrayIterator< S > &vit)
GLuint const GLchar * name
Definition: glcorearb.h:786
virtual bool isLossless() const
Returns true if the compression type is lossless.
signed char int8
Definition: SYS_Types.h:35
void getTileVoxels(int idx, UT_Vector3I &start, UT_Vector3I &end) const
idxth tile represents the voxels indexed [start,end).
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1222
GLint GLenum GLint x
Definition: glcorearb.h:409
SYS_FORCE_INLINE bool inlineConstant() const
void writeCacheLine(T *cacheline, int y, int z)
Fills a cache line from an external buffer into our own data.
void advanceX()
Blindly advances our current pointer.
T xsize() const
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
void setValue(int x, int y, int z, T t)
SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const
UT_VoxelArrayIterator< int64 > UT_VoxelArrayIteratorI
virtual int getDataLength(const UT_VoxelTile< T > &tile) const =0
GLdouble t
Definition: glad.h:2397
GLsizei samples
Definition: glcorearb.h:1298
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
This tile will iterate over the voxels indexed [start,end).
bool myAllowFP16
Conversion to fpreal16, only valid for scalar data.
SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
int getZRes() const
const UT_JobInfo * myJobInfo
The job info to use for tilefetching.
void setConstArray(const UT_VoxelArray< T > *vox, int prex=0, int postx=0)
static UT_VoxelTileCompress< T > * getCompressionEngine(int index)
GLint j
Definition: glad.h:2733
bool getCompressOnExit() const
T volume() const
UT_VoxelTileIterator< UT_Vector4 > UT_VoxelTileIteratorV4
UT_VoxelProbe< fpreal32, false, true, false > UT_VoxelWOProbeF
GLsizeiptr size
Definition: glcorearb.h:664
GLenum GLenum dst
Definition: glcorearb.h:1793
virtual void findMinMax(const UT_VoxelTile< T > &tile, T &min, T &max) const
Definition: UT_VoxelArray.C:73
void setLinearTile(exint lineartilenum, UT_VoxelArray< T > *array)
void setTile(const UT_VoxelArrayIterator< T > &vit)
bool isRawFull() const
Returns if this tile is in raw full format.
bool hasNan() const
Returns true if any element of the voxel array is NAN.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
UT_VoxelVectorProbe< fpreal32, false, true, false > UT_VoxelVectorWOProbeF
UT_VoxelProbe< UT_Vector4, true, false, false > UT_VoxelProbeV4
T operator()(UT_Vector3I index) const
SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
SYS_STATIC_FORCE_INLINE T lerpValues(T v1, T v2, fpreal32 bias)
Lerps two numbers, templated to work with T.
T getValue(int x, int y, int z) const
bool setIndexPlus(UT_VoxelArrayIterator< S > &vit)
SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z, float fx, float fy, float fz) const
UT_VoxelProbe< UT_Vector3, true, true, true > UT_VoxelRWTProbeV3
GLenum GLsizei GLsizei GLint * values
Definition: glcorearb.h:1602
T getValue(int offset) const
void copyFragment(int dstx, int dsty, int dstz, const UT_VoxelTile< T > &srctile, int srcx, int srcy, int srcz)
void uncompress()
Turns a compressed tile into a raw tile.
UT_VoxelArray< T > * myArray
short int16
Definition: SYS_Types.h:37
fpreal64 fpreal
Definition: SYS_Types.h:277
THREADED_METHOD3(UT_VoxelArray< T >, numTiles() > 16, extractFromFlattened, const T *, flatarray, exint, ystride, exint, zstride) void extractFromFlattenedPartial(const T *flatarray
int idx(int idx) const
void setConstArray(const UT_VoxelArray< T > *vox)
UT_API UT_Interrupt * UTgetInterrupt()
Obtain global UT_Interrupt singleton.
void forEachTile(const OP &op, bool shouldthread=true)
SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
GLuint index
Definition: glcorearb.h:786
T getValue(int axis) const
UT_VoxelProbe< fpreal32, true, false, false > UT_VoxelProbeF
int numLevels() const
UT_BoundingBoxT< int64 > UT_BoundingBoxI
SYS_FORCE_INLINE T getValue(int x, int y, int z) const
UT_VoxelProbe< fpreal32, true, true, false > UT_VoxelRWProbeF
constexpr SYS_FORCE_INLINE T & w() noexcept
Definition: UT_Vector4.h:497
bool isRaw() const
Returns if this tile is in raw format.
void saveData(std::ostream &os) const
UT_ValArray< UT_VoxelArray< T > ** > myLevels
int getRes(int axis) const
GLfloat GLfloat v1
Definition: glcorearb.h:817
GLuint GLfloat * val
Definition: glcorearb.h:1608
ImageBuf OIIO_API max(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
UT_BoundingBoxI getTileBBox(int idx) const
int int int offz
UT_Vector3 value() const
UT_VoxelTileIterator< UT_Vector3 > UT_VoxelTileIteratorV3
void linearTileToXYZ(int idx, int &x, int &y, int &z) const
UT_Vector3 myVoxelSize
UT_VoxelProbe< UT_Vector4, true, true, false > UT_VoxelRWProbeV4
int getLinearTileNum() const
exint numVoxels() const
void save(std::ostream &os) const
#define DEFINE_STD_FUNC(TYPE)
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
T operator()(int x, int y, int z) const
int isInside(const UT_Vector3T< T > &pt) const
UT_VoxelTile< T > * getLinearTile(int idx) const
void load(UT_IStream &is, const UT_IntArray &compression)
UT_VoxelProbe< UT_Vector4, false, true, false > UT_VoxelWOProbeV4
void setCompressOnExit(bool shouldcompress)
static int lookupCompressionEngine(const char *name)
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:857
Definition: core.h:1131
void setValue(const UT_Vector3 &v)
void setValue(T value)
SYS_FORCE_INLINE void lerpVoxelMinMax(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
bool setIndexCube(UT_VoxelArrayIterator< S > &vit)
#define UT_VOXEL_FREE(x)
Definition: UT_VoxelArray.h:53
bool setIndexCube(UT_VoxelTileIterator< S > &vit)
void uncompressFull()
Turns a tile into a raw full tile.
void setBorder(UT_VoxelBorderType type, T t)
UT_Vector3 getValue() const
T * rawFullData()
Returns the raw full data of the tile.
SYS_FORCE_INLINE T operator()(int x, int y, int z) const
UT_VoxelArrayIterator< fpreal32 > UT_VoxelArrayIteratorF
virtual bool tryCompress(UT_VoxelTile< T > &tile, const UT_VoxelCompressOptions &options, T min, T max) const =0
void setForeignData(void *data, int8 compress_type)
SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
static void expandMinMax(T v, T &min, T &max)
Designed to be specialized according to T.
int idx(int idx) const
constexpr SYS_FORCE_INLINE T & y() noexcept
Definition: UT_Vector3.h:665
void findMinMax(T &min, T &max) const
Finds the minimum and maximum T values.
bool isValidIndex(int x, int y, int z) const
Returns true if the given x, y, z values lie inside the valid index.
#define SYSmin(a, b)
Definition: SYS_Math.h:1571
type
Definition: core.h:1059
void setComponent(int axis, T val)
virtual bool save(UT_JSONWriter &w, const UT_VoxelTile< T > &tile) const
SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z, float fx, float fy, float fz) const
void setBorderScale(T scalex, T scaley, T scalez)
bool setIndex(UT_VoxelTileIterator< S > &vit)
bool setIndex(UT_VoxelTileIterator< S > &vit)
void setConstArray(const UT_VoxelArray< T > *vx, const UT_VoxelArray< T > *vy, const UT_VoxelArray< T > *vz)
exint exint T dummy
const UT_VoxelArray< T > * level(int level, int function) const
int xres() const
Read the current resolution.
bool compressionEnabled() const
S * extractTiles(S *dstdata, int stride, const UT_IntArray &tilelist) const
bool setIndex(int x, int y, int z)
bool setIndex(UT_VoxelArrayIterator< S > &vit)
void writeData(const S *src, int srcstride)
SYS_FORCE_INLINE T * rawConstData() const
bool isSimpleCompression() const
constexpr SYS_FORCE_INLINE T & y() noexcept
Definition: UT_Vector2.h:425
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
bool isConstant() const
Returns if this tile is constant.
SYS_FORCE_INLINE T lerpSampleAxis(T *samples, float fx, float fy, float fz) const
Definition: format.h:895
static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions)
UT_VoxelArrayIterator< UT_Vector4 > UT_VoxelArrayIteratorV4
int getTileRes(int dim) const
void evaluateMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelTileIterator< S > &vit)
fpreal getCompressionTolerance() const
void flattenPartialAxis(T *flatarray, exint ystride, const UT_JobInfo &info) const
void forEachTileConst(const OP &op, bool shouldthread=true) const
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const
GLint GLint GLint GLint GLint GLint GLint GLbitfield GLenum filter
Definition: glcorearb.h:1297
GLenum src
Definition: glcorearb.h:1793
constexpr SYS_FORCE_INLINE T & x() noexcept
Definition: UT_Vector3.h:663