HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_VoxelArray.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 // TBB alloc results in real-world tests that are 3-4% faster. Yay!
37 // But unfortunately it is less aggressive with fragmentation, so
38 // we use effectively 2x the memory. Boo.
39 
40 //#define VOXEL_USE_TBB_ALLOC
41 
42 #ifdef VOXEL_USE_TBB_ALLOC
43 
44 #include <tbb/scalable_allocator.h>
45 
46 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
47 #define UT_VOXEL_FREE(x) scalable_free(x)
48 
49 #else
50 
51 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
52 #define UT_VOXEL_FREE(x) SYSafree(x)
53 
54 #endif
55 
class UT_Filter;
class UT_JSONWriter;
class UT_JSONParser;

/// Voxel tiles are TILESIZE^3 voxels (16^3). TILEBITS is the per-axis
/// shift converting a voxel index to a tile index (x >> TILEBITS), and
/// TILEMASK extracts the voxel-within-tile coordinate (x & TILEMASK).
static const int TILEBITS = 4;
static const int TILESIZE = 1 << TILEBITS;
static const int TILEMASK = TILESIZE-1;
63 
///
/// Behaviour of out of bound reads.
///
enum UT_VoxelBorderType
{
    UT_VOXELBORDER_CONSTANT,	// Return the array's border value.
    UT_VOXELBORDER_REPEAT,	// Wrap indices periodically (see getValue()).
    UT_VOXELBORDER_STREAK,	// Clamp to the nearest in-range voxel.
    UT_VOXELBORDER_EXTRAP	// Clamp, then extrapolate by myBorderScale.
};
74 
// Forward declarations for the voxel tile/array family and their probes.
template <typename T> class UT_VoxelTile;
template <typename T> class UT_VoxelArray;
template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
template <typename T> class UT_VoxelProbeCube;
template <typename T> class UT_VoxelProbeFace;
80 
/// Describes one tile's worth of data in the bulk tile streams used by
/// UT_VoxelArray::extractTiles() / writeTiles(): which tile, and how many
/// voxels of it participate (allows partial tiles).
struct UT_VoxelArrayTileDataDescr
{
    int tileidx;
    int numvoxel;
};
86 
88 {
89 public:
91  {
92  myConstantTol = 0;
93  myQuantizeTol = 0;
94  myAllowFP16 = false;
95  }
96 
97  // Used for quantization.
99  {
102  };
103 
104  /// Determines if compressTile should be run on this grid for
105  /// things other than constant compression. Used by writeTiles
106  /// to limit compression attempts.
107  bool compressionEnabled() const
108  {
109  return myAllowFP16 || myConstantTol > 0 || myQuantizeTol > 0;
110  }
111 
112  /// Tiles will be constant if within this range. This may
113  /// need to be tighter than quantization tolerance as
114  /// dithering can't recover partial values.
116  /// Tolerance for quantizing to reduced bit depth
118 
120 
121  /// Conversion to fpreal16, only valid for scalar data.
123 };
124 
125 ///
126 /// UT_VoxelTileCompress
127 ///
128 /// A compression engine for UT_VoxelTiles of a specific type. This
129 /// is a verb class which is invoked from the voxeltile class.
130 ///
131 template <typename T>
133 {
134 public:
137 
138  /// Attempts to write data directly to the compressed tile.
139  /// Returns false if not possible.
140  virtual bool writeThrough(UT_VoxelTile<T> &tile,
141  int x, int y, int z, T t) const = 0;
142 
143  /// Reads directly from the compressed data.
144  /// Cannot alter the tile in any way because it must be threadsafe.
145  virtual T getValue(const UT_VoxelTile<T> &tile,
146  int x, int y, int z) const = 0;
147 
148  /// Attempts to compress the data according to the given tolerance.
149  /// If succesful, returns true.
150  virtual bool tryCompress(UT_VoxelTile<T> &tile,
151  const UT_VoxelCompressOptions &options,
152  T min, T max) const = 0;
153 
154  /// Returns the length in bytes of the data in the tile.
155  /// It must be at least one byte long.
156  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
157 
158  /// Returns true if the compression type is lossless
159  virtual bool isLossless() const { return false; }
160 
161  /// Determines the min & max values of the tile. A default
162  /// implementation uses getValue() on all voxels.
163  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
164 
165  /// Does this engine support saving and loading?
166  virtual bool canSave() const { return false; }
167  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
168  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
169  { return false; }
170  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
171  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
172  { return false; }
173 
174  /// Returns the unique name of this compression engine so
175  /// we can look up engines by name (the index of the compression
176  /// engine is assigned at load time so isn't constant)
177  virtual const char *getName() = 0;
178 };
179 
191 
192 #define DEFINE_STD_FUNC(TYPE) \
193 inline void \
194 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
195 { \
196  if (v < min) \
197  min = v; \
198  else if (v > max) \
199  max = v; \
200 } \
201  \
202 inline fpreal \
203 UTvoxelTileDist(TYPE a, TYPE b) \
204 { \
205  return (fpreal) SYSabs(a - b); \
206 }
207 
216 
217 #undef DEFINE_STD_FUNC
218 
219 inline void
221 {
222  min.x() = SYSmin(v.x(), min.x());
223  max.x() = SYSmax(v.x(), max.x());
224 
225  min.y() = SYSmin(v.y(), min.y());
226  max.y() = SYSmax(v.y(), max.y());
227 }
228 
229 inline void
231 {
232  min.x() = SYSmin(v.x(), min.x());
233  max.x() = SYSmax(v.x(), max.x());
234 
235  min.y() = SYSmin(v.y(), min.y());
236  max.y() = SYSmax(v.y(), max.y());
237 
238  min.z() = SYSmin(v.z(), min.z());
239  max.z() = SYSmax(v.z(), max.z());
240 }
241 
242 inline void
244 {
245  min.x() = SYSmin(v.x(), min.x());
246  max.x() = SYSmax(v.x(), max.x());
247 
248  min.y() = SYSmin(v.y(), min.y());
249  max.y() = SYSmax(v.y(), max.y());
250 
251  min.z() = SYSmin(v.z(), min.z());
252  max.z() = SYSmax(v.z(), max.z());
253 
254  min.w() = SYSmin(v.w(), min.w());
255  max.w() = SYSmax(v.w(), max.w());
256 }
257 
258 inline fpreal
260 {
261  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
262 }
263 
264 inline fpreal
266 {
267  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
268  + SYSabs(a.z() - b.z());
269 }
270 
271 inline fpreal
273 {
274  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
275  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
276 }
277 
278 ///
279 /// UT_VoxelTile
280 ///
281 /// A UT_VoxelArray is composed of a number of these tiles. This is
282 /// done for two reasons:
283 /// 1) Increased memory locality when processing neighbouring points.
284 /// 2) Ability to compress or page out unneeded tiles.
285 /// Currently, the only special ability is the ability to create constant
286 /// tiles.
287 ///
288 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should be
289 /// usually transparent. The only exception may be if they want to do
290 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
291 ///
292 template <typename T>
293 class UT_VoxelTile
294 {
295 public:
296  UT_VoxelTile();
297  ~UT_VoxelTile();
298 
299  // Copy constructor:
301 
302 
303  // Assignment operator:
305 
307  {
313  };
314 
315  /// Fetch a given local value. (x,y,z) should be local to
316  /// this tile.
317  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
318  {
319  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
320  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
321 
322  switch (myCompressionType)
323  {
324  case COMPRESS_RAW:
325  return ((T *)myData)[
326  ((z * myRes[1]) + y) * myRes[0] + x ];
327 
328  case COMPRESS_CONSTANT:
329  return rawConstVal();
330 
331  case COMPRESS_RAWFULL:
332  return ((T *)myData)[
333  ((z * TILESIZE) + y) * TILESIZE + x ];
334 
335  case COMPRESS_FPREAL16:
336  {
337  T result;
338  result = (((fpreal16 *)myData)[
339  ((z * myRes[1]) + y) * myRes[0] + x ]);
340  return result;
341  }
342  }
343 
344  // By default use the compression engine.
345  UT_VoxelTileCompress<T> *engine;
346 
347  engine = getCompressionEngine(myCompressionType);
348  return engine->getValue(*this, x, y, z);
349  }
350 
351  /// Lerps two numbers, templated to work with T.
353  {
354  return v1 + (v2 - v1) * bias;
355  }
356 
357  /// Does a trilinear interpolation. x,y,z should be local to this
358  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
359  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
360 
361  template <int AXIS2D>
362  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
363 
364  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
365  /// array should have 8 elements, x minor, z major.
366  /// Requires it is in bounds.
367  /// Returns true if all constant, in which case only a single
368  /// sample is filled, [0]
370  bool extractSample(int x, int y, int z,
371  T *sample) const;
372  template <int AXIS2D>
374  bool extractSampleAxis(int x, int y, int z,
375  T *sample) const;
376 
377  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
378  /// 7 samples.
379  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
380  T *sample) const;
381  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
382  /// 27 elements.
383  bool extractSampleCube(int x, int y, int z,
384  T *sample) const;
385 #if 0
386  /// MSVC can't handle aligned parameters after the third so
387  /// frac must come first.
388  T lerp(v4uf frac, int x, int y, int z) const;
389 #endif
390 
391  /// Returns a cached line to our internal data, at local address x,y,z.
392  /// cacheline is a caller allocated structure to fill out if we have
393  /// to decompress. If forcecopy isn't set and we can, the result may
394  /// be an internal pointer. stride is set to the update for moving one
395  /// x position in the cache.
396  /// strideofone should be set to true if you want to prevent 0 stride
397  /// results for constant tiles.
398  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
399 
400  /// Fills a cache line from an external buffer into our own data.
401  void writeCacheLine(T *cacheline, int y, int z);
402 
403  /// Copies between two tiles. The tile's voxels match up, but don't
404  /// have the same offset. The maximal overlapping voxels are copied.
405  /// this->setValue(dst, dsty, dstz, src(srcx, srcy, srcz));
406  void copyFragment(int dstx, int dsty, int dstz,
407  const UT_VoxelTile<T> &srctile,
408  int srcx, int srcy, int srcz);
409 
410  /// Flattens ourself into the given destination buffer.
411  template <typename S>
412  void flatten(S *dst, int dststride) const;
413 
414  /// Fills our values from the given dense flat buffer. Will
415  /// create a constant tile if the source is constant.
416  template <typename S>
417  void writeData(const S *src, int srcstride);
418 
419  /// The setData is intentionally separate so we can avoid
420  /// expanding constant data when we write the same value to it.
421  void setValue(int x, int y, int z, T t);
422 
423  /// Finds the minimum and maximum T values
424  void findMinMax(T &min, T &max) const;
425 
426  /// Determines the average value of the tile.
427  void findAverage(T &avg) const;
428 
429  /// Returns if this tile is constant.
430  bool isConstant() const
431  { return myCompressionType == COMPRESS_CONSTANT; }
432 
433  /// Returns true if any NANs are in this tile
434  bool hasNan() const;
435 
436  /// Returns if this tile is in raw format.
437  bool isRaw() const
438  { return myCompressionType == COMPRESS_RAW; }
439 
440  /// Returns if this tile is in raw full format.
441  bool isRawFull() const
442  { return myCompressionType == COMPRESS_RAWFULL; }
443 
444  /// Returns true if this is a simple form of compression, either
445  /// constant, raw, or a raw full that isn't padded
446  bool isSimpleCompression() const
447  {
448  if (isRaw()) return true;
449  if (isConstant()) return true;
450  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
451  return true;
452  return false;
453  }
454 
455  /// Attempts to compress this tile. Returns true if any
456  /// compression performed.
457  bool tryCompress(const UT_VoxelCompressOptions &options);
458 
459  /// Turns this tile into a constant tile of the given value.
460  void makeConstant(T t);
461 
462  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
463  void makeFpreal16();
464 
465  /// Turns a compressed tile into a raw tile.
466  void uncompress();
467 
468  /// Turns a tile into a raw full tile.
469  void uncompressFull();
470 
471  /// Like uncompress() except it leaves the data uninitialized. Result
472  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
473  /// @note USE WITH CAUTION!
474  void makeRawUninitialized();
475 
476  /// Returns the raw full data of the tile.
478  {
479  uncompressFull();
480  return (T *)myData;
481  }
482 
483  /// This only makes sense for simple compression. Use with
484  /// extreme care.
486  { if (inlineConstant() && isConstant())
487  { return (T *) &myData; }
488  return (T *)myData; }
489  const T *rawData() const
490  { if (inlineConstant() && isConstant())
491  { return (const T *) &myData; }
492  return (const T *)myData; }
493 
494  /// Read the current resolution.
495  int xres() const { return myRes[0]; }
496  int yres() const { return myRes[1]; }
497  int zres() const { return myRes[2]; }
498 
499  int getRes(int dim) const { return myRes[dim]; }
500 
501 
502  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
503 
504  /// Returns the amount of memory used by this tile.
505  int64 getMemoryUsage(bool inclusive) const;
506 
507  /// Returns the amount of data used by the tile myData pointer.
508  exint getDataLength() const;
509 
510  /// A routine used by filtered evaluation to accumulated a partial
511  /// filtered sum in this tile.
512  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
513  /// weights - weight array
514  /// start - UT_VoxelArray coordinates at [0] in the weight array
515  void weightedSum(int pstart[3], int pend[3],
516  const float *weights[3], int start[3],
517  T &result);
518 
519  /// Designed to be specialized according to T
520 
521  /// Update min & max to encompass T itself.
522  static void expandMinMax(T v, T &min, T &max)
523  {
524  UTvoxelTileExpandMinMax(v, min, max);
525  }
526 
527  /// Return the "distance" of a & b. This is used for
528  /// tolerance checks on equality comparisons.
529  static fpreal dist(T a, T b)
530  {
531  return UTvoxelTileDist(a, b);
532  }
533 
535 
536  // Returns the index of the bound compression engine.
537  static int lookupCompressionEngine(const char *name);
538  // Given an index, gets the compression engine.
540 
541  /// Saves this tile's data, in compressed form.
542  /// May save in uncompressed form if the compression type does
543  /// not support saving.
544  void save(std::ostream &os) const;
545  bool save(UT_JSONWriter &w) const;
546 
547  /// Loads tile data. Uses the compression index to map the saved
548  /// compression types into the correct loading compression types.
549  void load(UT_IStream &is, const UT_IntArray &compression);
550  bool load(UT_JSONParser &p, const UT_IntArray &compression);
551 
552  /// Stores a list of compresson engines to os.
553  static void saveCompressionTypes(std::ostream &os);
554  static bool saveCompressionTypes(UT_JSONWriter &w);
555 
556  /// Builds a translation table from the given stream's compression types
557  /// into our own valid compression types.
558  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
559  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
560 
561 protected:
562  // Attempts to set the value to the native compressed format
563  // Some compression types allow some values to be written
564  // without decompression. Eg, you can write to a constant tile
565  // the tile's own value without decompression.
566  // If this returns true, t has been written.
567  bool writeThrough(int x, int y, int z, T t);
568 
569  /// Sets the local res of the tile. Does *not* resize the allocated
570  /// memory.
571  void setRes(int xr, int yr, int zr)
572  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
573 
575  {
576  return (sizeof(T) <= sizeof(T*));
577  }
578 
580  { if (inlineConstant()) { return *((const T *)&myData); }
581  return *((const T*)myData); }
583  { if (inlineConstant()) { return ((T *)&myData); }
584  return ((T*)myData); }
585 
586  void setForeignData(void *data, int8 compress_type)
587  {
588  freeData();
589  myCompressionType = compress_type;
590 
591  if (isConstant() && inlineConstant())
592  {
593  makeConstant(*(T *)data);
594  }
595  else
596  {
597  myData = data;
598  myForeignData = true;
599  }
600  }
601 
602 public:
603  /// Frees myData and sets it to zero. This is a bit tricky
604  /// as the constant tiles may be inlined.
605  /// This is only public for the compression engines.
607  {
608  if (inlineConstant() && isConstant())
609  {
610  // Do nothing!
611  }
612  else if (myData && !myForeignData)
613  {
615  }
616  myData = 0;
617  myForeignData = false;
618  }
619 
620 public:
621  // This is only public so the compression engines can get to it.
622  // It is blind data, do not alter!
623  void *myData;
624 private:
625 
626  /// Resolutions.
627  int8 myRes[3];
628 
629  /// Am I a constant tile?
630  int8 myCompressionType;
631 
632  int8 myForeignData;
633 
634  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
635  {
636  return UTvoxelTileGetCompressionEngines((T *) 0);
637  }
638 
639  friend class UT_VoxelTileCompress<T>;
640  friend class UT_VoxelArray<T>;
641  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
642  friend class UT_VoxelProbe;
643 };
644 
645 ///
646 /// UT_VoxelArray
647 ///
648  /// This provides a data structure to hold a three dimensional array
649 /// of data. The data should be some simple arithmetic type, such
650 /// as uint8, fpreal16, or UT_Vector3.
651 ///
652  /// Some operations, such as gradients, may make less sense with uint8.
653 ///
654 template <typename T>
655 class UT_VoxelArray
656 {
657 public:
658  UT_VoxelArray();
659  ~UT_VoxelArray();
660 
661  /// Copy constructor:
663 
664  /// Assignment operator:
666 
667  /// This sets the voxelarray to have the given resolution. If resolution is
668  /// changed, all elements will be set to 0. If resolution is already equal
669  /// to the arguments, all elements will be set to 0 only if reset is true;
670  /// otherwise, the voxel array will be left untouched.
671  void size(int xres, int yres, int zres, bool reset = true);
672 
673  /// This will ensure this voxel array matches the given voxel array
674  /// in terms of dimensions & border conditions. It may invoke
675  /// a size() and hence reset the field to 0.
676  void match(const UT_VoxelArray<T> &src);
677 
678  template <typename S>
679  bool isMatching(const UT_VoxelArray<S> &src) const
680  {
681  return src.getXRes() == getXRes() &&
682  src.getYRes() == getYRes() &&
683  src.getZRes() == getZRes();
684  }
685 
686  int getXRes() const { return myRes[0]; }
687  int getYRes() const { return myRes[1]; }
688  int getZRes() const { return myRes[2]; }
689  int getRes(int axis) const { return myRes[axis]; }
690 
692  {
693  return UT_Vector3I(myRes[0], myRes[1], myRes[2]);
694 
695  }
696 
697  /// Return the amount of memory used by this array.
698  int64 getMemoryUsage(bool inclusive) const;
699 
700  /// Sets this voxel array to the given constant value. All tiles
701  /// are turned into constant tiles.
703  constant,
704  T, t)
705  void constantPartial(T t, const UT_JobInfo &info);
706 
707  /// If this voxel array is all constant tiles, returns true.
708  /// The optional pointer is initialized to the constant value iff
709  /// the array is constant. (Note by constant we mean made of constant
710  /// tiles of the same value - if some tiles are uncompressed but
711  /// constant, it will still return false)
712  bool isConstant(T *cval = 0) const;
713 
714  /// Returns true if any element of the voxel array is NAN
715  bool hasNan() const;
716 
717  /// This convenience function lets you sample the voxel array.
718  /// pos is in the range [0..1]^3.
719  /// T value trilinearly interpolated. Edges are determined by the border
720  /// mode.
721  /// The cells are sampled at the center of the voxels.
722  T operator()(UT_Vector3D pos) const;
723  T operator()(UT_Vector3F pos) const;
724 
725  /// This convenience function lets you sample the voxel array.
726  /// pos is in the range [0..1]^3.
727  /// The min/max is the range of the sampled values.
728  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
729  UT_Vector3F pos) const;
730 
731  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
732  /// Allows out of range evaluation
734  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
735  /// Allows out of range evaluation
736  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
737  float fx, float fy, float fz) const;
738  template <int AXIS2D>
740  template <int AXIS2D>
741  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
742  float fx, float fy, float fz) const;
743 
744  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
745  /// Allows out of range evaluation. Also computes min/max of
746  /// interpolated samples.
747  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
748  UT_Vector3F pos) const;
749  template <int AXIS2D>
750  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
751  UT_Vector3F pos) const;
752  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
753  /// Allows out of range evaluation. Also computes min/max of
754  /// interpolated samples.
756  T &lerp, T &lmin, T &lmax,
757  int x, int y, int z,
758  float fx, float fy, float fz) const;
759  template <int AXIS2D>
761  T &lerp, T &lmin, T &lmax,
762  int x, int y, int z,
763  float fx, float fy, float fz) const;
764 
765  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
766  /// array should have 8 elements, x minor, z major.
767  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
768  T *sample) const;
769  template <int AXIS2D>
770  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
771  T *sample) const;
772 
773  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
774  /// the center into 7 voxels.
775  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
776  T *sample) const;
777  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
778  /// z major, xminor.
779  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
780  T *sample) const;
781 
782  /// Lerps the given sample using trilinear interpolation
784  float fx, float fy, float fz) const;
785  template <int AXIS2D>
787  float fx, float fy, float fz) const;
788 
789  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
790  float &fx, float &fy, float &fz) const
791  {
792  // Determine integer & fractional components.
793  fx = pos.x();
794  SYSfastSplitFloat(fx, x);
795  fy = pos.y();
796  SYSfastSplitFloat(fy, y);
797  fz = pos.z();
798  SYSfastSplitFloat(fz, z);
799  }
800  template <int AXIS2D>
801  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
802  float &fx, float &fy, float &fz) const
803  {
804  // Determine integer & fractional components.
805  if (AXIS2D != 0)
806  {
807  fx = pos.x();
808  SYSfastSplitFloat(fx, x);
809  }
810  else
811  {
812  fx = 0.0;
813  x = 0;
814  }
815  if (AXIS2D != 1)
816  {
817  fy = pos.y();
818  SYSfastSplitFloat(fy, y);
819  }
820  else
821  {
822  fy = 0.0;
823  y = 0;
824  }
825  if (AXIS2D != 2)
826  {
827  fz = pos.z();
828  SYSfastSplitFloat(fz, z);
829  }
830  else
831  {
832  fz = 0.0;
833  z = 0;
834  }
835  }
836 #if 0
837  T operator()(v4uf pos) const;
838 #endif
839 
840  /// Filtered evaluation of the voxel array. This operation should
841  /// exhibit the same behavior as IMG3D_Channel::evaluate.
842  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
843  fpreal radius, int clampaxis = -1) const;
844 
845  /// Fills this by resampling the given voxel array.
846  void resample(const UT_VoxelArray<T> &src,
847  UT_FilterType filtertype = UT_FILTER_POINT,
848  float filterwidthscale = 1.0f,
849  int clampaxis = -1);
850 
851  /// Flattens this into an array. Z major, then Y, then X.
852  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
854  flatten,
855  T *, flatarray,
856  exint, ystride,
857  exint, zstride)
858  void flattenPartial(T *flatarray, exint ystride, exint zstride,
860 
861  /// Flattens this into an array. Z major, then Y, then X.
862  /// Flattens a 2d slice where AXIS2D is constant.
863  /// If AXIS2D == 2 (ie, z): flatarray[x + y * ystride] = getValue(x, y, 0);
864  /// Flattens by destination x-major stripes to avoid page collisions
865  /// on freshly allocated memory buffers.
866  template <int AXIS2D>
867  void flattenPartialAxis(T *flatarray, exint ystride,
868  const UT_JobInfo &info) const;
869 
870  /// Flattens this into an array suitable for a GL 8bit texture.
871  /// Z major, then Y, then X.
872  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
874  flattenGLFixed8,
875  uint8 *, flatarray,
876  exint, ystride,
877  exint, zstride,
878  T , dummy)
879  void flattenGLFixed8Partial(uint8 *flatarray,
880  exint ystride, exint zstride,
881  T dummy,
882  const UT_JobInfo &info) const;
883 
884  /// Flattens this into an array suitable for a GL 16bit FP texture.
885  /// Z major, then Y, then X.
886  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
887  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
888  flattenGL16F,
889  UT_Vector4H *, flatarray,
890  exint, ystride,
891  exint, zstride,
892  T , dummy)
893  void flattenGL16FPartial(UT_Vector4H *flatarray,
894  exint ystride, exint zstride,
895  T dummy,
896  const UT_JobInfo &info) const;
897 
898  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
899  /// this also works around an older Nvidia driver bug that caused very small
900  /// valued texels (<1e-9) to appear as huge random values in the texture.
901  /// Z major, then Y, then X.
902  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
903  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
904  flattenGL32F,
905  UT_Vector4F *, flatarray,
906  exint, ystride,
907  exint, zstride,
908  T , dummy)
909  void flattenGL32FPartial(UT_Vector4F *flatarray,
910  exint ystride, exint zstride,
911  T dummy,
912  const UT_JobInfo &info) const;
913 
914  /// Fills this from a flattened array. Z major, then Y, then X.
915  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride];
916  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
917  extractFromFlattened,
918  const T *, flatarray,
919  exint, ystride,
920  exint, zstride)
921  void extractFromFlattenedPartial(const T *flatarray,
922  exint ystride, exint zstride,
923  const UT_JobInfo &info);
924 
925  /// Copies into this voxel array from the source array.
926  /// Conceptually,
927  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz);
928  void copyWithOffset(const UT_VoxelArray<T> &src,
929  int offx, int offy, int offz);
930  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
931  copyWithOffsetInternal,
932  const UT_VoxelArray<T> &, src,
933  int, offx,
934  int, offy,
935  int, offz)
936  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
937  int offx, int offy, int offz,
938  const UT_JobInfo &info);
939 
940  /// Moves data from the source voxel array into this array. The offsets should
941  /// be in terms of tiles. Source may be modified as this array steals its data
942  /// buffers in such a way that no dynamic memory will leak when these arrays
943  /// are freed.
944  /// Conceptually, this function performs the same operation as copyWithOffset,
945  /// but with offsets specified in terms of tiles:
946  /// this->setValue(x, y, z, src.getValue(x+off_v_x, y+off_v_y, z+off_v_z)
947  /// where off_v_A=tileoffA*TILESIZE for A in {x, y, z}.
948  void moveTilesWithOffset(UT_VoxelArray<T> &src, int tileoffx, int tileoffy,
949  int tileoffz);
950 
951  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
952  /// in T. Data order is in tile-order. So, sorted by tilelist, then
953  /// z, y, x within that tile.
954  /// The ix/iy/iz variant allows partial tiles. If the number of
955  /// voxels to write to a tile matches the tile size, however, the
956  /// ix/iy/iz is ignored and the tile is written in canonical order.
957  template <typename S>
958  S *extractTiles(S *dstdata, int stride,
959  const UT_IntArray &tilelist) const;
960  template <typename S, typename IDX>
961  S *extractTiles(S *dstdata, int stride,
962  const IDX *ix, const IDX *iy, const IDX *iz,
963  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const;
964 
965  /// Overwrites our tiles with the given data. Does checking
966  /// for constant tiles. Input srcdata stream should match
967  /// that of extractTiles.
968  template <typename S>
969  const S *writeTiles(const S *srcdata, int srcstride,
970  const UT_IntArray &tilelist);
971  template <typename S, typename IDX>
972  const S *writeTiles(const S *srcdata, int srcstride,
973  const IDX *ix, const IDX *iy, const IDX *iz,
974  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist);
975 
976  /// Converts a 3d position in range [0..1]^3 into the closest
977  /// index value.
978  /// Returns false if the resulting index was out of range. The index
979  /// will still be set.
980  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
981  bool posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const;
982  /// Converts a 3d position in [0..1]^3 into the equivalent in
983  /// the integer cell space. Does not clamp to the closest value.
984  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
985  bool posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const;
986  /// Converts an index into a position.
987  /// Returns false if the source index was out of range, in which case
988  /// pos will be outside [0..1]^3
989  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
990  bool indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const;
991  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
992  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
993 
994  /// Clamps the given x, y, and z values to lie inside the valid index
995  /// range.
996  void clampIndex(int &x, int &y, int &z) const
997  {
998  x = SYSclamp(x, 0, myRes[0]-1);
999  y = SYSclamp(y, 0, myRes[1]-1);
1000  z = SYSclamp(z, 0, myRes[2]-1);
1001  }
1002 
1003  /// Returns true if the given x, y, z values lie inside the valid index.
1004  bool isValidIndex(int x, int y, int z) const
1005  {
1006  return !((x | y | z) < 0) &&
1007  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
1008  }
1009 
1010  /// This allows you to read & write the raw data.
1011  /// Out of bound reads are illegal.
1013  {
1014  return (*this)(index[0], index[1], index[2]);
1015  }
1016  T operator()(int x, int y, int z) const
1017  {
1018  UT_ASSERT_P(isValidIndex(x, y, z));
1019  return (*getTile(x >> TILEBITS,
1020  y >> TILEBITS,
1021  z >> TILEBITS))
1022  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
1023  }
1024 
1026  {
1027  setValue(index[0], index[1], index[2], value);
1028  }
1029 
1030  void setValue(int x, int y, int z, T t)
1031  {
1032  UT_ASSERT_P(isValidIndex(x, y, z));
1033  getTile(x >> TILEBITS,
1034  y >> TILEBITS,
1035  z >> TILEBITS)->setValue(
1036  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
1037  }
1038 
1039  /// This will clamp the bounds to fit within the voxel array,
1040  /// using the border type to resolve out of range values.
1041  T getValue(int x, int y, int z) const
1042  {
1043  // First handle the most common case.
1044  if (isValidIndex(x, y, z))
1045  return (*this)(x, y, z);
1046 
1047  // Verify our voxel array is non-empty.
1048  if (!myTiles)
1049  return myBorderValue;
1050 
1051  // We now know we are out of range, adjust appropriately
1052  switch (myBorderType)
1053  {
1055  return myBorderValue;
1056 
1057  case UT_VOXELBORDER_REPEAT:
1058  if (x < 0 || x >= myRes[0])
1059  {
1060  x %= myRes[0];
1061  if (x < 0)
1062  x += myRes[0];
1063  }
1064  if (y < 0 || y >= myRes[1])
1065  {
1066  y %= myRes[1];
1067  if (y < 0)
1068  y += myRes[1];
1069  }
1070  if (z < 0 || z >= myRes[2])
1071  {
1072  z %= myRes[2];
1073  if (z < 0)
1074  z += myRes[2];
1075  }
1076  break;
1077 
1078  case UT_VOXELBORDER_STREAK:
1079  clampIndex(x, y, z);
1080  break;
1081  case UT_VOXELBORDER_EXTRAP:
1082  {
1083  int cx, cy, cz;
1084  T result;
1085 
1086  cx = x; cy = y; cz = z;
1087  clampIndex(cx, cy, cz);
1088 
1089  result = (*this)(cx, cy, cz);
1090  result += (x - cx) * myBorderScale[0] +
1091  (y - cy) * myBorderScale[1] +
1092  (z - cz) * myBorderScale[2];
1093  return result;
1094  }
1095  }
1096 
1097  // It is now within bounds, do normal fetch.
1098  return (*this)(x, y, z);
1099  }
1100 
    /// Gets values in the box [bbox.minvec(), bbox.maxvec())
    /// Values are stored in the array `values` of size `size` that has to be at least `bbox.volume()`
    /// The order of values is given by: `i + bbox.xsize() * (j + bbox.ysize() * k)`
    ///
    /// If returns true, values in `bbox` are constant and only values[0] is guaranteed to be assigned.
    bool getValues(const UT_BoundingBoxI &bbox,
                   T * values,
                   const exint size) const
    {
        UT_ASSERT_P(bbox.volume() <= size);

        // Full valid voxel range of this array.
        const UT_BoundingBoxI bounds = {0, 0, 0, getXRes(), getYRes(), getZRes()};

        // Half-open range of tiles overlapped by bbox.
        const UT_BoundingBoxI tiles =
            {bbox.xmin() >> TILEBITS,
             bbox.ymin() >> TILEBITS,
             bbox.zmin() >> TILEBITS,
             ((bbox.xmax() - 1) >> TILEBITS) + 1,
             ((bbox.ymax() - 1) >> TILEBITS) + 1,
             ((bbox.zmax() - 1) >> TILEBITS) + 1};

        bool allconstant = true;

        // Voxel range covered by the current tile, clipped against bbox on
        // the boundary tiles.
        UT_BoundingBoxI tilesamples;

        for (int kt = tiles.zmin(); kt < tiles.zmax(); kt++)
        {
            // zmin & zmax
            tilesamples.vals[2][0] = TILESIZE * kt;
            tilesamples.vals[2][1] = TILESIZE * (kt + 1);
            // clip bounds
            if (kt == tiles.zmin())
                tilesamples.vals[2][0] = bbox.zmin();
            if (kt == tiles.zmax() - 1)
                tilesamples.vals[2][1] = bbox.zmax();

            for (int jt = tiles.ymin(); jt < tiles.ymax(); jt++)
            {
                // ymin & ymax
                tilesamples.vals[1][0] = TILESIZE * jt;
                tilesamples.vals[1][1] = TILESIZE * (jt + 1);
                // clip bounds
                if (jt == tiles.ymin())
                    tilesamples.vals[1][0] = bbox.ymin();
                if (jt == tiles.ymax() - 1)
                    tilesamples.vals[1][1] = bbox.ymax();

                for (int it = tiles.xmin(); it < tiles.xmax(); it++)
                {
                    // xmin & xmax
                    tilesamples.vals[0][0] = TILESIZE * it;
                    tilesamples.vals[0][1] = TILESIZE * (it + 1);
                    // clip bounds
                    if (it == tiles.xmin())
                        tilesamples.vals[0][0] = bbox.xmin();
                    if (it == tiles.xmax() - 1)
                        tilesamples.vals[0][1] = bbox.xmax();

                    // Tile range fully inside the array: read straight from
                    // the tile, skipping border handling and the per-voxel
                    // tile lookup that getValue() would do.
                    const bool inbounds = tilesamples.isInside(bounds);

                    if (inbounds)
                    {
                        const UT_VoxelTile<T> *tile = getTile(it, jt, kt);

                        for (int k = tilesamples.zmin();
                             k < tilesamples.zmax(); k++)
                        {
                            for (int j = tilesamples.ymin();
                                 j < tilesamples.ymax(); j++)
                            {
                                for (int i = tilesamples.xmin();
                                     i < tilesamples.xmax(); i++)
                                {
                                    // Offset of this voxel within bbox.
                                    const UT_Vector3I localindex = {
                                        i - bbox.xmin(),
                                        j - bbox.ymin(),
                                        k - bbox.zmin()};

                                    const int locallinindex
                                        = localindex.x()
                                        + bbox.xsize() * (localindex.y()
                                        + bbox.ysize() * localindex.z());

                                    values[locallinindex] = (*tile)(
                                        i & TILEMASK,
                                        j & TILEMASK,
                                        k & TILEMASK);

                                    // Track whether every fetched value has
                                    // matched the first one written.
                                    if (allconstant
                                        && (values[0] != values[locallinindex]))
                                    {
                                        allconstant = false;
                                    }
                                }
                            }
                        }
                    }
                    else
                    {
                        // Partially outside the array: fall back to
                        // getValue(), which applies the border policy
                        // per voxel.
                        for (int k = tilesamples.zmin(); k < tilesamples.zmax(); k++)
                        {
                            for (int j = tilesamples.ymin();
                                 j < tilesamples.ymax(); j++)
                            {
                                for (int i = tilesamples.xmin();
                                     i < tilesamples.xmax(); i++)
                                {
                                    // Offset of this voxel within bbox.
                                    const UT_Vector3I localindex = {
                                        i - bbox.xmin(),
                                        j - bbox.ymin(),
                                        k - bbox.zmin()};

                                    const int locallinindex
                                        = localindex.x()
                                        + bbox.xsize() * (localindex.y()
                                        + bbox.ysize() * localindex.z());

                                    values[locallinindex] = getValue(i, j, k);

                                    if (allconstant
                                        && (values[0] != values[locallinindex]))
                                    {
                                        allconstant = false;
                                    }
                                }
                            }
                        }

                    }
                }
            }
        }

        return allconstant;
    }
1236 
1238  void setBorderScale(T scalex, T scaley, T scalez);
1239  UT_VoxelBorderType getBorder() const { return myBorderType; }
1240  T getBorderValue() const { return myBorderValue; }
1241  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1242 
1243  /// This tries to compress or collapse each tile. This can
1244  /// be expensive (ie, converting a tile to constant), so
1245  /// should be saved until modifications are complete.
1247  collapseAllTiles)
1248  void collapseAllTilesPartial(const UT_JobInfo &info);
1249 
1250  /// Uncompresses all tiles into non-constant tiles. Useful
1251  /// if you have a multithreaded algorithm that may need to
1252  /// both read and write, if you write to a collapsed tile
1253  /// while someone else reads from it, bad stuff happens.
1254  /// Instead, you can expandAllTiles. This may have serious
1255  /// consequences in memory use, however.
1257  expandAllTiles)
1258  void expandAllTilesPartial(const UT_JobInfo &info);
1259 
1260  /// Uncompresses all tiles, but leaves constant tiles alone.
1261  /// Useful for cleaning out any non-standard compression algorithm
1262  /// that some external program can't handle.
1263  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1264  expandAllNonConstTiles)
1265  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1266 
1267  /// The direct tile access methods are to make TBF writing a bit
1268  /// more efficient.
1269  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1270  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1272  { return &myTiles[idx]; }
1273  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1274  {
1275  x = idx % myTileRes[0];
1276  idx -= x;
1277  idx /= myTileRes[0];
1278  y = idx % myTileRes[1];
1279  idx -= y;
1280  idx /= myTileRes[1];
1281  z = idx;
1282  }
1284  {
1285  UT_Vector3I tileindex;
1286  tileindex[0] = idx % myTileRes[0];
1287  idx -= tileindex[0];
1288  idx /= myTileRes[0];
1289  tileindex[1] = idx % myTileRes[1];
1290  idx -= tileindex[1];
1291  idx /= myTileRes[1];
1292  tileindex[2] = idx;
1293 
1294  return tileindex;
1295  }
1296 
1297  int xyzTileToLinear(int x, int y, int z) const
1298  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1299 
1300  int indexToLinearTile(int x, int y, int z) const
1301  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1302 
1303  /// idxth tile represents the voxels indexed [start,end).
1304  void getTileVoxels(int idx,
1305  UT_Vector3I &start, UT_Vector3I &end) const
1306  {
1307  int x, y, z;
1308  linearTileToXYZ(idx, x, y, z);
1309 
1310  start.x() = x * TILESIZE;
1311  start.y() = y * TILESIZE;
1312  start.z() = z * TILESIZE;
1313  end = start;
1314  end.x() += myTiles[idx].xres();
1315  end.y() += myTiles[idx].yres();
1316  end.z() += myTiles[idx].zres();
1317  }
1318 
1320  {
1322  getTileVoxels(idx, start, end);
1323  return UT_BoundingBoxI(start, end);
1324  }
1325 
    /// Number of tiles along that axis. Not to be confused with
    /// the resolution of the individual tiles.
    int getTileRes(int dim) const { return myTileRes[dim]; }
    /// Total number of tiles in the array.
    int numTiles() const
    { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
    /// Total number of voxels; accumulated in exint to avoid int overflow.
    exint numVoxels() const
    { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1333 
1335  { myCompressionOptions = options; }
1337  { return myCompressionOptions; }
1338 
1340  { myCompressionOptions.myConstantTol = tol; }
1342  { return myCompressionOptions.myConstantTol; }
1343 
1344  /// Saves only the data of this array to the given stream.
1345  /// To reload it you will have to have a matching array in tiles
1346  /// dimensions and size.
1347  void saveData(std::ostream &os) const;
1348  bool saveData(UT_JSONWriter &w,
1349  const char *shared_mem_owner = 0) const;
1350 
1351  /// Load an array, requires you have already size()d this array.
1352  void loadData(UT_IStream &is);
1353  bool loadData(UT_JSONParser &p);
1354 
1355  /// Copy only the data from the source array.
1356  /// Note that it is an error to call this unless isMatching(src).
1358  copyData,
1359  const UT_VoxelArray<T> &, src)
1360 
1361  void copyDataPartial(const UT_VoxelArray<T> &src,
1362  const UT_JobInfo &info);
1363 
1364 private:
1366  resamplethread,
1367  const UT_VoxelArray<T> &, src,
1368  const UT_Filter *, filter,
1369  float, radius,
1370  int, clampaxis)
1371  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1372  const UT_Filter *filter,
1373  float radius,
1374  int clampaxis,
1375  const UT_JobInfo &info);
1376 
1377 
1378  void deleteVoxels();
1379 
1380  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1381  bool populateFromSharedMemory(const char *id);
1382 
1383 
1384  /// Number of elements in each dimension.
1385  int myRes[3];
1386 
1387  /// Inverse tile res, 1/myRes
1388  UT_Vector3 myInvRes;
1389 
1390  /// Number of tiles in each dimension.
1391  int myTileRes[3];
1392 
1393  /// Compression tolerance for lossy compression.
1394  UT_VoxelCompressOptions myCompressionOptions;
1395 
1396  /// Double dereferenced so we can theoretically resize easily.
1397  UT_VoxelTile<T> *myTiles;
1398 
1399  /// Outside values get this if constant borders are used
1400  T myBorderValue;
1401  /// Per axis scale factors for when extrapolating.
1402  T myBorderScale[3];
1403  UT_VoxelBorderType myBorderType;
1404 
1405  /// For initializing the tiles from shared memory.
1406  SYS_SharedMemory *mySharedMem;
1407  SYS_SharedMemoryView *mySharedMemView;
1408 };
1409 
1410 
1411 ///
1412 /// UT_VoxelMipMap
1413 ///
1414 /// This provides a mip-map type structure for a voxel array.
1415 /// It manages the different levels of voxels arrays that are needed.
1416 /// You can create different types of mip maps: average, maximum, etc,
1417 /// which can allow different tricks.
1418 /// Each level is one half the previous level, rounded up.
1419 /// Out of bound voxels are ignored from the lower levels.
1420 ///
1421 template <typename T>
1423 {
1424 public:
1425  /// The different types of functions that can be used for
1426  /// constructing a mip map.
1427  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1428 
1429  UT_VoxelMipMap();
1430  ~UT_VoxelMipMap();
1431 
1432  /// Copy constructor.
1433  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1434 
1435  /// Assignment operator:
1436  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1437 
1438  /// Builds from a given voxel array. The ownership flag determines
1439  /// if we gain ownership of the voxel array and should delete it.
1440  /// In any case, the new levels are owned by us.
1441  void build(UT_VoxelArray<T> *baselevel,
1442  mipmaptype function);
1443 
1444  /// Same as above but construct mipmaps simultaneously for more than
1445  /// one function. The order of the functions will correspond to the
1446  /// order of the data values passed to the traversal callback.
1447  void build(UT_VoxelArray<T> *baselevel,
1448  const UT_Array<mipmaptype> &functions);
1449 
1450  /// This does a top down traversal of the implicit octree defined
1451  /// by the voxel array. Returning false will abort that
1452  /// branch of the octree.
1453  /// The bounding box given is in cell space and is an exclusive
1454  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1455  /// Note that each bounding box will not be square, unless you
1456  /// have the good fortune of starting with a power of 2 cube.
    /// The boolean goes true when the callback is invoked on a
    /// base level.
1459  typedef bool (*Callback)(const T *funcs,
1460  const UT_BoundingBox &box,
1461  bool baselevel, void *data);
1462  void traverseTopDown(Callback function,
1463  void *data) const;
1464 
1465  /// Top down traversal on op. OP is invoked with
1466  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1467  ///
1468  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1469  /// level 0 means the base level.
1470  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1471  /// gives the index to extract the value from level..
1472  template <typename OP>
1473  void traverseTopDown(OP&op) const;
1474 
1475 
1476  /// Top down traversal, but which quad tree is visited first
1477  /// is controlled by
1478  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1479  /// Lower values are visited first.
1480  template <typename OP>
1481  void traverseTopDownSorted(OP&op) const;
1482 
1483 
1484  /// Return the amount of memory used by this mipmap.
1485  int64 getMemoryUsage(bool inclusive) const;
1486 
1487  int numLevels() const { return myNumLevels+1; }
1488 
1489  /// level 0 is the original grid, each level higher is a power
1490  /// of two smaller.
1491  const UT_VoxelArray<T> *level(int level, int function) const
1492  {
1493  if (level == 0)
1494  return myBaseLevel;
1495 
1496  return myLevels(function)[numLevels() - 1 - level];
1497  }
1498 
1499 private:
1500  void doTraverse(int x, int y, int z, int level,
1501  Callback function,
1502  void *data) const;
1503 
1504  /// Note: This variant of doTraverse has the opposite sense of level!
1505  template <typename OP>
1506  void doTraverse(int x, int y, int z, int level,
1507  OP &op) const;
1508  template <typename OP>
1509  void doTraverseSorted(int x, int y, int z, int level,
1510  OP &op) const;
1511 
1512  void initializePrivate();
1513  void destroyPrivate();
1514 
1515  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1516  downsample,
1517  UT_VoxelArray<T> &, dst,
1518  const UT_VoxelArray<T> &, src,
1519  mipmaptype, function)
1520  void downsamplePartial(UT_VoxelArray<T> &dst,
1521  const UT_VoxelArray<T> &src,
1522  mipmaptype function,
1523  const UT_JobInfo &info);
1524 
1525 protected:
1526  T mixValues(T t1, T t2, mipmaptype function) const
1527  {
1528  switch (function)
1529  {
1530  case MIPMAP_MAXIMUM:
1531  return SYSmax(t1, t2);
1532 
1533  case MIPMAP_AVERAGE:
1534  return (t1 + t2) / 2;
1535 
1536  case MIPMAP_MINIMUM:
1537  return SYSmin(t1, t2);
1538  }
1539 
1540  return t1;
1541  }
1542 
1543 
1544  /// This stores the base most level that was provided
1545  /// externally.
1546  UT_VoxelArray<T> *myBaseLevel;
1547  /// If true, we will delete the base level when we are done.
1549 
1550  /// Tracks the number of levels which we used to represent
1551  /// this hierarchy.
1553  /// The array of VoxelArrays, one per level.
1554  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
    /// as big in each dimension. However, every layer is clamped
1556  /// against the resolution of the base layer.
1557  /// We own all these layers.
1559 };
1560 
1561 
1562 /// Iterator for Voxel Arrays
1563 ///
1564 /// This class eliminates the need for having
1565 /// for (z = 0; z < zres; z++)
1566 /// ...
1567 /// for (x = 0; x < xres; x++)
1568 /// loops everywhere.
1569 ///
1570 /// Note that the order of iteration is undefined! (The actual order is
1571 /// to complete each tile in turn, thereby hopefully improving cache
1572 /// coherency)
1573 ///
1574 /// It is safe to write to the voxel array while this iterator is active.
1575 /// It is *not* safe to resize the voxel array (or destroy it)
1576 ///
    /// The iterator is similar in principle to an STL iterator, but somewhat
    /// simpler. The classic STL loop
1579 /// for ( it = begin(); it != end(); ++it )
1580 /// is done using
1581 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1582 ///
1583 template <typename T>
1585 {
1586 public:
1591 
1593  {
1594  myCurTile = -1;
1595  myHandle.resetHandle();
1596  myArray = vox;
1597  // Reset the range
1598  setPartialRange(0, 1);
1599  }
1601  {
1602  setArray((UT_VoxelArray<T> *) vox);
1603  }
1604 
1605  /// Iterates over the array pointed to by the handle. Only
1606  /// supports read access during the iteration as it does
1607  /// a read lock.
1609  {
1610  myHandle = handle;
1611  // Ideally we'd have a separate const iterator
1612  // from our non-const iterator so this would
1613  // only be exposed in the const version.
1614  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1615 
1616  // Reset our range.
1617  myCurTile = -1;
1618  setPartialRange(0, 1);
1619  }
1620 
1621 
1622  /// Restricts this iterator to only run over a subset
1623  /// of the tiles. The tiles will be divided into approximately
1624  /// numrange equal groups, this will be the idx'th.
1625  /// The resulting iterator may have zero tiles.
1626  void setPartialRange(int idx, int numranges);
1627 
1628  /// Ties this iterator to the given jobinfo so it will
1629  /// match the jobinfo's processing.
1630  void splitByTile(const UT_JobInfo &info);
1631 
1632  /// Sets this iterator to run over the tile specified by the referenced
1633  /// iterator.
1634  /// This assumes the underlying arrays are matching.
1635  template <typename S>
1638  {
1639  UT_ASSERT_P(vit.isStartOfTile());
1640  UT_ASSERT_P(getArray()->isMatching(*vit.getArray()));
1641  UT_ASSERT_P(!myJobInfo && !myUseTileList);
1642  myTileStart = vit.getLinearTileNum();
1643  myTileEnd = myTileStart+1;
1644  rewind();
1645  }
1646 
1648  {
1649  setTile(vit, vit.getArray());
1650  }
1651 
1652  /// Assigns an interrupt handler. This will be tested whenever
1653  /// it advances to a new tile. If it is interrupted, the iterator
1654  /// will jump forward to atEnd()
1655  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1656  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1657 
1658  /// Restricts this iterator to the tiles that intersect
1659  /// the given bounding box of voxel coordinates.
1660  /// Note that this will not be a precise restriction as
1661  /// each tile is either included or not.
1662  /// You should setPartialRange() after setting the bbox range
1663  /// The bounding box is on the [0..1]^3 range.
1664  void restrictToBBox(const UT_BoundingBox &bbox);
1665  /// The [xmin, xmax] are inclusive and measured in voxels.
1666  void restrictToBBox(int xmin, int xmax,
1667  int ymin, int ymax,
1668  int zmin, int zmax);
1669 
1670  /// Resets the iterator to point to the first voxel.
1671  void rewind();
1672 
1673  /// Returns true if we have iterated over all of the voxels.
1674  bool atEnd() const
1675  { return myCurTile < 0; }
1676 
1677  /// Advances the iterator to point to the next voxel.
1678  void advance()
1679  {
1680  // We try to advance each axis, rolling over to the next.
1681  // If we exhaust this tile, we call advanceTile.
1682  myPos[0]++;
1683  myTileLocalPos[0]++;
1684  if (myTileLocalPos[0] >= myTileSize[0])
1685  {
1686  // Wrapped in X.
1687  myPos[0] -= myTileLocalPos[0];
1688  myTileLocalPos[0] = 0;
1689 
1690  myPos[1]++;
1691  myTileLocalPos[1]++;
1692  if (myTileLocalPos[1] >= myTileSize[1])
1693  {
1694  // Wrapped in Y.
1695  myPos[1] -= myTileLocalPos[1];
1696  myTileLocalPos[1] = 0;
1697 
1698  myPos[2]++;
1699  myTileLocalPos[2]++;
1700  if (myTileLocalPos[2] >= myTileSize[2])
1701  {
1702  // Wrapped in Z! Finished this tile!
1703  advanceTile();
1704  }
1705  }
1706  }
1707  }
1708 
1709  /// Retrieve the current location of the iterator.
1710  int x() const { return myPos[0]; }
1711  int y() const { return myPos[1]; }
1712  int z() const { return myPos[2]; }
1713  int idx(int idx) const { return myPos[idx]; }
1714 
1715  /// Retrieves the value that we are currently pointing at.
1716  /// This is faster than an operator(x,y,z) as we already know
1717  /// our current tile and that bounds checking isn't needed.
1718  T getValue() const
1719  {
1720  UT_ASSERT_P(myCurTile >= 0);
1721 
1722  UT_VoxelTile<T> *tile;
1723 
1724  tile = myArray->getLinearTile(myCurTile);
1725  return (*tile)(myTileLocalPos[0],
1726  myTileLocalPos[1],
1727  myTileLocalPos[2]);
1728  }
1729 
1730  /// Sets the voxel we are currently pointing to the given value.
1731  void setValue(T t) const
1732  {
1733  UT_ASSERT_P(myCurTile >= 0);
1734 
1735  UT_VoxelTile<T> *tile;
1736 
1737  tile = myArray->getLinearTile(myCurTile);
1738 
1739  tile->setValue(myTileLocalPos[0],
1740  myTileLocalPos[1],
1741  myTileLocalPos[2], t);
1742  }
1743 
1744  /// Returns true if the tile we are currently in is a constant tile.
1745  bool isTileConstant() const
1746  {
1747  UT_ASSERT_P(myCurTile >= 0);
1748 
1749  UT_VoxelTile<T> *tile;
1750 
1751  tile = myArray->getLinearTile(myCurTile);
1752  return tile->isConstant();
1753  }
1754 
1755  /// This tile will iterate over the voxels indexed [start,end).
1757  {
1758  start.x() = myTilePos[0] * TILESIZE;
1759  start.y() = myTilePos[1] * TILESIZE;
1760  start.z() = myTilePos[2] * TILESIZE;
1761  end = start;
1762  end.x() += myTileSize[0];
1763  end.y() += myTileSize[1];
1764  end.z() += myTileSize[2];
1765  }
1766 
1767  /// This tile will iterate over the *inclusive* voxels indexed
    /// in the returned bounding box.
1770  {
1772  getTileVoxels(start, end);
1773  return UT_BoundingBoxI(start, end);
1774  }
1775 
1776  /// Returns true if we are at the start of a new tile.
1777  bool isStartOfTile() const
1778  { return !(myTileLocalPos[0] ||
1779  myTileLocalPos[1] ||
1780  myTileLocalPos[2]); }
1781 
1782  /// Returns the VoxelTile we are currently processing
1784  {
1785  UT_ASSERT_P(myCurTile >= 0);
1786  return myArray->getLinearTile(myCurTile);
1787  }
1788  int getLinearTileNum() const
1789  {
1790  return myCurTile;
1791  }
1792 
1793  /// Advances the iterator to point to the next tile. Useful if the
1794  /// constant test showed that you didn't need to deal with this one.
1795  void advanceTile();
1796 
1797  /// Advances the iterator to pointing just before the next tile so
1798  /// the next advance() will be an advanceTile(). This is useful
1799  /// if you want to do a continue; as your break but the forloop
1800  /// is doing advance()
1801  /// Note the iterator is in a bad state until advance() is called.
1802  void skipToEndOfTile();
1803 
1804  /// Sets a flag which causes the iterator to tryCompress()
1805  /// tiles when it is done with them.
1806  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1807  void setCompressOnExit(bool shouldcompress)
1808  { myShouldCompressOnExit = shouldcompress; }
1809 
1810  /// These templated algorithms are designed to apply simple operations
1811  /// across all of the voxels with as little overhead as possible.
1812  /// The iterator should already point to a voxel array and if multithreaded
1813  /// had its partial range set. The source arrays must be matching size.
1814  /// The operator should support a () operator, and the result is
1815  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...);
1816  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1817  /// Note if both source and destination tiles are constant, only
1818  /// a single operation is invoked.
1819  template <typename OP>
1820  void applyOperation(OP &op);
1821  template <typename OP, typename S>
1822  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1823  template <typename OP>
1824  void applyOperation(OP &op, T a);
1825  template <typename OP, typename S, typename R>
1826  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1827  const UT_VoxelArray<R> &b);
1828  template <typename OP, typename S, typename R, typename Q>
1829  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1830  const UT_VoxelArray<R> &b,
1831  const UT_VoxelArray<Q> &c);
1832  /// These variants will invoke op.isNoop(a, b, ...) which will return
1833  /// true if those values won't affect the destination. This allows
1834  /// constant source tiles to be skipped, for example when adding
1835  /// 0.
1836  template <typename OP, typename S>
1837  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1838  template <typename OP>
1839  void applyOperationCheckNoop(OP &op, T a);
1840 
1841  /// These variants of apply operation also accept a mask array. The
1842  /// operation is applied only where the mask is greater than 0.5.
1843  template <typename OP, typename M>
1844  void maskedApplyOperation(OP &op,
1845  const UT_VoxelArray<M> &mask);
1846  template <typename OP, typename S, typename M>
1847  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1848  const UT_VoxelArray<M> &mask);
1849  template <typename OP, typename S, typename R, typename M>
1850  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1851  const UT_VoxelArray<R>& b,
1852  const UT_VoxelArray<M> &mask);
1853  template <typename OP, typename S, typename R, typename Q, typename M>
1854  void maskedApplyOperation(OP& op, const UT_VoxelArray<S> &a,
1855  const UT_VoxelArray<R>& b,
1856  const UT_VoxelArray<Q>& c,
1857  const UT_VoxelArray<M> &mask);
1858 
1859  /// Assign operation works like apply operation, but *this is written
1860  /// to without reading, so there is one less parameter to the ()
1861  /// callback. This can optimize constant tile writes as the
1862  /// constant() status of the destination doesn't matter.
1863  template <typename OP, typename S>
1864  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1865  template <typename OP, typename S, typename R>
1866  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1867  const UT_VoxelArray<R> &b);
1868  template <typename OP, typename S, typename R, typename Q>
1869  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1870  const UT_VoxelArray<R> &b,
1871  const UT_VoxelArray<Q> &c);
1872 
1873  /// These variants of assign operation also accept a mask array. The
1874  /// assignment operation is performed only where the mask is greater
1875  /// than 0.5.
1876  template <typename OP, typename S, typename M>
1877  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1878  const UT_VoxelArray<M>& mask);
1879  template <typename OP, typename S, typename R, typename M>
1880  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1881  const UT_VoxelArray<R>& b,
1882  const UT_VoxelArray<M>& mask);
1883  template <typename OP, typename S, typename R, typename Q, typename M>
1884  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1885  const UT_VoxelArray<R>& b,
1886  const UT_VoxelArray<Q>& c,
1887  const UT_VoxelArray<M>& mask);
1888 
1889  /// Reduction operators.
1890  /// op.reduce(T a) called for each voxel, *but*,
1891  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1892  template <typename OP>
1893  void reduceOperation(OP &op);
1894 
1895  UT_VoxelArray<T> *getArray() const { return myArray; }
1896 
1897 protected:
1898  /// The array we belong to.
1900  /// The handle that we have locked to get our array. It is null
1901  /// by default which makes the lock/unlock nops.
1903 
1904  /// Absolute index into voxel array.
1905  int myPos[3];
1906 
1907  /// Flag determining if we should compress tiles whenever we
1908  /// advance out of them.
1910 
1913 
1914 public:
1915  /// Our current linear tile idx. A value of -1 implies at end.
1917 
1918  /// Our current index into the tile list
1920 
1921  /// Our start & end tiles for processing a subrange.
1922  /// The tile range is half open [start, end)
1923  int myTileStart, myTileEnd;
1924 
1925  /// Which tile we are as per tx,ty,tz rather than linear index.
1926  int myTilePos[3];
1927 
1928  /// Our position within the current tile.
1929  int myTileLocalPos[3];
1930 
1931  /// The size of the current tile
1932  int myTileSize[3];
1933 
1934  /// The job info to use for tilefetching
1936 
1938 };
1939 
1940 /// Iterator for tiles inside Voxel Arrays
1941 ///
1942 /// This class eliminates the need for having
1943 /// for (z = 0; z < zres; z++)
1944 /// ...
1945 /// for (x = 0; x < xres; x++)
1946 /// loops everywhere.
1947 ///
    /// The iterator is similar in principle to an STL iterator, but somewhat
    /// simpler. The classic STL loop
1950 /// for ( it = begin(); it != end(); ++it )
1951 /// is done using
1952 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1953 ///
1954 template <typename T>
1956 {
1957 public:
1960  template <typename S>
1964 
1965  template <typename S>
1967  UT_VoxelArray<T> *array)
1968  {
1969  UT_ASSERT_P(vit.isStartOfTile());
1970  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1971  myArray = array;
1972  myTileStart[0] = vit.x();
1973  myTileStart[1] = vit.y();
1974  myTileStart[2] = vit.z();
1975  }
1976 
1978  {
1979  setTile(vit, vit.getArray());
1980  }
1981 
1982  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1983  {
1984  myCurTile = array->getLinearTile(lineartilenum);
1985  myArray = array;
1986 
1987  array->linearTileToXYZ(lineartilenum,
1988  myTileStart[0], myTileStart[1], myTileStart[2]);
1989  myTileStart[0] <<= TILEBITS;
1990  myTileStart[1] <<= TILEBITS;
1991  myTileStart[2] <<= TILEBITS;
1992  }
1993 
1994  /// Resets the iterator to point to the first voxel.
1995  void rewind();
1996 
1997  /// Returns true if we have iterated over all of the voxels.
1998  bool atEnd() const
1999  { return myCurTile == 0 || myAtEnd; }
2000 
2001  /// Advances the iterator to point to the next voxel.
2002  void advance()
2003  {
2004  // We try to advance each axis, rolling over to the next.
2005  // If we exhaust this tile, we call advanceTile.
2006  myPos[0]++;
2007  myTileLocalPos[0]++;
2008  if (myTileLocalPos[0] >= myTileSize[0])
2009  {
2010  // Wrapped in X.
2011  myPos[0] -= myTileLocalPos[0];
2012  myTileLocalPos[0] = 0;
2013 
2014  myPos[1]++;
2015  myTileLocalPos[1]++;
2016  if (myTileLocalPos[1] >= myTileSize[1])
2017  {
2018  // Wrapped in Y.
2019  myPos[1] -= myTileLocalPos[1];
2020  myTileLocalPos[1] = 0;
2021 
2022  myPos[2]++;
2023  myTileLocalPos[2]++;
2024  if (myTileLocalPos[2] >= myTileSize[2])
2025  {
2026  // Wrapped in Z! Finished this tile!
2027  advanceTile();
2028  }
2029  }
2030  }
2031  }
2032 
2033  /// Retrieve the current location of the iterator, in the
2034  /// containing voxel array, not in the tile.
2035  int x() const { return myPos[0]; }
2036  int y() const { return myPos[1]; }
2037  int z() const { return myPos[2]; }
2038  int idx(int idx) const { return myPos[idx]; }
2039 
2040  /// Retrieves the value that we are currently pointing at.
2041  /// This is faster than an operator(x,y,z) as we already know
2042  /// our current tile and that bounds checking isn't needed.
2043  T getValue() const
2044  {
2045  UT_ASSERT_P(myCurTile);
2046 
2047  return (*myCurTile)(myTileLocalPos[0],
2048  myTileLocalPos[1],
2049  myTileLocalPos[2]);
2050  }
2051 
2052  /// Sets the voxel we are currently pointing to the given value.
2053  void setValue(T t) const
2054  {
2055  UT_ASSERT_P(myCurTile);
2056 
2057  myCurTile->setValue(myTileLocalPos[0],
2058  myTileLocalPos[1],
2059  myTileLocalPos[2], t);
2060  }
2061 
2062  /// Returns true if the tile we are currently in is a constant tile.
2063  bool isTileConstant() const
2064  {
2065  UT_ASSERT_P(myCurTile);
2066 
2067  return myCurTile->isConstant();
2068  }
2069 
2070  /// Returns true if we are at the start of a new tile.
2071  bool isStartOfTile() const
2072  { return !(myTileLocalPos[0] ||
2073  myTileLocalPos[1] ||
2074  myTileLocalPos[2]); }
2075 
2076  /// Returns the VoxelTile we are currently processing
2078  {
2079  return myCurTile;
2080  }
2081 
 2082  /// Advances the iterator to point to the next tile. Since
 2083  /// we are restricted to one tile, effectively just ends the iterator.
 /// NOTE(review): defined out of line — presumably in UT_VoxelArray.C,
 /// which this header includes at the bottom; confirm there.
 2084  void advanceTile();
2085 
 2086  /// Sets a flag which causes the iterator to tryCompress()
 2087  /// tiles when it is done with them.
 /// getCompressOnExit() reports the current state of that flag.
 2088  bool getCompressOnExit() const { return myShouldCompressOnExit; }
 2089  void setCompressOnExit(bool shouldcompress)
 2090  { myShouldCompressOnExit = shouldcompress; }
2091 
 2092  /// Reduction operators.
 2093  /// op.reduce(T a) called for each voxel, *but*,
 2094  /// op.reduceMany(T a, int n) called to reduce constant blocks.
 2095  /// Early exits if op.reduce() returns false.
 /// NOTE(review): presumably the return value reports whether the
 /// reduction ran to completion — confirm in the out-of-line definition.
 2096  template <typename OP>
 2097  bool reduceOperation(OP &op);
2098 
2099 protected:
2100  /// Current processing tile
2103 
2104  /// Absolute index into voxel array.
2105  int myPos[3];
2106  /// Absolute index of start of tile
2107  int myTileStart[3];
2108 
2109  /// Flag determining if we should compress tiles whenever we
2110  /// advance out of them.
2112 
2113  /// Since we want to allow multiple passes, we can't
2114  /// clear out myCurTile when we hit the end.
2115  bool myAtEnd;
2116 
2117 public:
2118  /// Our position within the current tile.
2119  int myTileLocalPos[3];
2120 
2121  /// The size of the current tile
2122  int myTileSize[3];
2123 };
2124 
2125 /// Probe for Voxel Arrays
2126 ///
2127 /// This class is designed to allow for efficient evaluation
2128 /// of aligned indices of a voxel array, provided the voxels are iterated
2129 /// in a tile-by-tile, x-inner most, manner.
2130 ///
2131 /// This class will create a local copy of the voxel data where needed,
2132 /// uncompressing the information once for every 16 queries. It will
2133 /// also create an aligned buffer so you can safely use v4uf on fpreal32
2134 /// data.
2135 ///
2136 /// For queries where you need surrounding values, the prex and postx can
2137 /// specify padding on the probe. prex should be -1 to allow reading
2138 /// -1 offset, postx 1 to allow reading a 1 offset.
2139 ///
2140 
2141 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2142 class UT_VoxelProbe
2143 {
2144 public:
2145  UT_VoxelProbe();
2146  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2147  ~UT_VoxelProbe();
2148 
2149  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2151  int prex = 0, int postx = 0)
2152  {
2153  SYS_STATIC_ASSERT(DoWrite == false);
2154  setArray((UT_VoxelArray<T> *)vox, prex, postx);
2155  }
2156 
2157  UT_VoxelArray<T> *getArray() const { return myArray; }
2158 
2159  bool isValid() const { return myArray != 0; }
2160 
2161  inline T getValue() const
2162  {
2163  return *myCurLine;
2164  }
2165  inline T getValue(int offset) const
2166  {
2167  return myCurLine[myStride*offset];
2168  }
2169 
2170  inline void setValue(T value)
2171  {
2172  UT_ASSERT_P(DoWrite);
2173  *myCurLine = value;
2174  if (TestForWrites)
2175  myDirty = true;
2176  }
2177 
2178 
2179  /// Resets where we currently point to.
2180  /// Returns true if we had to reset our cache line. If we didn't,
2181  /// and you have multiple probes acting in-step, you can just
2182  /// advanceX() the other probes
2183  template <typename S>
2185  { return setIndex(vit.x(), vit.y(), vit.z()); }
2186  template <typename S>
2188  { return setIndex(vit.x(), vit.y(), vit.z()); }
2189 
2190  bool setIndex(int x, int y, int z);
2191 
2192  /// Blindly advances our current pointer.
2193  inline void advanceX()
2194  {
2195  myCurLine += myStride;
2196  myX++;
2197  UT_ASSERT_P(myX < myMaxValidX);
2198  }
2199 
2200  /// Adjusts our current pointer to the given absolute location,
2201  /// assumes the new value is inside our valid range.
2202  inline void resetX(int x)
2203  {
2204  myCurLine += myStride * (x - myX);
2205  myX = x;
2206  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
2207  }
2208 
2209 protected:
2210  void reloadCache(int x, int y, int z);
2211 
2212  void writeCacheLine();
2213 
2214  void buildConstantCache(T value);
2215 
2217  /// myCacheLine[0] is the start of the cache line, so -1 would be
2218  /// the first pre-rolled value
2220  /// Where we actually allocated our cache line, aligned to 4x multiple
2221  /// to ensure SSE compatible.
2223 
2224  int myX, myY, myZ;
2225  int myPreX, myPostX;
2228  /// Half inclusive [,) range of valid x queries for current cache.
2229  int myMinValidX, myMaxValidX;
2230 
2231  /// Determines if we have anything to write back, only
2232  /// valid if TestForWrites is enabled.
2233  bool myDirty;
2234 
2236 
2237  friend class UT_VoxelProbeCube<T>;
2238  friend class UT_VoxelProbeFace<T>;
2239 };
2240 
2241 ///
2242 /// The vector probe is three normal probes into separate voxel arrays
2243 /// making it easier to read and write to aligned vector fields.
2244 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
2245 ///
2246 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2248 {
2249 public:
2251  { }
2253  { setArray(vx, vy, vz); }
2255  {}
2256 
2258  {
2259  myLines[0].setArray(vx);
2260  myLines[1].setArray(vy);
2261  myLines[2].setArray(vz);
2262  }
2263  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
2264  {
2265  SYS_STATIC_ASSERT(DoWrite == false);
2266  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
2267  }
2268 
2269  inline UT_Vector3 getValue() const
2270  {
2271  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2272  }
2273  inline T getValue(int axis) const
2274  {
2275  return myLines[axis].getValue();
2276  }
2277 
2278  inline void setValue(const UT_Vector3 &v)
2279  {
2280  myLines[0].setValue(v.x());
2281  myLines[1].setValue(v.y());
2282  myLines[2].setValue(v.z());
2283  }
2284 
2285  inline void setComponent(int axis, T val)
2286  {
2287  myLines[axis].setValue(val);
2288  }
2289 
2290  /// Resets where we currently point to.
2291  /// Returns true if we had to reset our cache line. If we didn't,
2292  /// and you have multiple probes acting in-step, you can just
2293  /// advanceX() the other probes
2294  template <typename S>
2296  { return setIndex(vit.x(), vit.y(), vit.z()); }
2297  template <typename S>
2299  { return setIndex(vit.x(), vit.y(), vit.z()); }
2300 
2301  bool setIndex(int x, int y, int z)
2302  {
2303  if (myLines[0].setIndex(x, y, z))
2304  {
2305  myLines[1].setIndex(x, y, z);
2306  myLines[2].setIndex(x, y, z);
2307  return true;
2308  }
2309  myLines[1].advanceX();
2310  myLines[2].advanceX();
2311  return false;
2312  }
2313 
2314  void advanceX()
2315  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2316 
2317 protected:
2319 };
2320 
2321 template <typename T>
2322 class
2324 {
2325 public:
2327  ~UT_VoxelProbeCube();
2328 
2329  void setConstCubeArray(const UT_VoxelArray<T> *vox);
2330  void setConstPlusArray(const UT_VoxelArray<T> *vox);
2331 
2332  /// Allows you to query +/-1 in each direction. In cube update,
2333  /// all are valid. In plus update, only one of x y and z may be
2334  /// non zero.
2336  T
2337  getValue(int x, int y, int z) const
2338  {
2339  UT_ASSERT_P(x >= -1 && x <= 1 &&
2340  y >= -1 && y <= 1 &&
2341  z >= -1 && z <= 1);
2342 
2343  return myLines[y+1][z+1].getValue(x);
2344  }
2345 
2347  T
2349  {
2350  return getValue(offset[0], offset[1], offset[2]);
2351  }
2352 
2353  template <typename S>
2355  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2356  template <typename S>
2358  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2359  bool setIndexCube(int x, int y, int z);
2360 
2361  template <typename S>
2363  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2364  template <typename S>
2366  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2367  bool setIndexPlus(int x, int y, int z);
2368 
2369  /// Computes central difference gradient, does not scale
2370  /// by the step size (which is twice voxelsize)
2371  /// Requires PlusArray
2373  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2374  getValue(0,1,0) - getValue(0,-1,0),
2375  getValue(0,0,1) - getValue(0,0,-1)); }
2376 
2377  /// Computes the central difference curvature using the given
2378  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2379  /// Requires CubeArray.
2380  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2381 
2382  /// Computes the laplacian, again with a given 1/voxelsize.
2383  /// Requires PlusArray
2384  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2385 
2386 protected:
2387  /// Does an rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2388  /// so further queries with y+1 will be cache hits for 2 out of 3.
2389  static void rotateLines(UT_VoxelProbe<T, true, false, false> &ym,
2392 
2394  /// Cached look up position. myValid stores if they are
2395  /// valid values or not
2396  bool myValid;
2397  int myX, myY, myZ;
2398  /// Half inclusive [,) range of valid x queries for current cache.
2399  int myMinValidX, myMaxValidX;
2400 };
2401 
2402 ///
2403 /// UT_VoxelProbeConstant
2404 ///
2405 /// Looks like a voxel probe but only returns a constant value.
2406 ///
2407 template <typename T>
2408 class
2410 {
2411 public:
2414 
2415  template <typename S>
2417  { return true; }
2418  template <typename S>
2420  { return true; }
2421  bool setIndex(int x, int y, int z)
2422  { return true; }
2423 
2424  void setValue(T val) { myValue = val; }
2425  T getValue() const { return myValue; }
2426 protected:
2428 };
2429 
2430 ///
2431 /// UT_VoxelProbeAverage
2432 ///
 2433 /// When working with MAC grids one often has slightly misaligned
 2434 /// fields. Ie, one field is at the half-grid spacing of another field.
 2435 /// The step values are 0 if the dimension is aligned, -1 for half a step
2436 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2437 /// (ie, (val(0)+val(1))/2)
2438 ///
2439 template <typename T, int XStep, int YStep, int ZStep>
2440 class
2442 {
2443 public:
2446 
2447  void setArray(const UT_VoxelArray<T> *vox);
2448 
2449  template <typename S>
2451  { return setIndex(vit.x(), vit.y(), vit.z()); }
2452  template <typename S>
2454  { return setIndex(vit.x(), vit.y(), vit.z()); }
2455  bool setIndex(int x, int y, int z);
2456 
2457  /// Returns the velocity centered at this index, thus an average
2458  /// of the values in each of our internal probes.
2459  inline T getValue() const
2460  {
2461  if (ZStep)
2462  return (valueZ(1) + valueZ(0)) * 0.5;
2463  return valueZ(0);
2464  }
2465 
2466 protected:
2467  inline T valueZ(int z) const
2468  {
2469  if (YStep)
2470  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2471  return valueYZ(0, z);
2472  }
2473 
2474  inline T valueYZ(int y, int z) const
2475  {
2476  if (XStep > 0)
2477  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2478  if (XStep < 0)
2479  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2480  return myLines[y][z].getValue();
2481  }
2482 
2483  // Stores [Y][Z] lines.
2485 };
2486 
2487 
2488 ///
2489 /// UT_VoxelProbeFace is designed to walk over three velocity
2490 /// fields that store face-centered values. The indices refer
2491 /// to the centers of the voxels.
2492 ///
2493 template <typename T>
2494 class
2496 {
2497 public:
2499  ~UT_VoxelProbeFace();
2500 
2501  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2502  void setVoxelSize(const UT_Vector3 &voxelsize);
2503 
2504  template <typename S>
2506  { return setIndex(vit.x(), vit.y(), vit.z()); }
2507  template <typename S>
2509  { return setIndex(vit.x(), vit.y(), vit.z()); }
2510  bool setIndex(int x, int y, int z);
2511 
2512  /// Get the face values on each face component.
2513  /// Parameters are axis then side.
2514  /// 0 is the lower face, 1 the higher face.
2515  inline T face(int axis, int side) const
2516  {
2517  if (axis == 0)
2518  return myLines[0][0].getValue(side);
2519  else
2520  return myLines[axis][side].getValue();
2521  }
2522 
2523  /// Returns the velocity centered at this index, thus an average
2524  /// of the values in each of our internal probes.
2525  inline UT_Vector3 value() const
2526  {
2527  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2528  0.5f * (face(1, 0) + face(1, 1)),
2529  0.5f * (face(2, 0) + face(2, 1)));
2530  }
2531 
2532  /// Returns the divergence of this cell.
2533  inline T divergence() const
2534  {
2535  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2536  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2537  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2538 
2539  }
2540 
2541 protected:
2542 
2543  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2545 
2546 
2548 
2549  /// Cached look up position. myValid stores if they are
2550  /// valid values or not
2551  bool myValid;
2552  int myX, myY, myZ;
2553  /// Half inclusive [,) range of valid x queries for current cache.
2554  int myMinValidX, myMaxValidX;
2555 
2556  UT_Vector3 myVoxelSize, myInvVoxelSize;
2557 };
2558 
2559 
2560 #include "UT_VoxelArray.C"
2561 
2562 
2563 // Typedefs for common voxel array types
2567 
2575 // Read only probe
2579 // Write only
2583 // Read/Write always writeback.
2587 // Read/Write with testing
2591 
2592 // TODO: add support for read-write probe cube
2594 
2598 
2602 
2603 #endif
2604 
UT_COWWriteHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayWriteHandleV4
void setTile(const UT_VoxelArrayIterator< T > &vit)
exint exint const UT_JobInfo &info const
int x() const
Retrieve the current location of the iterator.
UT_Interrupt * myInterrupt
UT_Vector3I getVoxelRes() const
#define SYSmax(a, b)
Definition: SYS_Math.h:1513
int xyzTileToLinear(int x, int y, int z) const
void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const
SYS_FORCE_INLINE T lerpSample(T *samples, float fx, float fy, float fz) const
Lerps the given sample using trilinear interpolation.
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1222
const UT_VoxelTile< T > & operator=(const UT_VoxelTile< T > &src)
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
void findAverage(T &avg) const
Determines the average value of the tile.
#define SYS_STATIC_ASSERT(expr)
UT_Vector3I linearTileToXYZ(int idx) const
int int32
Definition: SYS_Types.h:39
UT_VoxelProbe< fpreal32, true, true, true > UT_VoxelRWTProbeF
void setInterrupt(UT_Interrupt *interrupt)
T valueZ(int z) const
void loadData(UT_IStream &is)
Load an array, requires you have already size()d this array.
UT_VoxelBorderType getBorder() const
exint getDataLength() const
Returns the amount of data used by the tile myData pointer.
bool atEnd() const
Returns true if we have iterated over all of the voxels.
UT_COWHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayHandleF
void match(const UT_VoxelArray< T > &src)
SYS_FORCE_INLINE T getValue(const UT_Vector3I &offset) const
bool isMatching(const UT_VoxelArray< S > &src) const
Axis-aligned bounding box (AABB).
Definition: GEO_Detail.h:43
*get result *(waiting if necessary)*A common idiom is to fire a bunch of sub tasks at the and then *wait for them to all complete We provide a helper class
Definition: thread.h:623
void setValue(UT_Vector3I index, T value)
virtual const char * getName()=0
T valueYZ(int y, int z) const
void resample(const UT_VoxelArray< T > &src, UT_FilterType filtertype=UT_FILTER_POINT, float filterwidthscale=1.0f, int clampaxis=-1)
Fills this by resampling the given voxel array.
const UT_VoxelCompressOptions & getCompressionOptions() const
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
void
Definition: png.h:1083
int64 getMemoryUsage(bool inclusive) const
Return the amount of memory used by this array.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
bool setIndex(UT_VoxelTileIterator< S > &vit)
SYS_FORCE_INLINE void freeData()
T operator()(UT_Vector3D pos) const
THREADED_METHOD3_CONST(UT_VoxelArray< T >, numTiles() > 16, flatten, T *, flatarray, exint, ystride, exint, zstride) void flattenPartial(T *flatarray
UT_Vector3 gradient() const
int numVoxels() const
GLuint start
Definition: glcorearb.h:475
UT_COWReadHandle< UT_VoxelArray< T > > myHandle
UT_VoxelArray< fpreal32 > UT_VoxelArrayF
void clampIndex(int &x, int &y, int &z) const
UT_COWReadHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayReadHandleF
virtual T getValue(const UT_VoxelTile< T > &tile, int x, int y, int z) const =0
T * fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
static void registerCompressionEngine(UT_VoxelTileCompress< T > *engine)
fpreal myQuantizeTol
Tolerance for quantizing to reduced bit depth.
virtual bool lerp(GA_AttributeOperand &d, GA_AttributeOperand &a, GA_AttributeOperand &b, GA_AttributeOperand &t) const
d = SYSlerp(a, b, t);
const GLfloat * c
Definition: glew.h:16631
UT_VoxelVectorProbe< fpreal32, true, true, true > UT_VoxelVectorRWTProbeF
UT_Vector3T< float > UT_Vector3
UT_VoxelTile< T > * myCurTile
Current processing tile.
UT_VoxelVectorProbe(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
UT_VoxelArray< T > * myBaseLevel
constexpr SYS_FORCE_INLINE T & z() noexcept
Definition: UT_Vector3.h:657
int64 exint
Definition: SYS_Types.h:125
T divergence() const
Returns the divergence of this cell.
GLint level
Definition: glcorearb.h:108
SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
UT_VoxelBorderType
Definition: UT_VoxelArray.h:67
#define SYSabs(a)
Definition: SYS_Math.h:1515
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
bool myOwnBase
If true, we will delete the base level when we are done.
UT_VoxelArray< UT_Vector4 > UT_VoxelArrayV4
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:88
#define UT_API
Definition: UT_API.h:14
bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
ImageBuf OIIO_API min(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
GLuint GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat t1
Definition: glew.h:12900
UT_VoxelArray< T > * myArray
fpreal UTvoxelTileDist(const UT_Vector2 &a, const UT_Vector2 &b)
void setArray(UT_VoxelArray< T > *vox)
T mixValues(T t1, T t2, mipmaptype function) const
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:35
T ysize() const
ImageBuf OIIO_API flatten(const ImageBuf &src, ROI roi={}, int nthreads=0)
void copyWithOffset(const UT_VoxelArray< T > &src, int offx, int offy, int offz)
int indexToLinearTile(int x, int y, int z) const
GLuint const GLchar * name
Definition: glcorearb.h:786
UT_VoxelTileIterator< int64 > UT_VoxelTileIteratorI
bool isConstant(T *cval=0) const
UT_Vector3T< int64 > UT_Vector3I
void makeConstant(T t)
Turns this tile into a constant tile of the given value.
bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const
T face(int axis, int side) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
UT_VoxelVectorProbe< fpreal32, true, true, false > UT_VoxelVectorRWProbeF
SYS_FORCE_INLINE T rawConstVal() const
GLenum src
Definition: glcorearb.h:1793
UT_FilterType
Definition: UT_FilterType.h:16
void UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
bool atEnd() const
Returns true if we have iterated over all of the voxels.
const UT_JobInfo & info
void setCompressionOptions(const UT_VoxelCompressOptions &options)
float fpreal32
Definition: SYS_Types.h:200
virtual bool load(UT_JSONParser &p, UT_VoxelTile< T > &tile) const
UT_VoxelArray< T > * getArray() const
GLdouble GLdouble t
Definition: glew.h:1403
int myCurTile
Our current linear tile idx. A value of -1 implies at end.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
void setCompressionTolerance(fpreal tol)
void setHandle(UT_COWReadHandle< UT_VoxelArray< T > > handle)
void flatten(S *dst, int dststride) const
Flattens ourself into the given destination buffer.
void makeFpreal16()
Explicit compress to fpreal16. Lossy. No-op if already constant.
void size(int xres, int yres, int zres, bool reset=true)
SYS_FORCE_INLINE T operator()(int x, int y, int z) const
int zres() const
GLsizei samples
Definition: glcorearb.h:1298
virtual bool writeThrough(UT_VoxelTile< T > &tile, int x, int y, int z, T t) const =0
UT_VoxelProbeCube< fpreal32 > UT_VoxelROProbeCubeF
UT_VoxelVectorProbe< fpreal32, true, false, false > UT_VoxelVectorProbeF
GLenum GLsizei GLsizei GLint * values
Definition: glcorearb.h:1602
SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
GLint GLenum GLint x
Definition: glcorearb.h:409
virtual ~UT_VoxelTileCompress()
GLsizeiptr size
Definition: glcorearb.h:664
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:857
static fpreal dist(T a, T b)
bool setIndex(int x, int y, int z)
double fpreal64
Definition: SYS_Types.h:201
ImageBuf OIIO_API laplacian(const ImageBuf &src, ROI roi={}, int nthreads=0)
bool getCompressOnExit() const
SYS_NO_DISCARD_RESULT SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
unsigned char uint8
Definition: SYS_Types.h:36
bool writeThrough(int x, int y, int z, T t)
int yres() const
UT_VoxelTileIterator< fpreal32 > UT_VoxelTileIteratorF
void moveTilesWithOffset(UT_VoxelArray< T > &src, int tileoffx, int tileoffy, int tileoffz)
const T * rawData() const
GLenum array
Definition: glew.h:9108
T getBorderScale(int axis) const
const S * writeTiles(const S *srcdata, int srcstride, const UT_IntArray &tilelist)
bool hasNan() const
Returns true if any NANs are in this tile.
GLuint64EXT * result
Definition: glew.h:14311
GLfloat bias
Definition: glew.h:10316
void setArray(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
void resetX(int x)
void setTile(const UT_VoxelArrayIterator< S > &vit, UT_VoxelArray< T > *array)
THREADED_METHOD4(UT_VoxelArray< T >, numTiles() > 4, copyWithOffsetInternal, const UT_VoxelArray< T > &, src, int, offx, int, offy, int, offz) void copyWithOffsetInternalPartial(const UT_VoxelArray< T > &src
exint exint zstride
int myCurTileListIdx
Our current index into the tile list.
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
virtual void load(UT_IStream &is, UT_VoxelTile< T > &tile) const
GLfloat GLfloat GLfloat v2
Definition: glcorearb.h:818
int64 getMemoryUsage(bool inclusive) const
Returns the amount of memory used by this tile.
THREADED_METHOD4_CONST(UT_VoxelArray< T >, numTiles() > 16, flattenGLFixed8, uint8 *, flatarray, exint, ystride, exint, zstride, T, dummy) void flattenGLFixed8Partial(uint8 *flatarray
int getYRes() const
void weightedSum(int pstart[3], int pend[3], const float *weights[3], int start[3], T &result)
SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const
int numTiles() const
UT_BoundingBoxI getTileBBox() const
int getLinearTileNum() const
bool getValues(const UT_BoundingBoxI &bbox, T *values, const exint size) const
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
GLint GLuint mask
Definition: glcorearb.h:124
static void saveCompressionTypes(std::ostream &os)
Stores a list of compresson engines to os.
T getBorderValue() const
UT_VoxelArray< T > * myArray
The array we belong to.
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:152
const GLdouble * v
Definition: glcorearb.h:837
T getValue() const
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1222
GLuint GLuint end
Definition: glcorearb.h:475
virtual void save(std::ostream &os, const UT_VoxelTile< T > &tile) const
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
const UT_VoxelArray< T > & operator=(const UT_VoxelArray< T > &src)
Assignment operator:
UT_VoxelArray< T > * getArray() const
UT_Vector3T< T > SYSclamp(const UT_Vector3T< T > &v, const UT_Vector3T< T > &min, const UT_Vector3T< T > &max)
Definition: UT_Vector3.h:1040
bool isValid() const
GLuint GLfloat GLfloat y0
Definition: glew.h:12900
void makeRawUninitialized()
Definition: VM_SIMD.h:188
void setTile(const UT_VoxelArrayIterator< S > &vit, UT_VoxelArray< T > *array)
void setValue(int x, int y, int z, T t)
UT_VoxelTile< T > * getTile(int tx, int ty, int tz) const
GLdouble GLdouble GLdouble z
Definition: glcorearb.h:848
UT_VoxelProbe< UT_Vector4, true, true, true > UT_VoxelRWTProbeV4
void setCompressOnExit(bool shouldcompress)
T evaluate(const UT_Vector3 &pos, const UT_Filter &filter, fpreal radius, int clampaxis=-1) const
long long int64
Definition: SYS_Types.h:116
UT_VoxelMipMap< fpreal32 > UT_VoxelMipMapF
SYS_NO_DISCARD_RESULT SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
bool setIndex(UT_VoxelTileIterator< S > &vit)
GLfloat GLfloat p
Definition: glew.h:16656
#define SYS_NO_DISCARD_RESULT
Definition: SYS_Compiler.h:93
#define SYS_STATIC_FORCE_INLINE
Definition: SYS_Inline.h:48
GLint GLint GLint GLint GLint GLint GLint GLbitfield GLenum filter
Definition: glcorearb.h:1297
bool tryCompress(const UT_VoxelCompressOptions &options)
virtual bool canSave() const
Does this engine support saving and loading?
int getRes(int dim) const
THREADED_METHOD(UT_VoxelArray< T >, numTiles() > 100, collapseAllTiles) void collapseAllTilesPartial(const UT_JobInfo &info)
int getXRes() const
void setRes(int xr, int yr, int zr)
UT_VoxelArray< int64 > UT_VoxelArrayI
THREADED_METHOD1(UT_VoxelArray< T >, numTiles() > 100, constant, T, t) void const antPartial(T t
bool setIndexPlus(UT_VoxelTileIterator< S > &vit)
UT_API UT_ValArray< UT_VoxelTileCompress< fpreal16 > * > & UTvoxelTileGetCompressionEngines(fpreal16 *dummy)
bool setIndex(UT_VoxelArrayIterator< S > &vit)
GLboolean reset
Definition: glew.h:4989
virtual bool isLossless() const
Returns true if the compression type is lossless.
signed char int8
Definition: SYS_Types.h:35
void getTileVoxels(int idx, UT_Vector3I &start, UT_Vector3I &end) const
idxth tile represents the voxels indexed [start,end).
SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z, T *sample) const
SYS_FORCE_INLINE bool inlineConstant() const
void writeCacheLine(T *cacheline, int y, int z)
Fills a cache line from an external buffer into our own data.
void advanceX()
Blindly advances our current pointer.
T xsize() const
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
void setValue(int x, int y, int z, T t)
SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const
UT_VoxelArrayIterator< int64 > UT_VoxelArrayIteratorI
virtual int getDataLength(const UT_VoxelTile< T > &tile) const =0
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
This tile will iterate over the voxels indexed [start,end).
bool myAllowFP16
Conversion to fpreal16, only valid for scalar data.
SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
int getZRes() const
const UT_JobInfo * myJobInfo
The job info to use for tilefetching.
void setConstArray(const UT_VoxelArray< T > *vox, int prex=0, int postx=0)
GLboolean * data
Definition: glcorearb.h:131
static UT_VoxelTileCompress< T > * getCompressionEngine(int index)
GLclampd zmax
Definition: glew.h:9063
bool getCompressOnExit() const
T volume() const
UT_VoxelTileIterator< UT_Vector4 > UT_VoxelTileIteratorV4
UT_VoxelProbe< fpreal32, false, true, false > UT_VoxelWOProbeF
GLuint GLfloat * val
Definition: glcorearb.h:1608
virtual void findMinMax(const UT_VoxelTile< T > &tile, T &min, T &max) const
Definition: UT_VoxelArray.C:83
void setLinearTile(exint lineartilenum, UT_VoxelArray< T > *array)
void setTile(const UT_VoxelArrayIterator< T > &vit)
bool isRawFull() const
Returns if this tile is in raw full format.
bool hasNan() const
Returns true if any element of the voxel array is NAN.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
UT_VoxelVectorProbe< fpreal32, false, true, false > UT_VoxelVectorWOProbeF
UT_VoxelProbe< UT_Vector4, true, false, false > UT_VoxelProbeV4
T operator()(UT_Vector3I index) const
GLsizei const GLint box[]
Definition: glew.h:11654
SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
SYS_STATIC_FORCE_INLINE T lerpValues(T v1, T v2, fpreal32 bias)
Lerps two numbers, templated to work with T.
T getValue(int x, int y, int z) const
bool setIndexPlus(UT_VoxelArrayIterator< S > &vit)
SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z, float fx, float fy, float fz) const
T getValue(int offset) const
void copyFragment(int dstx, int dsty, int dstz, const UT_VoxelTile< T > &srctile, int srcx, int srcy, int srcz)
void uncompress()
Turns a compressed tile into a raw tile.
UT_VoxelArray< T > * myArray
short int16
Definition: SYS_Types.h:37
fpreal64 fpreal
Definition: SYS_Types.h:277
THREADED_METHOD3(UT_VoxelArray< T >, numTiles() > 16, extractFromFlattened, const T *, flatarray, exint, ystride, exint, zstride) void extractFromFlattenedPartial(const T *flatarray
int idx(int idx) const
UT_COWReadHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayReadHandleV4
void setConstArray(const UT_VoxelArray< T > *vox)
UT_API UT_Interrupt * UTgetInterrupt()
Obtain global UT_Interrupt singleton.
SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
GLuint index
Definition: glcorearb.h:786
T getValue(int axis) const
UT_VoxelProbe< fpreal32, true, false, false > UT_VoxelProbeF
int numLevels() const
UT_BoundingBoxT< int64 > UT_BoundingBoxI
SYS_FORCE_INLINE T getValue(int x, int y, int z) const
UT_VoxelProbe< fpreal32, true, true, false > UT_VoxelRWProbeF
bool isRaw() const
Returns if this tile is in raw format.
void saveData(std::ostream &os) const
GLbyte * weights
Definition: glew.h:7581
UT_ValArray< UT_VoxelArray< T > ** > myLevels
int getRes(int axis) const
ImageBuf OIIO_API max(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
UT_BoundingBoxI getTileBBox(int idx) const
int int int offz
UT_Vector3 value() const
void linearTileToXYZ(int idx, int &x, int &y, int &z) const
UT_Vector3 myVoxelSize
GLsizei const GLfloat * value
Definition: glcorearb.h:824
UT_VoxelProbe< UT_Vector4, true, true, false > UT_VoxelRWProbeV4
GLfloat f
Definition: glcorearb.h:1926
exint numVoxels() const
void save(std::ostream &os) const
#define DEFINE_STD_FUNC(TYPE)
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
T operator()(int x, int y, int z) const
UT_COWHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayHandleV4
int isInside(const UT_Vector3T< T > &pt) const
UT_VoxelTile< T > * getLinearTile(int idx) const
void load(UT_IStream &is, const UT_IntArray &compression)
UT_VoxelProbe< UT_Vector4, false, true, false > UT_VoxelWOProbeV4
void setCompressOnExit(bool shouldcompress)
static int lookupCompressionEngine(const char *name)
UT_COWWriteHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayWriteHandleF
Definition: core.h:1131
void setValue(const UT_Vector3 &v)
void setValue(T value)
SYS_FORCE_INLINE void lerpVoxelMinMax(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
bool setIndexCube(UT_VoxelArrayIterator< S > &vit)
#define UT_VOXEL_FREE(x)
Definition: UT_VoxelArray.h:52
GLintptr offset
Definition: glcorearb.h:665
bool setIndexCube(UT_VoxelTileIterator< S > &vit)
void uncompressFull()
Turns a tile into a raw full tile.
void setBorder(UT_VoxelBorderType type, T t)
UT_Vector3 getValue() const
T * rawFullData()
Returns the raw full data of the tile.
GLfloat GLfloat v1
Definition: glcorearb.h:817
GLint GLenum GLboolean GLsizei stride
Definition: glcorearb.h:872
UT_VoxelArrayIterator< fpreal32 > UT_VoxelArrayIteratorF
virtual bool tryCompress(UT_VoxelTile< T > &tile, const UT_VoxelCompressOptions &options, T min, T max) const =0
void setForeignData(void *data, int8 compress_type)
SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
static void expandMinMax(T v, T &min, T &max)
Designed to be specialized according to T.
type
Definition: core.h:1059
int idx(int idx) const
GLenum GLenum dst
Definition: glcorearb.h:1793
constexpr SYS_FORCE_INLINE T & y() noexcept
Definition: UT_Vector3.h:655
bool extractSampleCube(int x, int y, int z, T *sample) const
void findMinMax(T &min, T &max) const
Finds the minimum and maximum T values.
bool isValidIndex(int x, int y, int z) const
Returns true if the given x, y, z values lie inside the valid index.
#define SYSmin(a, b)
Definition: SYS_Math.h:1514
void setComponent(int axis, T val)
virtual bool save(UT_JSONWriter &w, const UT_VoxelTile< T > &tile) const
SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z, float fx, float fy, float fz) const
void setBorderScale(T scalex, T scaley, T scalez)
bool setIndex(UT_VoxelTileIterator< S > &vit)
bool setIndex(UT_VoxelTileIterator< S > &vit)
void setConstArray(const UT_VoxelArray< T > *vx, const UT_VoxelArray< T > *vy, const UT_VoxelArray< T > *vz)
exint exint T dummy
const UT_VoxelArray< T > * level(int level, int function) const
int xres() const
Read the current resolution.
bool compressionEnabled() const
S * extractTiles(S *dstdata, int stride, const UT_IntArray &tilelist) const
GLint y
Definition: glcorearb.h:103
bool setIndex(int x, int y, int z)
bool setIndex(UT_VoxelArrayIterator< S > &vit)
void writeData(const S *src, int srcstride)
SYS_FORCE_INLINE T * rawConstData() const
bool isSimpleCompression() const
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
bool isConstant() const
Returns if this tile is constant.
SYS_FORCE_INLINE T lerpSampleAxis(T *samples, float fx, float fy, float fz) const
Definition: format.h:895
GLenum GLuint GLint GLenum face
Definition: glew.h:4630
static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions)
UT_VoxelArrayIterator< UT_Vector4 > UT_VoxelArrayIteratorV4
int getTileRes(int dim) const
void evaluateMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelTileIterator< S > &vit)
fpreal getCompressionTolerance() const
void flattenPartialAxis(T *flatarray, exint ystride, const UT_JobInfo &info) const
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const
constexpr SYS_FORCE_INLINE T & x() noexcept
Definition: UT_Vector3.h:653