UT_VoxelArray.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 // TBB alloc results in real-world tests that are 3-4% faster. Yay!
37 // But unfortunately it is less aggressive with fragmentation, so
38 // we use effectively 2x the memory. Boo.
39 
40 //#define VOXEL_USE_TBB_ALLOC
41 
42 #ifdef VOXEL_USE_TBB_ALLOC
43 
44 #include <tbb/scalable_allocator.h>
45 
46 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
47 #define UT_VOXEL_FREE(x) scalable_free(x)
48 
49 #else
50 
51 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
52 #define UT_VOXEL_FREE(x) SYSafree(x)
53 
54 #endif
55 
56 class UT_Filter;
57 class UT_JSONWriter;
58 class UT_JSONParser;
59 
60 static const int TILEBITS = 4;
61 static const int TILESIZE = 1 << TILEBITS;
62 static const int TILEMASK = TILESIZE-1;
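// Editor's note (illustrative, not part of this header): with TILEBITS == 4,
// TILESIZE == 16 and TILEMASK == 15, a voxel coordinate splits into a tile
// index and an intra-tile offset exactly as operator() and setValue() do
// further down in UT_VoxelArray.
#if 0
#include <cassert>

static void
exampleTileAddressing()
{
    int x = 37;                     // arbitrary voxel coordinate
    int tile  = x >> TILEBITS;      // 37 / 16 == 2
    int local = x & TILEMASK;       // 37 % 16 == 5
    assert(tile == 2 && local == 5);
    assert((tile << TILEBITS) + local == x);
}
#endif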
63 
64 ///
65 /// Behaviour of out of bound reads.
66 ///
67 enum UT_VoxelBorderType
68 {
69     UT_VOXELBORDER_CONSTANT,
70     UT_VOXELBORDER_REPEAT,
71     UT_VOXELBORDER_STREAK,
72     UT_VOXELBORDER_EXTRAP
73 };
74 
75 template <typename T> class UT_VoxelTile;
76 template <typename T> class UT_VoxelArray;
77 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
78 template <typename T> class UT_VoxelProbeCube;
79 template <typename T> class UT_VoxelProbeFace;
80 
81 struct UT_VoxelArrayTileDataDescr
82 {
83  int tileidx;
84  int numvoxel;
85 };
86 
87 class UT_VoxelCompressOptions
88 {
89 public:
90  UT_VoxelCompressOptions()
91  {
92  myConstantTol = 0;
93  myQuantizeTol = 0;
94  myAllowFP16 = false;
95  }
96 
97  // Used for quantization.
98  enum DitherType
99  {
100  DITHER_NONE,
101  DITHER_ORDERED
102  };
103 
104  /// Determines if compressTile should be run on this grid for
105  /// things other than constant compression. Used by writeTiles
106  /// to limit compression attempts.
107  bool compressionEnabled() const
108  {
109  return myAllowFP16 || myConstantTol > 0 || myQuantizeTol > 0;
110  }
111 
112  /// Tiles will be constant if within this range. This may
113  /// need to be tighter than quantization tolerance as
114  /// dithering can't recover partial values.
115  fpreal myConstantTol;
116  /// Tolerance for quantizing to reduced bit depth
117  fpreal myQuantizeTol;
118 
119  DitherType myDitherType;
120 
121  /// Conversion to fpreal16, only valid for scalar data.
122  bool myAllowFP16;
123 };
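// Editor's sketch (illustrative): filling in the tolerances above and checking
// whether any non-constant compression would be attempted.  Attaching the
// options to an array is assumed to go through a setter such as
// setCompressionOptions() declared later in this file.
#if 0
static void
exampleCompressOptions()
{
    UT_VoxelCompressOptions opts;
    opts.myConstantTol = 1e-5;  // collapse nearly-uniform tiles to constants
    opts.myQuantizeTol = 0;     // no lossy quantization
    opts.myAllowFP16   = true;  // allow scalar tiles to drop to fpreal16

    if (opts.compressionEnabled())
    {
        // writeTiles()/tryCompress() will attempt more than constant
        // compression with these settings.
    }
}
#endif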
124 
125 ///
126 /// UT_VoxelTileCompress
127 ///
128 /// A compression engine for UT_VoxelTiles of a specific type. This
129 /// is a verb class which is invoked from the voxeltile class.
130 ///
131 template <typename T>
133 {
134 public:
137 
138  /// Attempts to write data directly to the compressed tile.
139  /// Returns false if not possible.
140  virtual bool writeThrough(UT_VoxelTile<T> &tile,
141  int x, int y, int z, T t) const = 0;
142 
143  /// Reads directly from the compressed data.
144  /// Cannot alter the tile in any way because it must be threadsafe.
145  virtual T getValue(const UT_VoxelTile<T> &tile,
146  int x, int y, int z) const = 0;
147 
148  /// Attempts to compress the data according to the given tolerance.
149  /// If successful, returns true.
150  virtual bool tryCompress(UT_VoxelTile<T> &tile,
151  const UT_VoxelCompressOptions &options,
152  T min, T max) const = 0;
153 
154  /// Returns the length in bytes of the data in the tile.
155  /// It must be at least one byte long.
156  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
157 
158  /// Returns true if the compression type is lossless
159  virtual bool isLossless() const { return false; }
160 
161  /// Determines the min & max values of the tile. A default
162  /// implementation uses getValue() on all voxels.
163  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
164 
165  /// Does this engine support saving and loading?
166  virtual bool canSave() const { return false; }
167  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
168  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
169  { return false; }
170  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
171  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
172  { return false; }
173 
174  /// Returns the unique name of this compression engine so
175  /// we can look up engines by name (the index of the compression
176  /// engine is assigned at load time so isn't constant)
177  virtual const char *getName() = 0;
178 };
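// Editor's sketch (illustrative): the shape of a custom engine implementing
// the pure virtuals above.  The bodies are placeholders; how a real engine
// lays out tile.myData is up to the engine and is not shown here.
#if 0
template <typename T>
class ExampleNoopCompress : public UT_VoxelTileCompress<T>
{
public:
    bool writeThrough(UT_VoxelTile<T> &tile,
                      int x, int y, int z, T t) const override
    { return false; }               // force decompression on writes

    T getValue(const UT_VoxelTile<T> &tile,
               int x, int y, int z) const override
    { return T(); }                 // placeholder: decode from tile.myData

    bool tryCompress(UT_VoxelTile<T> &tile,
                     const UT_VoxelCompressOptions &options,
                     T min, T max) const override
    { return false; }               // placeholder: never compresses

    int getDataLength(const UT_VoxelTile<T> &tile) const override
    { return 1; }                   // must be at least one byte

    const char *getName() override { return "example_noop"; }
};
#endif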
179 
191 
192 #define DEFINE_STD_FUNC(TYPE) \
193 inline void \
194 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
195 { \
196  if (v < min) \
197  min = v; \
198  else if (v > max) \
199  max = v; \
200 } \
201  \
202 inline fpreal \
203 UTvoxelTileDist(TYPE a, TYPE b) \
204 { \
205  return (fpreal) SYSabs(a - b); \
206 }
207 
216 
217 #undef DEFINE_STD_FUNC
218 
219 inline void
220 UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
221 {
222  min.x() = SYSmin(v.x(), min.x());
223  max.x() = SYSmax(v.x(), max.x());
224 
225  min.y() = SYSmin(v.y(), min.y());
226  max.y() = SYSmax(v.y(), max.y());
227 }
228 
229 inline void
230 UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min, UT_Vector3 &max)
231 {
232  min.x() = SYSmin(v.x(), min.x());
233  max.x() = SYSmax(v.x(), max.x());
234 
235  min.y() = SYSmin(v.y(), min.y());
236  max.y() = SYSmax(v.y(), max.y());
237 
238  min.z() = SYSmin(v.z(), min.z());
239  max.z() = SYSmax(v.z(), max.z());
240 }
241 
242 inline void
243 UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min, UT_Vector4 &max)
244 {
245  min.x() = SYSmin(v.x(), min.x());
246  max.x() = SYSmax(v.x(), max.x());
247 
248  min.y() = SYSmin(v.y(), min.y());
249  max.y() = SYSmax(v.y(), max.y());
250 
251  min.z() = SYSmin(v.z(), min.z());
252  max.z() = SYSmax(v.z(), max.z());
253 
254  min.w() = SYSmin(v.w(), min.w());
255  max.w() = SYSmax(v.w(), max.w());
256 }
257 
258 inline fpreal
259 UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b)
260 {
261  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
262 }
263 
264 inline fpreal
265 UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b)
266 {
267  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
268  + SYSabs(a.z() - b.z());
269 }
270 
271 inline fpreal
272 UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b)
273 {
274  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
275  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
276 }
277 
278 ///
279 /// UT_VoxelTile
280 ///
281 /// A UT_VoxelArray is composed of a number of these tiles. This is
282 /// done for two reasons:
283 /// 1) Increased memory locality when processing neighbouring points.
284 /// 2) Ability to compress or page out unneeded tiles.
285 /// Currently, the only special ability is the ability to create constant
286 /// tiles.
287 ///
288 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should
289 /// usually be transparent. The only exception may be if they want to do
290 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
291 ///
292 template <typename T>
293 class UT_VoxelTile
294 {
295 public:
296  UT_VoxelTile();
297  virtual ~UT_VoxelTile();
298 
299  // Copy constructor:
300  UT_VoxelTile(const UT_VoxelTile<T> &src);
301 
302 
303  // Assignment operator:
304  const UT_VoxelTile<T> &operator=(const UT_VoxelTile<T> &src);
305 
306  enum CompressionType
307  {
308  COMPRESS_RAW,
309  COMPRESS_RAWFULL,
310  COMPRESS_CONSTANT,
311  COMPRESS_FPREAL16,
312  COMPRESS_ENGINE
313  };
314 
315  /// Fetch a given local value. (x,y,z) should be local to
316  /// this tile.
317  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
318  {
319  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
320  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
321 
322  switch (myCompressionType)
323  {
324  case COMPRESS_RAW:
325  return ((T *)myData)[
326  ((z * myRes[1]) + y) * myRes[0] + x ];
327 
328  case COMPRESS_CONSTANT:
329  return rawConstVal();
330 
331  case COMPRESS_RAWFULL:
332  return ((T *)myData)[
333  ((z * TILESIZE) + y) * TILESIZE + x ];
334 
335  case COMPRESS_FPREAL16:
336  {
337  T result;
338  result = (((fpreal16 *)myData)[
339  ((z * myRes[1]) + y) * myRes[0] + x ]);
340  return result;
341  }
342  }
343 
344  // By default use the compression engine.
345  UT_VoxelTileCompress<T> *engine;
346 
347  engine = getCompressionEngine(myCompressionType);
348  return engine->getValue(*this, x, y, z);
349  }
350 
351  /// Lerps two numbers, templated to work with T.
353  {
354  return v1 + (v2 - v1) * bias;
355  }
356 
357  /// Does a trilinear interpolation. x,y,z should be local to this
358  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
359  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
360 
361  template <int AXIS2D>
362  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
363 
364  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
365  /// array should have 8 elements, x minor, z major.
366  /// Requires it is in bounds.
367  /// Returns true if all constant, in which case only a single
368  /// sample is filled, [0]
369  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
370  T *sample) const;
371  template <int AXIS2D>
372  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
373  T *sample) const;
374 
375  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
376  /// 7 samples.
377  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
378  T *sample) const;
379  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
380  /// 27 elements.
381  bool extractSampleCube(int x, int y, int z,
382  T *sample) const;
383 #if 0
384  /// MSVC can't handle aligned parameters after the third so
385  /// frac must come first.
386  T lerp(v4uf frac, int x, int y, int z) const;
387 #endif
388 
389  /// Returns a cached line to our internal data, at local address x,y,z.
390  /// cacheline is a caller allocated structure to fill out if we have
391  /// to decompress. If forcecopy isn't set and we can, the result may
392  /// be an internal pointer. stride is set to the update for moving one
393  /// x position in the cache.
394  /// strideofone should be set to true if you want to prevent 0 stride
395  /// results for constant tiles.
396  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
397 
398  /// Fills a cache line from an external buffer into our own data.
399  void writeCacheLine(T *cacheline, int y, int z);
400 
401  /// Copies between two tiles. The tile's voxels match up, but don't
402  /// have the same offset. The maximal overlapping voxels are copied.
403  /// this->setValue(dstx, dsty, dstz, src(srcx, srcy, srcz));
404  void copyFragment(int dstx, int dsty, int dstz,
405  const UT_VoxelTile<T> &srctile,
406  int srcx, int srcy, int srcz);
407 
408  /// Flattens ourself into the given destination buffer.
409  template <typename S>
410  void flatten(S *dst, int dststride) const;
411 
412  /// Fills our values from the given dense flat buffer. Will
413  /// create a constant tile if the source is constant.
414  template <typename S>
415  void writeData(const S *src, int srcstride);
416 
417  /// setValue() is intentionally separate so we can avoid
418  /// expanding constant data when we write the same value to it.
419  void setValue(int x, int y, int z, T t);
420 
421  /// Finds the minimum and maximum T values
422  void findMinMax(T &min, T &max) const;
423 
424  /// Determines the average value of the tile.
425  void findAverage(T &avg) const;
426 
427  /// Returns if this tile is constant.
428  bool isConstant() const
429  { return myCompressionType == COMPRESS_CONSTANT; }
430 
431  /// Returns true if any NANs are in this tile
432  bool hasNan() const;
433 
434  /// Returns if this tile is in raw format.
435  bool isRaw() const
436  { return myCompressionType == COMPRESS_RAW; }
437 
438  /// Returns if this tile is in raw full format.
439  bool isRawFull() const
440  { return myCompressionType == COMPRESS_RAWFULL; }
441 
442  /// Returns true if this is a simple form of compression, either
443  /// constant, raw, or a raw full that isn't padded
444  bool isSimpleCompression() const
445  {
446  if (isRaw()) return true;
447  if (isConstant()) return true;
448  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
449  return true;
450  return false;
451  }
452 
453  /// Attempts to compress this tile. Returns true if any
454  /// compression performed.
455  bool tryCompress(const UT_VoxelCompressOptions &options);
456 
457  /// Turns this tile into a constant tile of the given value.
458  void makeConstant(T t);
459 
460  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
461  void makeFpreal16();
462 
463  /// Turns a compressed tile into a raw tile.
464  void uncompress();
465 
466  /// Turns a tile into a raw full tile.
467  void uncompressFull();
468 
469  /// Like uncompress() except it leaves the data uninitialized. Result
470  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
471  /// @note USE WITH CAUTION!
472  void makeRawUninitialized();
473 
474  /// Returns the raw full data of the tile.
475  T *rawFullData()
476  {
477  uncompressFull();
478  return (T *)myData;
479  }
480 
481  /// This only makes sense for simple compression. Use with
482  /// extreme care.
483  T *rawData()
484  { if (inlineConstant() && isConstant())
485  { return (T *) &myData; }
486  return (T *)myData; }
487  const T *rawData() const
488  { if (inlineConstant() && isConstant())
489  { return (const T *) &myData; }
490  return (const T *)myData; }
491 
492  /// Read the current resolution.
493  int xres() const { return myRes[0]; }
494  int yres() const { return myRes[1]; }
495  int zres() const { return myRes[2]; }
496 
497  int getRes(int dim) const { return myRes[dim]; }
498 
499 
500  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
501 
502  /// Returns the amount of memory used by this tile.
503  int64 getMemoryUsage(bool inclusive) const;
504 
505  /// Returns the amount of data used by the tile myData pointer.
506  exint getDataLength() const;
507 
508  /// A routine used by filtered evaluation to accumulate a partial
509  /// filtered sum in this tile.
510  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
511  /// weights - weight array
512  /// start - UT_VoxelArray coordinates at [0] in the weight array
513  void weightedSum(int pstart[3], int pend[3],
514  const float *weights[3], int start[3],
515  T &result);
516 
517  /// Designed to be specialized according to T
518 
519  /// Update min & max to encompass T itself.
520  static void expandMinMax(T v, T &min, T &max)
521  {
522  UTvoxelTileExpandMinMax(v, min, max);
523  }
524 
525  /// Return the "distance" of a & b. This is used for
526  /// tolerance checks on equality comparisons.
527  static fpreal dist(T a, T b)
528  {
529  return UTvoxelTileDist(a, b);
530  }
531 
533 
534  // Returns the index of the bound compression engine.
535  static int lookupCompressionEngine(const char *name);
536  // Given an index, gets the compression engine.
537  static UT_VoxelTileCompress<T> *getCompressionEngine(int index);
538 
539  /// Saves this tile's data, in compressed form.
540  /// May save in uncompressed form if the compression type does
541  /// not support saving.
542  void save(std::ostream &os) const;
543  bool save(UT_JSONWriter &w) const;
544 
545  /// Loads tile data. Uses the compression index to map the saved
546  /// compression types into the correct loading compression types.
547  void load(UT_IStream &is, const UT_IntArray &compression);
548  bool load(UT_JSONParser &p, const UT_IntArray &compression);
549 
550  /// Stores a list of compression engines to os.
551  static void saveCompressionTypes(std::ostream &os);
552  static bool saveCompressionTypes(UT_JSONWriter &w);
553 
554  /// Builds a translation table from the given stream's compression types
555  /// into our own valid compression types.
556  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
557  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
558 
559 protected:
560  // Attempts to set the value to the native compressed format
561  // Some compression types allow some values to be written
562  // without decompression. Eg, you can write to a constant tile
563  // the tile's own value without decompression.
564  // If this returns true, t has been written.
565  bool writeThrough(int x, int y, int z, T t);
566 
567  /// Sets the local res of the tile. Does *not* resize the allocated
568  /// memory.
569  void setRes(int xr, int yr, int zr)
570  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
571 
572  static bool inlineConstant()
573  {
574  return (sizeof(T) <= sizeof(T*));
575  }
576 
577  T rawConstVal() const
578  { if (inlineConstant()) { return *((const T *)&myData); }
579  return *((const T*)myData); }
580  T *rawConstData() const
581  { if (inlineConstant()) { return ((T *)&myData); }
582  return ((T*)myData); }
583 
584  void setForeignData(void *data, int8 compress_type)
585  {
586  freeData();
587  myCompressionType = compress_type;
588 
589  if (isConstant() && inlineConstant())
590  {
591  makeConstant(*(T *)data);
592  }
593  else
594  {
595  myData = data;
596  myForeignData = true;
597  }
598  }
599 
600 public:
601  /// Frees myData and sets it to zero. This is a bit tricky
602  /// as the constant tiles may be inlined.
603  /// This is only public for the compression engines.
604  void freeData()
605  {
606  if (inlineConstant() && isConstant())
607  {
608  // Do nothing!
609  }
610  else if (myData && !myForeignData)
611  {
612  UT_VOXEL_FREE(myData);
613  }
614  myData = 0;
615  myForeignData = false;
616  }
617 
618 public:
619  // This is only public so the compression engines can get to it.
620  // It is blind data, do not alter!
621  void *myData;
622 private:
623 
624  /// Resolutions.
625  int8 myRes[3];
626 
627  /// Am I a constant tile?
628  int8 myCompressionType;
629 
630  int8 myForeignData;
631 
632  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
633  {
634  return UTvoxelTileGetCompressionEngines((T *) 0);
635  }
636 
637  friend class UT_VoxelTileCompress<T>;
638  friend class UT_VoxelArray<T>;
639  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
640  friend class UT_VoxelProbe;
641 };
642 
643 ///
644 /// UT_VoxelArray
645 ///
646 /// This provides a data structure to hold a three-dimensional array
647 /// of data. The data should be some simple arithmetic type, such
648 /// as uint8, fpreal16, or UT_Vector3.
649 ///
650 /// Some operations, such as gradients, may make less sense with uint8.
651 ///
652 template <typename T>
653 class UT_VoxelArray
654 {
655 public:
656  UT_VoxelArray();
657  virtual ~UT_VoxelArray();
658 
659  /// Copy constructor:
660  UT_VoxelArray(const UT_VoxelArray<T> &src);
661 
662  /// Assignment operator:
663  const UT_VoxelArray<T> &operator=(const UT_VoxelArray<T> &src);
664 
665  /// This sets the voxelarray to have the given resolution, resetting
666  /// all elements to 0.
667  void size(int xres, int yres, int zres);
668 
669  /// This will ensure this voxel array matches the given voxel array
670  /// in terms of dimensions & border conditions. It may invoke
671  /// a size() and hence reset the field to 0.
672  void match(const UT_VoxelArray<T> &src);
673 
674  template <typename S>
675  bool isMatching(const UT_VoxelArray<S> &src) const
676  {
677  return src.getXRes() == getXRes() &&
678  src.getYRes() == getYRes() &&
679  src.getZRes() == getZRes();
680  }
681 
682  int getXRes() const { return myRes[0]; }
683  int getYRes() const { return myRes[1]; }
684  int getZRes() const { return myRes[2]; }
685  int getRes(int axis) const { return myRes[axis]; }
686 
687  UT_Vector3I getVoxelRes() const
688  {
689  return UT_Vector3I(myRes[0], myRes[1], myRes[2]);
690 
691  }
692 
693  /// Return the amount of memory used by this array.
694  int64 getMemoryUsage(bool inclusive) const;
695 
696  /// Sets this voxel array to the given constant value. All tiles
697  /// are turned into constant tiles.
698  THREADED_METHOD1(UT_VoxelArray<T>, numTiles() > 16,
699  constant,
700  T, t)
701  void constantPartial(T t, const UT_JobInfo &info);
702 
703  /// If this voxel array is all constant tiles, returns true.
704  /// The optional pointer is initialized to the constant value iff
705  /// the array is constant. (Note by constant we mean made of constant
706  /// tiles of the same value - if some tiles are uncompressed but
707  /// constant, it will still return false)
708  bool isConstant(T *cval = 0) const;
709 
710  /// Returns true if any element of the voxel array is NAN
711  bool hasNan() const;
712 
713  /// This convenience function lets you sample the voxel array.
714  /// pos is in the range [0..1]^3.
715  /// The value is trilinearly interpolated. Edges are determined by the border
716  /// mode.
717  /// The cells are sampled at the center of the voxels.
718  T operator()(UT_Vector3D pos) const;
719  T operator()(UT_Vector3F pos) const;
720 
721  /// This convenience function lets you sample the voxel array.
722  /// pos is in the range [0..1]^3.
723  /// The min/max is the range of the sampled values.
724  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
725  UT_Vector3F pos) const;
726 
727  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
728  /// Allows out of range evaluation
729  SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const;
730  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
731  /// Allows out of range evaluation
732  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
733  float fx, float fy, float fz) const;
734  template <int AXIS2D>
735  SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const;
736  template <int AXIS2D>
737  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
738  float fx, float fy, float fz) const;
739 
740  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
741  /// Allows out of range evaluation. Also computes min/max of
742  /// interpolated samples.
743  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
744  UT_Vector3F pos) const;
745  template <int AXIS2D>
746  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
747  UT_Vector3F pos) const;
748  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
749  /// Allows out of range evaluation. Also computes min/max of
750  /// interpolated samples.
751  SYS_FORCE_INLINE void lerpVoxelMinMax(
752  T &lerp, T &lmin, T &lmax,
753  int x, int y, int z,
754  float fx, float fy, float fz) const;
755  template <int AXIS2D>
756  SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(
757  T &lerp, T &lmin, T &lmax,
758  int x, int y, int z,
759  float fx, float fy, float fz) const;
760 
761  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
762  /// array should have 8 elements, x minor, z major.
763  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
764  T *sample) const;
765  template <int AXIS2D>
766  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
767  T *sample) const;
768 
769  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
770  /// the center into 7 voxels.
771  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
772  T *sample) const;
773  /// Extracts the dense 3x3x3 cube (27 voxels) centered at x,y,z into
774  /// samples, z major, x minor.
775  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
776  T *sample) const;
777 
778  /// Lerps the given sample using trilinear interpolation
779  SYS_FORCE_INLINE T lerpSample(T *samples,
780  float fx, float fy, float fz) const;
781  template <int AXIS2D>
782  SYS_FORCE_INLINE T lerpSampleAxis(T *samples,
783  float fx, float fy, float fz) const;
784 
785  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
786  float &fx, float &fy, float &fz) const
787  {
788  // Determine integer & fractional components.
789  fx = pos.x();
790  SYSfastSplitFloat(fx, x);
791  fy = pos.y();
792  SYSfastSplitFloat(fy, y);
793  fz = pos.z();
794  SYSfastSplitFloat(fz, z);
795  }
796  template <int AXIS2D>
797  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
798  float &fx, float &fy, float &fz) const
799  {
800  // Determine integer & fractional components.
801  if (AXIS2D != 0)
802  {
803  fx = pos.x();
804  SYSfastSplitFloat(fx, x);
805  }
806  else
807  {
808  fx = 0.0;
809  x = 0;
810  }
811  if (AXIS2D != 1)
812  {
813  fy = pos.y();
814  SYSfastSplitFloat(fy, y);
815  }
816  else
817  {
818  fy = 0.0;
819  y = 0;
820  }
821  if (AXIS2D != 2)
822  {
823  fz = pos.z();
824  SYSfastSplitFloat(fz, z);
825  }
826  else
827  {
828  fz = 0.0;
829  z = 0;
830  }
831  }
832 #if 0
833  T operator()(v4uf pos) const;
834 #endif
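// Editor's sketch (illustrative): pairing splitVoxelCoord() with lerpVoxel()
// to interpolate at a fractional voxel coordinate.  Whether the caller first
// needs the usual half-voxel centre offset depends on its convention and is
// not addressed here.
#if 0
template <typename T>
static T
exampleLerpAtVoxelCoord(const UT_VoxelArray<T> &vox, UT_Vector3F vpos)
{
    int   x, y, z;
    float fx, fy, fz;

    vox.splitVoxelCoord(vpos, x, y, z, fx, fy, fz);
    return vox.lerpVoxel(x, y, z, fx, fy, fz);
}
#endif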
835 
836  /// Filtered evaluation of the voxel array. This operation should
837  /// exhibit the same behavior as IMG3D_Channel::evaluate.
838  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
839  fpreal radius, int clampaxis = -1) const;
840 
841  /// Fills this by resampling the given voxel array.
842  void resample(const UT_VoxelArray<T> &src,
843  UT_FilterType filtertype = UT_FILTER_POINT,
844  float filterwidthscale = 1.0f,
845  int clampaxis = -1);
846 
847  /// Flattens this into an array. Z major, then Y, then X.
848  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
849  THREADED_METHOD3_CONST(UT_VoxelArray<T>, numTiles() > 16,
850  flatten,
851  T *, flatarray,
852  exint, ystride,
853  exint, zstride)
854  void flattenPartial(T *flatarray, exint ystride, exint zstride,
855  const UT_JobInfo &info) const;
856 
857  /// Flattens this into an array. Z major, then Y, then X.
858  /// Flattens a 2d slice where AXIS2D is constant.
859  /// If AXIS2D == 2 (ie, z): flatarray[x + y * ystride] = getValue(x, y, 0);
860  /// Flattens by destination x-major stripes to avoid page collisions
861  /// on freshly allocated memory buffers.
862  template <int AXIS2D>
863  void flattenPartialAxis(T *flatarray, exint ystride,
864  const UT_JobInfo &info) const;
865 
866  /// Flattens this into an array suitable for a GL 8bit texture.
867  /// Z major, then Y, then X.
868  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
869  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
870  flattenGLFixed8,
871  uint8 *, flatarray,
872  exint, ystride,
873  exint, zstride,
874  T , dummy)
875  void flattenGLFixed8Partial(uint8 *flatarray,
876  exint ystride, exint zstride,
877  T dummy,
878  const UT_JobInfo &info) const;
879 
880  /// Flattens this into an array suitable for a GL 16bit FP texture.
881  /// Z major, then Y, then X.
882  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
883  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
884  flattenGL16F,
885  UT_Vector4H *, flatarray,
886  exint, ystride,
887  exint, zstride,
888  T , dummy)
889  void flattenGL16FPartial(UT_Vector4H *flatarray,
890  exint ystride, exint zstride,
891  T dummy,
892  const UT_JobInfo &info) const;
893 
894  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
895  /// this also works around an older Nvidia driver bug that caused very small
896  /// valued texels (<1e-9) to appear as huge random values in the texture.
897  /// Z major, then Y, then X.
898  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
899  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
900  flattenGL32F,
901  UT_Vector4F *, flatarray,
902  exint, ystride,
903  exint, zstride,
904  T , dummy)
905  void flattenGL32FPartial(UT_Vector4F *flatarray,
906  exint ystride, exint zstride,
907  T dummy,
908  const UT_JobInfo &info) const;
909 
910  /// Fills this from a flattened array. Z major, then Y, then X.
911  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride]);
912  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
913  extractFromFlattened,
914  const T *, flatarray,
915  exint, ystride,
916  exint, zstride)
917  void extractFromFlattenedPartial(const T *flatarray,
918  exint ystride, exint zstride,
919  const UT_JobInfo &info);
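// Editor's sketch (illustrative): round-tripping a voxel array through a
// dense buffer using the strides described in the comments above.  It
// assumes the THREADED_METHOD* macros expose public flatten() and
// extractFromFlattened() entry points matching the *Partial signatures.
#if 0
#include <vector>

template <typename T>
static void
exampleFlattenRoundTrip(UT_VoxelArray<T> &vox)
{
    const exint xres = vox.getXRes();
    const exint ystride = xres;
    const exint zstride = xres * vox.getYRes();

    std::vector<T> flat(vox.numVoxels());

    // flat[x + y*ystride + z*zstride] = vox.getValue(x, y, z)
    vox.flatten(flat.data(), ystride, zstride);

    // ...modify flat in place...

    // vox.setValue(x, y, z, flat[x + y*ystride + z*zstride])
    vox.extractFromFlattened(flat.data(), ystride, zstride);
}
#endif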
920 
921  /// Copies into this voxel array from the source array.
922  /// Conceptually,
923  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz));
924  void copyWithOffset(const UT_VoxelArray<T> &src,
925  int offx, int offy, int offz);
926  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
927  copyWithOffsetInternal,
928  const UT_VoxelArray<T> &, src,
929  int, offx,
930  int, offy,
931  int, offz)
932  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
933  int offx, int offy, int offz,
934  const UT_JobInfo &info);
935 
936  /// Moves data from the source voxel array into this array. The offsets should
937  /// be in terms of tiles. Source may be modified as this array steals its data
938  /// buffers in such a way that no dynamic memory will leak when these arrays
939  /// are freed.
940  /// Conceptually, this function performs the same operation as copyWithOffset,
941  /// but with offsets specified in terms of tiles:
942  /// this->setValue(x, y, z, src.getValue(x+off_v_x, y+off_v_y, z+off_v_z))
943  /// where off_v_A=tileoffA*TILESIZE for A in {x, y, z}.
944  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 16,
945  moveTilesWithOffset,
946  UT_VoxelArray<T>&, src,
947  int, tileoffx,
948  int, tileoffy,
949  int, tileoffz)
950  void moveTilesWithOffsetPartial(UT_VoxelArray<T> &src, int tileoffx,
951  int tileoffy, int tileoffz,
952  const UT_JobInfo &info);
953 
954  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
955  /// in T. Data order is in tile-order. So, sorted by tilelist, then
956  /// z, y, x within that tile.
957  /// The ix/iy/iz variant allows partial tiles. If the number of
958  /// voxels to write to a tile matches the tile size, however, the
959  /// ix/iy/iz is ignored and the tile is written in canonical order.
960  template <typename S>
961  S *extractTiles(S *dstdata, int stride,
962  const UT_IntArray &tilelist) const;
963  template <typename S, typename IDX>
964  S *extractTiles(S *dstdata, int stride,
965  const IDX *ix, const IDX *iy, const IDX *iz,
966  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const;
967 
968  /// Overwrites our tiles with the given data. Does checking
969  /// for constant tiles. Input srcdata stream should match
970  /// that of extractTiles.
971  template <typename S>
972  const S *writeTiles(const S *srcdata, int srcstride,
973  const UT_IntArray &tilelist);
974  template <typename S, typename IDX>
975  const S *writeTiles(const S *srcdata, int srcstride,
976  const IDX *ix, const IDX *iy, const IDX *iz,
977  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist);
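// Editor's sketch (illustrative): pulling the voxel data of a few tiles into
// one contiguous buffer and writing it back after modification.  A stride of
// 1 is assumed to mean tightly packed values of type T.
#if 0
#include <vector>

template <typename T>
static void
exampleTileRoundTrip(UT_VoxelArray<T> &vox, const UT_IntArray &tilelist)
{
    exint total = 0;
    for (exint i = 0; i < tilelist.entries(); i++)
        total += vox.getLinearTile(tilelist(i))->numVoxels();

    std::vector<T> buf(total);
    vox.extractTiles(buf.data(), /*stride*/ 1, tilelist);

    // ...operate on buf, tile by tile, z then y then x within each tile...

    vox.writeTiles(buf.data(), /*srcstride*/ 1, tilelist);
}
#endif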
978 
979  /// Converts a 3d position in range [0..1]^3 into the closest
980  /// index value.
981  /// Returns false if the resulting index was out of range. The index
982  /// will still be set.
983  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
984  bool posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const;
985  /// Converts a 3d position in [0..1]^3 into the equivalent in
986  /// the integer cell space. Does not clamp to the closest value.
987  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
988  bool posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const;
989  /// Converts an index into a position.
990  /// Returns false if the source index was out of range, in which case
991  /// pos will be outside [0..1]^3
992  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
993  bool indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const;
994  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
995  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
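// Editor's sketch (illustrative): converting between the [0..1]^3 position
// space and integer voxel indices with the conversions declared above.
#if 0
template <typename T>
static void
examplePosIndexRoundTrip(const UT_VoxelArray<T> &vox)
{
    int x, y, z;
    if (vox.posToIndex(UT_Vector3(0.5, 0.5, 0.5), x, y, z))
    {
        // (x, y, z) is the voxel closest to the middle of the array.
        UT_Vector3F pos;
        vox.indexToPos(x, y, z, pos);   // back to a position in [0..1]^3
    }
}
#endif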
996 
997  /// Clamps the given x, y, and z values to lie inside the valid index
998  /// range.
999  void clampIndex(int &x, int &y, int &z) const
1000  {
1001  x = SYSclamp(x, 0, myRes[0]-1);
1002  y = SYSclamp(y, 0, myRes[1]-1);
1003  z = SYSclamp(z, 0, myRes[2]-1);
1004  }
1005 
1006  /// Returns true if the given x, y, z values lie inside the valid index range.
1007  bool isValidIndex(int x, int y, int z) const
1008  {
1009  return !((x | y | z) < 0) &&
1010  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
1011  }
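// Editor's note (illustrative): the expression above is a branch-light way of
// testing 0 <= x < myRes[0] (and likewise for y, z).  (x | y | z) < 0 is true
// iff any coordinate is negative, and (x - res) has its sign bit set iff
// x < res, so the AND of the three differences is negative iff all three
// coordinates are below their resolutions.
#if 0
static bool
exampleValidIndexReference(int x, int y, int z, const int res[3])
{
    bool plain  = x >= 0 && x < res[0] &&
                  y >= 0 && y < res[1] &&
                  z >= 0 && z < res[2];
    bool tricky = !((x | y | z) < 0) &&
                  (((x - res[0]) & (y - res[1]) & (z - res[2])) < 0);
    return plain == tricky;     // equivalent for non-overflowing int indices
}
#endif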
1012 
1013  /// This allows you to read & write the raw data.
1014  /// Out of bound reads are illegal.
1015  T operator()(UT_Vector3I index) const
1016  {
1017  return (*this)(index[0], index[1], index[2]);
1018  }
1019  T operator()(int x, int y, int z) const
1020  {
1021  UT_ASSERT_P(isValidIndex(x, y, z));
1022  return (*getTile(x >> TILEBITS,
1023  y >> TILEBITS,
1024  z >> TILEBITS))
1025  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
1026  }
1027 
1028  void setValue(UT_Vector3I index, T value)
1029  {
1030  setValue(index[0], index[1], index[2], value);
1031  }
1032 
1033  void setValue(int x, int y, int z, T t)
1034  {
1035  UT_ASSERT_P(isValidIndex(x, y, z));
1036  getTile(x >> TILEBITS,
1037  y >> TILEBITS,
1038  z >> TILEBITS)->setValue(
1039  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
1040  }
1041 
1042  /// This will clamp the bounds to fit within the voxel array,
1043  /// using the border type to resolve out of range values.
1044  T getValue(int x, int y, int z) const
1045  {
1046  // First handle the most common case.
1047  if (isValidIndex(x, y, z))
1048  return (*this)(x, y, z);
1049 
1050  // Verify our voxel array is non-empty.
1051  if (!myTiles)
1052  return myBorderValue;
1053 
1054  // We now know we are out of range, adjust appropriately
1055  switch (myBorderType)
1056  {
1057  case UT_VOXELBORDER_CONSTANT:
1058  return myBorderValue;
1059 
1060  case UT_VOXELBORDER_REPEAT:
1061  if (x < 0 || x >= myRes[0])
1062  {
1063  x %= myRes[0];
1064  if (x < 0)
1065  x += myRes[0];
1066  }
1067  if (y < 0 || y >= myRes[1])
1068  {
1069  y %= myRes[1];
1070  if (y < 0)
1071  y += myRes[1];
1072  }
1073  if (z < 0 || z >= myRes[2])
1074  {
1075  z %= myRes[2];
1076  if (z < 0)
1077  z += myRes[2];
1078  }
1079  break;
1080 
1081  case UT_VOXELBORDER_STREAK:
1082  clampIndex(x, y, z);
1083  break;
1084  case UT_VOXELBORDER_EXTRAP:
1085  {
1086  int cx, cy, cz;
1087  T result;
1088 
1089  cx = x; cy = y; cz = z;
1090  clampIndex(cx, cy, cz);
1091 
1092  result = (*this)(cx, cy, cz);
1093  result += (x - cx) * myBorderScale[0] +
1094  (y - cy) * myBorderScale[1] +
1095  (z - cz) * myBorderScale[2];
1096  return result;
1097  }
1098  }
1099 
1100  // It is now within bounds, do normal fetch.
1101  return (*this)(x, y, z);
1102  }
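// Editor's sketch (illustrative): how the border type changes out-of-range
// reads through getValue().  It assumes a border setter with the shape of
// setBorder(type, value) declared just below.
#if 0
template <typename T>
static void
exampleBorders(UT_VoxelArray<T> &vox)
{
    vox.setBorder(UT_VOXELBORDER_CONSTANT, T(0));
    T a = vox.getValue(-1, 0, 0);       // returns the constant border value

    vox.setBorder(UT_VOXELBORDER_STREAK, T(0));
    T b = vox.getValue(-1, 0, 0);       // clamps: same as vox(0, 0, 0)

    vox.setBorder(UT_VOXELBORDER_REPEAT, T(0));
    T c = vox.getValue(-1, 0, 0);       // wraps: same as vox(xres - 1, 0, 0)
}
#endif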
1103 
1104  void setBorder(UT_VoxelBorderType type, T t);
1105  void setBorderScale(T scalex, T scaley, T scalez);
1106  UT_VoxelBorderType getBorder() const { return myBorderType; }
1107  T getBorderValue() const { return myBorderValue; }
1108  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1109 
1110  /// This tries to compress or collapse each tile. This can
1111  /// be expensive (ie, converting a tile to constant), so
1112  /// should be saved until modifications are complete.
1113  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1114  collapseAllTiles)
1115  void collapseAllTilesPartial(const UT_JobInfo &info);
1116 
1117  /// Uncompresses all tiles into non-constant tiles. Useful
1118  /// if you have a multithreaded algorithm that may need to
1119  /// both read and write, if you write to a collapsed tile
1120  /// while someone else reads from it, bad stuff happens.
1121  /// Instead, you can expandAllTiles. This may have serious
1122  /// consequences in memory use, however.
1123  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1124  expandAllTiles)
1125  void expandAllTilesPartial(const UT_JobInfo &info);
1126 
1127  /// Uncompresses all tiles, but leaves constant tiles alone.
1128  /// Useful for cleaning out any non-standard compression algorithm
1129  /// that some external program can't handle.
1130  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1131  expandAllNonConstTiles)
1132  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
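// Editor's sketch (illustrative): the usual write pattern around the methods
// above.  It assumes the THREADED_METHOD macros expose expandAllTiles() and
// collapseAllTiles() entry points matching the *Partial declarations.
#if 0
template <typename T>
static void
exampleExpandWriteCollapse(UT_VoxelArray<T> &vox)
{
    // Before a multithreaded pass that both reads and writes, make sure no
    // two threads can race on decompressing the same constant tile.
    vox.expandAllTiles();

    // ...threaded writes via setValue() / iterators go here...

    // When the edits are complete, give the tiles a chance to become
    // constant or otherwise compressed again.
    vox.collapseAllTiles();
}
#endif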
1133 
1134  /// The direct tile access methods are to make TBF writing a bit
1135  /// more efficient.
1136  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1137  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1138  UT_VoxelTile<T> *getLinearTile(int idx) const
1139  { return &myTiles[idx]; }
1140  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1141  {
1142  x = idx % myTileRes[0];
1143  idx -= x;
1144  idx /= myTileRes[0];
1145  y = idx % myTileRes[1];
1146  idx -= y;
1147  idx /= myTileRes[1];
1148  z = idx;
1149  }
1150  UT_Vector3I linearTileToXYZ(int idx) const
1151  {
1152  UT_Vector3I tileindex;
1153  tileindex[0] = idx % myTileRes[0];
1154  idx -= tileindex[0];
1155  idx /= myTileRes[0];
1156  tileindex[1] = idx % myTileRes[1];
1157  idx -= tileindex[1];
1158  idx /= myTileRes[1];
1159  tileindex[2] = idx;
1160 
1161  return tileindex;
1162  }
1163 
1164  int xyzTileToLinear(int x, int y, int z) const
1165  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1166 
1167  int indexToLinearTile(int x, int y, int z) const
1168  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1169 
1170  /// idxth tile represents the voxels indexed [start,end).
1171  void getTileVoxels(int idx,
1172  UT_Vector3I &start, UT_Vector3I &end) const
1173  {
1174  int x, y, z;
1175  linearTileToXYZ(idx, x, y, z);
1176 
1177  start.x() = x * TILESIZE;
1178  start.y() = y * TILESIZE;
1179  start.z() = z * TILESIZE;
1180  end = start;
1181  end.x() += myTiles[idx].xres();
1182  end.y() += myTiles[idx].yres();
1183  end.z() += myTiles[idx].zres();
1184  }
1185 
1186  UT_BoundingBoxI getTileBBox(int idx) const
1187  {
1188  UT_Vector3I start, end;
1189  getTileVoxels(idx, start, end);
1190  return UT_BoundingBoxI(start, end);
1191  }
1192 
1193  /// Number of tiles along that axis. Not to be confused with
1194  /// the resolution of the individual tiles.
1195  int getTileRes(int dim) const { return myTileRes[dim]; }
1196  int numTiles() const
1197  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1198  exint numVoxels() const
1199  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1200 
1201  void setCompressionOptions(const UT_VoxelCompressOptions &options)
1202  { myCompressionOptions = options; }
1203  const UT_VoxelCompressOptions &getCompressionOptions() const
1204  { return myCompressionOptions; }
1205 
1206  void setCompressionTolerance(fpreal tol)
1207  { myCompressionOptions.myConstantTol = tol; }
1208  fpreal getCompressionTolerance() const
1209  { return myCompressionOptions.myConstantTol; }
1210 
1211  /// Saves only the data of this array to the given stream.
1212  /// To reload it you will have to have a matching array in tiles
1213  /// dimensions and size.
1214  void saveData(std::ostream &os) const;
1215  bool saveData(UT_JSONWriter &w,
1216  const char *shared_mem_owner = 0) const;
1217 
1218  /// Load an array, requires you have already size()d this array.
1219  void loadData(UT_IStream &is);
1220  bool loadData(UT_JSONParser &p);
1221 
1222  /// Copy only the data from the source array.
1223  /// Note that it is an error to call this unless isMatching(src).
1225  copyData,
1226  const UT_VoxelArray<T> &, src)
1227 
1228  void copyDataPartial(const UT_VoxelArray<T> &src,
1229  const UT_JobInfo &info);
1230 
1231 private:
1233  resamplethread,
1234  const UT_VoxelArray<T> &, src,
1235  const UT_Filter *, filter,
1236  float, radius,
1237  int, clampaxis)
1238  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1239  const UT_Filter *filter,
1240  float radius,
1241  int clampaxis,
1242  const UT_JobInfo &info);
1243 
1244 
1245  void deleteVoxels();
1246 
1247  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1248  bool populateFromSharedMemory(const char *id);
1249 
1250 
1251  /// Number of elements in each dimension.
1252  int myRes[3];
1253 
1254  /// Inverse tile res, 1/myRes
1255  UT_Vector3 myInvRes;
1256 
1257  /// Number of tiles in each dimension.
1258  int myTileRes[3];
1259 
1260  /// Compression tolerance for lossy compression.
1261  UT_VoxelCompressOptions myCompressionOptions;
1262 
1263  /// Double dereferenced so we can theoretically resize easily.
1264  UT_VoxelTile<T> *myTiles;
1265 
1266  /// Outside values get this if constant borders are used
1267  T myBorderValue;
1268  /// Per axis scale factors for when extrapolating.
1269  T myBorderScale[3];
1270  UT_VoxelBorderType myBorderType;
1271 
1272  /// For initializing the tiles from shared memory.
1273  SYS_SharedMemory *mySharedMem;
1274  SYS_SharedMemoryView *mySharedMemView;
1275 };
1276 
1277 
1278 ///
1279 /// UT_VoxelMipMap
1280 ///
1281 /// This provides a mip-map type structure for a voxel array.
1282 /// It manages the different levels of voxels arrays that are needed.
1283 /// You can create different types of mip maps: average, maximum, etc,
1284 /// which can allow different tricks.
1285 /// Each level is one half the previous level, rounded up.
1286 /// Out of bound voxels are ignored from the lower levels.
1287 ///
1288 template <typename T>
1289 class UT_VoxelMipMap
1290 {
1291 public:
1292  /// The different types of functions that can be used for
1293  /// constructing a mip map.
1294  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1295 
1296  UT_VoxelMipMap();
1297  virtual ~UT_VoxelMipMap();
1298 
1299  /// Copy constructor.
1300  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1301 
1302  /// Assignment operator:
1303  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1304 
1305  /// Builds from a given voxel array. The ownership flag determines
1306  /// if we gain ownership of the voxel array and should delete it.
1307  /// In any case, the new levels are owned by us.
1308  void build(UT_VoxelArray<T> *baselevel,
1309  mipmaptype function);
1310 
1311  /// Same as above but construct mipmaps simultaneously for more than
1312  /// one function. The order of the functions will correspond to the
1313  /// order of the data values passed to the traversal callback.
1314  void build(UT_VoxelArray<T> *baselevel,
1315  const UT_Array<mipmaptype> &functions);
1316 
1317  /// This does a top down traversal of the implicit octree defined
1318  /// by the voxel array. Returning false will abort that
1319  /// branch of the octree.
1320  /// The bounding box given is in cell space and is an exclusive
1321  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1322  /// Note that each bounding box will not be square, unless you
1323  /// have the good fortune of starting with a power of 2 cube.
1324  /// The boolean is true when the callback is invoked on a
1325  /// base level.
1326  typedef bool (*Callback)(const T *funcs,
1327  const UT_BoundingBox &box,
1328  bool baselevel, void *data);
1329  void traverseTopDown(Callback function,
1330  void *data) const;
1331 
1332  /// Top down traversal on op. OP is invoked with
1333  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1334  ///
1335  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1336  /// level 0 means the base level.
1337  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1338  /// gives the index to extract the value from that level.
1339  template <typename OP>
1340  void traverseTopDown(OP&op) const;
1341 
1342 
1343  /// Top down traversal, but which quad tree is visited first
1344  /// is controlled by
1345  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1346  /// Lower values are visited first.
1347  template <typename OP>
1348  void traverseTopDownSorted(OP&op) const;
1349 
1350 
1351  /// Return the amount of memory used by this mipmap.
1352  int64 getMemoryUsage(bool inclusive) const;
1353 
1354  int numLevels() const { return myNumLevels+1; }
1355 
1356  /// level 0 is the original grid, each level higher is a power
1357  /// of two smaller.
1358  const UT_VoxelArray<T> *level(int level, int function) const
1359  {
1360  if (level == 0)
1361  return myBaseLevel;
1362 
1363  return myLevels(function)[numLevels() - 1 - level];
1364  }
1365 
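// Editor's sketch (illustrative): building a maximum mip map and walking its
// implicit octree with the functor form of traverseTopDown() described
// above.
#if 0
struct ExampleVisitor
{
    // Return false to prune this branch of the octree; level 0 is the base grid.
    bool operator()(const UT_BoundingBoxI &indexbox, int level)
    {
        return true;    // visit everything
    }
};

static void
exampleMipMap(UT_VoxelArray<fpreal32> *base)
{
    UT_VoxelMipMap<fpreal32> mip;
    mip.build(base, UT_VoxelMipMap<fpreal32>::MIPMAP_MAXIMUM);

    ExampleVisitor visitor;
    mip.traverseTopDown(visitor);
}
#endif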
1366 private:
1367  void doTraverse(int x, int y, int z, int level,
1368  Callback function,
1369  void *data) const;
1370 
1371  /// Note: This variant of doTraverse has the opposite sense of level!
1372  template <typename OP>
1373  void doTraverse(int x, int y, int z, int level,
1374  OP &op) const;
1375  template <typename OP>
1376  void doTraverseSorted(int x, int y, int z, int level,
1377  OP &op) const;
1378 
1379  void initializePrivate();
1380  void destroyPrivate();
1381 
1382  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1383  downsample,
1384  UT_VoxelArray<T> &, dst,
1385  const UT_VoxelArray<T> &, src,
1386  mipmaptype, function)
1387  void downsamplePartial(UT_VoxelArray<T> &dst,
1388  const UT_VoxelArray<T> &src,
1389  mipmaptype function,
1390  const UT_JobInfo &info);
1391 
1392 protected:
1393  T mixValues(T t1, T t2, mipmaptype function) const
1394  {
1395  switch (function)
1396  {
1397  case MIPMAP_MAXIMUM:
1398  return SYSmax(t1, t2);
1399 
1400  case MIPMAP_AVERAGE:
1401  return (t1 + t2) / 2;
1402 
1403  case MIPMAP_MINIMUM:
1404  return SYSmin(t1, t2);
1405  }
1406 
1407  return t1;
1408  }
1409 
1410 
1411  /// This stores the base most level that was provided
1412  /// externally.
1413  UT_VoxelArray<T> *myBaseLevel;
1414  /// If true, we will delete the base level when we are done.
1416 
1417  /// Tracks the number of levels which we used to represent
1418  /// this hierarchy.
1419  int myNumLevels;
1420  /// The array of VoxelArrays, one per level.
1421  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1422  /// as big in each dimension. However, every layer is clamped
1423  /// against the resolution of the base layer.
1424  /// We own all these layers.
1426 };
1427 
1428 
1429 /// Iterator for Voxel Arrays
1430 ///
1431 /// This class eliminates the need for having
1432 /// for (z = 0; z < zres; z++)
1433 /// ...
1434 /// for (x = 0; x < xres; x++)
1435 /// loops everywhere.
1436 ///
1437 /// Note that the order of iteration is undefined! (The actual order is
1438 /// to complete each tile in turn, thereby hopefully improving cache
1439 /// coherency)
1440 ///
1441 /// It is safe to write to the voxel array while this iterator is active.
1442 /// It is *not* safe to resize the voxel array (or destroy it)
1443 ///
1444 /// The iterator is similar in principle to an STL iterator, but somewhat
1445 /// simpler. The classic STL loop
1446 /// for ( it = begin(); it != end(); ++it )
1447 /// is done using
1448 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1449 ///
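// Editor's sketch (illustrative): the iteration pattern described above,
// doubling every voxel in place.  Default construction of the iterator is
// assumed; setCompressOnExit() lets it re-compress tiles it has finished
// writing to.
#if 0
static void
exampleDoubleAll(UT_VoxelArray<fpreal32> &vox)
{
    UT_VoxelArrayIterator<fpreal32> vit;

    vit.setArray(&vox);
    vit.setCompressOnExit(true);
    for (vit.rewind(); !vit.atEnd(); vit.advance())
        vit.setValue(vit.getValue() * 2.0f);
}
#endif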
1450 template <typename T>
1451 class UT_VoxelArrayIterator
1452 {
1453 public:
1457  virtual ~UT_VoxelArrayIterator();
1458 
1459  void setArray(UT_VoxelArray<T> *vox)
1460  {
1461  myCurTile = -1;
1462  myHandle.resetHandle();
1463  myArray = vox;
1464  // Reset the range
1465  setPartialRange(0, 1);
1466  }
1467  void setConstArray(const UT_VoxelArray<T> *vox)
1468  {
1469  setArray((UT_VoxelArray<T> *) vox);
1470  }
1471 
1472  /// Iterates over the array pointed to by the handle. Only
1473  /// supports read access during the iteration as it does
1474  /// a read lock.
1475  void setHandle(UT_COWReadHandle<UT_VoxelArray<T> > handle)
1476  {
1477  myHandle = handle;
1478  // Ideally we'd have a separate const iterator
1479  // from our non-const iterator so this would
1480  // only be exposed in the const version.
1481  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1482 
1483  // Reset our range.
1484  myCurTile = -1;
1485  setPartialRange(0, 1);
1486  }
1487 
1488 
1489  /// Restricts this iterator to only run over a subset
1490  /// of the tiles. The tiles will be divided into approximately
1491  /// numranges equal groups; this will be the idx'th.
1492  /// The resulting iterator may have zero tiles.
1493  void setPartialRange(int idx, int numranges);
1494 
1495  /// Ties this iterator to the given jobinfo so it will
1496  /// match the jobinfo's processing.
1497  void splitByTile(const UT_JobInfo &info);
1498 
1499  /// Assigns an interrupt handler. This will be tested whenever
1500  /// it advances to a new tile. If it is interrupted, the iterator
1501  /// will jump forward to atEnd()
1502  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1503  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1504 
1505  /// Restricts this iterator to the tiles that intersect
1506  /// the given bounding box of voxel coordinates.
1507  /// Note that this will not be a precise restriction as
1508  /// each tile is either included or not.
1509  /// You should setPartialRange() after setting the bbox range
1510  /// The bounding box is on the [0..1]^3 range.
1511  void restrictToBBox(const UT_BoundingBox &bbox);
1512  /// The [xmin, xmax] are inclusive and measured in voxels.
1513  void restrictToBBox(int xmin, int xmax,
1514  int ymin, int ymax,
1515  int zmin, int zmax);
1516 
1517  /// Resets the iterator to point to the first voxel.
1518  void rewind();
1519 
1520  /// Returns true if we have iterated over all of the voxels.
1521  bool atEnd() const
1522  { return myCurTile < 0; }
1523 
1524  /// Advances the iterator to point to the next voxel.
1525  void advance()
1526  {
1527  // We try to advance each axis, rolling over to the next.
1528  // If we exhaust this tile, we call advanceTile.
1529  myPos[0]++;
1530  myTileLocalPos[0]++;
1531  if (myTileLocalPos[0] >= myTileSize[0])
1532  {
1533  // Wrapped in X.
1534  myPos[0] -= myTileLocalPos[0];
1535  myTileLocalPos[0] = 0;
1536 
1537  myPos[1]++;
1538  myTileLocalPos[1]++;
1539  if (myTileLocalPos[1] >= myTileSize[1])
1540  {
1541  // Wrapped in Y.
1542  myPos[1] -= myTileLocalPos[1];
1543  myTileLocalPos[1] = 0;
1544 
1545  myPos[2]++;
1546  myTileLocalPos[2]++;
1547  if (myTileLocalPos[2] >= myTileSize[2])
1548  {
1549  // Wrapped in Z! Finished this tile!
1550  advanceTile();
1551  }
1552  }
1553  }
1554  }
1555 
1556  /// Retrieve the current location of the iterator.
1557  int x() const { return myPos[0]; }
1558  int y() const { return myPos[1]; }
1559  int z() const { return myPos[2]; }
1560  int idx(int idx) const { return myPos[idx]; }
1561 
1562  /// Retrieves the value that we are currently pointing at.
1563  /// This is faster than an operator(x,y,z) as we already know
1564  /// our current tile and that bounds checking isn't needed.
1565  T getValue() const
1566  {
1567  UT_ASSERT_P(myCurTile >= 0);
1568 
1569  UT_VoxelTile<T> *tile;
1570 
1571  tile = myArray->getLinearTile(myCurTile);
1572  return (*tile)(myTileLocalPos[0],
1573  myTileLocalPos[1],
1574  myTileLocalPos[2]);
1575  }
1576 
1577  /// Sets the voxel we are currently pointing to the given value.
1578  void setValue(T t) const
1579  {
1580  UT_ASSERT_P(myCurTile >= 0);
1581 
1582  UT_VoxelTile<T> *tile;
1583 
1584  tile = myArray->getLinearTile(myCurTile);
1585 
1586  tile->setValue(myTileLocalPos[0],
1587  myTileLocalPos[1],
1588  myTileLocalPos[2], t);
1589  }
1590 
1591  /// Returns true if the tile we are currently in is a constant tile.
1592  bool isTileConstant() const
1593  {
1594  UT_ASSERT_P(myCurTile >= 0);
1595 
1596  UT_VoxelTile<T> *tile;
1597 
1598  tile = myArray->getLinearTile(myCurTile);
1599  return tile->isConstant();
1600  }
1601 
1602  /// The current tile covers the voxels indexed [start,end).
1603  void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
1604  {
1605  start.x() = myTilePos[0] * TILESIZE;
1606  start.y() = myTilePos[1] * TILESIZE;
1607  start.z() = myTilePos[2] * TILESIZE;
1608  end = start;
1609  end.x() += myTileSize[0];
1610  end.y() += myTileSize[1];
1611  end.z() += myTileSize[2];
1612  }
1613 
1614  /// The current tile covers the *inclusive* voxels indexed
1615  /// in the returned bounding box.
1616  UT_BoundingBoxI getTileBBox() const
1617  {
1618  UT_Vector3I start, end;
1619  getTileVoxels(start, end);
1620  return UT_BoundingBoxI(start, end);
1621  }
1622 
1623  /// Returns true if we are at the start of a new tile.
1624  bool isStartOfTile() const
1625  { return !(myTileLocalPos[0] ||
1626  myTileLocalPos[1] ||
1627  myTileLocalPos[2]); }
1628 
1629  /// Returns the VoxelTile we are currently processing
1630  UT_VoxelTile<T> *getTile() const
1631  {
1632  UT_ASSERT_P(myCurTile >= 0);
1633  return myArray->getLinearTile(myCurTile);
1634  }
1635  int getLinearTileNum() const
1636  {
1637  return myCurTile;
1638  }
1639 
1640  /// Advances the iterator to point to the next tile. Useful if the
1641  /// constant test showed that you didn't need to deal with this one.
1642  void advanceTile();
1643 
1644  /// Advances the iterator to pointing just before the next tile so
1645  /// the next advance() will be an advanceTile(). This is useful
1646  /// if you want to do a continue; as your break but the forloop
1647  /// is doing advance()
1648  /// Note the iterator is in a bad state until advance() is called.
1649  void skipToEndOfTile();
1650 
1651  /// Sets a flag which causes the iterator to tryCompress()
1652  /// tiles when it is done with them.
1653  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1654  void setCompressOnExit(bool shouldcompress)
1655  { myShouldCompressOnExit = shouldcompress; }
1656 
1657  /// These templated algorithms are designed to apply simple operations
1658  /// across all of the voxels with as little overhead as possible.
1659  /// The iterator should already point to a voxel array and if multithreaded
1660  /// had its partial range set. The source arrays must be matching size.
1661  /// The operator should support a () operator, and the result is
1662  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
1663  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1664  /// Note if both source and destination tiles are constant, only
1665  /// a single operation is invoked.
1666  template <typename OP>
1667  void applyOperation(OP &op);
1668  template <typename OP, typename S>
1669  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1670  template <typename OP>
1671  void applyOperation(OP &op, T a);
1672  template <typename OP, typename S, typename R>
1673  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1674  const UT_VoxelArray<R> &b);
1675  template <typename OP, typename S, typename R, typename Q>
1676  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1677  const UT_VoxelArray<R> &b,
1678  const UT_VoxelArray<Q> &c);
1679  /// These variants will invoke op.isNoop(a, b, ...) which will return
1680  /// true if those values won't affect the destination. This allows
1681  /// constant source tiles to be skipped, for example when adding
1682  /// 0.
1683  template <typename OP, typename S>
1684  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1685  template <typename OP>
1686  void applyOperationCheckNoop(OP &op, T a);
1687 
1688  /// These variants of apply operation also accept a mask array. The
1689  /// operation is applied only where the mask is greater than 0.5.
1690  template <typename OP, typename M>
1691  void maskedApplyOperation(OP &op,
1692  const UT_VoxelArray<M> &mask);
1693  template <typename OP, typename S, typename M>
1694  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1695  const UT_VoxelArray<M> &mask);
1696  template <typename OP, typename S, typename R, typename M>
1697  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1698  const UT_VoxelArray<R>& b,
1699  const UT_VoxelArray<M> &mask);
1700  template <typename OP, typename S, typename R, typename Q, typename M>
1701  void maskedApplyOperation(OP& op, const UT_VoxelArray<S> &a,
1702  const UT_VoxelArray<R>& b,
1703  const UT_VoxelArray<Q>& c,
1704  const UT_VoxelArray<M> &mask);
1705 
1706  /// Assign operation works like apply operation, but *this is written
1707  /// to without reading, so there is one less parameter to the ()
1708  /// callback. This can optimize constant tile writes as the
1709  /// constant() status of the destination doesn't matter.
1710  template <typename OP, typename S>
1711  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1712  template <typename OP, typename S, typename R>
1713  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1714  const UT_VoxelArray<R> &b);
1715  template <typename OP, typename S, typename R, typename Q>
1716  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1717  const UT_VoxelArray<R> &b,
1718  const UT_VoxelArray<Q> &c);
1719 
1720  /// These variants of assign operation also accept a mask array. The
1721  /// assignment operation is performed only where the mask is greater
1722  /// than 0.5.
1723  template <typename OP, typename S, typename M>
1724  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1725  const UT_VoxelArray<M>& mask);
1726  template <typename OP, typename S, typename R, typename M>
1727  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1728  const UT_VoxelArray<R>& b,
1729  const UT_VoxelArray<M>& mask);
1730  template <typename OP, typename S, typename R, typename Q, typename M>
1731  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1732  const UT_VoxelArray<R>& b,
1733  const UT_VoxelArray<Q>& c,
1734  const UT_VoxelArray<M>& mask);
1735 
1736  /// Reduction operators.
1737  /// op.reduce(T a) called for each voxel, *but*,
1738  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1739  template <typename OP>
1740  void reduceOperation(OP &op);
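// Editor's sketch (illustrative): functors matching the conventions above.
// applyOperation() calls op(currentvalue) and writes the result back;
// reduceOperation() calls op.reduce() per voxel and op.reduceMany() for
// constant tiles.
#if 0
struct ExampleScale
{
    fpreal32 myScale;
    fpreal32 operator()(fpreal32 v) const { return v * myScale; }
};

struct ExampleSum
{
    double mySum = 0;
    void reduce(fpreal32 v)             { mySum += v; }
    void reduceMany(fpreal32 v, int n)  { mySum += (double)v * n; }
};

static void
exampleApplyAndReduce(UT_VoxelArray<fpreal32> &vox)
{
    UT_VoxelArrayIterator<fpreal32> vit;
    vit.setArray(&vox);

    ExampleScale scale = { 0.5f };
    vit.applyOperation(scale);          // every voxel becomes 0.5 * v

    ExampleSum sum;
    vit.reduceOperation(sum);           // sum.mySum now holds the total
}
#endif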
1741 
1742  UT_VoxelArray<T> *getArray() const { return myArray; }
1743 
1744 protected:
1745  /// The array we belong to.
1746  UT_VoxelArray<T> *myArray;
1747  /// The handle that we have locked to get our array. It is null
1748  /// by default which makes the lock/unlock nops.
1749  UT_COWReadHandle<UT_VoxelArray<T> > myHandle;
1750 
1751  /// Absolute index into voxel array.
1752  int myPos[3];
1753 
1754  /// Flag determining if we should compress tiles whenever we
1755  /// advance out of them.
1756  bool myShouldCompressOnExit;
1757 
1760 
1761 public:
1762  /// Our current linear tile idx. A value of -1 implies at end.
1763  int myCurTile;
1764 
1765  /// Our current index into the tile list
1767 
1768  /// Our start & end tiles for processing a subrange.
1769  /// The tile range is half open [start, end)
1770  int myTileStart, myTileEnd;
1771 
1772  /// Which tile we are as per tx,ty,tz rather than linear index.
1773  int myTilePos[3];
1774 
1775  /// Our position within the current tile.
1776  int myTileLocalPos[3];
1777 
1778  /// The size of the current tile
1779  int myTileSize[3];
1780 
1781  /// The job info to use for tile fetching
1782  const UT_JobInfo *myJobInfo;
1783 
1784  UT_Interrupt *myInterrupt;
1785 };
1786 
1787 /// Iterator for tiles inside Voxel Arrays
1788 ///
1789 /// This class eliminates the need for having
1790 /// for (z = 0; z < zres; z++)
1791 /// ...
1792 /// for (x = 0; x < xres; x++)
1793 /// loops everywhere.
1794 ///
1795  /// The iterator is similar in principle to an STL iterator, but somewhat
1796 /// simpler. The classic STL loop
1797 /// for ( it = begin(); it != end(); ++it )
1798 /// is done using
1799 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1800 ///
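/// A minimal traversal sketch, assuming a UT_VoxelArrayF named vol:
/// @code
/// UT_VoxelTileIteratorF tit;
/// for (int i = 0; i < vol.numTiles(); i++)
/// {
///     tit.setLinearTile(i, &vol);
///     for (tit.rewind(); !tit.atEnd(); tit.advance())
///         tit.setValue(tit.getValue() * 0.5f);
/// }
/// @endcode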
1801 template <typename T>
1802 class UT_VoxelTileIterator
1803 {
1804 public:
1807  template <typename S>
1810  virtual ~UT_VoxelTileIterator();
1811 
1812  template <typename S>
1813  void setTile(const UT_VoxelArrayIterator<S> &vit,
1814  UT_VoxelArray<T> *array)
1815  {
1816  UT_ASSERT_P(vit.isStartOfTile());
1817  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1818  myArray = array;
1819  myTileStart[0] = vit.x();
1820  myTileStart[1] = vit.y();
1821  myTileStart[2] = vit.z();
1822  }
1823 
1824  void setTile(const UT_VoxelArrayIterator<T> &vit)
1825  {
1826  setTile(vit, vit.getArray());
1827  }
1828 
1829  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1830  {
1831  myCurTile = array->getLinearTile(lineartilenum);
1832  myArray = array;
1833 
1834  array->linearTileToXYZ(lineartilenum,
1835  myTileStart[0], myTileStart[1], myTileStart[2]);
1836  myTileStart[0] <<= TILEBITS;
1837  myTileStart[1] <<= TILEBITS;
1838  myTileStart[2] <<= TILEBITS;
1839  }
1840 
1841  /// Resets the iterator to point to the first voxel.
1842  void rewind();
1843 
1844  /// Returns true if we have iterated over all of the voxels.
1845  bool atEnd() const
1846  { return myCurTile == 0 || myAtEnd; }
1847 
1848  /// Advances the iterator to point to the next voxel.
1849  void advance()
1850  {
1851  // We try to advance each axis, rolling over to the next.
1852  // If we exhaust this tile, we call advanceTile.
1853  myPos[0]++;
1854  myTileLocalPos[0]++;
1855  if (myTileLocalPos[0] >= myTileSize[0])
1856  {
1857  // Wrapped in X.
1858  myPos[0] -= myTileLocalPos[0];
1859  myTileLocalPos[0] = 0;
1860 
1861  myPos[1]++;
1862  myTileLocalPos[1]++;
1863  if (myTileLocalPos[1] >= myTileSize[1])
1864  {
1865  // Wrapped in Y.
1866  myPos[1] -= myTileLocalPos[1];
1867  myTileLocalPos[1] = 0;
1868 
1869  myPos[2]++;
1870  myTileLocalPos[2]++;
1871  if (myTileLocalPos[2] >= myTileSize[2])
1872  {
1873  // Wrapped in Z! Finished this tile!
1874  advanceTile();
1875  }
1876  }
1877  }
1878  }
1879 
1880  /// Retrieve the current location of the iterator, in the
1881  /// containing voxel array, not in the tile.
1882  int x() const { return myPos[0]; }
1883  int y() const { return myPos[1]; }
1884  int z() const { return myPos[2]; }
1885  int idx(int idx) const { return myPos[idx]; }
1886 
1887  /// Retrieves the value that we are currently pointing at.
1888  /// This is faster than an operator(x,y,z) as we already know
1889  /// our current tile and that bounds checking isn't needed.
1890  T getValue() const
1891  {
1892  UT_ASSERT_P(myCurTile);
1893 
1894  return (*myCurTile)(myTileLocalPos[0],
1895  myTileLocalPos[1],
1896  myTileLocalPos[2]);
1897  }
1898 
1899  /// Sets the voxel we are currently pointing to the given value.
1900  void setValue(T t) const
1901  {
1902  UT_ASSERT_P(myCurTile);
1903 
1904  myCurTile->setValue(myTileLocalPos[0],
1905  myTileLocalPos[1],
1906  myTileLocalPos[2], t);
1907  }
1908 
1909  /// Returns true if the tile we are currently in is a constant tile.
1910  bool isTileConstant() const
1911  {
1912  UT_ASSERT_P(myCurTile);
1913 
1914  return myCurTile->isConstant();
1915  }
1916 
1917  /// Returns true if we are at the start of a new tile.
1918  bool isStartOfTile() const
1919  { return !(myTileLocalPos[0] ||
1920  myTileLocalPos[1] ||
1921  myTileLocalPos[2]); }
1922 
1923  /// Returns the VoxelTile we are currently processing
1924  UT_VoxelTile<T> *getTile() const
1925  {
1926  return myCurTile;
1927  }
1928 
1929  /// Advances the iterator to point to the next tile. Since
1930  /// we are restricted to one tile, this effectively just ends the iterator.
1931  void advanceTile();
1932 
1933  /// Sets a flag which causes the iterator to tryCompress()
1934  /// tiles when it is done with them.
1935  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1936  void setCompressOnExit(bool shouldcompress)
1937  { myShouldCompressOnExit = shouldcompress; }
1938 
1939  /// Reduction operators.
1940  /// op.reduce(T a) called for each voxel, *but*,
1941  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1942  /// Early exits if op.reduce() returns false.
1943  template <typename OP>
1944  bool reduceOperation(OP &op);
1945 
1946 protected:
1947  /// Current processing tile
1948  UT_VoxelTile<T> *myCurTile;
1949  UT_VoxelArray<T> *myArray;
1950 
1951  /// Absolute index into voxel array.
1952  int myPos[3];
1953  /// Absolute index of start of tile
1954  int myTileStart[3];
1955 
1956  /// Flag determining if we should compress tiles whenever we
1957  /// advance out of them.
1958  bool myShouldCompressOnExit;
1959 
1960  /// Since we want to allow multiple passes, we can't
1961  /// clear out myCurTile when we hit the end.
1962  bool myAtEnd;
1963 
1964 public:
1965  /// Our position within the current tile.
1966  int myTileLocalPos[3];
1967 
1968  /// The size of the current tile
1969  int myTileSize[3];
1970 };
1971 
1972 /// Probe for Voxel Arrays
1973 ///
1974 /// This class is designed to allow for efficient evaluation
1975 /// of aligned indices of a voxel array, provided the voxels are iterated
1976  /// in a tile-by-tile, x-innermost manner.
1977 ///
1978 /// This class will create a local copy of the voxel data where needed,
1979 /// uncompressing the information once for every 16 queries. It will
1980 /// also create an aligned buffer so you can safely use v4uf on fpreal32
1981 /// data.
1982 ///
1983 /// For queries where you need surrounding values, the prex and postx can
1984  /// specify padding on the probe. prex should be -1 to allow reading
1985  /// at a -1 offset, and postx 1 to allow reading at a +1 offset.
1986 ///
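/// A minimal read-only sketch, assuming a UT_VoxelArrayF named vol and a
/// UT_VoxelArrayIteratorF named vit walking it in that order:
/// @code
/// UT_VoxelProbeF probe;
/// probe.setConstArray(&vol, -1, 1);      // allow reads at x-1 and x+1
/// for (vit.rewind(); !vit.atEnd(); vit.advance())
/// {
///     probe.setIndex(vit);
///     fpreal32 ddx = probe.getValue(1) - probe.getValue(-1);
///     // ... consume ddx ...
/// }
/// @endcode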
1987 
1988 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1989 class UT_VoxelProbe
1990 {
1991 public:
1992  UT_VoxelProbe();
1993  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1994  virtual ~UT_VoxelProbe();
1995 
1996  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1997  void setConstArray(const UT_VoxelArray<T> *vox,
1998  int prex = 0, int postx = 0)
1999  {
2000  SYS_STATIC_ASSERT(DoWrite == false);
2001  setArray((UT_VoxelArray<T> *)vox, prex, postx);
2002  }
2003 
2004  UT_VoxelArray<T> *getArray() const { return myArray; }
2005 
2006  bool isValid() const { return myArray != 0; }
2007 
2008  inline T getValue() const
2009  {
2010  return *myCurLine;
2011  }
2012  inline T getValue(int offset) const
2013  {
2014  return myCurLine[myStride*offset];
2015  }
2016 
2017  inline void setValue(T value)
2018  {
2019  UT_ASSERT_P(DoWrite);
2020  *myCurLine = value;
2021  if (TestForWrites)
2022  myDirty = true;
2023  }
2024 
2025 
2026  /// Resets where we currently point to.
2027  /// Returns true if we had to reset our cache line. If we didn't,
2028  /// and you have multiple probes acting in-step, you can just
2029  /// advanceX() the other probes
2030  template <typename S>
2031  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2032  { return setIndex(vit.x(), vit.y(), vit.z()); }
2033  template <typename S>
2034  bool setIndex(UT_VoxelTileIterator<S> &vit)
2035  { return setIndex(vit.x(), vit.y(), vit.z()); }
2036 
2037  bool setIndex(int x, int y, int z);
2038 
2039  /// Blindly advances our current pointer.
2040  inline void advanceX()
2041  {
2042  myCurLine += myStride;
2043  myX++;
2044  UT_ASSERT_P(myX < myMaxValidX);
2045  }
2046 
2047  /// Adjusts our current pointer to the given absolute location,
2048  /// assumes the new value is inside our valid range.
2049  inline void resetX(int x)
2050  {
2051  myCurLine += myStride * (x - myX);
2052  myX = x;
2053  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
2054  }
2055 
2056 protected:
2057  void reloadCache(int x, int y, int z);
2058 
2059  void writeCacheLine();
2060 
2061  void buildConstantCache(T value);
2062 
2063  T *myCurLine;
2064  /// myCacheLine[0] is the start of the cache line, so -1 would be
2065  /// the first pre-rolled value
2066  T *myCacheLine;
2067  /// Where we actually allocated our cache line, aligned to 4x multiple
2068  /// to ensure SSE compatible.
2070 
2071  int myX, myY, myZ;
2072  int myPreX, myPostX;
2073  int myStride;
2075  /// Half inclusive [,) range of valid x queries for current cache.
2076  int myMinValidX, myMaxValidX;
2077 
2078  /// Determines if we have anything to write back, only
2079  /// valid if TestForWrites is enabled.
2080  bool myDirty;
2081 
2083 
2084  friend class UT_VoxelProbeCube<T>;
2085  friend class UT_VoxelProbeFace<T>;
2086 };
2087 
2088 ///
2089 /// The vector probe is three normal probes into separate voxel arrays
2090 /// making it easier to read and write to aligned vector fields.
2091 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
2092 ///
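/// A minimal sketch, assuming three aligned UT_VoxelArrayF components
/// velx, vely, velz and a UT_VoxelArrayIteratorF named vit over velx:
/// @code
/// UT_VoxelVectorProbeF vel;
/// vel.setConstArray(&velx, &vely, &velz);
/// for (vit.rewind(); !vit.atEnd(); vit.advance())
/// {
///     vel.setIndex(vit);
///     UT_Vector3 v = vel.getValue();
///     // ... consume v ...
/// }
/// @endcode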
2093 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2095 {
2096 public:
2097  UT_VoxelVectorProbe()
2098  { }
2099  UT_VoxelVectorProbe(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2100  { setArray(vx, vy, vz); }
2101  virtual ~UT_VoxelVectorProbe()
2102  {}
2103 
2104  void setArray(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2105  {
2106  myLines[0].setArray(vx);
2107  myLines[1].setArray(vy);
2108  myLines[2].setArray(vz);
2109  }
2110  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
2111  {
2112  SYS_STATIC_ASSERT(DoWrite == false);
2113  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
2114  }
2115 
2116  inline UT_Vector3 getValue() const
2117  {
2118  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2119  }
2120  inline T getValue(int axis) const
2121  {
2122  return myLines[axis].getValue();
2123  }
2124 
2125  inline void setValue(const UT_Vector3 &v)
2126  {
2127  myLines[0].setValue(v.x());
2128  myLines[1].setValue(v.y());
2129  myLines[2].setValue(v.z());
2130  }
2131 
2132  inline void setComponent(int axis, T val)
2133  {
2134  myLines[axis].setValue(val);
2135  }
2136 
2137  /// Resets where we currently point to.
2138  /// Returns true if we had to reset our cache line. If we didn't,
2139  /// and you have multiple probes acting in-step, you can just
2140  /// advanceX() the other probes
2141  template <typename S>
2142  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2143  { return setIndex(vit.x(), vit.y(), vit.z()); }
2144  template <typename S>
2145  bool setIndex(UT_VoxelTileIterator<S> &vit)
2146  { return setIndex(vit.x(), vit.y(), vit.z()); }
2147 
2148  bool setIndex(int x, int y, int z)
2149  {
2150  if (myLines[0].setIndex(x, y, z))
2151  {
2152  myLines[1].setIndex(x, y, z);
2153  myLines[2].setIndex(x, y, z);
2154  return true;
2155  }
2156  myLines[1].advanceX();
2157  myLines[2].advanceX();
2158  return false;
2159  }
2160 
2161  void advanceX()
2162  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2163 
2164 protected:
2165  UT_VoxelProbe<T, DoRead, DoWrite, TestForWrites> myLines[3];
2166 };
2167 
2168 template <typename T>
2169 class
2170 UT_VoxelProbeCube
2171 {
2172 public:
2173  UT_VoxelProbeCube();
2174  virtual ~UT_VoxelProbeCube();
2175 
2176  void setConstCubeArray(const UT_VoxelArray<T> *vox);
2177  void setConstPlusArray(const UT_VoxelArray<T> *vox);
2178 
2179  /// Allows you to query +/-1 in each direction. In cube update,
2180  /// all are valid. In plus update, only one of x, y, and z may be
2181  /// non-zero.
2183  T
2184  getValue(int x, int y, int z) const
2185  {
2186  UT_ASSERT_P(x >= -1 && x <= 1 &&
2187  y >= -1 && y <= 1 &&
2188  z >= -1 && z <= 1);
2189 
2190  return myLines[y+1][z+1].getValue(x);
2191  }
2192 
2194  T
2195  getValue(const UT_Vector3I &offset) const
2196  {
2197  return getValue(offset[0], offset[1], offset[2]);
2198  }
2199 
2200  template <typename S>
2201  bool setIndexCube(UT_VoxelArrayIterator<S> &vit)
2202  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2203  template <typename S>
2204  bool setIndexCube(UT_VoxelTileIterator<S> &vit)
2205  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2206  bool setIndexCube(int x, int y, int z);
2207 
2208  template <typename S>
2209  bool setIndexPlus(UT_VoxelArrayIterator<S> &vit)
2210  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2211  template <typename S>
2212  bool setIndexPlus(UT_VoxelTileIterator<S> &vit)
2213  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2214  bool setIndexPlus(int x, int y, int z);
2215 
2216  /// Computes central difference gradient, does not scale
2217  /// by the step size (which is twice voxelsize)
2218  /// Requires PlusArray
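 /// A minimal sketch, assuming a UT_VoxelArrayF named vol, its voxel size
 /// in a UT_Vector3 named voxelsize, and a UT_VoxelArrayIteratorF vit:
 /// @code
 /// UT_VoxelProbeCube<fpreal32> cube;
 /// cube.setConstPlusArray(&vol);
 /// for (vit.rewind(); !vit.atEnd(); vit.advance())
 /// {
 ///     cube.setIndexPlus(vit);
 ///     UT_Vector3 grad = cube.gradient();    // unscaled central difference
 ///     grad.x() /= 2 * voxelsize.x();
 ///     grad.y() /= 2 * voxelsize.y();
 ///     grad.z() /= 2 * voxelsize.z();
 ///     // ... consume grad ...
 /// }
 /// @endcode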
2219  UT_Vector3 gradient() const
2220  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2221  getValue(0,1,0) - getValue(0,-1,0),
2222  getValue(0,0,1) - getValue(0,0,-1)); }
2223 
2224  /// Computes the central difference curvature using the given
2225  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2226  /// Requires CubeArray.
2227  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2228 
2229  /// Computes the laplacian, again with a given 1/voxelsize.
2230  /// Requires PlusArray
2231  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2232 
2233 protected:
2234  /// Does a rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2235  /// so further queries with y+1 will be cache hits for 2 out of 3.
2236  static void rotateLines(UT_VoxelProbe<T, true, false, false> &ym,
2237  UT_VoxelProbe<T, true, false, false> &y0,
2238  UT_VoxelProbe<T, true, false, false> &yp);
2239 
2240  UT_VoxelProbe<T, true, false, false> myLines[3][3];
2241  /// Cached look up position. myValid stores if they are
2242  /// valid values or not
2243  bool myValid;
2244  int myX, myY, myZ;
2245  /// Half inclusive [,) range of valid x queries for current cache.
2246  int myMinValidX, myMaxValidX;
2247 };
2248 
2249 ///
2250 /// UT_VoxelProbeConstant
2251 ///
2252 /// Looks like a voxel probe but only returns a constant value.
2253 ///
2254 template <typename T>
2255 class
2256 UT_VoxelProbeConstant
2257 {
2258 public:
2259  UT_VoxelProbeConstant() {}
2260  virtual ~UT_VoxelProbeConstant() {}
2261 
2262  template <typename S>
2263  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2264  { return true; }
2265  template <typename S>
2266  bool setIndex(UT_VoxelTileIterator<S> &vit)
2267  { return true; }
2268  bool setIndex(int x, int y, int z)
2269  { return true; }
2270 
2271  void setValue(T val) { myValue = val; }
2272  T getValue() const { return myValue; }
2273 protected:
2274  T myValue;
2275 };
2276 
2277 ///
2278 /// UT_VoxelProbeAverage
2279 ///
2280  /// When working with MAC grids one often has slightly misaligned
2281  /// fields, ie, one field is at the half-grid spacing of another field.
2282  /// The step values are 0 if the dimension is aligned, -1 for half a step
2283  /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2284  /// (ie, (val(0)+val(1))/2).
2285 ///
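/// A minimal sketch, assuming a face-centered x-velocity UT_VoxelArrayF
/// named velx and a UT_VoxelArrayIteratorF vit over the cell-centered
/// grid; XStep of -1 averages val(-1) and val(0), ie, half a step back:
/// @code
/// UT_VoxelProbeAverage<fpreal32, -1, 0, 0> avgx;
/// avgx.setArray(&velx);
/// for (vit.rewind(); !vit.atEnd(); vit.advance())
/// {
///     avgx.setIndex(vit);
///     fpreal32 vx = avgx.getValue();
///     // ... consume vx ...
/// }
/// @endcode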
2286 template <typename T, int XStep, int YStep, int ZStep>
2287 class
2288 UT_VoxelProbeAverage
2289 {
2290 public:
2291  UT_VoxelProbeAverage() {}
2292  virtual ~UT_VoxelProbeAverage() {}
2293 
2294  void setArray(const UT_VoxelArray<T> *vox);
2295 
2296  template <typename S>
2297  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2298  { return setIndex(vit.x(), vit.y(), vit.z()); }
2299  template <typename S>
2300  bool setIndex(UT_VoxelTileIterator<S> &vit)
2301  { return setIndex(vit.x(), vit.y(), vit.z()); }
2302  bool setIndex(int x, int y, int z);
2303 
2304  /// Returns the velocity centered at this index, thus an average
2305  /// of the values in each of our internal probes.
2306  inline T getValue() const
2307  {
2308  if (ZStep)
2309  return (valueZ(1) + valueZ(0)) * 0.5;
2310  return valueZ(0);
2311  }
2312 
2313 protected:
2314  inline T valueZ(int z) const
2315  {
2316  if (YStep)
2317  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2318  return valueYZ(0, z);
2319  }
2320 
2321  inline T valueYZ(int y, int z) const
2322  {
2323  if (XStep > 0)
2324  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2325  if (XStep < 0)
2326  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2327  return myLines[y][z].getValue();
2328  }
2329 
2330  // Stores [Y][Z] lines.
2331  UT_VoxelProbe<T, true, false, false> myLines[2][2];
2332 };
2333 
2334 
2335 ///
2336 /// UT_VoxelProbeFace is designed to walk over three velocity
2337 /// fields that store face-centered values. The indices refer
2338 /// to the centers of the voxels.
2339 ///
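/// A minimal divergence sketch, assuming face-centered UT_VoxelArrayF
/// components velx, vely, velz, a UT_Vector3 named voxelsize, and a
/// UT_VoxelArrayIteratorF vit over the cell-centered grid:
/// @code
/// UT_VoxelProbeFace<fpreal32> vel;
/// vel.setArray(&velx, &vely, &velz);
/// vel.setVoxelSize(voxelsize);
/// for (vit.rewind(); !vit.atEnd(); vit.advance())
/// {
///     vel.setIndex(vit);
///     fpreal32 div = vel.divergence();
///     // ... consume div ...
/// }
/// @endcode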
2340 template <typename T>
2341 class
2342 UT_VoxelProbeFace
2343 {
2344 public:
2345  UT_VoxelProbeFace();
2346  virtual ~UT_VoxelProbeFace();
2347 
2348  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2349  void setVoxelSize(const UT_Vector3 &voxelsize);
2350 
2351  template <typename S>
2352  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2353  { return setIndex(vit.x(), vit.y(), vit.z()); }
2354  template <typename S>
2355  bool setIndex(UT_VoxelTileIterator<S> &vit)
2356  { return setIndex(vit.x(), vit.y(), vit.z()); }
2357  bool setIndex(int x, int y, int z);
2358 
2359  /// Get the face values on each face component.
2360  /// Parameters are axis then side.
2361  /// 0 is the lower face, 1 the higher face.
2362  inline T face(int axis, int side) const
2363  {
2364  if (axis == 0)
2365  return myLines[0][0].getValue(side);
2366  else
2367  return myLines[axis][side].getValue();
2368  }
2369 
2370  /// Returns the velocity centered at this index, thus an average
2371  /// of the values in each of our internal probes.
2372  inline UT_Vector3 value() const
2373  {
2374  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2375  0.5f * (face(1, 0) + face(1, 1)),
2376  0.5f * (face(2, 0) + face(2, 1)));
2377  }
2378 
2379  /// Returns the divergence of this cell.
2380  inline T divergence() const
2381  {
2382  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2383  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2384  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2385 
2386  }
2387 
2388 protected:
2389 
2390  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2391  UT_VoxelProbe<T, true, false, false> &yp);
2392 
2393 
2394  UT_VoxelProbe<T, true, false, false> myLines[3][2];
2395 
2396  /// Cached look up position. myValid stores if they are
2397  /// valid values or not
2398  bool myValid;
2399  int myX, myY, myZ;
2400  /// Half inclusive [,) range of valid x queries for current cache.
2401  int myMinValidX, myMaxValidX;
2402 
2403  UT_Vector3 myVoxelSize, myInvVoxelSize;
2404 };
2405 
2406 
2407 #include "UT_VoxelArray.C"
2408 
2409 
2410 // Typedefs for common voxel array types
2411 typedef UT_VoxelArray<fpreal32> UT_VoxelArrayF;
2412 typedef UT_VoxelArray<int64> UT_VoxelArrayI;
2413 typedef UT_VoxelArray<UT_Vector4> UT_VoxelArrayV4;
2414 
2415 typedef UT_VoxelArrayIterator<fpreal32> UT_VoxelArrayIteratorF;
2416 typedef UT_VoxelArrayIterator<int64> UT_VoxelArrayIteratorI;
2417 typedef UT_VoxelArrayIterator<UT_Vector4> UT_VoxelArrayIteratorV4;
2418 typedef UT_VoxelTileIterator<fpreal32> UT_VoxelTileIteratorF;
2419 typedef UT_VoxelTileIterator<int64> UT_VoxelTileIteratorI;
2420 typedef UT_VoxelTileIterator<UT_Vector4> UT_VoxelTileIteratorV4;
2421 typedef UT_VoxelMipMap<fpreal32> UT_VoxelMipMapF;
2422 // Read only probe
2423 typedef UT_VoxelProbe<fpreal32, true, false, false> UT_VoxelProbeF;
2424 typedef UT_VoxelProbe<UT_Vector4, true, false, false> UT_VoxelProbeV4;
2425 typedef UT_VoxelVectorProbe<fpreal32, true, false, false> UT_VoxelVectorProbeF;
2426 // Write only
2427 typedef UT_VoxelProbe<fpreal32, false, true, false> UT_VoxelWOProbeF;
2428 typedef UT_VoxelProbe<UT_Vector4, false, true, false> UT_VoxelWOProbeV4;
2429 typedef UT_VoxelVectorProbe<fpreal32, false, true, false> UT_VoxelVectorWOProbeF;
2430 // Read/Write always writeback.
2431 typedef UT_VoxelProbe<fpreal32, true, true, false> UT_VoxelRWProbeF;
2432 typedef UT_VoxelProbe<UT_Vector4, true, true, false> UT_VoxelRWProbeV4;
2433 typedef UT_VoxelVectorProbe<fpreal32, true, true, false> UT_VoxelVectorRWProbeF;
2434 // Read/Write with testing
2435 typedef UT_VoxelProbe<fpreal32, true, true, true> UT_VoxelRWTProbeF;
2436 typedef UT_VoxelProbe<UT_Vector4, true, true, true> UT_VoxelRWTProbeV4;
2437 typedef UT_VoxelVectorProbe<fpreal32, true, true, true> UT_VoxelVectorRWTProbeF;
2438 
2439 // TODO: add support for read-write probe cube
2440 typedef UT_VoxelProbeCube<fpreal32> UT_VoxelROProbeCubeF;
2441 
2442 typedef UT_COWHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayHandleF;
2443 typedef UT_COWWriteHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayWriteHandleF;
2444 typedef UT_COWReadHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayReadHandleF;
2445 
2446 typedef UT_COWHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayHandleV4;
2447 typedef UT_COWWriteHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayWriteHandleV4;
2448 typedef UT_COWReadHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayReadHandleV4;
2449 
2450 #endif
2451 