HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_VoxelArray.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
 10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 // TBB alloc results in real-world tests that are 3-4% faster. Yay!
37 // But unfortunately it is less aggressive with fragmentation, so
38 // we use effectively 2x the memory. Boo.
39 
40 //#define VOXEL_USE_TBB_ALLOC
41 
42 #ifdef VOXEL_USE_TBB_ALLOC
43 
44 #include <tbb/scalable_allocator.h>
45 
46 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
47 #define UT_VOXEL_FREE(x) scalable_free(x)
48 
49 #else
50 
51 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
52 #define UT_VOXEL_FREE(x) SYSafree(x)
53 
54 #endif
55 
class UT_Filter;
class UT_JSONWriter;
class UT_JSONParser;

// Tiles are TILESIZE^3 voxels (16^3 by default). TILEMASK extracts the
// within-tile coordinate from a global voxel index; >> TILEBITS gives
// the tile index.
static const int TILEBITS = 4;
static const int TILESIZE = 1 << TILEBITS;
static const int TILEMASK = TILESIZE-1;

///
/// Behaviour of out of bound reads.
///
// NOTE(review): the enumerator list was stripped by the doc extractor;
// reconstructed from the switch in UT_VoxelArray::getValue() (constant
// return, modulo wrap, clamp, extrapolate) -- confirm names/order
// against the original header.
enum UT_VoxelBorderType
{
    UT_VOXELBORDER_CONSTANT,    // Out-of-range reads return the border value.
    UT_VOXELBORDER_REPEAT,      // Indices wrap around (tiling).
    UT_VOXELBORDER_STREAK,      // Indices clamp to the nearest edge voxel.
    UT_VOXELBORDER_EXTRAP       // Edge value extrapolated by the border scale.
};

template <typename T> class UT_VoxelTile;
template <typename T> class UT_VoxelArray;
template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
template <typename T> class UT_VoxelProbeCube;
template <typename T> class UT_VoxelProbeFace;
80 
82 {
83 public:
85  {
86  myConstantTol = 0;
87  myQuantizeTol = 0;
88  myAllowFP16 = false;
89  }
90 
91  // Used for quantization.
93  {
96  };
97 
98  /// Tiles will be constant if within this range. This may
99  /// need to be tighter than quantization tolerance as
100  /// dithering can't recover partial values.
102  /// Tolerance for quantizing to reduced bit depth
104 
106 
107  /// Conversion to fpreal16, only valid for scalar data.
109 };
110 
111 ///
112 /// UT_VoxelTileCompress
113 ///
114 /// A compression engine for UT_VoxelTiles of a specific type. This
115 /// is a verb class which is invoked from the voxeltile class.
116 ///
117 template <typename T>
119 {
120 public:
123 
124  /// Attempts to write data directly to the compressed tile.
125  /// Returns false if not possible.
126  virtual bool writeThrough(UT_VoxelTile<T> &tile,
127  int x, int y, int z, T t) const = 0;
128 
129  /// Reads directly from the compressed data.
130  /// Cannot alter the tile in any way because it must be threadsafe.
131  virtual T getValue(const UT_VoxelTile<T> &tile,
132  int x, int y, int z) const = 0;
133 
134  /// Attempts to compress the data according to the given tolerance.
135  /// If succesful, returns true.
136  virtual bool tryCompress(UT_VoxelTile<T> &tile,
137  const UT_VoxelCompressOptions &options,
138  T min, T max) const = 0;
139 
140  /// Returns the length in bytes of the data in the tile.
141  /// It must be at least one byte long.
142  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
143 
144  /// Returns true if the compression type is lossless
145  virtual bool isLossless() const { return false; }
146 
147  /// Determines the min & max values of the tile. A default
148  /// implementation uses getValue() on all voxels.
149  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
150 
151  /// Does this engine support saving and loading?
152  virtual bool canSave() const { return false; }
153  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
154  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
155  { return false; }
156  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
157  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
158  { return false; }
159 
160  /// Returns the unique name of this compression engine so
161  /// we can look up engines by name (the index of the compression
162  /// engine is assigned at load time so isn't constant)
163  virtual const char *getName() = 0;
164 };
165 
177 
178 #define DEFINE_STD_FUNC(TYPE) \
179 inline void \
180 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
181 { \
182  if (v < min) \
183  min = v; \
184  else if (v > max) \
185  max = v; \
186 } \
187  \
188 inline fpreal \
189 UTvoxelTileDist(TYPE a, TYPE b) \
190 { \
191  return (fpreal) SYSabs(a - b); \
192 }
193 
202 
203 #undef DEFINE_STD_FUNC
204 
205 inline void
207 {
208  min.x() = SYSmin(v.x(), min.x());
209  max.x() = SYSmax(v.x(), max.x());
210 
211  min.y() = SYSmin(v.y(), min.y());
212  max.y() = SYSmax(v.y(), max.y());
213 }
214 
215 inline void
217 {
218  min.x() = SYSmin(v.x(), min.x());
219  max.x() = SYSmax(v.x(), max.x());
220 
221  min.y() = SYSmin(v.y(), min.y());
222  max.y() = SYSmax(v.y(), max.y());
223 
224  min.z() = SYSmin(v.z(), min.z());
225  max.z() = SYSmax(v.z(), max.z());
226 }
227 
228 inline void
230 {
231  min.x() = SYSmin(v.x(), min.x());
232  max.x() = SYSmax(v.x(), max.x());
233 
234  min.y() = SYSmin(v.y(), min.y());
235  max.y() = SYSmax(v.y(), max.y());
236 
237  min.z() = SYSmin(v.z(), min.z());
238  max.z() = SYSmax(v.z(), max.z());
239 
240  min.w() = SYSmin(v.w(), min.w());
241  max.w() = SYSmax(v.w(), max.w());
242 }
243 
244 inline fpreal
246 {
247  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
248 }
249 
250 inline fpreal
252 {
253  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
254  + SYSabs(a.z() - b.z());
255 }
256 
257 inline fpreal
259 {
260  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
261  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
262 }
263 
264 ///
265 /// UT_VoxelTile
266 ///
267 /// A UT_VoxelArray is composed of a number of these tiles. This is
268 /// done for two reasons:
269 /// 1) Increased memory locality when processing neighbouring points.
270 /// 2) Ability to compress or page out unneeded tiles.
271 /// Currently, the only special ability is the ability to create constant
272 /// tiles.
273 ///
274 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should be
275 /// usually transparent. The only exception may be if they want to do
276 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
277 ///
278 template <typename T>
279 class UT_VoxelTile
280 {
281 public:
282  UT_VoxelTile();
283  virtual ~UT_VoxelTile();
284 
285  // Copy constructor:
287 
288 
289  // Assignment operator:
291 
293  {
299  };
300 
301  /// Fetch a given local value. (x,y,z) should be local to
302  /// this tile.
303  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
304  {
305  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
306  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
307 
308  switch (myCompressionType)
309  {
310  case COMPRESS_RAW:
311  return ((T *)myData)[
312  ((z * myRes[1]) + y) * myRes[0] + x ];
313 
314  case COMPRESS_CONSTANT:
315  return rawConstVal();
316 
317  case COMPRESS_RAWFULL:
318  return ((T *)myData)[
319  ((z * TILESIZE) + y) * TILESIZE + x ];
320 
321  case COMPRESS_FPREAL16:
322  {
323  T result;
324  result = (((fpreal16 *)myData)[
325  ((z * myRes[1]) + y) * myRes[0] + x ]);
326  return result;
327  }
328  }
329 
330  // By default use the compression engine.
331  UT_VoxelTileCompress<T> *engine;
332 
333  engine = getCompressionEngine(myCompressionType);
334  return engine->getValue(*this, x, y, z);
335  }
336 
337  /// Lerps two numbers, templated to work with T.
339  {
340  return v1 + (v2 - v1) * bias;
341  }
342 
343  /// Does a trilinear interpolation. x,y,z should be local to this
344  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
345  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
346 
347  template <int AXIS2D>
348  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
349 
350  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
351  /// array should have 8 elements, x minor, z major.
352  /// Requires it is in bounds.
353  /// Returns true if all constant, in which case only a single
354  /// sample is filled, [0]
355  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
356  T *sample) const;
357  template <int AXIS2D>
358  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
359  T *sample) const;
360 
361  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
362  /// 7 samples.
363  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
364  T *sample) const;
365  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
366  /// 27 elements.
367  bool extractSampleCube(int x, int y, int z,
368  T *sample) const;
369 #if 0
370  /// MSVC can't handle aligned parameters after the third so
371  /// frac must come first.
372  T lerp(v4uf frac, int x, int y, int z) const;
373 #endif
374 
375  /// Returns a cached line to our internal data, at local address x,y,z.
376  /// cacheline is a caller allocated structure to fill out if we have
377  /// to decompress. If forcecopy isn't set and we can, the result may
378  /// be an internal pointer. stride is set to the update for moving one
379  /// x position in the cache.
380  /// strideofone should be set to true if you want to prevent 0 stride
381  /// results for constant tiles.
382  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
383 
384  /// Fills a cache line from an external buffer into our own data.
385  void writeCacheLine(T *cacheline, int y, int z);
386 
387  /// Copies between two tiles. The tile's voxels match up, but don't
388  /// have the same offset. The maximal overlapping voxels are copied.
389  /// this->setValue(dst, dsty, dstz, src(srcx, srcy, srcz));
390  void copyFragment(int dstx, int dsty, int dstz,
391  const UT_VoxelTile<T> &srctile,
392  int srcx, int srcy, int srcz);
393 
394  /// Flattens ourself into the given destination buffer.
395  template <typename S>
396  void flatten(S *dst, int dststride) const;
397 
398  /// Fills our values from the given dense flat buffer. Will
399  /// create a constant tile if the source is constant.
400  template <typename S>
401  void writeData(const S *src, int srcstride);
402 
403  /// The setData is intentionally seperate so we can avoid
404  /// expanding constant data when we write the same value to it.
405  void setValue(int x, int y, int z, T t);
406 
407  /// Finds the minimum and maximum T values
408  void findMinMax(T &min, T &max) const;
409 
410  /// Determines the average value of the tile.
411  void findAverage(T &avg) const;
412 
413  /// Returns if this tile is constant.
414  bool isConstant() const
415  { return myCompressionType == COMPRESS_CONSTANT; }
416 
417  /// Returns true if any NANs are in this tile
418  bool hasNan() const;
419 
420  /// Returns if this tile is in raw format.
421  bool isRaw() const
422  { return myCompressionType == COMPRESS_RAW; }
423 
424  /// Returns if this tile is in raw full format.
425  bool isRawFull() const
426  { return myCompressionType == COMPRESS_RAWFULL; }
427 
428  /// Returns true if this is a simple form of compression, either
429  /// constant, raw, or a raw full that isn't padded
430  bool isSimpleCompression() const
431  {
432  if (isRaw()) return true;
433  if (isConstant()) return true;
434  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
435  return true;
436  return false;
437  }
438 
439  /// Attempts to compress this tile. Returns true if any
440  /// compression performed.
441  bool tryCompress(const UT_VoxelCompressOptions &options);
442 
443  /// Turns this tile into a constant tile of the given value.
444  void makeConstant(T t);
445 
446  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
447  void makeFpreal16();
448 
449  /// Turns a compressed tile into a raw tile.
450  void uncompress();
451 
452  /// Turns a tile into a raw full tile.
453  void uncompressFull();
454 
455  /// Like uncompress() except it leaves the data uninitialized. Result
456  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
457  /// @note USE WITH CAUTION!
458  void makeRawUninitialized();
459 
460  /// Returns the raw full data of the tile.
462  {
463  uncompressFull();
464  return (T *)myData;
465  }
466 
467  /// This only makes sense for simple compression. Use with
468  /// extreme care.
470  { if (inlineConstant() && isConstant())
471  { return (T *) &myData; }
472  return (T *)myData; }
473  const T *rawData() const
474  { if (inlineConstant() && isConstant())
475  { return (const T *) &myData; }
476  return (const T *)myData; }
477 
478  /// Read the current resolution.
479  int xres() const { return myRes[0]; }
480  int yres() const { return myRes[1]; }
481  int zres() const { return myRes[2]; }
482 
483  int getRes(int dim) const { return myRes[dim]; }
484 
485 
486  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
487 
488  /// Returns the amount of memory used by this tile.
489  int64 getMemoryUsage(bool inclusive) const;
490 
491  /// Returns the amount of data used by the tile myData pointer.
492  exint getDataLength() const;
493 
494  /// A routine used by filtered evaluation to accumulated a partial
495  /// filtered sum in this tile.
496  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
497  /// weights - weight array
498  /// start - UT_VoxelArray coordinates at [0] in the weight array
499  void weightedSum(int pstart[3], int pend[3],
500  const float *weights[3], int start[3],
501  T &result);
502 
503  /// Designed to be specialized according to T
504 
505  /// Update min & max to encompass T itself.
506  static void expandMinMax(T v, T &min, T &max)
507  {
508  UTvoxelTileExpandMinMax(v, min, max);
509  }
510 
511  /// Return the "distance" of a & b. This is used for
512  /// tolerance checks on equality comparisons.
513  static fpreal dist(T a, T b)
514  {
515  return UTvoxelTileDist(a, b);
516  }
517 
519 
520  // Returns the index of the bound compression engine.
521  static int lookupCompressionEngine(const char *name);
522  // Given an index, gets the compression engine.
524 
525  /// Saves this tile's data, in compressed form.
526  /// May save in uncompressed form is the compression type does
527  /// not support saving.
528  void save(std::ostream &os) const;
529  bool save(UT_JSONWriter &w) const;
530 
531  /// Loads tile data. Uses the compression index to map the saved
532  /// compression types into the correct loading compression types.
533  void load(UT_IStream &is, const UT_IntArray &compression);
534  bool load(UT_JSONParser &p, const UT_IntArray &compression);
535 
536  /// Stores a list of compresson engines to os.
537  static void saveCompressionTypes(std::ostream &os);
538  static bool saveCompressionTypes(UT_JSONWriter &w);
539 
540  /// Builds a translation table from the given stream's compression types
541  /// into our own valid compression types.
542  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
543  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
544 
545 protected:
546  // Attempts to set the value to the native compressed format
547  // Some compression types allow some values to be written
548  // without decompression. Eg, you can write to a constant tile
549  // the tile's own value without decompression.
550  // If this returns true, t has been written.
551  bool writeThrough(int x, int y, int z, T t);
552 
553  /// Sets the local res of the tile. Does *not* resize the allocated
554  /// memory.
555  void setRes(int xr, int yr, int zr)
556  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
557 
559  {
560  return (sizeof(T) <= sizeof(T*));
561  }
562 
564  { if (inlineConstant()) { return *((const T *)&myData); }
565  return *((const T*)myData); }
567  { if (inlineConstant()) { return ((T *)&myData); }
568  return ((T*)myData); }
569 
570  void setForeignData(void *data, int8 compress_type)
571  {
572  freeData();
573  myCompressionType = compress_type;
574 
575  if (isConstant() && inlineConstant())
576  {
577  makeConstant(*(T *)data);
578  }
579  else
580  {
581  myData = data;
582  myForeignData = true;
583  }
584  }
585 
586 public:
587  /// Frees myData and sets it to zero. This is a bit tricky
588  /// as the constant tiles may be inlined.
589  /// This is only public for the compression engines.
591  {
592  if (inlineConstant() && isConstant())
593  {
594  // Do nothing!
595  }
596  else if (myData && !myForeignData)
597  {
599  }
600  myData = 0;
601  myForeignData = false;
602  }
603 
604 public:
605  // This is only public so the compression engines can get to it.
606  // It is blind data, do not alter!
607  void *myData;
608 private:
609 
610  /// Resolutions.
611  int8 myRes[3];
612 
613  /// Am I a constant tile?
614  int8 myCompressionType;
615 
616  int8 myForeignData;
617 
618  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
619  {
620  return UTvoxelTileGetCompressionEngines((T *) 0);
621  }
622 
623  friend class UT_VoxelTileCompress<T>;
624  friend class UT_VoxelArray<T>;
625  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
626  friend class UT_VoxelProbe;
627 };
628 
629 ///
630 /// UT_VoxelArray
631 ///
 632 /// This provides a data structure to hold a three-dimensional array
633 /// of data. The data should be some simple arithmetic type, such
634 /// as uint8, fpreal16, or UT_Vector3.
635 ///
 636 /// Some operations, such as gradients, may make less sense with uint8.
637 ///
638 template <typename T>
639 class UT_VoxelArray
640 {
641 public:
642  UT_VoxelArray();
643  virtual ~UT_VoxelArray();
644 
645  /// Copy constructor:
647 
648  /// Assignment operator:
650 
651  /// This sets the voxelarray to have the given resolution, resetting
652  /// all elements to 0.
653  void size(int xres, int yres, int zres);
654 
655  /// This will ensure this voxel array matches the given voxel array
656  /// in terms of dimensions & border conditions. It may invoke
657  /// a size() and hence reset the field to 0.
658  void match(const UT_VoxelArray<T> &src);
659 
660  template <typename S>
661  bool isMatching(const UT_VoxelArray<S> &src) const
662  {
663  return src.getXRes() == getXRes() &&
664  src.getYRes() == getYRes() &&
665  src.getZRes() == getZRes();
666  }
667 
668  int getXRes() const { return myRes[0]; }
669  int getYRes() const { return myRes[1]; }
670  int getZRes() const { return myRes[2]; }
671  int getRes(int axis) const { return myRes[axis]; }
672 
673  /// Return the amount of memory used by this array.
674  int64 getMemoryUsage(bool inclusive) const;
675 
676  /// Sets this voxel array to the given constant value. All tiles
677  /// are turned into constant tiles.
679  constant,
680  T, t)
681  void constantPartial(T t, const UT_JobInfo &info);
682 
683  /// If this voxel array is all constant tiles, returns true.
684  /// The optional pointer is initialized to the constant value iff
685  /// the array is constant. (Note by constant we mean made of constant
686  /// tiles of the same value - if some tiles are uncompressed but
687  /// constant, it will still return false)
688  bool isConstant(T *cval = 0) const;
689 
690  /// Returns true if any element of the voxel array is NAN
691  bool hasNan() const;
692 
 693  /// This convenience function lets you sample the voxel array.
694  /// pos is in the range [0..1]^3.
695  /// T value trilinearly interpolated. Edges are determined by the border
696  /// mode.
697  /// The cells are sampled at the center of the voxels.
698  T operator()(UT_Vector3D pos) const;
699  T operator()(UT_Vector3F pos) const;
700 
 701  /// This convenience function lets you sample the voxel array.
702  /// pos is in the range [0..1]^3.
703  /// The min/max is the range of the sampled values.
704  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
705  UT_Vector3F pos) const;
706 
707  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
708  /// Allows out of range evaluation
710  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
711  /// Allows out of range evaluation
712  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
713  float fx, float fy, float fz) const;
714  template <int AXIS2D>
716  template <int AXIS2D>
717  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
718  float fx, float fy, float fz) const;
719 
720  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
721  /// Allows out of range evaluation. Also computes min/max of
722  /// interpolated samples.
723  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
724  UT_Vector3F pos) const;
725  template <int AXIS2D>
726  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
727  UT_Vector3F pos) const;
728  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
729  /// Allows out of range evaluation. Also computes min/max of
730  /// interpolated samples.
732  T &lerp, T &lmin, T &lmax,
733  int x, int y, int z,
734  float fx, float fy, float fz) const;
735  template <int AXIS2D>
737  T &lerp, T &lmin, T &lmax,
738  int x, int y, int z,
739  float fx, float fy, float fz) const;
740 
741  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
742  /// array should have 8 elements, x minor, z major.
743  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
744  T *sample) const;
745  template <int AXIS2D>
746  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
747  T *sample) const;
748 
749  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
750  /// the center into 7 voxels.
751  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
752  T *sample) const;
753  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
754  /// z major, xminor.
755  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
756  T *sample) const;
757 
758  /// Lerps the given sample using trilinear interpolation
760  float fx, float fy, float fz) const;
761  template <int AXIS2D>
763  float fx, float fy, float fz) const;
764 
765  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
766  float &fx, float &fy, float &fz) const
767  {
768  // Determine integer & fractional components.
769  fx = pos.x();
770  SYSfastSplitFloat(fx, x);
771  fy = pos.y();
772  SYSfastSplitFloat(fy, y);
773  fz = pos.z();
774  SYSfastSplitFloat(fz, z);
775  }
776  template <int AXIS2D>
777  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
778  float &fx, float &fy, float &fz) const
779  {
780  // Determine integer & fractional components.
781  if (AXIS2D != 0)
782  {
783  fx = pos.x();
784  SYSfastSplitFloat(fx, x);
785  }
786  else
787  {
788  fx = 0.0;
789  x = 0;
790  }
791  if (AXIS2D != 1)
792  {
793  fy = pos.y();
794  SYSfastSplitFloat(fy, y);
795  }
796  else
797  {
798  fy = 0.0;
799  y = 0;
800  }
801  if (AXIS2D != 2)
802  {
803  fz = pos.z();
804  SYSfastSplitFloat(fz, z);
805  }
806  else
807  {
808  fz = 0.0;
809  z = 0;
810  }
811  }
812 #if 0
813  T operator()(v4uf pos) const;
814 #endif
815 
816  /// Filtered evaluation of the voxel array. This operation should
817  /// exhibit the same behavior as IMG3D_Channel::evaluate.
818  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
819  fpreal radius, int clampaxis = -1) const;
820 
821  /// Fills this by resampling the given voxel array.
822  void resample(const UT_VoxelArray<T> &src,
823  UT_FilterType filtertype = UT_FILTER_POINT,
824  float filterwidthscale = 1.0f,
825  int clampaxis = -1);
826 
827  /// Flattens this into an array. Z major, then Y, then X.
828  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
830  flatten,
831  T *, flatarray,
832  exint, ystride,
833  exint, zstride)
834  void flattenPartial(T *flatarray, exint ystride, exint zstride,
836 
837  /// Flattens this into an array suitable for a GL 8bit texture.
838  /// Z major, then Y, then X.
839  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
841  flattenGLFixed8,
842  uint8 *, flatarray,
843  exint, ystride,
844  exint, zstride,
845  T , dummy)
846  void flattenGLFixed8Partial(uint8 *flatarray,
847  exint ystride, exint zstride,
848  T dummy,
849  const UT_JobInfo &info) const;
850 
851  /// Flattens this into an array suitable for a GL 16bit FP texture.
852  /// Z major, then Y, then X.
853  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
854  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
855  flattenGL16F,
856  UT_Vector4H *, flatarray,
857  exint, ystride,
858  exint, zstride,
859  T , dummy)
860  void flattenGL16FPartial(UT_Vector4H *flatarray,
861  exint ystride, exint zstride,
862  T dummy,
863  const UT_JobInfo &info) const;
864 
865  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
866  /// this also works around an older Nvidia driver bug that caused very small
867  /// valued texels (<1e-9) to appear as huge random values in the texture.
868  /// Z major, then Y, then X.
869  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
870  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
871  flattenGL32F,
872  UT_Vector4F *, flatarray,
873  exint, ystride,
874  exint, zstride,
875  T , dummy)
876  void flattenGL32FPartial(UT_Vector4F *flatarray,
877  exint ystride, exint zstride,
878  T dummy,
879  const UT_JobInfo &info) const;
880 
881  /// Fills this from a flattened array. Z major, then Y, then X.
882  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride];
883  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
884  extractFromFlattened,
885  const T *, flatarray,
886  exint, ystride,
887  exint, zstride)
888  void extractFromFlattenedPartial(const T *flatarray,
889  exint ystride, exint zstride,
890  const UT_JobInfo &info);
891 
892  /// Copies into this voxel array from the source array.
893  /// Conceptually,
894  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz);
895  void copyWithOffset(const UT_VoxelArray<T> &src,
896  int offx, int offy, int offz);
897  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
898  copyWithOffsetInternal,
899  const UT_VoxelArray<T> &, src,
900  int, offx,
901  int, offy,
902  int, offz)
903  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
904  int offx, int offy, int offz,
905  const UT_JobInfo &info);
906 
907  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
908  /// in T. Data order is in tile-order. So, sorted by tilelist, then
909  /// z, y, x within that tile.
910  template <typename S>
911  S *extractTiles(S *dstdata, int stride,
912  const UT_IntArray &tilelist) const;
913 
914  /// Overwrites our tiles with the given data. Does checking
915  /// for constant tiles. Input srcdata stream should match
916  /// that of extractTiles.
917  template <typename S>
918  const S *writeTiles(const S *srcdata, int srcstride,
919  const UT_IntArray &tilelist);
920 
921  /// Converts a 3d position in range [0..1]^3 into the closest
922  /// index value.
923  /// Returns false if the resulting index was out of range. The index
924  /// will still be set.
925  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
 926  /// Converts a 3d position in [0..1]^3 into the equivalent in
927  /// the integer cell space. Does not clamp to the closest value.
928  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
929  /// Converts an index into a position.
930  /// Returns false if the source index was out of range, in which case
931  /// pos will be outside [0..1]^3
932  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
933  bool indexToPos(int x, int y, int z, UT_Vector3D &pos) const;
934  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
935  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
936 
937  /// Clamps the given x, y, and z values to lie inside the valid index
938  /// range.
939  void clampIndex(int &x, int &y, int &z) const
940  {
941  x = SYSclamp(x, 0, myRes[0]-1);
942  y = SYSclamp(y, 0, myRes[1]-1);
943  z = SYSclamp(z, 0, myRes[2]-1);
944  }
945 
946  /// Returns true if the given x, y, z values lie inside the valid index.
947  bool isValidIndex(int x, int y, int z) const
948  {
949  return !((x | y | z) < 0) &&
950  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
951  }
952 
953  /// This allows you to read & write the raw data.
954  /// Out of bound reads are illegal.
955  T operator()(int x, int y, int z) const
956  {
957  UT_ASSERT_P(isValidIndex(x, y, z));
958  return (*getTile(x >> TILEBITS,
959  y >> TILEBITS,
960  z >> TILEBITS))
961  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
962  }
963  void setValue(int x, int y, int z, T t)
964  {
965  UT_ASSERT_P(isValidIndex(x, y, z));
966  getTile(x >> TILEBITS,
967  y >> TILEBITS,
968  z >> TILEBITS)->setValue(
969  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
970  }
971 
972  /// This will clamp the bounds to fit within the voxel array,
973  /// using the border type to resolve out of range values.
974  T getValue(int x, int y, int z) const
975  {
976  // First handle the most common case.
977  if (isValidIndex(x, y, z))
978  return (*this)(x, y, z);
979 
980  // Verify our voxel array is non-empty.
981  if (!myTiles)
982  return myBorderValue;
983 
984  // We now know we are out of range, adjust appropriately
985  switch (myBorderType)
986  {
988  return myBorderValue;
989 
991  if (x < 0 || x >= myRes[0])
992  {
993  x %= myRes[0];
994  if (x < 0)
995  x += myRes[0];
996  }
997  if (y < 0 || y >= myRes[1])
998  {
999  y %= myRes[1];
1000  if (y < 0)
1001  y += myRes[1];
1002  }
1003  if (z < 0 || z >= myRes[2])
1004  {
1005  z %= myRes[2];
1006  if (z < 0)
1007  z += myRes[2];
1008  }
1009  break;
1010 
1011  case UT_VOXELBORDER_STREAK:
1012  clampIndex(x, y, z);
1013  break;
1014  case UT_VOXELBORDER_EXTRAP:
1015  {
1016  int cx, cy, cz;
1017  T result;
1018 
1019  cx = x; cy = y; cz = z;
1020  clampIndex(cx, cy, cz);
1021 
1022  result = (*this)(cx, cy, cz);
1023  result += (x - cx) * myBorderScale[0] +
1024  (y - cy) * myBorderScale[1] +
1025  (z - cz) * myBorderScale[2];
1026  return result;
1027  }
1028  }
1029 
1030  // It is now within bounds, do normal fetch.
1031  return (*this)(x, y, z);
1032  }
1033 
1035  void setBorderScale(T scalex, T scaley, T scalez);
1036  UT_VoxelBorderType getBorder() const { return myBorderType; }
1037  T getBorderValue() const { return myBorderValue; }
1038  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1039 
1040  /// This tries to compress or collapse each tile. This can
1041  /// be expensive (ie, converting a tile to constant), so
1042  /// should be saved until modifications are complete.
1044  collapseAllTiles)
1045  void collapseAllTilesPartial(const UT_JobInfo &info);
1046 
1047  /// Uncompresses all tiles into non-constant tiles. Useful
1048  /// if you have a multithreaded algorithm that may need to
1049  /// both read and write, if you write to a collapsed tile
1050  /// while someone else reads from it, bad stuff happens.
1051  /// Instead, you can expandAllTiles. This may have serious
1052  /// consequences in memory use, however.
1054  expandAllTiles)
1055  void expandAllTilesPartial(const UT_JobInfo &info);
1056 
1057  /// Uncompresses all tiles, but leaves constant tiles alone.
1058  /// Useful for cleaning out any non-standard compression algorithm
1059  /// that some external program can't handle.
1060  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1061  expandAllNonConstTiles)
1062  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1063 
1064  /// The direct tile access methods are to make TBF writing a bit
1065  /// more efficient.
1066  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1067  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1069  { return &myTiles[idx]; }
1070  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1071  {
1072  x = idx % myTileRes[0];
1073  idx -= x;
1074  idx /= myTileRes[0];
1075  y = idx % myTileRes[1];
1076  idx -= y;
1077  idx /= myTileRes[1];
1078  z = idx;
1079  }
1080  int xyzTileToLinear(int x, int y, int z) const
1081  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1082 
1083  int indexToLinearTile(int x, int y, int z) const
1084  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1085 
1086  /// idxth tile represents the voxels indexed [start,end).
1087  void getTileVoxels(int idx,
1088  UT_Vector3I &start, UT_Vector3I &end) const
1089  {
1090  int x, y, z;
1091  linearTileToXYZ(idx, x, y, z);
1092 
1093  start.x() = x * TILESIZE;
1094  start.y() = y * TILESIZE;
1095  start.z() = z * TILESIZE;
1096  end = start;
1097  end.x() += myTiles[idx].xres();
1098  end.y() += myTiles[idx].yres();
1099  end.z() += myTiles[idx].zres();
1100  }
1101 
1103  {
1105  getTileVoxels(idx, start, end);
1106  return UT_BoundingBoxI(start, end);
1107  }
1108 
1109  /// Number of tiles along that axis. Not to be confused with
1110  /// the resolution of the individual tiles.
1111  int getTileRes(int dim) const { return myTileRes[dim]; }
1112  int numTiles() const
1113  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1114  exint numVoxels() const
1115  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1116 
1118  { myCompressionOptions = options; }
1120  { return myCompressionOptions; }
1121 
1123  { myCompressionOptions.myConstantTol = tol; }
1125  { return myCompressionOptions.myConstantTol; }
1126 
1127  /// Saves only the data of this array to the given stream.
1128  /// To reload it you will have to have a matching array in tiles
1129  /// dimensions and size.
1130  void saveData(std::ostream &os) const;
1131  bool saveData(UT_JSONWriter &w,
1132  const char *shared_mem_owner = 0) const;
1133 
1134  /// Load an array, requires you have already size()d this array.
1135  void loadData(UT_IStream &is);
1136  bool loadData(UT_JSONParser &p);
1137 
1138  /// Copy only the data from the source array.
1139  /// Note that it is an error to call this unless isMatching(src).
1141  copyData,
1142  const UT_VoxelArray<T> &, src)
1143 
1144  void copyDataPartial(const UT_VoxelArray<T> &src,
1145  const UT_JobInfo &info);
1146 
1147 private:
1149  resamplethread,
1150  const UT_VoxelArray<T> &, src,
1151  const UT_Filter *, filter,
1152  float, radius,
1153  int, clampaxis)
1154  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1155  const UT_Filter *filter,
1156  float radius,
1157  int clampaxis,
1158  const UT_JobInfo &info);
1159 
1160 
1161  void deleteVoxels();
1162 
1163  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1164  bool populateFromSharedMemory(const char *id);
1165 
1166 
1167  /// Number of elements in each dimension.
1168  int myRes[3];
1169 
1170  /// Inverse tile res, 1/myRes
1171  UT_Vector3 myInvRes;
1172 
1173  /// Number of tiles in each dimension.
1174  int myTileRes[3];
1175 
1176  /// Compression tolerance for lossy compression.
1177  UT_VoxelCompressOptions myCompressionOptions;
1178 
1179  /// Double dereferenced so we can theoretically resize easily.
1180  UT_VoxelTile<T> *myTiles;
1181 
1182  /// Outside values get this if constant borders are used
1183  T myBorderValue;
1184  /// Per axis scale factors for when extrapolating.
1185  T myBorderScale[3];
1186  UT_VoxelBorderType myBorderType;
1187 
1188  /// For initializing the tiles from shared memory.
1189  SYS_SharedMemory *mySharedMem;
1190  SYS_SharedMemoryView *mySharedMemView;
1191 };
1192 
1193 
1194 ///
1195 /// UT_VoxelMipMap
1196 ///
1197 /// This provides a mip-map type structure for a voxel array.
1198 /// It manages the different levels of voxels arrays that are needed.
1199 /// You can create different types of mip maps: average, maximum, etc,
1200 /// which can allow different tricks.
1201 /// Each level is one half the previous level, rounded up.
1202 /// Out of bound voxels are ignored from the lower levels.
1203 ///
1204 template <typename T>
1206 {
1207 public:
1208  /// The different types of functions that can be used for
1209  /// constructing a mip map.
1210  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1211 
1212  UT_VoxelMipMap();
1213  virtual ~UT_VoxelMipMap();
1214 
1215  /// Copy constructor.
1216  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1217 
1218  /// Assignment operator:
1219  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1220 
1221  /// Builds from a given voxel array. The ownership flag determines
1222  /// if we gain ownership of the voxel array and should delete it.
1223  /// In any case, the new levels are owned by us.
1224  void build(UT_VoxelArray<T> *baselevel,
1225  mipmaptype function);
1226 
1227  /// Same as above but construct mipmaps simultaneously for more than
1228  /// one function. The order of the functions will correspond to the
1229  /// order of the data values passed to the traversal callback.
1230  void build(UT_VoxelArray<T> *baselevel,
1231  const UT_Array<mipmaptype> &functions);
1232 
1233  /// This does a top down traversal of the implicit octree defined
1234  /// by the voxel array. Returning false will abort that
1235  /// branch of the octree.
1236  /// The bounding box given is in cell space and is an exclusive
1237  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1238  /// Note that each bounding box will not be square, unless you
1239  /// have the good fortune of starting with a power of 2 cube.
 1240  /// The boolean goes true when the callback is invoked on a
1241  /// base level.
1242  typedef bool (*Callback)(const T *funcs,
1243  const UT_BoundingBox &box,
1244  bool baselevel, void *data);
1245  void traverseTopDown(Callback function,
1246  void *data) const;
1247 
1248  /// Top down traversal on op. OP is invoked with
1249  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1250  ///
1251  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1252  /// level 0 means the base level.
1253  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1254  /// gives the index to extract the value from level..
1255  template <typename OP>
1256  void traverseTopDown(OP&op) const;
1257 
1258 
1259  /// Top down traversal, but which quad tree is visited first
1260  /// is controlled by
1261  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1262  /// Lower values are visited first.
1263  template <typename OP>
1264  void traverseTopDownSorted(OP&op) const;
1265 
1266 
1267  /// Return the amount of memory used by this mipmap.
1268  int64 getMemoryUsage(bool inclusive) const;
1269 
1270  int numLevels() const { return myNumLevels+1; }
1271 
1272  /// level 0 is the original grid, each level higher is a power
1273  /// of two smaller.
1274  const UT_VoxelArray<T> *level(int level, int function) const
1275  {
1276  if (level == 0)
1277  return myBaseLevel;
1278 
1279  return myLevels(function)[numLevels() - 1 - level];
1280  }
1281 
1282 private:
1283  void doTraverse(int x, int y, int z, int level,
1284  Callback function,
1285  void *data) const;
1286 
1287  /// Note: This variant of doTraverse has the opposite sense of level!
1288  template <typename OP>
1289  void doTraverse(int x, int y, int z, int level,
1290  OP &op) const;
1291  template <typename OP>
1292  void doTraverseSorted(int x, int y, int z, int level,
1293  OP &op) const;
1294 
1295  void initializePrivate();
1296  void destroyPrivate();
1297 
1298  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1299  downsample,
1300  UT_VoxelArray<T> &, dst,
1301  const UT_VoxelArray<T> &, src,
1302  mipmaptype, function)
1303  void downsamplePartial(UT_VoxelArray<T> &dst,
1304  const UT_VoxelArray<T> &src,
1305  mipmaptype function,
1306  const UT_JobInfo &info);
1307 
1308 protected:
1309  T mixValues(T t1, T t2, mipmaptype function) const
1310  {
1311  switch (function)
1312  {
1313  case MIPMAP_MAXIMUM:
1314  return SYSmax(t1, t2);
1315 
1316  case MIPMAP_AVERAGE:
1317  return (t1 + t2) / 2;
1318 
1319  case MIPMAP_MINIMUM:
1320  return SYSmin(t1, t2);
1321  }
1322 
1323  return t1;
1324  }
1325 
1326 
1327  /// This stores the base most level that was provided
1328  /// externally.
1329  UT_VoxelArray<T> *myBaseLevel;
1330  /// If true, we will delete the base level when we are done.
1332 
1333  /// Tracks the number of levels which we used to represent
1334  /// this hierarchy.
1336  /// The array of VoxelArrays, one per level.
1337  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1338  /// as big in each each dimension. However, every layer is clamped
1339  /// against the resolution of the base layer.
1340  /// We own all these layers.
1342 };
1343 
1344 
1345 /// Iterator for Voxel Arrays
1346 ///
1347 /// This class eliminates the need for having
1348 /// for (z = 0; z < zres; z++)
1349 /// ...
1350 /// for (x = 0; x < xres; x++)
1351 /// loops everywhere.
1352 ///
1353 /// Note that the order of iteration is undefined! (The actual order is
1354 /// to complete each tile in turn, thereby hopefully improving cache
1355 /// coherency)
1356 ///
1357 /// It is safe to write to the voxel array while this iterator is active.
1358 /// It is *not* safe to resize the voxel array (or destroy it)
1359 ///
 1360  /// The iterator is similar in principle to an STL iterator, but somewhat
1361 /// simpler. The classic STL loop
1362 /// for ( it = begin(); it != end(); ++it )
1363 /// is done using
1364 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1365 ///
1366 template <typename T>
1368 {
1369 public:
1373  virtual ~UT_VoxelArrayIterator();
1374 
1376  {
1377  myCurTile = -1;
1378  myHandle.resetHandle();
1379  myArray = vox;
1380  // Reset the range
1381  setPartialRange(0, 1);
1382  }
1384  {
1385  setArray((UT_VoxelArray<T> *) vox);
1386  }
1387 
1388  /// Iterates over the array pointed to by the handle. Only
1389  /// supports read access during the iteration as it does
1390  /// a read lock.
1392  {
1393  myHandle = handle;
1394  // Ideally we'd have a separate const iterator
1395  // from our non-const iterator so this would
1396  // only be exposed in the const version.
1397  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1398 
1399  // Reset our range.
1400  myCurTile = -1;
1401  setPartialRange(0, 1);
1402  }
1403 
1404 
1405  /// Restricts this iterator to only run over a subset
1406  /// of the tiles. The tiles will be divided into approximately
1407  /// numrange equal groups, this will be the idx'th.
1408  /// The resulting iterator may have zero tiles.
1409  void setPartialRange(int idx, int numranges);
1410 
1411  /// Ties this iterator to the given jobinfo so it will
1412  /// match the jobinfo's processing.
1413  void splitByTile(const UT_JobInfo &info);
1414 
1415  /// Assigns an interrupt handler. This will be tested whenever
1416  /// it advances to a new tile. If it is interrupted, the iterator
1417  /// will jump forward to atEnd()
1418  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1419  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1420 
1421  /// Restricts this iterator to the tiles that intersect
1422  /// the given bounding box of voxel coordinates.
1423  /// Note that this will not be a precise restriction as
1424  /// each tile is either included or not.
1425  /// You should setPartialRange() after setting the bbox range
1426  /// The bounding box is on the [0..1]^3 range.
1427  void restrictToBBox(const UT_BoundingBox &bbox);
1428  /// The [xmin, xmax] are inclusive and measured in voxels.
1429  void restrictToBBox(int xmin, int xmax,
1430  int ymin, int ymax,
1431  int zmin, int zmax);
1432 
1433  /// Resets the iterator to point to the first voxel.
1434  void rewind();
1435 
1436  /// Returns true if we have iterated over all of the voxels.
1437  bool atEnd() const
1438  { return myCurTile < 0; }
1439 
1440  /// Advances the iterator to point to the next voxel.
1441  void advance()
1442  {
1443  // We try to advance each axis, rolling over to the next.
1444  // If we exhaust this tile, we call advanceTile.
1445  myPos[0]++;
1446  myTileLocalPos[0]++;
1447  if (myTileLocalPos[0] >= myTileSize[0])
1448  {
1449  // Wrapped in X.
1450  myPos[0] -= myTileLocalPos[0];
1451  myTileLocalPos[0] = 0;
1452 
1453  myPos[1]++;
1454  myTileLocalPos[1]++;
1455  if (myTileLocalPos[1] >= myTileSize[1])
1456  {
1457  // Wrapped in Y.
1458  myPos[1] -= myTileLocalPos[1];
1459  myTileLocalPos[1] = 0;
1460 
1461  myPos[2]++;
1462  myTileLocalPos[2]++;
1463  if (myTileLocalPos[2] >= myTileSize[2])
1464  {
1465  // Wrapped in Z! Finished this tile!
1466  advanceTile();
1467  }
1468  }
1469  }
1470  }
1471 
1472  /// Retrieve the current location of the iterator.
1473  int x() const { return myPos[0]; }
1474  int y() const { return myPos[1]; }
1475  int z() const { return myPos[2]; }
1476  int idx(int idx) const { return myPos[idx]; }
1477 
1478  /// Retrieves the value that we are currently pointing at.
1479  /// This is faster than an operator(x,y,z) as we already know
1480  /// our current tile and that bounds checking isn't needed.
1481  T getValue() const
1482  {
1483  UT_ASSERT_P(myCurTile >= 0);
1484 
1485  UT_VoxelTile<T> *tile;
1486 
1487  tile = myArray->getLinearTile(myCurTile);
1488  return (*tile)(myTileLocalPos[0],
1489  myTileLocalPos[1],
1490  myTileLocalPos[2]);
1491  }
1492 
1493  /// Sets the voxel we are currently pointing to the given value.
1494  void setValue(T t) const
1495  {
1496  UT_ASSERT_P(myCurTile >= 0);
1497 
1498  UT_VoxelTile<T> *tile;
1499 
1500  tile = myArray->getLinearTile(myCurTile);
1501 
1502  tile->setValue(myTileLocalPos[0],
1503  myTileLocalPos[1],
1504  myTileLocalPos[2], t);
1505  }
1506 
1507  /// Returns true if the tile we are currently in is a constant tile.
1508  bool isTileConstant() const
1509  {
1510  UT_ASSERT_P(myCurTile >= 0);
1511 
1512  UT_VoxelTile<T> *tile;
1513 
1514  tile = myArray->getLinearTile(myCurTile);
1515  return tile->isConstant();
1516  }
1517 
1518  /// This tile will iterate over the voxels indexed [start,end).
1520  {
1521  start.x() = myTilePos[0] * TILESIZE;
1522  start.y() = myTilePos[1] * TILESIZE;
1523  start.z() = myTilePos[2] * TILESIZE;
1524  end = start;
1525  end.x() += myTileSize[0];
1526  end.y() += myTileSize[1];
1527  end.z() += myTileSize[2];
1528  }
1529 
1530  /// This tile will iterate over the *inclusive* voxels indexed
 1531  /// in the returned bounding box.
1533  {
1535  getTileVoxels(start, end);
1536  return UT_BoundingBoxI(start, end);
1537  }
1538 
1539  /// Returns true if we are at the start of a new tile.
1540  bool isStartOfTile() const
1541  { return !(myTileLocalPos[0] ||
1542  myTileLocalPos[1] ||
1543  myTileLocalPos[2]); }
1544 
1545  /// Returns the VoxelTile we are currently processing
1547  {
1548  UT_ASSERT_P(myCurTile >= 0);
1549  return myArray->getLinearTile(myCurTile);
1550  }
1551  int getLinearTileNum() const
1552  {
1553  return myCurTile;
1554  }
1555 
1556  /// Advances the iterator to point to the next tile. Useful if the
1557  /// constant test showed that you didn't need to deal with this one.
1558  void advanceTile();
1559 
1560  /// Advances the iterator to pointing just before the next tile so
1561  /// the next advance() will be an advanceTile(). This is useful
1562  /// if you want to do a continue; as your break but the forloop
1563  /// is doing advance()
1564  /// Note the iterator is in a bad state until advance() is called.
1565  void skipToEndOfTile();
1566 
1567  /// Sets a flag which causes the iterator to tryCompress()
1568  /// tiles when it is done with them.
1569  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1570  void setCompressOnExit(bool shouldcompress)
1571  { myShouldCompressOnExit = shouldcompress; }
1572 
1573  /// These templated algorithms are designed to apply simple operations
1574  /// across all of the voxels with as little overhead as possible.
1575  /// The iterator should already point to a voxel array and if multithreaded
1576  /// had its partial range set. The source arrays must be matching size.
1577  /// The operator should support a () operator, and the result is
1578  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...);
1579  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1580  /// Note if both source and destination tiles are constant, only
1581  /// a single operation is invoked.
1582  template <typename OP>
1583  void applyOperation(OP &op);
1584  template <typename OP, typename S>
1585  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1586  template <typename OP>
1587  void applyOperation(OP &op, T a);
1588  template <typename OP, typename S, typename R>
1589  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1590  const UT_VoxelArray<R> &b);
1591  template <typename OP, typename S, typename R, typename Q>
1592  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1593  const UT_VoxelArray<R> &b,
1594  const UT_VoxelArray<Q> &c);
1595  /// These variants will invoke op.isNoop(a, b, ...) which will return
1596  /// true if those values won't affect the destination. This allows
1597  /// constant source tiles to be skipped, for example when adding
1598  /// 0.
1599  template <typename OP, typename S>
1600  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1601  template <typename OP>
1602  void applyOperationCheckNoop(OP &op, T a);
1603 
1604  /// Assign operation works like apply operation, but *this is written
1605  /// to without reading, so there is one less parameter to the ()
1606  /// callback. This can optimize constant tile writes as the
1607  /// constant() status of the destination doesn't matter.
1608  template <typename OP, typename S>
1609  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1610  template <typename OP, typename S, typename R>
1611  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1612  const UT_VoxelArray<R> &b);
1613  template <typename OP, typename S, typename R, typename Q>
1614  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1615  const UT_VoxelArray<R> &b,
1616  const UT_VoxelArray<Q> &c);
1617 
1618  /// Reduction operators.
1619  /// op.reduce(T a) called for each voxel, *but*,
1620  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1621  template <typename OP>
1622  void reduceOperation(OP &op);
1623 
1624  UT_VoxelArray<T> *getArray() const { return myArray; }
1625 
1626 protected:
1627  /// The array we belong to.
1629  /// The handle that we have locked to get our array. It is null
1630  /// by default which makes the lock/unlock nops.
1632 
1633  /// Absolute index into voxel array.
1634  int myPos[3];
1635 
1636  /// Flag determining if we should compress tiles whenever we
1637  /// advance out of them.
1639 
1642 
1643 public:
1644  /// Our current linear tile idx. A value of -1 implies at end.
1646 
1647  /// Our current index into the tile list
1649 
1650  /// Our start & end tiles for processing a subrange.
1651  /// The tile range is half open [start, end)
1652  int myTileStart, myTileEnd;
1653 
1654  /// Which tile we are as per tx,ty,tz rather than linear index.
1655  int myTilePos[3];
1656 
1657  /// Our position within the current tile.
1658  int myTileLocalPos[3];
1659 
1660  /// The size of the current tile
1661  int myTileSize[3];
1662 
1663  /// The job info to use for tilefetching
1665 
1667 };
1668 
1669 /// Iterator for tiles inside Voxel Arrays
1670 ///
1671 /// This class eliminates the need for having
1672 /// for (z = 0; z < zres; z++)
1673 /// ...
1674 /// for (x = 0; x < xres; x++)
1675 /// loops everywhere.
1676 ///
 1677  /// The iterator is similar in principle to an STL iterator, but somewhat
1678 /// simpler. The classic STL loop
1679 /// for ( it = begin(); it != end(); ++it )
1680 /// is done using
1681 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1682 ///
1683 template <typename T>
1685 {
1686 public:
1689  template <typename S>
1691  UT_VoxelArray<T> *array);
1692  virtual ~UT_VoxelTileIterator();
1693 
1694  template <typename S>
1696  UT_VoxelArray<T> *array)
1697  {
1698  UT_ASSERT_P(vit.isStartOfTile());
1699  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1700  myArray = array;
1701  myTileStart[0] = vit.x();
1702  myTileStart[1] = vit.y();
1703  myTileStart[2] = vit.z();
1704  }
1705 
1707  {
1708  setTile(vit, vit.getArray());
1709  }
1710 
1711  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1712  {
1713  myCurTile = array->getLinearTile(lineartilenum);
1714  myArray = array;
1715 
1716  array->linearTileToXYZ(lineartilenum,
1717  myTileStart[0], myTileStart[1], myTileStart[2]);
1718  myTileStart[0] <<= TILEBITS;
1719  myTileStart[1] <<= TILEBITS;
1720  myTileStart[2] <<= TILEBITS;
1721  }
1722 
1723  /// Resets the iterator to point to the first voxel.
1724  void rewind();
1725 
1726  /// Returns true if we have iterated over all of the voxels.
1727  bool atEnd() const
1728  { return myCurTile == 0 || myAtEnd; }
1729 
1730  /// Advances the iterator to point to the next voxel.
1731  void advance()
1732  {
1733  // We try to advance each axis, rolling over to the next.
1734  // If we exhaust this tile, we call advanceTile.
1735  myPos[0]++;
1736  myTileLocalPos[0]++;
1737  if (myTileLocalPos[0] >= myTileSize[0])
1738  {
1739  // Wrapped in X.
1740  myPos[0] -= myTileLocalPos[0];
1741  myTileLocalPos[0] = 0;
1742 
1743  myPos[1]++;
1744  myTileLocalPos[1]++;
1745  if (myTileLocalPos[1] >= myTileSize[1])
1746  {
1747  // Wrapped in Y.
1748  myPos[1] -= myTileLocalPos[1];
1749  myTileLocalPos[1] = 0;
1750 
1751  myPos[2]++;
1752  myTileLocalPos[2]++;
1753  if (myTileLocalPos[2] >= myTileSize[2])
1754  {
1755  // Wrapped in Z! Finished this tile!
1756  advanceTile();
1757  }
1758  }
1759  }
1760  }
1761 
1762  /// Retrieve the current location of the iterator, in the
1763  /// containing voxel array, not in the tile.
1764  int x() const { return myPos[0]; }
1765  int y() const { return myPos[1]; }
1766  int z() const { return myPos[2]; }
1767  int idx(int idx) const { return myPos[idx]; }
1768 
1769  /// Retrieves the value that we are currently pointing at.
1770  /// This is faster than an operator(x,y,z) as we already know
1771  /// our current tile and that bounds checking isn't needed.
1772  T getValue() const
1773  {
1774  UT_ASSERT_P(myCurTile);
1775 
1776  return (*myCurTile)(myTileLocalPos[0],
1777  myTileLocalPos[1],
1778  myTileLocalPos[2]);
1779  }
1780 
1781  /// Sets the voxel we are currently pointing to the given value.
1782  void setValue(T t) const
1783  {
1784  UT_ASSERT_P(myCurTile);
1785 
1786  myCurTile->setValue(myTileLocalPos[0],
1787  myTileLocalPos[1],
1788  myTileLocalPos[2], t);
1789  }
1790 
1791  /// Returns true if the tile we are currently in is a constant tile.
1792  bool isTileConstant() const
1793  {
1794  UT_ASSERT_P(myCurTile);
1795 
1796  return myCurTile->isConstant();
1797  }
1798 
1799  /// Returns true if we are at the start of a new tile.
1800  bool isStartOfTile() const
1801  { return !(myTileLocalPos[0] ||
1802  myTileLocalPos[1] ||
1803  myTileLocalPos[2]); }
1804 
1805  /// Returns the VoxelTile we are currently processing
1807  {
1808  return myCurTile;
1809  }
1810 
1811  /// Advances the iterator to point to the next tile. Since
1812  /// we are restricted to one tile, effectively just ends the iterator.
1813  void advanceTile();
1814 
1815  /// Sets a flag which causes the iterator to tryCompress()
1816  /// tiles when it is done with them.
1817  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1818  void setCompressOnExit(bool shouldcompress)
1819  { myShouldCompressOnExit = shouldcompress; }
1820 
1821  /// Reduction operators.
1822  /// op.reduce(T a) called for each voxel, *but*,
1823  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1824  /// Early exits if op.reduce() returns false.
1825  template <typename OP>
1826  bool reduceOperation(OP &op);
1827 
1828 protected:
1829  /// Current processing tile
1832 
1833  /// Absolute index into voxel array.
1834  int myPos[3];
1835  /// Absolute index of start of tile
1836  int myTileStart[3];
1837 
1838  /// Flag determining if we should compress tiles whenever we
1839  /// advance out of them.
1841 
1842  /// Since we want to allow multiple passes, we can't
1843  /// clear out myCurTile when we hit the end.
1844  bool myAtEnd;
1845 
1846 public:
1847  /// Our position within the current tile.
1848  int myTileLocalPos[3];
1849 
1850  /// The size of the current tile
1851  int myTileSize[3];
1852 };
1853 
1854 /// Probe for Voxel Arrays
1855 ///
1856 /// This class is designed to allow for efficient evaluation
1857 /// of aligned indices of a voxel array, provided the voxels are iterated
1858 /// in a tile-by-tile, x-inner most, manner.
1859 ///
1860 /// This class will create a local copy of the voxel data where needed,
1861 /// uncompressing the information once for every 16 queries. It will
1862 /// also create an aligned buffer so you can safely use v4uf on fpreal32
1863 /// data.
1864 ///
1865 /// For queries where you need surrounding values, the prex and postx can
1866 /// specify padding on the probe. prex should be -1 to allow reading
1867 /// -1 offset, postx 1 to allow reading a 1 offset.
1868 ///
1869 
1870 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1871 class UT_VoxelProbe
1872 {
1873 public:
1874  UT_VoxelProbe();
1875  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1876  virtual ~UT_VoxelProbe();
1877 
1878  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1880  int prex = 0, int postx = 0)
1881  {
1882  SYS_STATIC_ASSERT(DoWrite == false);
1883  setArray((UT_VoxelArray<T> *)vox, prex, postx);
1884  }
1885 
1886  UT_VoxelArray<T> *getArray() const { return myArray; }
1887 
1888  bool isValid() const { return myArray != 0; }
1889 
1890  inline T getValue() const
1891  {
1892  return *myCurLine;
1893  }
1894  inline T getValue(int offset) const
1895  {
1896  return myCurLine[myStride*offset];
1897  }
1898 
1899  inline void setValue(T value)
1900  {
1901  UT_ASSERT_P(DoWrite);
1902  *myCurLine = value;
1903  if (TestForWrites)
1904  myDirty = true;
1905  }
1906 
1907 
1908  /// Resets where we currently point to.
1909  /// Returns true if we had to reset our cache line. If we didn't,
1910  /// and you have multiple probes acting in-step, you can just
1911  /// advanceX() the other probes
1912  template <typename S>
1914  { return setIndex(vit.x(), vit.y(), vit.z()); }
1915  template <typename S>
1917  { return setIndex(vit.x(), vit.y(), vit.z()); }
1918 
1919  bool setIndex(int x, int y, int z);
1920 
1921  /// Blindly advances our current pointer.
1922  inline void advanceX()
1923  {
1924  myCurLine += myStride;
1925  myX++;
1926  UT_ASSERT_P(myX < myMaxValidX);
1927  }
1928 
1929  /// Adjusts our current pointer to the given absolute location,
1930  /// assumes the new value is inside our valid range.
1931  inline void resetX(int x)
1932  {
1933  myCurLine += myStride * (x - myX);
1934  myX = x;
1935  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
1936  }
1937 
1938 protected:
1939  void reloadCache(int x, int y, int z);
1940 
1941  void writeCacheLine();
1942 
1943  void buildConstantCache(T value);
1944 
1946  /// myCacheLine[0] is the start of the cache line, so -1 would be
1947  /// the first pre-rolled value
1949  /// Where we actually allocated our cache line, aligned to 4x multiple
1950  /// to ensure SSE compatible.
1952 
1953  int myX, myY, myZ;
1954  int myPreX, myPostX;
1957  /// Half inclusive [,) range of valid x queries for current cache.
1958  int myMinValidX, myMaxValidX;
1959 
1960  /// Determines if we have anything to write back, only
1961  /// valid if TestForWrites is enabled.
1962  bool myDirty;
1963 
1965 
1966  friend class UT_VoxelProbeCube<T>;
1967  friend class UT_VoxelProbeFace<T>;
1968 };
1969 
1970 ///
1971 /// The vector probe is three normal probes into separate voxel arrays
1972 /// making it easier to read and write to aligned vector fields.
1973 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
1974 ///
1975 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1977 {
1978 public:
1980  { }
1982  { setArray(vx, vy, vz); }
1984  {}
1985 
1987  {
1988  myLines[0].setArray(vx);
1989  myLines[1].setArray(vy);
1990  myLines[2].setArray(vz);
1991  }
1992  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
1993  {
1994  SYS_STATIC_ASSERT(DoWrite == false);
1995  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
1996  }
1997 
1998  inline UT_Vector3 getValue() const
1999  {
2000  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2001  }
2002  inline T getValue(int axis) const
2003  {
2004  return myLines[axis].getValue();
2005  }
2006 
2007  inline void setValue(const UT_Vector3 &v)
2008  {
2009  myLines[0].setValue(v.x());
2010  myLines[1].setValue(v.y());
2011  myLines[2].setValue(v.z());
2012  }
2013 
2014  inline void setComponent(int axis, T val)
2015  {
2016  myLines[axis].setValue(val);
2017  }
2018 
2019  /// Resets where we currently point to.
2020  /// Returns true if we had to reset our cache line. If we didn't,
2021  /// and you have multiple probes acting in-step, you can just
2022  /// advanceX() the other probes
2023  template <typename S>
2025  { return setIndex(vit.x(), vit.y(), vit.z()); }
2026  template <typename S>
2028  { return setIndex(vit.x(), vit.y(), vit.z()); }
2029 
2030  bool setIndex(int x, int y, int z)
2031  {
2032  if (myLines[0].setIndex(x, y, z))
2033  {
2034  myLines[1].setIndex(x, y, z);
2035  myLines[2].setIndex(x, y, z);
2036  return true;
2037  }
2038  myLines[1].advanceX();
2039  myLines[2].advanceX();
2040  return false;
2041  }
2042 
2043  void advanceX()
2044  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2045 
2046 protected:
2048 };
2049 
// Probe that caches a 3x3 block of x-cache-lines around the current
// position, so a voxel and all of its +/-1 neighbours can be read cheaply.
// NOTE(review): the extraction dropped listing line 2052 — from the
// destructor name below this is `class UT_API UT_VoxelProbeCube`; verify
// against the shipped header.
 2050 template <typename T>
 2051 class
 2053 {
 2054 public:
// NOTE(review): listing line 2055 (the constructor declaration) is missing
// from this extraction.
 2056  virtual ~UT_VoxelProbeCube();
 2057 
// Binds the probe to an array for full 3x3x3 (cube) or axis-only (plus)
// neighbourhood updates respectively.
 2058  void setCubeArray(const UT_VoxelArray<T> *vox);
 2059  void setPlusArray(const UT_VoxelArray<T> *vox);
 2060 
 2061  /// Allows you to query +/-1 in each direction. In cube update,
 2062  /// all are valid. In plus update, only one of x y and z may be
 2063  /// non zero.
 2064  inline T getValue(int x, int y, int z) const
 2065  { return myLines[y+1][z+1].getValue(x); }
 2066 
// NOTE(review): the declaration lines for the two iterator overloads below
// (listing lines 2068 and 2071) were dropped by the extraction; they take
// a UT_VoxelArrayIterator<S> / UT_VoxelTileIterator<S> named `vit` and
// forward to the integer setIndexCube().
 2067  template <typename S>
 2069  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
 2070  template <typename S>
 2072  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
 2073  bool setIndexCube(int x, int y, int z);
 2074 
// Same pattern for the plus-shaped update (declaration lines 2076/2079
// missing from the extraction); forwards to the integer setIndexPlus().
 2075  template <typename S>
 2077  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
 2078  template <typename S>
 2080  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
 2081  bool setIndexPlus(int x, int y, int z);
 2082 
 2083  /// Computes central difference gradient, does not scale
 2084  /// by the step size (which is twice voxelsize)
 2085  /// Requires PlusArray
// NOTE(review): listing line 2086 (the gradient() declaration) is missing;
// the body returns the unscaled central differences in x, y and z.
 2087  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
 2088  getValue(0,1,0) - getValue(0,-1,0),
 2089  getValue(0,0,1) - getValue(0,0,-1)); }
 2090 
 2091  /// Computes the central difference curvature using the given
 2092  /// inverse voxelsize (ie, 1/voxelsize) at this point.
 2093  /// Requires CubeArray.
 2094  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
 2095 
 2096  /// Computes the laplacian, again with a given 1/voxelsize.
 2097  /// Requires PlusArray
 2098  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
 2099 
 2100 protected:
 2101  /// Does a rotation of our cache lines, ym becomes y0 and y0 becomes yp,
 2102  /// so further queries with y+1 will be cache hits for 2 out of 3.
// NOTE(review): listing lines 2103 and 2105 (the rotate-lines declaration
// and the myLines member array used by getValue() above) are missing from
// this extraction.
 2104 
 2106  /// Cached look up position. myValid stores if they are
 2107  /// valid values or not
 2108  bool myValid;
 2109  int myX, myY, myZ;
 2110  /// Half inclusive [,) range of valid x queries for current cache.
 2111  int myMinValidX, myMaxValidX;
 2112 };
2113 
2114 ///
2115 /// UT_VoxelProbeConstant
2116 ///
2117 /// Looks like a voxel probe but only returns a constant value.
2118 ///
// Drop-in stand-in for a voxel probe: every setIndex() trivially succeeds
// and getValue() always returns the single stored value.
// NOTE(review): listing line 2121 (the class-name line) is missing from
// this extraction — per the banner comment above this is
// UT_VoxelProbeConstant; verify against the shipped header.
 2119 template <typename T>
 2120 class
 2122 {
 2123 public:
// NOTE(review): listing lines 2124-2125 (constructor/destructor
// declarations) are missing from this extraction.
 2126 
// Iterator-taking setIndex overloads (declaration lines 2128/2131 dropped
// by the extraction): positioning is a no-op, so they just return true.
 2127  template <typename S>
 2129  { return true; }
 2130  template <typename S>
 2132  { return true; }
 2133  bool setIndex(int x, int y, int z)
 2134  { return true; }
 2135 
// The constant value served for every lookup.
 2136  void setValue(T val) { myValue = val; }
 2137  T getValue() const { return myValue; }
 2138 protected:
// NOTE(review): listing line 2139 (the `T myValue;` member referenced
// above) is missing from this extraction.
 2140 };
2141 
2142 ///
2143 /// UT_VoxelProbeAverage
2144 ///
 2145 /// When working with MAC grids one often has slightly misaligned
 2146 /// fields. Ie, one field is at the half-grid spacing of another field.
 2147 /// The step values are 0 if the dimension is aligned, -1 for half a step
 2148 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
 2149 /// (ie, (val(0)+val(1))/2)
2150 ///
// Probe that reads a value averaged across a half-voxel offset per axis,
// controlled by the XStep/YStep/ZStep template parameters (see the banner
// comment above: 0 = aligned, -1 = half step back, +1 = half step forward).
// NOTE(review): listing line 2153 (the class-name line) is missing from
// this extraction — per the banner comment this is UT_VoxelProbeAverage;
// verify against the shipped header.
 2151 template <typename T, int XStep, int YStep, int ZStep>
 2152 class
 2154 {
 2155 public:
// NOTE(review): listing lines 2156-2157 (constructor/destructor
// declarations) are missing from this extraction.
 2158 
 2159  void setArray(const UT_VoxelArray<T> *vox);
 2160 
// Iterator overloads (declaration lines 2162/2165 dropped by the
// extraction) forwarding to the integer setIndex().
 2161  template <typename S>
 2163  { return setIndex(vit.x(), vit.y(), vit.z()); }
 2164  template <typename S>
 2166  { return setIndex(vit.x(), vit.y(), vit.z()); }
 2167  bool setIndex(int x, int y, int z);
 2168 
 2169  /// Returns the velocity centered at this index, thus an average
 2170  /// of the values in each of our internal probes.
 2171  inline T getValue() const
 2172  {
// ZStep is a compile-time constant, so the untaken branch folds away.
 2173  if (ZStep)
 2174  return (valueZ(1) + valueZ(0)) * 0.5;
 2175  return valueZ(0);
 2176  }
 2177 
 2178 protected:
// Averages across Y (when YStep is set) at a fixed z offset.
 2179  inline T valueZ(int z) const
 2180  {
 2181  if (YStep)
 2182  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
 2183  return valueYZ(0, z);
 2184  }
 2185 
// Averages across X (direction given by XStep's sign) on the (y, z) line.
 2186  inline T valueYZ(int y, int z) const
 2187  {
 2188  if (XStep > 0)
 2189  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
 2190  if (XStep < 0)
 2191  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
 2192  return myLines[y][z].getValue();
 2193  }
 2194 
 2195  // Stores [Y][Z] lines.
// NOTE(review): listing line 2196 (the myLines member declaration used
// above) is missing from this extraction.
 2197 };
2198 
2199 
2200 ///
2201 /// UT_VoxelProbeFace is designed to walk over three velocity
2202 /// fields that store face-centered values. The indices refer
2203 /// to the centers of the voxels.
2204 ///
// Probe over three face-centered (MAC) velocity arrays; indices refer to
// voxel centers while face() exposes the per-axis face samples.
// NOTE(review): listing line 2207 (the class-name line) is missing from
// this extraction — per the banner comment above this is
// UT_VoxelProbeFace; verify against the shipped header.
 2205 template <typename T>
 2206 class
 2208 {
 2209 public:
// NOTE(review): listing line 2210 (the constructor declaration) is
// missing from this extraction.
 2211  virtual ~UT_VoxelProbeFace();
 2212 
 2213  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
 2214  void setVoxelSize(const UT_Vector3 &voxelsize);
 2215 
// Iterator overloads (declaration lines 2217/2220 dropped by the
// extraction) forwarding to the integer setIndex().
 2216  template <typename S>
 2218  { return setIndex(vit.x(), vit.y(), vit.z()); }
 2219  template <typename S>
 2221  { return setIndex(vit.x(), vit.y(), vit.z()); }
 2222  bool setIndex(int x, int y, int z);
 2223 
 2224  /// Get the face values on each face component.
 2225  /// Parameters are axis then side.
 2226  /// 0 is the lower face, 1 the higher face.
 2227  inline T face(int axis, int side) const
 2228  {
// The x-axis faces live on one cache line indexed by side; the y/z axes
// each keep a separate line per side.
 2229  if (axis == 0)
 2230  return myLines[0][0].getValue(side);
 2231  else
 2232  return myLines[axis][side].getValue();
 2233  }
 2234 
 2235  /// Returns the velocity centered at this index, thus an average
 2236  /// of the values in each of our internal probes.
 2237  inline UT_Vector3 value() const
 2238  {
 2239  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
 2240  0.5f * (face(1, 0) + face(1, 1)),
 2241  0.5f * (face(2, 0) + face(2, 1)));
 2242  }
 2243 
 2244  /// Returns the divergence of this cell.
 2245  inline T divergence() const
 2246  {
// NOTE(review): the face differences are scaled by myVoxelSize rather
// than myInvVoxelSize here; a plain central-difference divergence would
// divide by the voxel size — presumably callers account for the scaling.
// Verify against the shipped header / callers before relying on units.
 2247  return (face(0,1)-face(0,0)) * myVoxelSize.x()
 2248  + (face(1,1)-face(1,0)) * myVoxelSize.y()
 2249  + (face(2,1)-face(2,0)) * myVoxelSize.z();
 2250 
 2251  }
 2252 
 2253 protected:
 2254 
// NOTE(review): listing line 2256 (the remaining parameters of swapLines)
// and lines 2258-2260 (including the myLines member used above) are
// missing from this extraction.
 2255  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
 2257 
 2258 
 2260 
 2261  /// Cached look up position. myValid stores if they are
 2262  /// valid values or not
 2263  bool myValid;
 2264  int myX, myY, myZ;
 2265  /// Half inclusive [,) range of valid x queries for current cache.
 2266  int myMinValidX, myMaxValidX;
 2267 
 2268  UT_Vector3 myVoxelSize, myInvVoxelSize;
 2269 };
2270 
2271 
2272 #if defined( WIN32 ) || defined( LINUX ) || defined( MBSD ) || defined(GAMEOS)
2273  #include "UT_VoxelArray.C"
2274 #endif
2275 
2276 
2277 // Typedefs for common voxel array types
2281 
2289 // Read only probe
2293 // Write only
2297 // Read/Write always writeback.
2301 // Read/Write with testing
2305 
2306 
2308 
2312 
2316 
2317 #endif
2318 
UT_COWWriteHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayWriteHandleV4
void setTile(const UT_VoxelArrayIterator< T > &vit)
exint exint const UT_JobInfo &info const
int x() const
Retrieve the current location of the iterator.
UT_Interrupt * myInterrupt
#define SYSmax(a, b)
Definition: SYS_Math.h:1367
int xyzTileToLinear(int x, int y, int z) const
void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const
SYS_FORCE_INLINE T lerpSample(T *samples, float fx, float fy, float fz) const
Lerps the given sample using trilinear interpolation.
const UT_VoxelTile< T > & operator=(const UT_VoxelTile< T > &src)
void size(int xres, int yres, int zres)
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
void findAverage(T &avg) const
Determines the average value of the tile.
#define SYS_STATIC_ASSERT(expr)
UT_VoxelProbe< fpreal32, true, true, true > UT_VoxelRWTProbeF
void setInterrupt(UT_Interrupt *interrupt)
T valueZ(int z) const
void loadData(UT_IStream &is)
Load an array, requires you have already size()d this array.
T & z(void)
Definition: UT_Vector4.h:379
UT_VoxelBorderType getBorder() const
exint getDataLength() const
Returns the amount of data used by the tile myData pointer.
bool atEnd() const
Returns true if we have iterated over all of the voxels.
UT_COWHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayHandleF
void match(const UT_VoxelArray< T > &src)
bool isMatching(const UT_VoxelArray< S > &src) const
Axis-aligned bounding box (AABB).
Definition: GEO_Detail.h:43
virtual const char * getName()=0
T valueYZ(int y, int z) const
void resample(const UT_VoxelArray< T > &src, UT_FilterType filtertype=UT_FILTER_POINT, float filterwidthscale=1.0f, int clampaxis=-1)
Fills this by resampling the given voxel array.
const UT_VoxelCompressOptions & getCompressionOptions() const
UT_VoxelTile< T > * getTile() const
Returns the VoxelTile we are currently processing.
int64 getMemoryUsage(bool inclusive) const
Return the amount of memory used by this array.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
bool setIndex(UT_VoxelTileIterator< S > &vit)
SYS_FORCE_INLINE void freeData()
const GLdouble * v
Definition: glcorearb.h:836
T operator()(UT_Vector3D pos) const
THREADED_METHOD3_CONST(UT_VoxelArray< T >, numTiles() > 16, flatten, T *, flatarray, exint, ystride, exint, zstride) void flattenPartial(T *flatarray
virtual ~UT_VoxelProbeConstant()
UT_Vector3 gradient() const
int numVoxels() const
GLuint start
Definition: glcorearb.h:474
UT_VoxelProbeCube< fpreal32 > UT_VoxelProbeCubeF
virtual ~UT_VoxelVectorProbe()
UT_COWReadHandle< UT_VoxelArray< T > > myHandle
UT_VoxelArray< fpreal32 > UT_VoxelArrayF
void clampIndex(int &x, int &y, int &z) const
UT_COWReadHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayReadHandleF
typedef void(APIENTRYP PFNGLCULLFACEPROC)(GLenum mode)
virtual T getValue(const UT_VoxelTile< T > &tile, int x, int y, int z) const =0
T * fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const
static void registerCompressionEngine(UT_VoxelTileCompress< T > *engine)
fpreal myQuantizeTol
Tolerance for quantizing to reduced bit depth.
virtual bool lerp(GA_AttributeOperand &d, GA_AttributeOperand &a, GA_AttributeOperand &b, GA_AttributeOperand &t) const
d = SYSlerp(a, b, t);
UT_VoxelVectorProbe< fpreal32, true, true, true > UT_VoxelVectorRWTProbeF
UT_Vector3T< float > UT_Vector3
UT_VoxelTile< T > * myCurTile
Current processing tile.
GLdouble GLdouble GLdouble z
Definition: glcorearb.h:847
UT_VoxelVectorProbe(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
UT_VoxelArray< T > * myBaseLevel
T divergence() const
Returns the divergence of this cell.
GLint level
Definition: glcorearb.h:107
SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
T & x(void)
Definition: UT_Vector2.h:285
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1221
UT_VoxelBorderType
Definition: UT_VoxelArray.h:67
#define SYSabs(a)
Definition: SYS_Math.h:1369
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
bool myOwnBase
If true, we will delete the base level when we are done.
UT_VoxelArray< UT_Vector4 > UT_VoxelArrayV4
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:75
#define UT_API
Definition: UT_API.h:13
bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const
UT_VoxelArray< T > * myArray
fpreal UTvoxelTileDist(const UT_Vector2 &a, const UT_Vector2 &b)
void setArray(UT_VoxelArray< T > *vox)
GLint y
Definition: glcorearb.h:102
T mixValues(T t1, T t2, mipmaptype function) const
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:32
void copyWithOffset(const UT_VoxelArray< T > &src, int offx, int offy, int offz)
int indexToLinearTile(int x, int y, int z) const
UT_VoxelTileIterator< int64 > UT_VoxelTileIteratorI
bool isConstant(T *cval=0) const
void makeConstant(T t)
Turns this tile into a constant tile of the given value.
bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const
GLfloat GLfloat GLfloat v2
Definition: glcorearb.h:817
T face(int axis, int side) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
SYS_FORCE_INLINE T & x(void)
Definition: UT_Vector3.h:498
UT_VoxelVectorProbe< fpreal32, true, true, false > UT_VoxelVectorRWProbeF
SYS_FORCE_INLINE T rawConstVal() const
3D Vector class.
UT_FilterType
Definition: UT_FilterType.h:16
void UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
bool atEnd() const
Returns true if we have iterated over all of the voxels.
const UT_JobInfo & info
void setCompressionOptions(const UT_VoxelCompressOptions &options)
virtual bool load(UT_JSONParser &p, UT_VoxelTile< T > &tile) const
UT_VoxelArray< T > * getArray() const
int myCurTile
Our current linear tile idx. A value of -1 implies at end.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
void setCompressionTolerance(fpreal tol)
void setHandle(UT_COWReadHandle< UT_VoxelArray< T > > handle)
void flatten(S *dst, int dststride) const
Flattens ourself into the given destination buffer.
void makeFpreal16()
Explicit compress to fpreal16. Lossy. No-op if already constant.
SYS_FORCE_INLINE T operator()(int x, int y, int z) const
int zres() const
virtual bool writeThrough(UT_VoxelTile< T > &tile, int x, int y, int z, T t) const =0
UT_VoxelVectorProbe< fpreal32, true, false, false > UT_VoxelVectorProbeF
SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
virtual ~UT_VoxelTileCompress()
signed char int8
Definition: SYS_Types.h:31
static fpreal dist(T a, T b)
bool setIndex(int x, int y, int z)
bool getCompressOnExit() const
SYS_FORCE_INLINE bool extractSample(int x, int y, int z, T *sample) const
bool writeThrough(int x, int y, int z, T t)
int yres() const
SYS_FORCE_INLINE T & z(void)
Definition: UT_Vector3.h:502
UT_VoxelTileIterator< fpreal32 > UT_VoxelTileIteratorF
long long int64
Definition: SYS_Types.h:107
const T * rawData() const
T getBorderScale(int axis) const
const S * writeTiles(const S *srcdata, int srcstride, const UT_IntArray &tilelist)
GLfloat f
Definition: glcorearb.h:1925
bool hasNan() const
Returns true if any NANs are in this tile.
void setArray(UT_VoxelArray< T > *vx, UT_VoxelArray< T > *vy, UT_VoxelArray< T > *vz)
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z, T *sample) const
void resetX(int x)
THREADED_METHOD4(UT_VoxelArray< T >, numTiles() > 4, copyWithOffsetInternal, const UT_VoxelArray< T > &, src, int, offx, int, offy, int, offz) void copyWithOffsetInternalPartial(const UT_VoxelArray< T > &src
exint exint zstride
int myCurTileListIdx
Our current index into the tile list.
void setValue(T t) const
Sets the voxel we are currently pointing to the given value.
virtual void load(UT_IStream &is, UT_VoxelTile< T > &tile) const
int64 getMemoryUsage(bool inclusive) const
Returns the amount of memory used by this tile.
THREADED_METHOD4_CONST(UT_VoxelArray< T >, numTiles() > 16, flattenGLFixed8, uint8 *, flatarray, exint, ystride, exint, zstride, T, dummy) void flattenGLFixed8Partial(uint8 *flatarray
int getYRes() const
void weightedSum(int pstart[3], int pend[3], const float *weights[3], int start[3], T &result)
SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const
int numTiles() const
UT_BoundingBoxI getTileBBox() const
int getLinearTileNum() const
int64 exint
Definition: SYS_Types.h:116
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
static void saveCompressionTypes(std::ostream &os)
Stores a list of compresson engines to os.
T getBorderValue() const
const std::enable_if<!VecTraits< T >::IsVec, T >::type & max(const T &a, const T &b)
Definition: Composite.h:133
UT_VoxelArray< T > * myArray
The array we belong to.
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:125
double fpreal64
Definition: SYS_Types.h:192
virtual ~UT_VoxelArray()
T getValue() const
GLuint GLuint end
Definition: glcorearb.h:474
virtual void save(std::ostream &os, const UT_VoxelTile< T > &tile) const
GLint GLenum GLboolean GLsizei stride
Definition: glcorearb.h:871
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
const UT_VoxelArray< T > & operator=(const UT_VoxelArray< T > &src)
Assignment operator:
UT_VoxelArray< T > * getArray() const
bool isValid() const
virtual ~UT_VoxelProbeAverage()
GLintptr offset
Definition: glcorearb.h:664
void makeRawUninitialized()
Definition: VM_SIMD.h:180
void setTile(const UT_VoxelArrayIterator< S > &vit, UT_VoxelArray< T > *array)
void setValue(int x, int y, int z, T t)
UT_VoxelTile< T > * getTile(int tx, int ty, int tz) const
UT_VoxelProbe< UT_Vector4, true, true, true > UT_VoxelRWTProbeV4
void setCompressOnExit(bool shouldcompress)
T evaluate(const UT_Vector3 &pos, const UT_Filter &filter, fpreal radius, int clampaxis=-1) const
UT_VoxelMipMap< fpreal32 > UT_VoxelMipMapF
bool setIndex(UT_VoxelTileIterator< S > &vit)
#define SYS_STATIC_FORCE_INLINE
Definition: SYS_Inline.h:48
bool tryCompress(const UT_VoxelCompressOptions &options)
virtual bool canSave() const
Does this engine support saving and loading?
int getRes(int dim) const
THREADED_METHOD(UT_VoxelArray< T >, numTiles() > 100, collapseAllTiles) void collapseAllTilesPartial(const UT_JobInfo &info)
int getXRes() const
void setRes(int xr, int yr, int zr)
UT_VoxelArray< int64 > UT_VoxelArrayI
THREADED_METHOD1(UT_VoxelArray< T >, numTiles() > 100, constant, T, t) void const antPartial(T t
bool setIndexPlus(UT_VoxelTileIterator< S > &vit)
UT_API UT_ValArray< UT_VoxelTileCompress< fpreal16 > * > & UTvoxelTileGetCompressionEngines(fpreal16 *dummy)
GLboolean * data
Definition: glcorearb.h:130
bool setIndex(UT_VoxelArrayIterator< S > &vit)
virtual bool isLossless() const
Returns true if the compression type is lossless.
GLuint const GLchar * name
Definition: glcorearb.h:785
int int32
Definition: SYS_Types.h:35
void getTileVoxels(int idx, UT_Vector3I &start, UT_Vector3I &end) const
idxth tile represents the voxels indexed [start,end).
T & y(void)
Definition: UT_Vector4.h:377
T getValue(int x, int y, int z) const
SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z, T *sample) const
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1221
SYS_FORCE_INLINE bool inlineConstant() const
void writeCacheLine(T *cacheline, int y, int z)
Fills a cache line from an external buffer into our own data.
void advanceX()
Blindly advances our current pointer.
bool isStartOfTile() const
Returns true if we are at the start of a new tile.
void setValue(int x, int y, int z, T t)
SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const
UT_VoxelArrayIterator< int64 > UT_VoxelArrayIteratorI
virtual int getDataLength(const UT_VoxelTile< T > &tile) const =0
GLsizei samples
Definition: glcorearb.h:1297
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
This tile will iterate over the voxels indexed [start,end).
bool myAllowFP16
Conversion to fpreal16, only valid for scalar data.
SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
int getZRes() const
const UT_JobInfo * myJobInfo
The job info to use for tilefetching.
void setConstArray(const UT_VoxelArray< T > *vox, int prex=0, int postx=0)
static UT_VoxelTileCompress< T > * getCompressionEngine(int index)
GridType::Ptr laplacian(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the Laplacian of the given scalar grid.
bool getCompressOnExit() const
UT_VoxelTileIterator< UT_Vector4 > UT_VoxelTileIteratorV4
UT_VoxelProbe< fpreal32, false, true, false > UT_VoxelWOProbeF
GLenum GLenum dst
Definition: glcorearb.h:1792
SYS_FORCE_INLINE T & y(void)
Definition: UT_Vector3.h:500
GLsizei const GLfloat * value
Definition: glcorearb.h:823
virtual void findMinMax(const UT_VoxelTile< T > &tile, T &min, T &max) const
Definition: UT_VoxelArray.C:80
double fpreal
Definition: SYS_Types.h:270
void setLinearTile(exint lineartilenum, UT_VoxelArray< T > *array)
bool isRawFull() const
Returns if this tile is in raw full format.
bool hasNan() const
Returns true if any element of the voxel array is NAN.
int myMinValidX
Half inclusive [,) range of valid x queries for current cache.
UT_VoxelVectorProbe< fpreal32, false, true, false > UT_VoxelVectorWOProbeF
UT_VoxelProbe< UT_Vector4, true, false, false > UT_VoxelProbeV4
SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
SYS_STATIC_FORCE_INLINE T lerpValues(T v1, T v2, fpreal32 bias)
Lerps two numbers, templated to work with T.
T getValue(int x, int y, int z) const
bool setIndexPlus(UT_VoxelArrayIterator< S > &vit)
SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z, float fx, float fy, float fz) const
T getValue(int offset) const
void copyFragment(int dstx, int dsty, int dstz, const UT_VoxelTile< T > &srctile, int srcx, int srcy, int srcz)
void uncompress()
Turns a compressed tile into a raw tile.
UT_VoxelArray< T > * myArray
unsigned char uint8
Definition: SYS_Types.h:32
THREADED_METHOD3(UT_VoxelArray< T >, numTiles() > 16, extractFromFlattened, const T *, flatarray, exint, ystride, exint, zstride) void extractFromFlattenedPartial(const T *flatarray
int idx(int idx) const
UT_COWReadHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayReadHandleV4
void setConstArray(const UT_VoxelArray< T > *vox)
UT_API UT_Interrupt * UTgetInterrupt()
Obtain global UT_Interrupt singleton.
SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z, float &fx, float &fy, float &fz) const
GLuint index
Definition: glcorearb.h:785
T getValue(int axis) const
UT_VoxelProbe< fpreal32, true, false, false > UT_VoxelProbeF
int numLevels() const
UT_BoundingBoxT< int64 > UT_BoundingBoxI
UT_VoxelProbe< fpreal32, true, true, false > UT_VoxelRWProbeF
bool isRaw() const
Returns if this tile is in raw format.
void saveData(std::ostream &os) const
UT_ValArray< UT_VoxelArray< T > ** > myLevels
GLint GLenum GLint x
Definition: glcorearb.h:408
int getRes(int axis) const
GLfloat GLfloat v1
Definition: glcorearb.h:816
virtual ~UT_VoxelTile()
GLuint GLfloat * val
Definition: glcorearb.h:1607
SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelArrayIterator< S > &vit)
UT_BoundingBoxI getTileBBox(int idx) const
int int int offz
UT_Vector3 value() const
void linearTileToXYZ(int idx, int &x, int &y, int &z) const
UT_Vector3 myVoxelSize
UT_VoxelProbe< UT_Vector4, true, true, false > UT_VoxelRWProbeV4
exint numVoxels() const
void save(std::ostream &os) const
#define DEFINE_STD_FUNC(TYPE)
GLint GLint GLsizei GLint GLenum GLenum type
Definition: glcorearb.h:107
bool isTileConstant() const
Returns true if the tile we are currently in is a constant tile.
T operator()(int x, int y, int z) const
UT_COWHandle< UT_VoxelArray< UT_Vector4 > > UT_VoxelArrayHandleV4
T & x(void)
Definition: UT_Vector4.h:375
UT_VoxelTile< T > * getLinearTile(int idx) const
void load(UT_IStream &is, const UT_IntArray &compression)
UT_VoxelProbe< UT_Vector4, false, true, false > UT_VoxelWOProbeV4
void setCompressOnExit(bool shouldcompress)
static int lookupCompressionEngine(const char *name)
T & y(void)
Definition: UT_Vector2.h:287
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:856
UT_COWWriteHandle< UT_VoxelArray< fpreal32 > > UT_VoxelArrayWriteHandleF
void setValue(const UT_Vector3 &v)
void setValue(T value)
SYS_FORCE_INLINE void lerpVoxelMinMax(T &lerp, T &lmin, T &lmax, int x, int y, int z, float fx, float fy, float fz) const
bool setIndexCube(UT_VoxelArrayIterator< S > &vit)
#define UT_VOXEL_FREE(x)
Definition: UT_VoxelArray.h:52
bool setIndexCube(UT_VoxelTileIterator< S > &vit)
void uncompressFull()
Turns a tile into a raw full tile.
void setBorder(UT_VoxelBorderType type, T t)
UT_Vector3 getValue() const
T * rawFullData()
Returns the raw full data of the tile.
UT_VoxelArrayIterator< fpreal32 > UT_VoxelArrayIteratorF
virtual bool tryCompress(UT_VoxelTile< T > &tile, const UT_VoxelCompressOptions &options, T min, T max) const =0
void setForeignData(void *data, int8 compress_type)
SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
static void expandMinMax(T v, T &min, T &max)
Designed to be specialized according to T.
T & w(void)
Definition: UT_Vector4.h:381
short int16
Definition: SYS_Types.h:33
int idx(int idx) const
bool extractSampleCube(int x, int y, int z, T *sample) const
void findMinMax(T &min, T &max) const
Finds the minimum and maximum T values.
bool isValidIndex(int x, int y, int z) const
Returns true if the given x, y, z values lie inside the valid index.
#define SYSmin(a, b)
Definition: SYS_Math.h:1368
const std::enable_if<!VecTraits< T >::IsVec, T >::type & min(const T &a, const T &b)
Definition: Composite.h:129
void setComponent(int axis, T val)
virtual bool save(UT_JSONWriter &w, const UT_VoxelTile< T > &tile) const
SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z, float fx, float fy, float fz) const
void setBorderScale(T scalex, T scaley, T scalez)
bool setIndex(UT_VoxelTileIterator< S > &vit)
bool setIndex(UT_VoxelTileIterator< S > &vit)
void setConstArray(const UT_VoxelArray< T > *vx, const UT_VoxelArray< T > *vy, const UT_VoxelArray< T > *vz)
float fpreal32
Definition: SYS_Types.h:191
exint exint T dummy
const UT_VoxelArray< T > * level(int level, int function) const
int xres() const
Read the current resolution.
S * extractTiles(S *dstdata, int stride, const UT_IntArray &tilelist) const
bool setIndex(int x, int y, int z)
bool setIndex(UT_VoxelArrayIterator< S > &vit)
void writeData(const S *src, int srcstride)
SYS_FORCE_INLINE T * rawConstData() const
bool isSimpleCompression() const
SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z, T *sample) const
bool isConstant() const
Returns if this tile is constant.
SYS_FORCE_INLINE T lerpSampleAxis(T *samples, float fx, float fy, float fz) const
static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions)
UT_VoxelArrayIterator< UT_Vector4 > UT_VoxelArrayIteratorV4
int getTileRes(int dim) const
void evaluateMinMax(T &lerp, T &lmin, T &lmax, UT_Vector3F pos) const
bool setIndex(UT_VoxelTileIterator< S > &vit)
fpreal getCompressionTolerance() const
void advance()
Advances the iterator to point to the next voxel.
SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const
GLint GLint GLint GLint GLint GLint GLint GLbitfield GLenum filter
Definition: glcorearb.h:1296
GLenum src
Definition: glcorearb.h:1792