HDK
UT_VoxelArray.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 // In real-world tests, TBB alloc is 3-4% faster. Yay!
37 // But unfortunately it is less aggressive about fragmentation, so
38 // we effectively use 2x the memory. Boo.
39 
40 //#define VOXEL_USE_TBB_ALLOC
41 
42 #ifdef VOXEL_USE_TBB_ALLOC
43 
44 #include <tbb/scalable_allocator.h>
45 
46 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
47 #define UT_VOXEL_FREE(x) scalable_free(x)
48 
49 #else
50 
51 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
52 #define UT_VOXEL_FREE(x) SYSafree(x)
53 
54 #endif
55 
56 class UT_Filter;
57 class UT_JSONWriter;
58 class UT_JSONParser;
59 
60 static const int TILEBITS = 4;
61 static const int TILESIZE = 1 << TILEBITS;
62 static const int TILEMASK = TILESIZE-1;
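#if 0
// Editor's note: illustrative sketch, not part of the original header.
// With TILEBITS = 4 each tile spans 16 voxels per axis, so a global voxel
// index splits into a tile index (upper bits) and a tile-local index
// (lower bits), exactly as the accessors in UT_VoxelArray below do.
static inline void
exampleSplitVoxelIndex(int x, int &tilex, int &localx)
{
    tilex  = x >> TILEBITS;   // which tile along this axis (x / 16)
    localx = x & TILEMASK;    // offset inside that tile    (x % 16)
}
// e.g. x = 37  ->  tilex = 2, localx = 5, since 37 = 2*16 + 5.
#endif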
63 
64 ///
65 /// Behaviour of out of bound reads.
66 ///
67 enum UT_VoxelBorderType
68 {
69  UT_VOXELBORDER_CONSTANT,
70  UT_VOXELBORDER_REPEAT,
71  UT_VOXELBORDER_STREAK,
72  UT_VOXELBORDER_EXTRAP
73 };
74 
75 template <typename T> class UT_VoxelTile;
76 template <typename T> class UT_VoxelArray;
77 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
78 template <typename T> class UT_VoxelProbeCube;
79 template <typename T> class UT_VoxelProbeFace;
80 
81 class UT_API UT_VoxelCompressOptions
82 {
83 public:
84  UT_VoxelCompressOptions()
85  {
86  myConstantTol = 0;
87  myQuantizeTol = 0;
88  myAllowFP16 = false;
89  }
90 
91  // Used for quantization.
92  enum DitherType
93  {
94  DITHER_NONE,
95  DITHER_ORDERED
96  };
97 
98  /// Tiles will be constant if within this range. This may
99  /// need to be tighter than quantization tolerance as
100  /// dithering can't recover partial values.
101  fpreal myConstantTol;
102  /// Tolerance for quantizing to reduced bit depth
103  fpreal myQuantizeTol;
104 
105  DitherType myDitherType;
106 
107  /// Conversion to fpreal16, only valid for scalar data.
108  bool myAllowFP16;
109 };
110 
111 ///
112 /// UT_VoxelTileCompress
113 ///
114 /// A compression engine for UT_VoxelTiles of a specific type. This
115 /// is a verb class which is invoked from the voxeltile class.
116 ///
117 template <typename T>
118 class UT_VoxelTileCompress
119 {
120 public:
121  UT_VoxelTileCompress() {}
122  virtual ~UT_VoxelTileCompress() {}
123 
124  /// Attempts to write data directly to the compressed tile.
125  /// Returns false if not possible.
126  virtual bool writeThrough(UT_VoxelTile<T> &tile,
127  int x, int y, int z, T t) const = 0;
128 
129  /// Reads directly from the compressed data.
130  /// Cannot alter the tile in any way because it must be threadsafe.
131  virtual T getValue(const UT_VoxelTile<T> &tile,
132  int x, int y, int z) const = 0;
133 
134  /// Attempts to compress the data according to the given tolerance.
135  /// If successful, returns true.
136  virtual bool tryCompress(UT_VoxelTile<T> &tile,
137  const UT_VoxelCompressOptions &options,
138  T min, T max) const = 0;
139 
140  /// Returns the length in bytes of the data in the tile.
141  /// It must be at least one byte long.
142  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
143 
144  /// Returns true if the compression type is lossless
145  virtual bool isLossless() const { return false; }
146 
147  /// Determines the min & max values of the tile. A default
148  /// implementation uses getValue() on all voxels.
149  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
150 
151  /// Does this engine support saving and loading?
152  virtual bool canSave() const { return false; }
153  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
154  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
155  { return false; }
156  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
157  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
158  { return false; }
159 
160  /// Returns the unique name of this compression engine so
161  /// we can look up engines by name (the index of the compression
162  /// engine is assigned at load time so isn't constant)
163  virtual const char *getName() = 0;
164 };
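#if 0
// Editor's note: illustrative sketch, not part of the original header.
// The rough shape of a custom compression engine implementing the virtual
// interface above. This "null" engine declines to compress anything; a real
// engine would encode/decode tile.myData and register itself with the tile
// class (registration call not shown here).
template <typename T>
class ExampleNullCompress : public UT_VoxelTileCompress<T>
{
public:
    bool writeThrough(UT_VoxelTile<T> &tile,
                      int x, int y, int z, T t) const override
                { return false; }   // nothing is writable without decompressing
    T    getValue(const UT_VoxelTile<T> &tile,
                  int x, int y, int z) const override
                { return T(); }     // never reached: tryCompress() always refuses
    bool tryCompress(UT_VoxelTile<T> &tile,
                     const UT_VoxelCompressOptions &options,
                     T min, T max) const override
                { return false; }   // decline to compress
    int  getDataLength(const UT_VoxelTile<T> &tile) const override
                { return 1; }       // must report at least one byte
    const char *getName() override { return "examplenull"; }
};
#endif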
165 
177 
178 #define DEFINE_STD_FUNC(TYPE) \
179 inline void \
180 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
181 { \
182  if (v < min) \
183  min = v; \
184  else if (v > max) \
185  max = v; \
186 } \
187  \
188 inline fpreal \
189 UTvoxelTileDist(TYPE a, TYPE b) \
190 { \
191  return (fpreal) SYSabs(a - b); \
192 }
193 
202 
203 #undef DEFINE_STD_FUNC
204 
205 inline void
206 UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
207 {
208  min.x() = SYSmin(v.x(), min.x());
209  max.x() = SYSmax(v.x(), max.x());
210 
211  min.y() = SYSmin(v.y(), min.y());
212  max.y() = SYSmax(v.y(), max.y());
213 }
214 
215 inline void
216 UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min, UT_Vector3 &max)
217 {
218  min.x() = SYSmin(v.x(), min.x());
219  max.x() = SYSmax(v.x(), max.x());
220 
221  min.y() = SYSmin(v.y(), min.y());
222  max.y() = SYSmax(v.y(), max.y());
223 
224  min.z() = SYSmin(v.z(), min.z());
225  max.z() = SYSmax(v.z(), max.z());
226 }
227 
228 inline void
229 UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min, UT_Vector4 &max)
230 {
231  min.x() = SYSmin(v.x(), min.x());
232  max.x() = SYSmax(v.x(), max.x());
233 
234  min.y() = SYSmin(v.y(), min.y());
235  max.y() = SYSmax(v.y(), max.y());
236 
237  min.z() = SYSmin(v.z(), min.z());
238  max.z() = SYSmax(v.z(), max.z());
239 
240  min.w() = SYSmin(v.w(), min.w());
241  max.w() = SYSmax(v.w(), max.w());
242 }
243 
244 inline fpreal
245 UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b)
246 {
247  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
248 }
249 
250 inline fpreal
251 UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b)
252 {
253  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
254  + SYSabs(a.z() - b.z());
255 }
256 
257 inline fpreal
258 UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b)
259 {
260  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
261  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
262 }
263 
264 ///
265 /// UT_VoxelTile
266 ///
267 /// A UT_VoxelArray is composed of a number of these tiles. This is
268 /// done for two reasons:
269 /// 1) Increased memory locality when processing neighbouring points.
270 /// 2) Ability to compress or page out unneeded tiles.
271 /// Currently, the only special ability is the ability to create constant
272 /// tiles.
273 ///
274  /// To the end user of the UT_VoxelArray, the UT_VoxelTile should
275  /// usually be transparent. The only exception may be if they want to do
276 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
277 ///
278 template <typename T>
279 class UT_VoxelTile
280 {
281 public:
282  UT_VoxelTile();
283  virtual ~UT_VoxelTile();
284 
285  // Copy constructor:
286  UT_VoxelTile(const UT_VoxelTile<T> &src);
287 
288 
289  // Assignment operator:
290  const UT_VoxelTile<T> &operator=(const UT_VoxelTile<T> &src);
291 
292  enum CompressionType
293  {
294  COMPRESS_RAW,
295  COMPRESS_RAWFULL,
296  COMPRESS_CONSTANT,
297  COMPRESS_FPREAL16,
298  COMPRESS_ENGINE
299  };
300 
301  /// Fetch a given local value. (x,y,z) should be local to
302  /// this tile.
303  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
304  {
305  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
306  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
307 
308  switch (myCompressionType)
309  {
310  case COMPRESS_RAW:
311  return ((T *)myData)[
312  ((z * myRes[1]) + y) * myRes[0] + x ];
313 
314  case COMPRESS_CONSTANT:
315  return rawConstVal();
316 
317  case COMPRESS_RAWFULL:
318  return ((T *)myData)[
319  ((z * TILESIZE) + y) * TILESIZE + x ];
320 
321  case COMPRESS_FPREAL16:
322  {
323  T result;
324  result = (((fpreal16 *)myData)[
325  ((z * myRes[1]) + y) * myRes[0] + x ]);
326  return result;
327  }
328  }
329 
330  // By default use the compression engine.
331  UT_VoxelTileCompress<T> *engine;
332 
333  engine = getCompressionEngine(myCompressionType);
334  return engine->getValue(*this, x, y, z);
335  }
336 
337  /// Lerps two numbers, templated to work with T.
338  static T lerpValues(T v1, T v2, fpreal32 bias)
339  {
340  return v1 + (v2 - v1) * bias;
341  }
342 
343  /// Does a trilinear interpolation. x,y,z should be local to this
344  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
345  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
346 
347  template <int AXIS2D>
348  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
349 
350  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
351  /// array should have 8 elements, x minor, z major.
352  /// Requires it is in bounds.
353  /// Returns true if all constant, in which case only a single
354  /// sample is filled, [0]
355  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
356  T *sample) const;
357  template <int AXIS2D>
358  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
359  T *sample) const;
360 
361  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
362  /// 7 samples.
363  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
364  T *sample) const;
365  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
366  /// 27 elements.
367  bool extractSampleCube(int x, int y, int z,
368  T *sample) const;
369 #if 0
370  /// MSVC can't handle aligned parameters after the third so
371  /// frac must come first.
372  T lerp(v4uf frac, int x, int y, int z) const;
373 #endif
374 
375  /// Returns a cached line to our internal data, at local address x,y,z.
376  /// cacheline is a caller allocated structure to fill out if we have
377  /// to decompress. If forcecopy isn't set and we can, the result may
378  /// be an internal pointer. stride is set to the update for moving one
379  /// x position in the cache.
380  /// strideofone should be set to true if you want to prevent 0 stride
381  /// results for constant tiles.
382  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
383 
384  /// Fills a cache line from an external buffer into our own data.
385  void writeCacheLine(T *cacheline, int y, int z);
386 
387  /// Copies between two tiles. The tiles' voxels match up, but don't
388  /// have the same offset. The maximal overlapping voxels are copied.
389  /// this->setValue(dstx, dsty, dstz, src(srcx, srcy, srcz));
390  void copyFragment(int dstx, int dsty, int dstz,
391  const UT_VoxelTile<T> &srctile,
392  int srcx, int srcy, int srcz);
393 
394  /// Flattens ourself into the given destination buffer.
395  template <typename S>
396  void flatten(S *dst, int dststride) const;
397 
398  /// Fills our values from the given dense flat buffer. Will
399  /// create a constant tile if the source is constant.
400  template <typename S>
401  void writeData(const S *src, int srcstride);
402 
403  /// The setValue is intentionally separate so we can avoid
404  /// expanding constant data when we write the same value to it.
405  void setValue(int x, int y, int z, T t);
406 
407  /// Finds the minimum and maximum T values
408  void findMinMax(T &min, T &max) const;
409 
410  /// Determines the average value of the tile.
411  void findAverage(T &avg) const;
412 
413  /// Returns if this tile is constant.
414  bool isConstant() const
415  { return myCompressionType == COMPRESS_CONSTANT; }
416 
417  /// Returns true if any NANs are in this tile
418  bool hasNan() const;
419 
420  /// Returns if this tile is in raw format.
421  bool isRaw() const
422  { return myCompressionType == COMPRESS_RAW; }
423 
424  /// Returns if this tile is in raw full format.
425  bool isRawFull() const
426  { return myCompressionType == COMPRESS_RAWFULL; }
427 
428  /// Returns true if this is a simple form of compression, either
429  /// constant, raw, or a raw full that isn't padded
430  bool isSimpleCompression() const
431  {
432  if (isRaw()) return true;
433  if (isConstant()) return true;
434  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
435  return true;
436  return false;
437  }
438 
439  /// Attempts to compress this tile. Returns true if any
440  /// compression performed.
441  bool tryCompress(const UT_VoxelCompressOptions &options);
442 
443  /// Turns this tile into a constant tile of the given value.
444  void makeConstant(T t);
445 
446  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
447  void makeFpreal16();
448 
449  /// Turns a compressed tile into a raw tile.
450  void uncompress();
451 
452  /// Turns a tile into a raw full tile.
453  void uncompressFull();
454 
455  /// Like uncompress() except it leaves the data uninitialized. Result
456  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
457  /// @note USE WITH CAUTION!
458  void makeRawUninitialized();
459 
460  /// Returns the raw full data of the tile.
461  T *rawFullData()
462  {
463  uncompressFull();
464  return (T *)myData;
465  }
466 
467  /// This only makes sense for simple compression. Use with
468  /// extreme care.
469  T *rawData()
470  { if (inlineConstant() && isConstant())
471  { return (T *) &myData; }
472  return (T *)myData; }
473  const T *rawData() const
474  { if (inlineConstant() && isConstant())
475  { return (const T *) &myData; }
476  return (const T *)myData; }
477 
478  /// Read the current resolution.
479  int xres() const { return myRes[0]; }
480  int yres() const { return myRes[1]; }
481  int zres() const { return myRes[2]; }
482 
483  int getRes(int dim) const { return myRes[dim]; }
484 
485 
486  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
487 
488  /// Returns the amount of memory used by this tile.
489  int64 getMemoryUsage(bool inclusive) const;
490 
491  /// Returns the amount of data used by the tile myData pointer.
492  exint getDataLength() const;
493 
494  /// A routine used by filtered evaluation to accumulate a partial
495  /// filtered sum in this tile.
496  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
497  /// weights - weight array
498  /// start - UT_VoxelArray coordinates at [0] in the weight array
499  void weightedSum(int pstart[3], int pend[3],
500  const float *weights[3], int start[3],
501  T &result);
502 
503  /// Designed to be specialized according to T
504 
505  /// Update min & max to encompass T itself.
506  static void expandMinMax(T v, T &min, T &max)
507  {
508  UTvoxelTileExpandMinMax(v, min, max);
509  }
510 
511  /// Return the "distance" of a & b. This is used for
512  /// tolerance checks on equality comparisons.
513  static fpreal dist(T a, T b)
514  {
515  return UTvoxelTileDist(a, b);
516  }
517 
517 
518  static void registerCompressionEngine(UT_VoxelTileCompress<T> *engine);
519 
520  // Returns the index of the bound compression engine.
521  static int lookupCompressionEngine(const char *name);
522  // Given an index, gets the compression engine.
523  static UT_VoxelTileCompress<T> *getCompressionEngine(int index);
524 
525  /// Saves this tile's data, in compressed form.
526  /// May save in uncompressed form if the compression type does
527  /// not support saving.
528  void save(std::ostream &os) const;
529  bool save(UT_JSONWriter &w) const;
530 
531  /// Loads tile data. Uses the compression index to map the saved
532  /// compression types into the correct loading compression types.
533  void load(UT_IStream &is, const UT_IntArray &compression);
534  bool load(UT_JSONParser &p, const UT_IntArray &compression);
535 
536  /// Stores a list of compression engines to os.
537  static void saveCompressionTypes(std::ostream &os);
538  static bool saveCompressionTypes(UT_JSONWriter &w);
539 
540  /// Builds a translation table from the given stream's compression types
541  /// into our own valid compression types.
542  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
543  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
544 
545 protected:
546  // Attempts to set the value to the native compressed format
547  // Some compression types allow some values to be written
548  // without decompression. Eg, you can write to a constant tile
549  // the tile's own value without decompression.
550  // If this returns true, t has been written.
551  bool writeThrough(int x, int y, int z, T t);
552 
553  /// Sets the local res of the tile. Does *not* resize the allocated
554  /// memory.
555  void setRes(int xr, int yr, int zr)
556  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
557 
558  static bool inlineConstant()
559  {
560  return (sizeof(T) <= sizeof(T*));
561  }
562 
563  T rawConstVal() const
564  { if (inlineConstant()) { return *((const T *)&myData); }
565  return *((const T*)myData); }
566  T *rawConstData() const
567  { if (inlineConstant()) { return ((T *)&myData); }
568  return ((T*)myData); }
569 
570  void setForeignData(void *data, int8 compress_type)
571  {
572  freeData();
573  myCompressionType = compress_type;
574 
575  if (isConstant() && inlineConstant())
576  {
577  makeConstant(*(T *)data);
578  }
579  else
580  {
581  myData = data;
582  myForeignData = true;
583  }
584  }
585 
586 public:
587  /// Frees myData and sets it to zero. This is a bit tricky
588  /// as the constant tiles may be inlined.
589  /// This is only public for the compression engines.
590  void freeData()
591  {
592  if (inlineConstant() && isConstant())
593  {
594  // Do nothing!
595  }
596  else if (myData && !myForeignData)
597  {
598  UT_VOXEL_FREE(myData);
599  }
600  myData = 0;
601  myForeignData = false;
602  }
603 
604 public:
605  // This is only public so the compression engines can get to it.
606  // It is blind data, do not alter!
607  void *myData;
608 private:
609 
610  /// Resolutions.
611  int8 myRes[3];
612 
613  /// Am I a constant tile?
614  int8 myCompressionType;
615 
616  int8 myForeignData;
617 
618  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
619  {
620  return UTvoxelTileGetCompressionEngines((T *) 0);
621  }
622 
623  friend class UT_VoxelTileCompress<T>;
624  friend class UT_VoxelArray<T>;
625  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
626  friend class UT_VoxelProbe;
627 };
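#if 0
// Editor's note: illustrative usage sketch, not part of the original header.
// End users normally work through UT_VoxelArray, but tiles can be visited
// directly; here the global value range is gathered per tile, which lets
// constant tiles answer without decompression. getLinearTile()/numTiles()
// are declared on UT_VoxelArray further below.
static void
exampleGlobalMinMax(const UT_VoxelArray<fpreal32> &arr,
                    fpreal32 &gmin, fpreal32 &gmax)
{
    for (int i = 0; i < arr.numTiles(); i++)
    {
        fpreal32 tmin, tmax;
        arr.getLinearTile(i)->findMinMax(tmin, tmax);
        if (i == 0)
        {
            gmin = tmin;
            gmax = tmax;
        }
        else
        {
            gmin = SYSmin(gmin, tmin);
            gmax = SYSmax(gmax, tmax);
        }
    }
}
#endif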
628 
629 ///
630 /// UT_VoxelArray
631 ///
632 /// This provides a data structure to hold a three-dimensional array
633 /// of data. The data should be some simple arithmetic type, such
634 /// as uint8, fpreal16, or UT_Vector3.
635 ///
636 /// Some operations, such as gradients, may make less sense with uint8.
637 ///
638 template <typename T>
639 class UT_VoxelArray
640 {
641 public:
642  UT_VoxelArray();
643  virtual ~UT_VoxelArray();
644 
645  /// Copy constructor:
646  UT_VoxelArray(const UT_VoxelArray<T> &src);
647 
648  /// Assignment operator:
649  const UT_VoxelArray<T> &operator=(const UT_VoxelArray<T> &src);
650 
651  /// This sets the voxelarray to have the given resolution, resetting
652  /// all elements to 0.
653  void size(int xres, int yres, int zres);
654 
655  /// This will ensure this voxel array matches the given voxel array
656  /// in terms of dimensions & border conditions. It may invoke
657  /// a size() and hence reset the field to 0.
658  void match(const UT_VoxelArray<T> &src);
659 
660  template <typename S>
661  bool isMatching(const UT_VoxelArray<S> &src) const
662  {
663  return src.getXRes() == getXRes() &&
664  src.getYRes() == getYRes() &&
665  src.getZRes() == getZRes();
666  }
667 
668  int getXRes() const { return myRes[0]; }
669  int getYRes() const { return myRes[1]; }
670  int getZRes() const { return myRes[2]; }
671  int getRes(int axis) const { return myRes[axis]; }
672 
673  /// Return the amount of memory used by this array.
674  int64 getMemoryUsage(bool inclusive) const;
675 
676  /// Sets this voxel array to the given constant value. All tiles
677  /// are turned into constant tiles.
678  THREADED_METHOD1(UT_VoxelArray<T>, numTiles() > 100,
679  constant,
680  T, t)
681  void constantPartial(T t, const UT_JobInfo &info);
682 
683  /// If this voxel array is all constant tiles, returns true.
684  /// The optional pointer is initialized to the constant value iff
685  /// the array is constant. (Note by constant we mean made of constant
686  /// tiles of the same value - if some tiles are uncompressed but
687  /// constant, it will still return false)
688  bool isConstant(T *cval = 0) const;
689 
690  /// Returns true if any element of the voxel array is NAN
691  bool hasNan() const;
692 
693  /// This convenience function lets you sample the voxel array.
694  /// pos is in the range [0..1]^3.
695  /// The returned T value is trilinearly interpolated. Edges are determined by the border
696  /// mode.
697  /// The cells are sampled at the center of the voxels.
698  T operator()(UT_Vector3D pos) const;
699  T operator()(UT_Vector3F pos) const;
700 
701  /// This convenience function lets you sample the voxel array.
702  /// pos is in the range [0..1]^3.
703  /// The min/max is the range of the sampled values.
704  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
705  UT_Vector3F pos) const;
706 
707  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
708  /// Allows out of range evaluation
709  SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const;
710  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
711  /// Allows out of range evaluation
712  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
713  float fx, float fy, float fz) const;
714  template <int AXIS2D>
715  SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const;
716  template <int AXIS2D>
717  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
718  float fx, float fy, float fz) const;
719 
720  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
721  /// Allows out of range evaluation. Also computes min/max of
722  /// interpolated samples.
723  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
724  UT_Vector3F pos) const;
725  template <int AXIS2D>
726  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
727  UT_Vector3F pos) const;
728  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
729  /// Allows out of range evaluation. Also computes min/max of
730  /// interpolated samples.
731  SYS_FORCE_INLINE void lerpVoxelMinMax(
732  T &lerp, T &lmin, T &lmax,
733  int x, int y, int z,
734  float fx, float fy, float fz) const;
735  template <int AXIS2D>
736  SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(
737  T &lerp, T &lmin, T &lmax,
738  int x, int y, int z,
739  float fx, float fy, float fz) const;
740 
741  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
742  /// array should have 8 elements, x minor, z major.
743  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
744  T *sample) const;
745  template <int AXIS2D>
746  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
747  T *sample) const;
748 
749  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
750  /// the center into 7 voxels.
751  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
752  T *sample) const;
753  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
754  /// z major, xminor.
755  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
756  T *sample) const;
757 
758  /// Lerps the given sample using trilinear interpolation
759  SYS_FORCE_INLINE T lerpSample(T *samples,
760  float fx, float fy, float fz) const;
761  template <int AXIS2D>
762  SYS_FORCE_INLINE T lerpSampleAxis(T *samples,
763  float fx, float fy, float fz) const;
764 
765  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
766  float &fx, float &fy, float &fz) const
767  {
768  // Determine integer & fractional components.
769  fx = pos.x();
770  SYSfastSplitFloat(fx, x);
771  fy = pos.y();
772  SYSfastSplitFloat(fy, y);
773  fz = pos.z();
774  SYSfastSplitFloat(fz, z);
775  }
776  template <int AXIS2D>
777  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
778  float &fx, float &fy, float &fz) const
779  {
780  // Determine integer & fractional components.
781  if (AXIS2D != 0)
782  {
783  fx = pos.x();
784  SYSfastSplitFloat(fx, x);
785  }
786  else
787  {
788  fx = 0.0;
789  x = 0;
790  }
791  if (AXIS2D != 1)
792  {
793  fy = pos.y();
794  SYSfastSplitFloat(fy, y);
795  }
796  else
797  {
798  fy = 0.0;
799  y = 0;
800  }
801  if (AXIS2D != 2)
802  {
803  fz = pos.z();
804  SYSfastSplitFloat(fz, z);
805  }
806  else
807  {
808  fz = 0.0;
809  z = 0;
810  }
811  }
812 #if 0
813  T operator()(v4uf pos) const;
814 #endif
815 
816  /// Filtered evaluation of the voxel array. This operation should
817  /// exhibit the same behavior as IMG3D_Channel::evaluate.
818  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
819  fpreal radius, int clampaxis = -1) const;
820 
821  /// Fills this by resampling the given voxel array.
822  void resample(const UT_VoxelArray<T> &src,
823  UT_FilterType filtertype = UT_FILTER_POINT,
824  float filterwidthscale = 1.0f,
825  int clampaxis = -1);
826 
827  /// Flattens this into an array. Z major, then Y, then X.
828  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
829  THREADED_METHOD3_CONST(UT_VoxelArray<T>, numTiles() > 16,
830  flatten,
831  T *, flatarray,
832  exint, ystride,
833  exint, zstride)
834  void flattenPartial(T *flatarray, exint ystride, exint zstride,
835  const UT_JobInfo &info) const;
836 
837  /// Flattens this into an array suitable for a GL 8bit texture.
838  /// Z major, then Y, then X.
839  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
840  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
841  flattenGLFixed8,
842  uint8 *, flatarray,
843  exint, ystride,
844  exint, zstride,
845  T , dummy)
846  void flattenGLFixed8Partial(uint8 *flatarray,
847  exint ystride, exint zstride,
848  T dummy,
849  const UT_JobInfo &info) const;
850 
851  /// Flattens this into an array suitable for a GL 16bit FP texture.
852  /// Z major, then Y, then X.
853  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
854  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
855  flattenGL16F,
856  UT_Vector4H *, flatarray,
857  exint, ystride,
858  exint, zstride,
859  T , dummy)
860  void flattenGL16FPartial(UT_Vector4H *flatarray,
861  exint ystride, exint zstride,
862  T dummy,
863  const UT_JobInfo &info) const;
864 
865  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
866  /// this also works around an older Nvidia driver bug that caused very small
867  /// valued texels (<1e-9) to appear as huge random values in the texture.
868  /// Z major, then Y, then X.
869  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
870  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
871  flattenGL32F,
872  UT_Vector4F *, flatarray,
873  exint, ystride,
874  exint, zstride,
875  T , dummy)
876  void flattenGL32FPartial(UT_Vector4F *flatarray,
877  exint ystride, exint zstride,
878  T dummy,
879  const UT_JobInfo &info) const;
880 
881  /// Fills this from a flattened array. Z major, then Y, then X.
882  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride]);
883  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
884  extractFromFlattened,
885  const T *, flatarray,
886  exint, ystride,
887  exint, zstride)
888  void extractFromFlattenedPartial(const T *flatarray,
889  exint ystride, exint zstride,
890  const UT_JobInfo &info);
891 
892  /// Copies into this voxel array from the source array.
893  /// Conceptually,
894  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz));
895  void copyWithOffset(const UT_VoxelArray<T> &src,
896  int offx, int offy, int offz);
897  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
898  copyWithOffsetInternal,
899  const UT_VoxelArray<T> &, src,
900  int, offx,
901  int, offy,
902  int, offz)
903  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
904  int offx, int offy, int offz,
905  const UT_JobInfo &info);
906 
907  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
908  /// in T. Data order is in tile-order. So, sorted by tilelist, then
909  /// z, y, x within that tile.
910  template <typename S>
911  S *extractTiles(S *dstdata, int stride,
912  const UT_IntArray &tilelist) const;
913 
914  /// Overwrites our tiles with the given data. Does checking
915  /// for constant tiles. Input srcdata stream should match
916  /// that of extractTiles.
917  template <typename S>
918  const S *writeTiles(const S *srcdata, int srcstride,
919  const UT_IntArray &tilelist);
920 
921  /// Converts a 3d position in range [0..1]^3 into the closest
922  /// index value.
923  /// Returns false if the resulting index was out of range. The index
924  /// will still be set.
925  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
926  /// Converts a 3d position in [0..1]^3 into the equivalent in
927  /// the integer cell space. Does not clamp to the closest value.
928  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
929  /// Converts an index into a position.
930  /// Returns false if the source index was out of range, in which case
931  /// pos will be outside [0..1]^3
932  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
933  bool indexToPos(int x, int y, int z, UT_Vector3D &pos) const;
934  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
935  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
936 
937  /// Clamps the given x, y, and z values to lie inside the valid index
938  /// range.
939  void clampIndex(int &x, int &y, int &z) const
940  {
941  x = SYSclamp(x, 0, myRes[0]-1);
942  y = SYSclamp(y, 0, myRes[1]-1);
943  z = SYSclamp(z, 0, myRes[2]-1);
944  }
945 
946  /// Returns true if the given x, y, z values lie inside the valid index range.
947  bool isValidIndex(int x, int y, int z) const
948  {
949  return !((x | y | z) < 0) &&
950  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
951  }
952 
953  /// This allows you to read & write the raw data.
954  /// Out of bound reads are illegal.
955  T operator()(int x, int y, int z) const
956  {
957  UT_ASSERT_P(isValidIndex(x, y, z));
958  return (*getTile(x >> TILEBITS,
959  y >> TILEBITS,
960  z >> TILEBITS))
961  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
962  }
963  void setValue(int x, int y, int z, T t)
964  {
965  UT_ASSERT_P(isValidIndex(x, y, z));
966  getTile(x >> TILEBITS,
967  y >> TILEBITS,
968  z >> TILEBITS)->setValue(
969  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
970  }
971 
972  /// This will clamp the bounds to fit within the voxel array,
973  /// using the border type to resolve out of range values.
974  T getValue(int x, int y, int z) const
975  {
976  // First handle the most common case.
977  if (isValidIndex(x, y, z))
978  return (*this)(x, y, z);
979 
980  // Verify our voxel array is non-empty.
981  if (!myTiles)
982  return myBorderValue;
983 
984  // We now know we are out of range, adjust appropriately
985  switch (myBorderType)
986  {
987  case UT_VOXELBORDER_CONSTANT:
988  return myBorderValue;
989 
990  case UT_VOXELBORDER_REPEAT:
991  if (x < 0 || x >= myRes[0])
992  {
993  x %= myRes[0];
994  if (x < 0)
995  x += myRes[0];
996  }
997  if (y < 0 || y >= myRes[1])
998  {
999  y %= myRes[1];
1000  if (y < 0)
1001  y += myRes[1];
1002  }
1003  if (z < 0 || z >= myRes[2])
1004  {
1005  z %= myRes[2];
1006  if (z < 0)
1007  z += myRes[2];
1008  }
1009  break;
1010 
1011  case UT_VOXELBORDER_STREAK:
1012  clampIndex(x, y, z);
1013  break;
1014  case UT_VOXELBORDER_EXTRAP:
1015  {
1016  int cx, cy, cz;
1017  T result;
1018 
1019  cx = x; cy = y; cz = z;
1020  clampIndex(cx, cy, cz);
1021 
1022  result = (*this)(cx, cy, cz);
1023  result += (x - cx) * myBorderScale[0] +
1024  (y - cy) * myBorderScale[1] +
1025  (z - cz) * myBorderScale[2];
1026  return result;
1027  }
1028  }
1029 
1030  // It is now within bounds, do normal fetch.
1031  return (*this)(x, y, z);
1032  }
1033 
1034  void setBorder(UT_VoxelBorderType type, T t);
1035  void setBorderScale(T scalex, T scaley, T scalez);
1036  UT_VoxelBorderType getBorder() const { return myBorderType; }
1037  T getBorderValue() const { return myBorderValue; }
1038  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1039 
1040  /// This tries to compress or collapse each tile. This can
1041  /// be expensive (ie, converting a tile to constant), so
1042  /// should be saved until modifications are complete.
1043  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1044  collapseAllTiles)
1045  void collapseAllTilesPartial(const UT_JobInfo &info);
1046 
1047  /// Uncompresses all tiles into non-constant tiles. Useful
1048  /// if you have a multithreaded algorithm that may need to
1049  /// both read and write: if you write to a collapsed tile
1050  /// while someone else reads from it, bad stuff happens.
1051  /// Instead, you can expandAllTiles. This may have serious
1052  /// consequences in memory use, however.
1053  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1054  expandAllTiles)
1055  void expandAllTilesPartial(const UT_JobInfo &info);
1056 
1057  /// Uncompresses all tiles, but leaves constant tiles alone.
1058  /// Useful for cleaning out any non-standard compression algorithm
1059  /// that some external program can't handle.
1060  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1061  expandAllNonConstTiles)
1062  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1063 
1064  /// The direct tile access methods are to make TBF writing a bit
1065  /// more efficient.
1066  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1067  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1068  UT_VoxelTile<T> *getLinearTile(int idx) const
1069  { return &myTiles[idx]; }
1070  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1071  {
1072  x = idx % myTileRes[0];
1073  idx -= x;
1074  idx /= myTileRes[0];
1075  y = idx % myTileRes[1];
1076  idx -= y;
1077  idx /= myTileRes[1];
1078  z = idx;
1079  }
1080  int xyzTileToLinear(int x, int y, int z) const
1081  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1082 
1083  int indexToLinearTile(int x, int y, int z) const
1084  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1085 
1086  /// Number of tiles along that axis. Not to be confused with
1087  /// the resolution of the individual tiles.
1088  int getTileRes(int dim) const { return myTileRes[dim]; }
1089  int numTiles() const
1090  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1091  exint numVoxels() const
1092  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1093 
1094  void setCompressionOptions(const UT_VoxelCompressOptions &options)
1095  { myCompressionOptions = options; }
1096  const UT_VoxelCompressOptions &getCompressionOptions() const
1097  { return myCompressionOptions; }
1098 
1099  void setConstantTolerance(fpreal tol)
1100  { myCompressionOptions.myConstantTol = tol; }
1101  fpreal getConstantTolerance() const
1102  { return myCompressionOptions.myConstantTol; }
1103 
1104  /// Saves only the data of this array to the given stream.
1105  /// To reload it you will have to have a matching array in tile
1106  /// dimensions and size.
1107  void saveData(std::ostream &os) const;
1108  bool saveData(UT_JSONWriter &w,
1109  const char *shared_mem_owner = 0) const;
1110 
1111  /// Load an array, requires you have already size()d this array.
1112  void loadData(UT_IStream &is);
1113  bool loadData(UT_JSONParser &p);
1114 
1115  /// Copy only the data from the source array.
1116  /// Note that it is an error to call this unless isMatching(src).
1118  copyData,
1119  const UT_VoxelArray<T> &, src)
1120 
1121  void copyDataPartial(const UT_VoxelArray<T> &src,
1122  const UT_JobInfo &info);
1123 
1124 private:
1126  resamplethread,
1127  const UT_VoxelArray<T> &, src,
1128  const UT_Filter *, filter,
1129  float, radius,
1130  int, clampaxis)
1131  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1132  const UT_Filter *filter,
1133  float radius,
1134  int clampaxis,
1135  const UT_JobInfo &info);
1136 
1137 
1138  void deleteVoxels();
1139 
1140  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1141  bool populateFromSharedMemory(const char *id);
1142 
1143 
1144  /// Number of elements in each dimension.
1145  int myRes[3];
1146 
1147  /// Inverse tile res, 1/myRes
1148  UT_Vector3 myInvRes;
1149 
1150  /// Number of tiles in each dimension.
1151  int myTileRes[3];
1152 
1153  /// Compression tolerance for lossy compression.
1154  UT_VoxelCompressOptions myCompressionOptions;
1155 
1156  /// Double dereferenced so we can theoretically resize easily.
1157  UT_VoxelTile<T> *myTiles;
1158 
1159  /// Outside values get this if constant borders are used
1160  T myBorderValue;
1161  /// Per axis scale factors for when extrapolating.
1162  T myBorderScale[3];
1163  UT_VoxelBorderType myBorderType;
1164 
1165  /// For initializing the tiles from shared memory.
1166  SYS_SharedMemory *mySharedMem;
1167  SYS_SharedMemoryView *mySharedMemView;
1168 };
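#if 0
// Editor's note: illustrative usage sketch, not part of the original header.
// A minimal UT_VoxelArray workflow: allocate, set a border policy, write and
// read voxels, sample at a normalized position, then recompress. The
// variable names are hypothetical.
static void
exampleVoxelArrayBasics()
{
    UT_VoxelArray<fpreal32> density;

    density.size(64, 64, 64);                       // allocate & zero 64^3 voxels
    density.setBorder(UT_VOXELBORDER_CONSTANT, 0);   // out-of-range reads return 0

    density.setValue(10, 20, 30, 1.0f);              // raw in-range write
    fpreal32 v = density.getValue(10, 20, 30);       // border-aware read

    // Trilinear sample; pos is in [0..1]^3 and voxels are sampled at centres.
    fpreal32 s = density(UT_Vector3F(0.5f, 0.5f, 0.5f));

    density.setValue(0, 0, 0, v + s);

    // Collapse/compress tiles once modifications are complete.
    density.collapseAllTiles();
}
#endif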
1169 
1170 
1171 ///
1172 /// UT_VoxelMipMap
1173 ///
1174 /// This provides a mip-map type structure for a voxel array.
1175 /// It manages the different levels of voxels arrays that are needed.
1176 /// You can create different types of mip maps: average, maximum, etc,
1177 /// which can allow different tricks.
1178 /// Each level is one half the previous level, rounded up.
1179 /// Out of bound voxels are ignored from the lower levels.
1180 ///
1181 template <typename T>
1182 class UT_VoxelMipMap
1183 {
1184 public:
1185  /// The different types of functions that can be used for
1186  /// constructing a mip map.
1187  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1188 
1189  UT_VoxelMipMap();
1190  virtual ~UT_VoxelMipMap();
1191 
1192  /// Copy constructor.
1193  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1194 
1195  /// Assignment operator:
1196  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1197 
1198  /// Builds from a given voxel array. The ownership flag determines
1199  /// if we gain ownership of the voxel array and should delete it.
1200  /// In any case, the new levels are owned by us.
1201  void build(UT_VoxelArray<T> *baselevel,
1202  mipmaptype function);
1203 
1204  /// Same as above but construct mipmaps simultaneously for more than
1205  /// one function. The order of the functions will correspond to the
1206  /// order of the data values passed to the traversal callback.
1207  void build(UT_VoxelArray<T> *baselevel,
1208  const UT_Array<mipmaptype> &functions);
1209 
1210  /// This does a top down traversal of the implicit octree defined
1211  /// by the voxel array. Returning false will abort that
1212  /// branch of the octree.
1213  /// The bounding box given is in cell space and is an exclusive
1214  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1215  /// Note that each bounding box will not be square, unless you
1216  /// have the good fortune of starting with a power of 2 cube.
1217  /// The boolean goes true when the callback is invoked on a
1218  /// base level.
1219  typedef bool (*Callback)(const T *funcs,
1220  const UT_BoundingBox &box,
1221  bool baselevel, void *data);
1222  void traverseTopDown(Callback function,
1223  void *data) const;
1224 
1225  /// Top down traversal on op. OP is invoked with
1226  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1227  ///
1228  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1229  /// level 0 means the base level.
1230  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1231  /// gives the index to extract the value from that level.
1232  template <typename OP>
1233  void traverseTopDown(OP&op) const;
1234 
1235 
1236  /// Top down traversal, but which quad tree is visited first
1237  /// is controlled by
1238  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1239  /// Lower values are visited first.
1240  template <typename OP>
1241  void traverseTopDownSorted(OP&op) const;
1242 
1243 
1244  /// Return the amount of memory used by this mipmap.
1245  int64 getMemoryUsage(bool inclusive) const;
1246 
1247  int numLevels() const { return myNumLevels+1; }
1248 
1249  /// level 0 is the original grid, each level higher is a power
1250  /// of two smaller.
1251  const UT_VoxelArray<T> *level(int level, int function) const
1252  {
1253  if (level == 0)
1254  return myBaseLevel;
1255 
1256  return myLevels(function)[numLevels() - 1 - level];
1257  }
1258 
1259 private:
1260  void doTraverse(int x, int y, int z, int level,
1261  Callback function,
1262  void *data) const;
1263 
1264  /// Note: This variant of doTraverse has the opposite sense of level!
1265  template <typename OP>
1266  void doTraverse(int x, int y, int z, int level,
1267  OP &op) const;
1268  template <typename OP>
1269  void doTraverseSorted(int x, int y, int z, int level,
1270  OP &op) const;
1271 
1272  void initializePrivate();
1273  void destroyPrivate();
1274 
1275  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1276  downsample,
1277  UT_VoxelArray<T> &, dst,
1278  const UT_VoxelArray<T> &, src,
1279  mipmaptype, function)
1280  void downsamplePartial(UT_VoxelArray<T> &dst,
1281  const UT_VoxelArray<T> &src,
1282  mipmaptype function,
1283  const UT_JobInfo &info);
1284 
1285 protected:
1286  T mixValues(T t1, T t2, mipmaptype function) const
1287  {
1288  switch (function)
1289  {
1290  case MIPMAP_MAXIMUM:
1291  return SYSmax(t1, t2);
1292 
1293  case MIPMAP_AVERAGE:
1294  return (t1 + t2) / 2;
1295 
1296  case MIPMAP_MINIMUM:
1297  return SYSmin(t1, t2);
1298  }
1299 
1300  return t1;
1301  }
1302 
1303 
1304  /// This stores the base most level that was provided
1305  /// externally.
1306  UT_VoxelArray<T> *myBaseLevel;
1307  /// If true, we will delete the base level when we are done.
1308  bool myOwnBase;
1309 
1310  /// Tracks the number of levels which we used to represent
1311  /// this hierarchy.
1312  int myNumLevels;
1313  /// The array of VoxelArrays, one per level.
1314  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1315  /// as big in each dimension. However, every layer is clamped
1316  /// against the resolution of the base layer.
1317  /// We own all these layers.
1318  UT_ValArray<UT_VoxelArray<T> **> myLevels;
1319 };
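#if 0
// Editor's note: illustrative sketch, not part of the original header.
// A functor of the shape expected by the templated traverseTopDown() above:
// it is called with a half-inclusive index box and a level (0 = base level),
// and returning false prunes that branch of the implicit octree.
struct ExampleCountBaseCells
{
    exint myCount = 0;

    bool operator()(const UT_BoundingBoxI &indexbox, int level)
    {
        if (level == 0)       // reached the base level: count and stop descending
        {
            myCount++;
            return false;
        }
        return true;          // keep descending through coarser levels
    }
};

static void
exampleMipMapTraversal(const UT_VoxelMipMap<fpreal32> &mip)
{
    ExampleCountBaseCells op;
    mip.traverseTopDown(op);
}
#endif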
1320 
1321 
1322 /// Iterator for Voxel Arrays
1323 ///
1324 /// This class eliminates the need for having
1325 /// for (z = 0; z < zres; z++)
1326 /// ...
1327 /// for (x = 0; x < xres; x++)
1328 /// loops everywhere.
1329 ///
1330 /// Note that the order of iteration is undefined! (The actual order is
1331 /// to complete each tile in turn, thereby hopefully improving cache
1332 /// coherency)
1333 ///
1334 /// It is safe to write to the voxel array while this iterator is active.
1335 /// It is *not* safe to resize the voxel array (or destroy it)
1336 ///
1337  /// The iterator is similar in principle to an STL iterator, but somewhat
1338 /// simpler. The classic STL loop
1339 /// for ( it = begin(); it != end(); ++it )
1340 /// is done using
1341 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1342 ///
1343 template <typename T>
1344 class UT_VoxelArrayIterator
1345 {
1346 public:
1347  UT_VoxelArrayIterator();
1348  UT_VoxelArrayIterator(UT_VoxelArray<T> *vox);
1349  UT_VoxelArrayIterator(UT_COWReadHandle<UT_VoxelArray<T> > handle);
1350  virtual ~UT_VoxelArrayIterator();
1351 
1352  void setArray(UT_VoxelArray<T> *vox)
1353  {
1354  myCurTile = -1;
1355  myHandle.resetHandle();
1356  myArray = vox;
1357  // Reset the range
1358  setPartialRange(0, 1);
1359  }
1360  void setConstArray(const UT_VoxelArray<T> *vox)
1361  {
1362  setArray((UT_VoxelArray<T> *) vox);
1363  }
1364 
1365  /// Iterates over the array pointed to by the handle. Only
1366  /// supports read access during the iteration as it does
1367  /// a read lock.
1368  void setHandle(UT_COWReadHandle<UT_VoxelArray<T> > handle)
1369  {
1370  myHandle = handle;
1371  // Ideally we'd have a separate const iterator
1372  // from our non-const iterator so this would
1373  // only be exposed in the const version.
1374  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1375 
1376  // Reset our range.
1377  myCurTile = -1;
1378  setPartialRange(0, 1);
1379  }
1380 
1381 
1382  /// Restricts this iterator to only run over a subset
1383  /// of the tiles. The tiles will be divided into approximately
1384  /// numrange equal groups, this will be the idx'th.
1385  /// The resulting iterator may have zero tiles.
1386  void setPartialRange(int idx, int numranges);
1387 
1388  /// Ties this iterator to the given jobinfo so it will
1389  /// match the jobinfo's processing.
1390  void splitByTile(const UT_JobInfo &info);
1391 
1392  /// Assigns an interrupt handler. This will be tested whenever
1393  /// it advances to a new tile. If it is interrupted, the iterator
1394  /// will jump forward to atEnd()
1395  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1396  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1397 
1398  /// Restricts this iterator to the tiles that intersect
1399  /// the given bounding box of voxel coordinates.
1400  /// Note that this will not be a precise restriction as
1401  /// each tile is either included or not.
1402  /// You should setPartialRange() after setting the bbox range
1403  /// The bounding box is on the [0..1]^3 range.
1404  void restrictToBBox(const UT_BoundingBox &bbox);
1405  /// The [xmin, xmax] are inclusive and measured in voxels.
1406  void restrictToBBox(int xmin, int xmax,
1407  int ymin, int ymax,
1408  int zmin, int zmax);
1409 
1410  /// Resets the iterator to point to the first voxel.
1411  void rewind();
1412 
1413  /// Returns true if we have iterated over all of the voxels.
1414  bool atEnd() const
1415  { return myCurTile < 0; }
1416 
1417  /// Advances the iterator to point to the next voxel.
1418  void advance()
1419  {
1420  // We try to advance each axis, rolling over to the next.
1421  // If we exhaust this tile, we call advanceTile.
1422  myPos[0]++;
1423  myTileLocalPos[0]++;
1424  if (myTileLocalPos[0] >= myTileSize[0])
1425  {
1426  // Wrapped in X.
1427  myPos[0] -= myTileLocalPos[0];
1428  myTileLocalPos[0] = 0;
1429 
1430  myPos[1]++;
1431  myTileLocalPos[1]++;
1432  if (myTileLocalPos[1] >= myTileSize[1])
1433  {
1434  // Wrapped in Y.
1435  myPos[1] -= myTileLocalPos[1];
1436  myTileLocalPos[1] = 0;
1437 
1438  myPos[2]++;
1439  myTileLocalPos[2]++;
1440  if (myTileLocalPos[2] >= myTileSize[2])
1441  {
1442  // Wrapped in Z! Finished this tile!
1443  advanceTile();
1444  }
1445  }
1446  }
1447  }
1448 
1449  /// Retrieve the current location of the iterator.
1450  int x() const { return myPos[0]; }
1451  int y() const { return myPos[1]; }
1452  int z() const { return myPos[2]; }
1453  int idx(int idx) const { return myPos[idx]; }
1454 
1455  /// Retrieves the value that we are currently pointing at.
1456  /// This is faster than an operator(x,y,z) as we already know
1457  /// our current tile and that bounds checking isn't needed.
1458  T getValue() const
1459  {
1460  UT_ASSERT_P(myCurTile >= 0);
1461 
1462  UT_VoxelTile<T> *tile;
1463 
1464  tile = myArray->getLinearTile(myCurTile);
1465  return (*tile)(myTileLocalPos[0],
1466  myTileLocalPos[1],
1467  myTileLocalPos[2]);
1468  }
1469 
1470  /// Sets the voxel we are currently pointing to the given value.
1471  void setValue(T t) const
1472  {
1473  UT_ASSERT_P(myCurTile >= 0);
1474 
1475  UT_VoxelTile<T> *tile;
1476 
1477  tile = myArray->getLinearTile(myCurTile);
1478 
1479  tile->setValue(myTileLocalPos[0],
1480  myTileLocalPos[1],
1481  myTileLocalPos[2], t);
1482  }
1483 
1484  /// Returns true if the tile we are currently in is a constant tile.
1485  bool isTileConstant() const
1486  {
1487  UT_ASSERT_P(myCurTile >= 0);
1488 
1489  UT_VoxelTile<T> *tile;
1490 
1491  tile = myArray->getLinearTile(myCurTile);
1492  return tile->isConstant();
1493  }
1494 
1495  /// This tile will iterate over the voxels indexed [start,end).
1496  void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
1497  {
1498  start.x() = myTilePos[0] * TILESIZE;
1499  start.y() = myTilePos[1] * TILESIZE;
1500  start.z() = myTilePos[2] * TILESIZE;
1501  end = start;
1502  end.x() += myTileSize[0];
1503  end.y() += myTileSize[1];
1504  end.z() += myTileSize[2];
1505  }
1506 
1507  /// This tile will iterate over the *inclusive* voxels indexed
1508  /// in the returned bounding box.
1509  UT_BoundingBoxI getTileBBox() const
1510  {
1511  UT_Vector3I start, end;
1512  getTileVoxels(start, end);
1513  return UT_BoundingBoxI(start, end);
1514  }
1515 
1516  /// Returns true if we are at the start of a new tile.
1517  bool isStartOfTile() const
1518  { return !(myTileLocalPos[0] ||
1519  myTileLocalPos[1] ||
1520  myTileLocalPos[2]); }
1521 
1522  /// Returns the VoxelTile we are currently processing
1523  UT_VoxelTile<T> *getTile() const
1524  {
1525  UT_ASSERT_P(myCurTile >= 0);
1526  return myArray->getLinearTile(myCurTile);
1527  }
1528  int getLinearTileNum() const
1529  {
1530  return myCurTile;
1531  }
1532 
1533  /// Advances the iterator to point to the next tile. Useful if the
1534  /// constant test showed that you didn't need to deal with this one.
1535  void advanceTile();
1536 
1537  /// Advances the iterator to pointing just before the next tile so
1538  /// the next advance() will be an advanceTile(). This is useful
1539  /// if you want to do a continue; as your break, but the for loop
1540  /// is doing advance()
1541  /// Note the iterator is in a bad state until advance() is called.
1542  void skipToEndOfTile();
1543 
1544  /// Sets a flag which causes the iterator to tryCompress()
1545  /// tiles when it is done with them.
1546  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1547  void setCompressOnExit(bool shouldcompress)
1548  { myShouldCompressOnExit = shouldcompress; }
1549 
1550  /// These templated algorithms are designed to apply simple operations
1551  /// across all of the voxels with as little overhead as possible.
1552  /// The iterator should already point to a voxel array and, if multithreaded,
1553  /// have had its partial range set. The source arrays must be of matching size.
1554  /// The operator should support a () operator, and the result is
1555  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
1556  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1557  /// Note if both source and destination tiles are constant, only
1558  /// a single operation is invoked.
1559  template <typename OP>
1560  void applyOperation(OP &op);
1561  template <typename OP, typename S>
1562  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1563  template <typename OP>
1564  void applyOperation(OP &op, T a);
1565  template <typename OP, typename S, typename R>
1566  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1567  const UT_VoxelArray<R> &b);
1568  template <typename OP, typename S, typename R, typename Q>
1569  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1570  const UT_VoxelArray<R> &b,
1571  const UT_VoxelArray<Q> &c);
1572  /// These variants will invoke op.isNoop(a, b, ...) which will return
1573  /// true if those values won't affect the destination. This allows
1574  /// constant source tiles to be skipped, for example when adding
1575  /// 0.
1576  template <typename OP, typename S>
1577  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1578  template <typename OP>
1579  void applyOperationCheckNoop(OP &op, T a);
1580 
1581  /// Assign operation works like apply operation, but *this is written
1582  /// to without reading, so there is one less parameter to the ()
1583  /// callback. This can optimize constant tile writes as the
1584  /// constant() status of the destination doesn't matter.
1585  template <typename OP, typename S>
1586  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1587  template <typename OP, typename S, typename R>
1588  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1589  const UT_VoxelArray<R> &b);
1590  template <typename OP, typename S, typename R, typename Q>
1591  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1592  const UT_VoxelArray<R> &b,
1593  const UT_VoxelArray<Q> &c);
1594 
1595  /// Reduction operators.
1596  /// op.reduce(T a) called for each voxel, *but*,
1597  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1598  template <typename OP>
1599  void reduceOperation(OP &op);
1600 
1601  UT_VoxelArray<T> *getArray() const { return myArray; }
1602 
1603 protected:
1604  /// The array we belong to.
1605  UT_VoxelArray<T> *myArray;
1606  /// The handle that we have locked to get our array. It is null
1607  /// by default which makes the lock/unlock nops.
1608  UT_COWReadHandle<UT_VoxelArray<T> > myHandle;
1609 
1610  /// Absolute index into voxel array.
1611  int myPos[3];
1612 
1613  /// Flag determining if we should compress tiles whenever we
1614  /// advance out of them.
1615  bool myShouldCompressOnExit;
1616 
1619 
1620 public:
1621  /// Our current linear tile idx. A value of -1 implies at end.
1622  int myCurTile;
1623 
1624  /// Our current index into the tile list
1626 
1627  /// Our start & end tiles for processing a subrange.
1628  /// The tile range is half open [start, end)
1629  int myTileStart, myTileEnd;
1630 
1631  /// Which tile we are as per tx,ty,tz rather than linear index.
1632  int myTilePos[3];
1633 
1634  /// Our position within the current tile.
1635  int myTileLocalPos[3];
1636 
1637  /// The size of the current tile
1638  int myTileSize[3];
1639 
1640  /// The job info to use for tilefetching
1642 
1644 };
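#if 0
// Editor's note: illustrative usage sketch, not part of the original header.
// The canonical iterator loop described above, skipping constant zero tiles
// and recompressing tiles on exit. 'vol' is hypothetical.
static void
exampleScaleAllVoxels(UT_VoxelArray<fpreal32> &vol)
{
    UT_VoxelArrayIterator<fpreal32> vit;

    vit.setArray(&vol);
    vit.setCompressOnExit(true);          // tryCompress() tiles as we leave them
    for (vit.rewind(); !vit.atEnd(); vit.advance())
    {
        if (vit.isStartOfTile() && vit.isTileConstant() && vit.getValue() == 0)
        {
            vit.skipToEndOfTile();        // constant zero tile: 2*0 stays 0
            continue;
        }
        vit.setValue(vit.getValue() * 2.0f);
    }
}
#endif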
1645 
1646 /// Iterator for tiles inside Voxel Arrays
1647 ///
1648 /// This class eliminates the need for having
1649 /// for (z = 0; z < zres; z++)
1650 /// ...
1651 /// for (x = 0; x < xres; x++)
1652 /// loops everywhere.
1653 ///
1654  /// The iterator is similar in principle to an STL iterator, but somewhat
1655 /// simpler. The classic STL loop
1656 /// for ( it = begin(); it != end(); ++it )
1657 /// is done using
1658 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1659 ///
1660 template <typename T>
1661 class UT_VoxelTileIterator
1662 {
1663 public:
1666  template <typename S>
1667  UT_VoxelTileIterator(const UT_VoxelArrayIterator<S> &vit,
1668  UT_VoxelArray<T> *array);
1669  virtual ~UT_VoxelTileIterator();
1670 
1671  template <typename S>
1672  void setTile(const UT_VoxelArrayIterator<S> &vit,
1673  UT_VoxelArray<T> *array)
1674  {
1675  UT_ASSERT_P(vit.isStartOfTile());
1676  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1677  myArray = array;
1678  myTileStart[0] = vit.x();
1679  myTileStart[1] = vit.y();
1680  myTileStart[2] = vit.z();
1681  }
1682 
1683  void setTile(const UT_VoxelArrayIterator<T> &vit)
1684  {
1685  setTile(vit, vit.getArray());
1686  }
1687 
1688  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1689  {
1690  myCurTile = array->getLinearTile(lineartilenum);
1691  myArray = array;
1692 
1693  array->linearTileToXYZ(lineartilenum,
1694  myTileStart[0], myTileStart[1], myTileStart[2]);
1695  myTileStart[0] <<= TILEBITS;
1696  myTileStart[1] <<= TILEBITS;
1697  myTileStart[2] <<= TILEBITS;
1698  }
1699 
1700  /// Resets the iterator to point to the first voxel.
1701  void rewind();
1702 
1703  /// Returns true if we have iterated over all of the voxels.
1704  bool atEnd() const
1705  { return myCurTile == 0 || myAtEnd; }
1706 
1707  /// Advances the iterator to point to the next voxel.
1708  void advance()
1709  {
1710  // We try to advance each axis, rolling over to the next.
1711  // If we exhaust this tile, we call advanceTile.
1712  myPos[0]++;
1713  myTileLocalPos[0]++;
1714  if (myTileLocalPos[0] >= myTileSize[0])
1715  {
1716  // Wrapped in X.
1717  myPos[0] -= myTileLocalPos[0];
1718  myTileLocalPos[0] = 0;
1719 
1720  myPos[1]++;
1721  myTileLocalPos[1]++;
1722  if (myTileLocalPos[1] >= myTileSize[1])
1723  {
1724  // Wrapped in Y.
1725  myPos[1] -= myTileLocalPos[1];
1726  myTileLocalPos[1] = 0;
1727 
1728  myPos[2]++;
1729  myTileLocalPos[2]++;
1730  if (myTileLocalPos[2] >= myTileSize[2])
1731  {
1732  // Wrapped in Z! Finished this tile!
1733  advanceTile();
1734  }
1735  }
1736  }
1737  }
1738 
1739  /// Retrieve the current location of the iterator, in the
1740  /// containing voxel array, not in the tile.
1741  int x() const { return myPos[0]; }
1742  int y() const { return myPos[1]; }
1743  int z() const { return myPos[2]; }
1744  int idx(int idx) const { return myPos[idx]; }
1745 
1746  /// Retrieves the value that we are currently pointing at.
1747  /// This is faster than an operator(x,y,z) as we already know
1748  /// our current tile and that bounds checking isn't needed.
1749  T getValue() const
1750  {
1751  UT_ASSERT_P(myCurTile);
1752 
1753  return (*myCurTile)(myTileLocalPos[0],
1754  myTileLocalPos[1],
1755  myTileLocalPos[2]);
1756  }
1757 
1758  /// Sets the voxel we are currently pointing to the given value.
1759  void setValue(T t) const
1760  {
1761  UT_ASSERT_P(myCurTile);
1762 
1763  myCurTile->setValue(myTileLocalPos[0],
1764  myTileLocalPos[1],
1765  myTileLocalPos[2], t);
1766  }
1767 
1768  /// Returns true if the tile we are currently in is a constant tile.
1769  bool isTileConstant() const
1770  {
1771  UT_ASSERT_P(myCurTile);
1772 
1773  return myCurTile->isConstant();
1774  }
1775 
1776  /// Returns true if we are at the start of a new tile.
1777  bool isStartOfTile() const
1778  { return !(myTileLocalPos[0] ||
1779  myTileLocalPos[1] ||
1780  myTileLocalPos[2]); }
1781 
1782  /// Returns the VoxelTile we are currently processing
1783  UT_VoxelTile<T> *getTile() const
1784  {
1785  return myCurTile;
1786  }
1787 
1788  /// Advances the iterator to point to the next tile. Since
1789  /// we are restricted to one tile, effectively just ends the iterator.
1790  void advanceTile();
1791 
1792  /// Sets a flag which causes the iterator to tryCompress()
1793  /// tiles when it is done with them.
1794  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1795  void setCompressOnExit(bool shouldcompress)
1796  { myShouldCompressOnExit = shouldcompress; }
1797 
1798  /// Reduction operators.
1799  /// op.reduce(T a) called for each voxel, *but*,
1800  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1801  /// Early exits if op.reduce() returns false.
1802  template <typename OP>
1803  bool reduceOperation(OP &op);
1804 
1805 protected:
1806  /// Current processing tile
1807  UT_VoxelTile<T> *myCurTile;
1808  UT_VoxelArray<T> *myArray;
1809 
1810  /// Absolute index into voxel array.
1811  int myPos[3];
1812  /// Absolute index of start of tile
1813  int myTileStart[3];
1814 
1815  /// Flag determining if we should compress tiles whenever we
1816  /// advance out of them.
1817  bool myShouldCompressOnExit;
1818 
1819  /// Since we want to allow multiple passes, we can't
1820  /// clear out myCurTile when we hit the end.
1821  bool myAtEnd;
1822 
1823 public:
1824  /// Our position within the current tile.
1825  int myTileLocalPos[3];
1826 
1827  /// The size of the current tile
1828  int myTileSize[3];
1829 };
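Example (a minimal sketch, not part of this header): a reduction functor of the shape reduceOperation() expects, summing a UT_VoxelArrayF. The names SumOp, sumDensityExample and density are illustrative only.

    // Example sketch only; not part of UT_VoxelArray.h.
    struct SumOp
    {
        fpreal64 mySum = 0;
        // Called once per voxel of a non-constant tile.
        bool reduce(fpreal32 v) { mySum += v; return true; }
        // Called once per constant block of n voxels.
        bool reduceMany(fpreal32 v, int n) { mySum += fpreal64(v) * n; return true; }
    };

    static fpreal64
    sumDensityExample(UT_VoxelArrayF &density)
    {
        SumOp op;
        UT_VoxelTileIteratorF vit;
        for (int i = 0; i < density.numTiles(); i++)
        {
            vit.setLinearTile(i, &density);
            vit.rewind();
            vit.reduceOperation(op);
        }
        return op.mySum;
    }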
1830 
1831 /// Probe for Voxel Arrays
1832 ///
1833 /// This class is designed to allow for efficient evaluation
1834 /// of aligned indices of a voxel array, provided the voxels are iterated
1835  /// in a tile-by-tile, x-innermost manner.
1836 ///
1837 /// This class will create a local copy of the voxel data where needed,
1838 /// uncompressing the information once for every 16 queries. It will
1839 /// also create an aligned buffer so you can safely use v4uf on fpreal32
1840 /// data.
1841 ///
1842 /// For queries where you need surrounding values, the prex and postx can
1843  /// specify padding on the probe. prex should be -1 to allow reading at a
1844  /// -1 offset, and postx should be 1 to allow reading at a +1 offset.
1845 ///
1846 
1847 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1848 class UT_VoxelProbe
1849 {
1850 public:
1851  UT_VoxelProbe();
1852  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1853  virtual ~UT_VoxelProbe();
1854 
1855  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1856  void setConstArray(const UT_VoxelArray<T> *vox,
1857  int prex = 0, int postx = 0)
1858  {
1859  SYS_STATIC_ASSERT(DoWrite == false);
1860  setArray((UT_VoxelArray<T> *)vox, prex, postx);
1861  }
1862 
1863  UT_VoxelArray<T> *getArray() const { return myArray; }
1864 
1865  bool isValid() const { return myArray != 0; }
1866 
1867  inline T getValue() const
1868  {
1869  return *myCurLine;
1870  }
1871  inline T getValue(int offset) const
1872  {
1873  return myCurLine[myStride*offset];
1874  }
1875 
1876  inline void setValue(T value)
1877  {
1878  UT_ASSERT_P(DoWrite);
1879  *myCurLine = value;
1880  if (TestForWrites)
1881  myDirty = true;
1882  }
1883 
1884 
1885  /// Resets where we currently point to.
1886  /// Returns true if we had to reset our cache line. If we didn't,
1887  /// and you have multiple probes acting in-step, you can just
1888  /// advanceX() the other probes
1889  template <typename S>
1890  bool setIndex(UT_VoxelArrayIterator<S> &vit)
1891  { return setIndex(vit.x(), vit.y(), vit.z()); }
1892  template <typename S>
1893  bool setIndex(UT_VoxelTileIterator<S> &vit)
1894  { return setIndex(vit.x(), vit.y(), vit.z()); }
1895 
1896  bool setIndex(int x, int y, int z);
1897 
1898  /// Blindly advances our current pointer.
1899  inline void advanceX()
1900  {
1901  myCurLine += myStride;
1902  myX++;
1903  UT_ASSERT_P(myX < myMaxValidX);
1904  }
1905 
1906  /// Adjusts our current pointer to the given absolute location,
1907  /// assumes the new value is inside our valid range.
1908  inline void resetX(int x)
1909  {
1910  myCurLine += myStride * (x - myX);
1911  myX = x;
1912  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
1913  }
1914 
1915 protected:
1916  void reloadCache(int x, int y, int z);
1917 
1918  void writeCacheLine();
1919 
1920  void buildConstantCache(T value);
1921 
1922  T *myCurLine;
1923  /// myCacheLine[0] is the start of the cache line, so -1 would be
1924  /// the first pre-rolled value
1925  T *myCacheLine;
1926  /// Where we actually allocated our cache line, aligned to a 4x multiple
1927  /// to ensure it is SSE compatible.
1929 
1930  int myX, myY, myZ;
1931  int myPreX, myPostX;
1934  /// Half inclusive [,) range of valid x queries for current cache.
1935  int myMinValidX, myMaxValidX;
1936 
1937  /// Determines if we have anything to write back, only
1938  /// valid if TestForWrites is enabled.
1939  bool myDirty;
1940 
1941  UT_VoxelArray<T> *myArray;
1942 
1943  friend class UT_VoxelProbeCube<T>;
1944  friend class UT_VoxelProbeFace<T>;
1945 };
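A usage sketch (not part of this header): a read probe padded one voxel in x feeding a write probe, driven by an array iterator so access stays tile-by-tile and x-innermost. src and dst are placeholder arrays assumed to share the same resolution.

    // Example sketch only; not part of UT_VoxelArray.h.
    static void
    boxBlurXExample(const UT_VoxelArrayF &src, UT_VoxelArrayF &dst)
    {
        UT_VoxelProbeF readprobe;               // read-only probe
        readprobe.setConstArray(&src, -1, 1);   // prex/postx allow getValue(-1)/getValue(1)
        UT_VoxelWOProbeF writeprobe(&dst);      // write-only probe

        UT_VoxelArrayIteratorF vit;
        vit.setArray(&dst);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            readprobe.setIndex(vit);
            writeprobe.setIndex(vit);
            writeprobe.setValue((readprobe.getValue(-1)
                                 + readprobe.getValue()
                                 + readprobe.getValue(1)) / 3.0f);
        }
    }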
1946 
1947 ///
1948 /// The vector probe is three normal probes into separate voxel arrays
1949 /// making it easier to read and write to aligned vector fields.
1950 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
1951 ///
1952 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1953  class UT_VoxelVectorProbe
1954  {
1955 public:
1956  UT_VoxelVectorProbe()
1957  { }
1958  UT_VoxelVectorProbe(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
1959  { setArray(vx, vy, vz); }
1960  virtual ~UT_VoxelVectorProbe()
1961  {}
1962 
1963  void setArray(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
1964  {
1965  myLines[0].setArray(vx);
1966  myLines[1].setArray(vy);
1967  myLines[2].setArray(vz);
1968  }
1969  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
1970  {
1971  SYS_STATIC_ASSERT(DoWrite == false);
1972  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
1973  }
1974 
1975  inline UT_Vector3 getValue() const
1976  {
1977  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
1978  }
1979  inline T getValue(int axis) const
1980  {
1981  return myLines[axis].getValue();
1982  }
1983 
1984  inline void setValue(const UT_Vector3 &v)
1985  {
1986  myLines[0].setValue(v.x());
1987  myLines[1].setValue(v.y());
1988  myLines[2].setValue(v.z());
1989  }
1990 
1991  inline void setComponent(int axis, T val)
1992  {
1993  myLines[axis].setValue(val);
1994  }
1995 
1996  /// Resets where we currently point to.
1997  /// Returns true if we had to reset our cache line. If we didn't,
1998  /// and you have multiple probes acting in-step, you can just
1999  /// advanceX() the other probes
2000  template <typename S>
2001  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2002  { return setIndex(vit.x(), vit.y(), vit.z()); }
2003  template <typename S>
2004  bool setIndex(UT_VoxelTileIterator<S> &vit)
2005  { return setIndex(vit.x(), vit.y(), vit.z()); }
2006 
2007  bool setIndex(int x, int y, int z)
2008  {
2009  if (myLines[0].setIndex(x, y, z))
2010  {
2011  myLines[1].setIndex(x, y, z);
2012  myLines[2].setIndex(x, y, z);
2013  return true;
2014  }
2015  myLines[1].advanceX();
2016  myLines[2].advanceX();
2017  return false;
2018  }
2019 
2020  void advanceX()
2021  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2022 
2023 protected:
2024  UT_VoxelProbe<T, DoRead, DoWrite, TestForWrites> myLines[3];
2025  };
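For instance (a sketch, not part of this header), a read-only vector probe can scan three aligned component arrays for the peak speed; velx, vely and velz are placeholder names.

    // Example sketch only; not part of UT_VoxelArray.h.
    static fpreal
    maxSpeedExample(const UT_VoxelArrayF &velx,
                    const UT_VoxelArrayF &vely,
                    const UT_VoxelArrayF &velz)
    {
        UT_VoxelVectorProbeF vel;
        vel.setConstArray(&velx, &vely, &velz);

        fpreal maxspeed = 0;
        UT_VoxelArrayIteratorF vit;
        vit.setConstArray(&velx);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            vel.setIndex(vit);
            fpreal speed = vel.getValue().length();
            if (speed > maxspeed)
                maxspeed = speed;
        }
        return maxspeed;
    }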
2026 
2027 template <typename T>
2028 class
2029  UT_VoxelProbeCube
2030  {
2031 public:
2032  UT_VoxelProbeCube();
2033  virtual ~UT_VoxelProbeCube();
2034 
2035  void setCubeArray(const UT_VoxelArray<T> *vox);
2036  void setPlusArray(const UT_VoxelArray<T> *vox);
2037 
2038  /// Allows you to query +/-1 in each direction. In cube update,
2039  /// all are valid. In plus update, only one of x, y, and z may be
2040  /// non-zero.
2041  inline T getValue(int x, int y, int z) const
2042  { return myLines[y+1][z+1].getValue(x); }
2043 
2044  template <typename S>
2045  bool setIndexCube(UT_VoxelArrayIterator<S> &vit)
2046  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2047  template <typename S>
2048  bool setIndexCube(UT_VoxelTileIterator<S> &vit)
2049  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2050  bool setIndexCube(int x, int y, int z);
2051 
2052  template <typename S>
2053  bool setIndexPlus(UT_VoxelArrayIterator<S> &vit)
2054  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2055  template <typename S>
2056  bool setIndexPlus(UT_VoxelTileIterator<S> &vit)
2057  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2058  bool setIndexPlus(int x, int y, int z);
2059 
2060  /// Computes central difference gradient, does not scale
2061  /// by the step size (which is twice voxelsize)
2062  /// Requires PlusArray
2063  UT_Vector3 gradient() const
2064  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2065  getValue(0,1,0) - getValue(0,-1,0),
2066  getValue(0,0,1) - getValue(0,0,-1)); }
2067 
2068  /// Computes the central difference curvature using the given
2069  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2070  /// Requires CubeArray.
2071  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2072 
2073  /// Computes the laplacian, again with a given 1/voxelsize.
2074  /// Requires PlusArray
2075  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2076 
2077 protected:
2078  /// Does a rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2079  /// so further queries with y+1 will be cache hits for 2 out of 3.
2081 
2082  UT_VoxelProbe<T, true, false, false> myLines[3][3];
2083  /// Cached look up position. myValid stores if they are
2084  /// valid values or not
2085  bool myValid;
2086  int myX, myY, myZ;
2087  /// Half inclusive [,) range of valid x queries for current cache.
2088  int myMinValidX, myMaxValidX;
2089 };
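A sketch (not part of this header): the plus update is enough for gradients and Laplacians. gradient() is unscaled, so it is divided by twice the voxel size here; density and voxelsize are placeholder names.

    // Example sketch only; not part of UT_VoxelArray.h.
    static void
    gradientExample(const UT_VoxelArrayF &density, const UT_Vector3 &voxelsize)
    {
        UT_VoxelProbeCubeF probe;
        probe.setPlusArray(&density);

        UT_Vector3 invvoxel(1.0f / voxelsize.x(),
                            1.0f / voxelsize.y(),
                            1.0f / voxelsize.z());

        UT_VoxelArrayIteratorF vit;
        vit.setConstArray(&density);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            probe.setIndexPlus(vit);

            // Central differences over a step of 2 * voxelsize.
            UT_Vector3 grad = probe.gradient();
            grad.x() *= 0.5f * invvoxel.x();
            grad.y() *= 0.5f * invvoxel.y();
            grad.z() *= 0.5f * invvoxel.z();

            fpreal64 lap = probe.laplacian(invvoxel);
            // ... consume grad and lap ...
        }
    }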
2090 
2091 ///
2092 /// UT_VoxelProbeConstant
2093 ///
2094 /// Looks like a voxel probe but only returns a constant value.
2095 ///
2096 template <typename T>
2097 class
2098  UT_VoxelProbeConstant
2099  {
2100 public:
2101  UT_VoxelProbeConstant() {}
2102  virtual ~UT_VoxelProbeConstant() {}
2103 
2104  template <typename S>
2105  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2106  { return true; }
2107  template <typename S>
2108  bool setIndex(UT_VoxelTileIterator<S> &vit)
2109  { return true; }
2110  bool setIndex(int x, int y, int z)
2111  { return true; }
2112 
2113  void setValue(T val) { myValue = val; }
2114  T getValue() const { return myValue; }
2115 protected:
2116  T myValue;
2117  };
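The payoff (a sketch, not part of this header) is that a kernel templated on the probe type accepts either a real probe or a fixed value; addSourceExample is an illustrative name.

    // Example sketch only; not part of UT_VoxelArray.h.
    template <typename PROBE>
    static void
    addSourceExample(UT_VoxelArrayF &dst, PROBE &source)
    {
        UT_VoxelArrayIteratorF vit;
        vit.setArray(&dst);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            source.setIndex(vit);
            vit.setValue(vit.getValue() + source.getValue());
        }
    }
    // Works with a UT_VoxelProbeF bound to an array, or with a
    // UT_VoxelProbeConstant<fpreal32> holding a single value.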
2118 
2119 ///
2120 /// UT_VoxelProbeAverage
2121 ///
2122  /// When working with MAC grids one often has slightly misaligned
2123 /// fields. Ie, one field is at the half-grid spacing of another field.
2124  /// The step values are 0 if the dimension is aligned, -1 for half a step
2125 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2126 /// (ie, (val(0)+val(1))/2)
2127 ///
2128 template <typename T, int XStep, int YStep, int ZStep>
2129 class
2130  UT_VoxelProbeAverage
2131  {
2132 public:
2133  UT_VoxelProbeAverage() {}
2134  virtual ~UT_VoxelProbeAverage() {}
2135 
2136  void setArray(const UT_VoxelArray<T> *vox);
2137 
2138  template <typename S>
2139  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2140  { return setIndex(vit.x(), vit.y(), vit.z()); }
2141  template <typename S>
2142  bool setIndex(UT_VoxelTileIterator<S> &vit)
2143  { return setIndex(vit.x(), vit.y(), vit.z()); }
2144  bool setIndex(int x, int y, int z);
2145 
2146  /// Returns the velocity centered at this index, thus an average
2147  /// of the values in each of our internal probes.
2148  inline T getValue() const
2149  {
2150  if (ZStep)
2151  return (valueZ(1) + valueZ(0)) * 0.5;
2152  return valueZ(0);
2153  }
2154 
2155 protected:
2156  inline T valueZ(int z) const
2157  {
2158  if (YStep)
2159  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2160  return valueYZ(0, z);
2161  }
2162 
2163  inline T valueYZ(int y, int z) const
2164  {
2165  if (XStep > 0)
2166  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2167  if (XStep < 0)
2168  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2169  return myLines[y][z].getValue();
2170  }
2171 
2172  // Stores [Y][Z] lines.
2173  UT_VoxelProbe<T, true, false, false> myLines[2][2];
2174  };
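A sketch (not part of this header): resampling an x-face-centered field at voxel centres. Whether XStep should be +1 or -1 depends on the face indexing convention of the field; velx and centered are placeholder names.

    // Example sketch only; not part of UT_VoxelArray.h.
    static void
    centerVelXExample(const UT_VoxelArrayF &velx, UT_VoxelArrayF &centered)
    {
        // XStep = 1: the value at centre i is (velx(i) + velx(i+1)) * 0.5.
        UT_VoxelProbeAverage<fpreal32, 1, 0, 0> avg;
        avg.setArray(&velx);

        UT_VoxelArrayIteratorF vit;
        vit.setArray(&centered);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            avg.setIndex(vit);
            vit.setValue(avg.getValue());
        }
    }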
2175 
2176 
2177 ///
2178 /// UT_VoxelProbeFace is designed to walk over three velocity
2179 /// fields that store face-centered values. The indices refer
2180 /// to the centers of the voxels.
2181 ///
2182 template <typename T>
2183 class
2184  UT_VoxelProbeFace
2185  {
2186 public:
2187  UT_VoxelProbeFace();
2188  virtual ~UT_VoxelProbeFace();
2189 
2190  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2191  void setVoxelSize(const UT_Vector3 &voxelsize);
2192 
2193  template <typename S>
2194  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2195  { return setIndex(vit.x(), vit.y(), vit.z()); }
2196  template <typename S>
2197  bool setIndex(UT_VoxelTileIterator<S> &vit)
2198  { return setIndex(vit.x(), vit.y(), vit.z()); }
2199  bool setIndex(int x, int y, int z);
2200 
2201  /// Get the face values on each face component.
2202  /// Parameters are axis then side.
2203  /// 0 is the lower face, 1 the higher face.
2204  inline T face(int axis, int side) const
2205  {
2206  if (axis == 0)
2207  return myLines[0][0].getValue(side);
2208  else
2209  return myLines[axis][side].getValue();
2210  }
2211 
2212  /// Returns the velocity centered at this index, thus an average
2213  /// of the values in each of our internal probes.
2214  inline UT_Vector3 value() const
2215  {
2216  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2217  0.5f * (face(1, 0) + face(1, 1)),
2218  0.5f * (face(2, 0) + face(2, 1)));
2219  }
2220 
2221  /// Returns the divergence of this cell.
2222  inline T divergence() const
2223  {
2224  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2225  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2226  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2227 
2228  }
2229 
2230 protected:
2231 
2232  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2233  UT_VoxelProbe<T, true, false, false> &yp);
2234 
2235 
2236  UT_VoxelProbe<T, true, false, false> myLines[3][2];
2237 
2238  /// Cached look up position. myValid stores if they are
2239  /// valid values or not
2240  bool myValid;
2241  int myX, myY, myZ;
2242  /// Half inclusive [,) range of valid x queries for current cache.
2243  int myMinValidX, myMaxValidX;
2244 
2245  UT_Vector3 myVoxelSize, myInvVoxelSize;
2246 };
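A sketch (not part of this header) of a per-voxel divergence pass over a face-centered velocity field; velx, vely, velz and div are placeholder names.

    // Example sketch only; not part of UT_VoxelArray.h.
    static void
    divergenceExample(const UT_VoxelArrayF &velx,
                      const UT_VoxelArrayF &vely,
                      const UT_VoxelArrayF &velz,
                      UT_VoxelArrayF &div,
                      const UT_Vector3 &voxelsize)
    {
        UT_VoxelProbeFace<fpreal32> face;
        face.setArray(&velx, &vely, &velz);
        face.setVoxelSize(voxelsize);

        UT_VoxelArrayIteratorF vit;
        vit.setArray(&div);
        for (vit.rewind(); !vit.atEnd(); vit.advance())
        {
            face.setIndex(vit);
            vit.setValue(face.divergence());
        }
    }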
2247 
2248 
2249 #if defined( WIN32 ) || defined( LINUX ) || defined( MBSD ) || defined(GAMEOS)
2250  #include "UT_VoxelArray.C"
2251 #endif
2252 
2253 
2254 // Typedefs for common voxel array types
2255 typedef UT_VoxelArray<fpreal32> UT_VoxelArrayF;
2256 typedef UT_VoxelArray<int64> UT_VoxelArrayI;
2257 typedef UT_VoxelArray<UT_Vector4> UT_VoxelArrayV4;
2258 
2259 typedef UT_VoxelMipMap<fpreal32> UT_VoxelMipMapF;
2260 typedef UT_VoxelArrayIterator<fpreal32> UT_VoxelArrayIteratorF;
2261 typedef UT_VoxelArrayIterator<int64> UT_VoxelArrayIteratorI;
2262 typedef UT_VoxelArrayIterator<UT_Vector4> UT_VoxelArrayIteratorV4;
2263 typedef UT_VoxelTileIterator<fpreal32> UT_VoxelTileIteratorF;
2264 typedef UT_VoxelTileIterator<int64> UT_VoxelTileIteratorI;
2265 typedef UT_VoxelTileIterator<UT_Vector4> UT_VoxelTileIteratorV4;
2266 // Read only probe
2267 typedef UT_VoxelProbe<fpreal32, true, false, false> UT_VoxelProbeF;
2268 typedef UT_VoxelProbe<UT_Vector4, true, false, false> UT_VoxelProbeV4;
2269 
2270 // Write only
2271 typedef UT_VoxelProbe<fpreal32, false, true, false> UT_VoxelWOProbeF;
2272 typedef UT_VoxelProbe<UT_Vector4, false, true, false> UT_VoxelWOProbeV4;
2273 
2274 // Read/Write always writeback.
2275 typedef UT_VoxelProbe<fpreal32, true, true, false> UT_VoxelRWProbeF;
2276 typedef UT_VoxelProbe<UT_Vector4, true, true, false> UT_VoxelRWProbeV4;
2277 
2278 // Read/Write with testing
2279 typedef UT_VoxelProbe<fpreal32, true, true, true> UT_VoxelRWTProbeF;
2280 typedef UT_VoxelProbe<UT_Vector4, true, true, true> UT_VoxelRWTProbeV4;
2281 
2282 typedef UT_VoxelProbeCube<fpreal32> UT_VoxelProbeCubeF;
2283 
2284 typedef UT_VoxelVectorProbe<fpreal32, true, false, false> UT_VoxelVectorProbeF;
2285 typedef UT_VoxelVectorProbe<fpreal32, false, true, false> UT_VoxelVectorWOProbeF;
2286 typedef UT_VoxelVectorProbe<fpreal32, true, true, false> UT_VoxelVectorRWProbeF;
2287 typedef UT_VoxelVectorProbe<fpreal32, true, true, true> UT_VoxelVectorRWTProbeF;
2288 typedef UT_COWHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayHandleF;
2289 typedef UT_COWReadHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayReadHandleF;
2290 typedef UT_COWWriteHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayWriteHandleF;
2291 typedef UT_COWHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayHandleV4;
2292 typedef UT_COWReadHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayReadHandleV4;
2293 typedef UT_COWWriteHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayWriteHandleV4;
2294 #endif
2295 