HDK
UT_VoxelArray.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 #include <hboost/shared_ptr.hpp>
37 
38 // TBB alloc results in real-world tests that are 3-4% faster. Yay!
39 // But unfortunately it is less aggressive with fragmentation, so
40 // we use effectively 2x the memory. Boo.
41 
42 //#define VOXEL_USE_TBB_ALLOC
43 
44 #ifdef VOXEL_USE_TBB_ALLOC
45 
46 #include <tbb/scalable_allocator.h>
47 
48 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
49 #define UT_VOXEL_FREE(x) scalable_free(x)
50 
51 #else
52 
53 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
54 #define UT_VOXEL_FREE(x) SYSafree(x)
55 
56 #endif
57 
58 class UT_Filter;
59 class UT_JSONWriter;
60 class UT_JSONParser;
61 
62 static const int TILEBITS = 4;
63 static const int TILESIZE = 1 << TILEBITS;
64 static const int TILEMASK = TILESIZE-1;
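// For illustration (a sketch, not part of the original header): a voxel
// coordinate splits into a tile index and an intra-tile offset using the
// constants above, which is how UT_VoxelArray::operator() locates a voxel.
//
//     int x = 37;
//     int tilex  = x >> TILEBITS;  // 37 / 16 = 2, which 16-wide tile
//     int localx = x & TILEMASK;   // 37 % 16 = 5, offset inside that tile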
65 
66 ///
67 /// Behaviour of out of bound reads.
68 ///
69 enum UT_VoxelBorderType
70 {
71     UT_VOXELBORDER_CONSTANT,
72     UT_VOXELBORDER_REPEAT,
73     UT_VOXELBORDER_STREAK,
74     UT_VOXELBORDER_EXTRAP
75 };
76 
77 template <typename T> class UT_VoxelTile;
78 template <typename T> class UT_VoxelArray;
79 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
80 template <typename T> class UT_VoxelProbeCube;
81 template <typename T> class UT_VoxelProbeFace;
82 
83 class UT_API UT_VoxelCompressOptions
84 {
85 public:
86  UT_VoxelCompressOptions()
87  {
88  myConstantTol = 0;
89  myQuantizeTol = 0;
90  myAllowFP16 = false;
91  }
92 
93  // Used for quantization.
94  enum DitherType
95  {
96  DITHER_NONE,
97  DITHER_ORDERED
98  };
99 
100  /// Tiles will be constant if within this range. This may
101  /// need to be tighter than quantization tolerance as
102  /// dithering can't recover partial values.
103  fpreal myConstantTol;
104  /// Tolerance for quantizing to reduced bit depth
105  fpreal myQuantizeTol;
106 
107  DitherType myDitherType;
108 
109  /// Conversion to fpreal16, only valid for scalar data.
110  bool myAllowFP16;
111 };
112 
113 ///
114 /// UT_VoxelTileCompress
115 ///
116 /// A compression engine for UT_VoxelTiles of a specific type. This
117 /// is a verb class which is invoked from the voxeltile class.
118 ///
119 template <typename T>
120 class UT_VoxelTileCompress
121 {
122 public:
123  UT_VoxelTileCompress() {}
124  virtual ~UT_VoxelTileCompress() {}
125 
126  /// Attempts to write data directly to the compressed tile.
127  /// Returns false if not possible.
128  virtual bool writeThrough(UT_VoxelTile<T> &tile,
129  int x, int y, int z, T t) const = 0;
130 
131  /// Reads directly from the compressed data.
132  /// Cannot alter the tile in any way because it must be threadsafe.
133  virtual T getValue(const UT_VoxelTile<T> &tile,
134  int x, int y, int z) const = 0;
135 
136  /// Attempts to compress the data according to the given tolerance.
137  /// If successful, returns true.
138  virtual bool tryCompress(UT_VoxelTile<T> &tile,
139  const UT_VoxelCompressOptions &options,
140  T min, T max) const = 0;
141 
142  /// Returns the length in bytes of the data in the tile.
143  /// It must be at least one byte long.
144  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
145 
146  /// Returns true if the compression type is lossless
147  virtual bool isLossless() const { return false; }
148 
149  /// Determines the min & max values of the tile. A default
150  /// implementation uses getValue() on all voxels.
151  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
152 
153  /// Does this engine support saving and loading?
154  virtual bool canSave() const { return false; }
155  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
156  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
157  { return false; }
158  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
159  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
160  { return false; }
161 
162  /// Returns the unique name of this compression engine so
163  /// we can look up engines by name (the index of the compression
164  /// engine is assigned at load time so isn't constant)
165  virtual const char *getName() = 0;
166 };
167 
179 
180 #define DEFINE_STD_FUNC(TYPE) \
181 inline void \
182 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
183 { \
184  if (v < min) \
185  min = v; \
186  else if (v > max) \
187  max = v; \
188 } \
189  \
190 inline fpreal \
191 UTvoxelTileDist(TYPE a, TYPE b) \
192 { \
193  return (fpreal) SYSabs(a - b); \
194 }
195 
204 
205 #undef DEFINE_STD_FUNC
206 
207 inline void
208 UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
209 {
210  min.x() = SYSmin(v.x(), min.x());
211  max.x() = SYSmax(v.x(), max.x());
212 
213  min.y() = SYSmin(v.y(), min.y());
214  max.y() = SYSmax(v.y(), max.y());
215 }
216 
217 inline void
218 UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min, UT_Vector3 &max)
219 {
220  min.x() = SYSmin(v.x(), min.x());
221  max.x() = SYSmax(v.x(), max.x());
222 
223  min.y() = SYSmin(v.y(), min.y());
224  max.y() = SYSmax(v.y(), max.y());
225 
226  min.z() = SYSmin(v.z(), min.z());
227  max.z() = SYSmax(v.z(), max.z());
228 }
229 
230 inline void
231 UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min, UT_Vector4 &max)
232 {
233  min.x() = SYSmin(v.x(), min.x());
234  max.x() = SYSmax(v.x(), max.x());
235 
236  min.y() = SYSmin(v.y(), min.y());
237  max.y() = SYSmax(v.y(), max.y());
238 
239  min.z() = SYSmin(v.z(), min.z());
240  max.z() = SYSmax(v.z(), max.z());
241 
242  min.w() = SYSmin(v.w(), min.w());
243  max.w() = SYSmax(v.w(), max.w());
244 }
245 
246 inline fpreal
247 UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b)
248 {
249  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
250 }
251 
252 inline fpreal
253 UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b)
254 {
255  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
256  + SYSabs(a.z() - b.z());
257 }
258 
259 inline fpreal
260 UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b)
261 {
262  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
263  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
264 }
265 
266 ///
267 /// UT_VoxelTile
268 ///
269 /// A UT_VoxelArray is composed of a number of these tiles. This is
270 /// done for two reasons:
271 /// 1) Increased memory locality when processing neighbouring points.
272 /// 2) Ability to compress or page out unneeded tiles.
273 /// Currently, the only special ability is the ability to create constant
274 /// tiles.
275 ///
276 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should usually
277 /// be transparent. The only exception may be if they want to do
278 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
279 ///
280 template <typename T>
281 class UT_VoxelTile
282 {
283 public:
284  UT_VoxelTile();
285  virtual ~UT_VoxelTile();
286 
287  // Copy constructor:
288  UT_VoxelTile(const UT_VoxelTile<T> &src);
289 
290 
291  // Assignment operator:
292  const UT_VoxelTile<T> &operator=(const UT_VoxelTile<T> &src);
293 
294  enum CompressionType
295  {
296  COMPRESS_RAW,
297  COMPRESS_RAWFULL,
298  COMPRESS_CONSTANT,
299  COMPRESS_FPREAL16,
300  COMPRESS_ENGINE
301  };
302 
303  /// Fetch a given local value. (x,y,z) should be local to
304  /// this tile.
305  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
306  {
307  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
308  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
309 
310  switch (myCompressionType)
311  {
312  case COMPRESS_RAW:
313  return ((T *)myData)[
314  ((z * myRes[1]) + y) * myRes[0] + x ];
315 
316  case COMPRESS_CONSTANT:
317  return rawConstVal();
318 
319  case COMPRESS_RAWFULL:
320  return ((T *)myData)[
321  ((z * TILESIZE) + y) * TILESIZE + x ];
322 
323  case COMPRESS_FPREAL16:
324  {
325  T result;
326  result = (((fpreal16 *)myData)[
327  ((z * myRes[1]) + y) * myRes[0] + x ]);
328  return result;
329  }
330  }
331 
332  // By default use the compression engine.
333  UT_VoxelTileCompress<T> *engine;
334 
335  engine = getCompressionEngine(myCompressionType);
336  return engine->getValue(*this, x, y, z);
337  }
338 
339  /// Lerps two numbers, templated to work with T.
340  static T lerpValues(T v1, T v2, fpreal32 bias)
341  {
342  return v1 + (v2 - v1) * bias;
343  }
344 
345  /// Does a trilinear interpolation. x,y,z should be local to this
346  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
347  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
348 
349  template <int AXIS2D>
350  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
351 
352  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
353  /// array should have 8 elements, x minor, z major.
354  /// Requires it is in bounds.
355  /// Returns true if all constant, in which case only a single
356  /// sample is filled, [0]
357  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
358  T *sample) const;
359  template <int AXIS2D>
360  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
361  T *sample) const;
362 
363  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
364  /// 7 samples.
365  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
366  T *sample) const;
367  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
368  /// 27 elements.
369  bool extractSampleCube(int x, int y, int z,
370  T *sample) const;
371 #if 0
372  /// MSVC can't handle aligned parameters after the third so
373  /// frac must come first.
374  T lerp(v4uf frac, int x, int y, int z) const;
375 #endif
376 
377  /// Returns a cached line to our internal data, at local address x,y,z.
378  /// cacheline is a caller allocated structure to fill out if we have
379  /// to decompress. If forcecopy isn't set and we can, the result may
380  /// be an internal pointer. stride is set to the update for moving one
381  /// x position in the cache.
382  /// strideofone should be set to true if you want to prevent 0 stride
383  /// results for constant tiles.
384  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
385 
386  /// Fills a cache line from an external buffer into our own data.
387  void writeCacheLine(T *cacheline, int y, int z);
388 
389  /// Copies between two tiles. The tile's voxels match up, but don't
390  /// have the same offset. The maximal overlapping voxels are copied.
391  /// this->setValue(dstx, dsty, dstz, src(srcx, srcy, srcz));
392  void copyFragment(int dstx, int dsty, int dstz,
393  const UT_VoxelTile<T> &srctile,
394  int srcx, int srcy, int srcz);
395 
396  /// Flattens ourself into the given destination buffer.
397  template <typename S>
398  void flatten(S *dst, int dststride) const;
399 
400  /// Fills our values from the given dense flat buffer. Will
401  /// create a constant tile if the source is constant.
402  template <typename S>
403  void writeData(const S *src, int srcstride);
404 
405  /// The setValue() method is intentionally separate so we can avoid
406  /// expanding constant data when we write the same value to it.
407  void setValue(int x, int y, int z, T t);
408 
409  /// Finds the minimum and maximum T values
410  void findMinMax(T &min, T &max) const;
411 
412  /// Determines the average value of the tile.
413  void findAverage(T &avg) const;
414 
415  /// Returns if this tile is constant.
416  bool isConstant() const
417  { return myCompressionType == COMPRESS_CONSTANT; }
418 
419  /// Returns true if any NANs are in this tile
420  bool hasNan() const;
421 
422  /// Returns if this tile is in raw format.
423  bool isRaw() const
424  { return myCompressionType == COMPRESS_RAW; }
425 
426  /// Returns if this tile is in raw full format.
427  bool isRawFull() const
428  { return myCompressionType == COMPRESS_RAWFULL; }
429 
430  /// Returns true if this is a simple form of compression, either
431  /// constant, raw, or a raw full that isn't padded
432  bool isSimpleCompression() const
433  {
434  if (isRaw()) return true;
435  if (isConstant()) return true;
436  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
437  return true;
438  return false;
439  }
440 
441  /// Attempts to compress this tile. Returns true if any
442  /// compression performed.
443  bool tryCompress(const UT_VoxelCompressOptions &options);
444 
445  /// Turns this tile into a constant tile of the given value.
446  void makeConstant(T t);
447 
448  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
449  void makeFpreal16();
450 
451  /// Turns a compressed tile into a raw tile.
452  void uncompress();
453 
454  /// Turns a tile into a raw full tile.
455  void uncompressFull();
456 
457  /// Like uncompress() except it leaves the data uninitialized. Result
458  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
459  /// @note USE WITH CAUTION!
460  void makeRawUninitialized();
461 
462  /// Returns the raw full data of the tile.
463  T *rawFullData()
464  {
465  uncompressFull();
466  return (T *)myData;
467  }
468 
469  /// This only makes sense for simple compression. Use with
470  /// extreme care.
471  T *rawData()
472  { if (inlineConstant() && isConstant())
473  { return (T *) &myData; }
474  return (T *)myData; }
475  const T *rawData() const
476  { if (inlineConstant() && isConstant())
477  { return (const T *) &myData; }
478  return (const T *)myData; }
479 
480  /// Read the current resolution.
481  int xres() const { return myRes[0]; }
482  int yres() const { return myRes[1]; }
483  int zres() const { return myRes[2]; }
484 
485  int getRes(int dim) const { return myRes[dim]; }
486 
487 
488  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
489 
490  /// Returns the amount of memory used by this tile.
491  int64 getMemoryUsage(bool inclusive) const;
492 
493  /// Returns the amount of data used by the tile myData pointer.
494  exint getDataLength() const;
495 
496  /// A routine used by filtered evaluation to accumulate a partial
497  /// filtered sum in this tile.
498  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
499  /// weights - weight array
500  /// start - UT_VoxelArray coordinates at [0] in the weight array
501  void weightedSum(int pstart[3], int pend[3],
502  const float *weights[3], int start[3],
503  T &result);
504 
505  /// Designed to be specialized according to T
506 
507  /// Update min & max to encompass T itself.
508  static void expandMinMax(T v, T &min, T &max)
509  {
510  UTvoxelTileExpandMinMax(v, min, max);
511  }
512 
513  /// Return the "distance" of a & b. This is used for
514  /// tolerance checks on equality comparisons.
515  static fpreal dist(T a, T b)
516  {
517  return UTvoxelTileDist(a, b);
518  }
519 
521 
522  // Returns the index of the bound compression engine.
523  static int lookupCompressionEngine(const char *name);
524  // Given an index, gets the compression engine.
525  static UT_VoxelTileCompress<T> *getCompressionEngine(int index);
526 
527  /// Saves this tile's data, in compressed form.
528  /// May save in uncompressed form if the compression type does
529  /// not support saving.
530  void save(std::ostream &os) const;
531  bool save(UT_JSONWriter &w) const;
532 
533  /// Loads tile data. Uses the compression index to map the saved
534  /// compression types into the correct loading compression types.
535  void load(UT_IStream &is, const UT_IntArray &compression);
536  bool load(UT_JSONParser &p, const UT_IntArray &compression);
537 
538  /// Stores a list of compression engines to os.
539  static void saveCompressionTypes(std::ostream &os);
540  static bool saveCompressionTypes(UT_JSONWriter &w);
541 
542  /// Builds a translation table from the given stream's compression types
543  /// into our own valid compression types.
544  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
545  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
546 
547 protected:
548  // Attempts to set the value to the native compressed format
549  // Some compression types allow some values to be written
550  // without decompression. Eg, you can write to a constant tile
551  // the tile's own value without decompression.
552  // If this returns true, t has been written.
553  bool writeThrough(int x, int y, int z, T t);
554 
555  /// Sets the local res of the tile. Does *not* resize the allocated
556  /// memory.
557  void setRes(int xr, int yr, int zr)
558  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
559 
560  static bool inlineConstant()
561  {
562  return (sizeof(T) <= sizeof(T*));
563  }
564 
565  T rawConstVal() const
566  { if (inlineConstant()) { return *((const T *)&myData); }
567  return *((const T*)myData); }
568  T *rawConstData() const
569  { if (inlineConstant()) { return ((T *)&myData); }
570  return ((T*)myData); }
571 
572  void setForeignData(void *data, int8 compress_type)
573  {
574  freeData();
575  myCompressionType = compress_type;
576 
577  if (isConstant() && inlineConstant())
578  {
579  makeConstant(*(T *)data);
580  }
581  else
582  {
583  myData = data;
584  myForeignData = true;
585  }
586  }
587 
588 public:
589  /// Frees myData and sets it to zero. This is a bit tricky
590  /// as the constant tiles may be inlined.
591  /// This is only public for the compression engines.
592  void freeData()
593  {
594  if (inlineConstant() && isConstant())
595  {
596  // Do nothing!
597  }
598  else if (myData && !myForeignData)
599  {
600  UT_VOXEL_FREE(myData);
601  }
602  myData = 0;
603  myForeignData = false;
604  }
605 
606 public:
607  // This is only public so the compression engines can get to it.
608  // It is blind data, do not alter!
609  void *myData;
610 private:
611 
612  /// Resolutions.
613  int8 myRes[3];
614 
615  /// Am I a constant tile?
616  int8 myCompressionType;
617 
618  int8 myForeignData;
619 
620  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
621  {
622  return UTvoxelTileGetCompressionEngines((T *) 0);
623  }
624 
625  friend class UT_VoxelTileCompress<T>;
626  friend class UT_VoxelArray<T>;
627  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
628  friend class UT_VoxelProbe;
629 };
630 
631 ///
632 /// UT_VoxelArray
633 ///
634 /// This provides a data structure to hold a three-dimensional array
635 /// of data. The data should be some simple arithmetic type, such
636 /// as uint8, fpreal16, or UT_Vector3.
637 ///
638 /// Some operations, such as gradients, may make less sense with uint8.
639 ///
640 template <typename T>
641 class UT_VoxelArray
642 {
643 public:
644  UT_VoxelArray();
645  virtual ~UT_VoxelArray();
646 
647  /// Copy constructor:
648  UT_VoxelArray(const UT_VoxelArray<T> &src);
649 
650  /// Assignment operator:
651  const UT_VoxelArray<T> &operator=(const UT_VoxelArray<T> &src);
652 
653  /// This sets the voxelarray to have the given resolution, resetting
654  /// all elements to 0.
655  void size(int xres, int yres, int zres);
656 
657  /// This will ensure this voxel array matches the given voxel array
658  /// in terms of dimensions & border conditions. It may invoke
659  /// a size() and hence reset the field to 0.
660  void match(const UT_VoxelArray<T> &src);
661 
662  template <typename S>
663  bool isMatching(const UT_VoxelArray<S> &src) const
664  {
665  return src.getXRes() == getXRes() &&
666  src.getYRes() == getYRes() &&
667  src.getZRes() == getZRes();
668  }
669 
670  int getXRes() const { return myRes[0]; }
671  int getYRes() const { return myRes[1]; }
672  int getZRes() const { return myRes[2]; }
673  int getRes(int axis) const { return myRes[axis]; }
674 
675  /// Return the amount of memory used by this array.
676  int64 getMemoryUsage(bool inclusive) const;
677 
678  /// Sets this voxel array to the given constant value. All tiles
679  /// are turned into constant tiles.
681  constant,
682  T, t)
683  void constantPartial(T t, const UT_JobInfo &info);
684 
685  /// If this voxel array is all constant tiles, returns true.
686  /// The optional pointer is initialized to the constant value iff
687  /// the array is constant. (Note by constant we mean made of constant
688  /// tiles of the same value - if some tiles are uncompressed but
689  /// constant, it will still return false)
690  bool isConstant(T *cval = 0) const;
691 
692  /// Returns true if any element of the voxel array is NAN
693  bool hasNan() const;
694 
695  /// This convenience function lets you sample the voxel array.
696  /// pos is in the range [0..1]^3.
697  /// T value trilinearly interpolated. Edges are determined by the border
698  /// mode.
699  /// The cells are sampled at the center of the voxels.
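 /// A minimal sampling sketch (illustrative only, not from the original
 /// source):
 /// @code
 ///     UT_VoxelArray<fpreal32> vol;
 ///     vol.size(64, 64, 64);                            // voxels reset to 0
 ///     vol.setValue(32, 32, 32, 1.0f);
 ///     fpreal32 v = vol(UT_Vector3F(0.5f, 0.5f, 0.5f)); // trilinear sample
 /// @endcode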
700  T operator()(UT_Vector3D pos) const;
701  T operator()(UT_Vector3F pos) const;
702 
703  /// This convenience function lets you sample the voxel array.
704  /// pos is in the range [0..1]^3.
705  /// The min/max is the range of the sampled values.
706  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
707  UT_Vector3F pos) const;
708 
709  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
710  /// Allows out of range evaluation
711  SYS_FORCE_INLINE T lerpVoxelCoord(UT_Vector3F pos) const;
712  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
713  /// Allows out of range evaluation
714  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
715  float fx, float fy, float fz) const;
716  template <int AXIS2D>
717  SYS_FORCE_INLINE T lerpVoxelCoordAxis(UT_Vector3F pos) const;
718  template <int AXIS2D>
719  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
720  float fx, float fy, float fz) const;
721 
722  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
723  /// Allows out of range evaluation. Also computes min/max of
724  /// interpolated samples.
725  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
726  UT_Vector3F pos) const;
727  template <int AXIS2D>
728  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
729  UT_Vector3F pos) const;
730  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
731  /// Allows out of range evaluation. Also computes min/max of
732  /// interpolated samples.
733  SYS_FORCE_INLINE void lerpVoxelMinMax(
734  T &lerp, T &lmin, T &lmax,
735  int x, int y, int z,
736  float fx, float fy, float fz) const;
737  template <int AXIS2D>
738  SYS_FORCE_INLINE void lerpVoxelMinMaxAxis(
739  T &lerp, T &lmin, T &lmax,
740  int x, int y, int z,
741  float fx, float fy, float fz) const;
742 
743  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
744  /// array should have 8 elements, x minor, z major.
745  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
746  T *sample) const;
747  template <int AXIS2D>
748  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
749  T *sample) const;
750 
751  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
752  /// the center into 7 voxels.
753  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
754  T *sample) const;
755  /// Extracts 27 dense 3x3x3 cube centered at x,y,z into samples
756  /// z major, xminor.
757  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
758  T *sample) const;
759 
760  /// Lerps the given sample using trilinear interpolation
761  SYS_FORCE_INLINE T lerpSample(T *samples,
762  float fx, float fy, float fz) const;
763  template <int AXIS2D>
764  SYS_FORCE_INLINE T lerpSampleAxis(T *samples,
765  float fx, float fy, float fz) const;
766 
767  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
768  float &fx, float &fy, float &fz) const
769  {
770  // Determine integer & fractional components.
771  fx = pos.x();
772  SYSfastSplitFloat(fx, x);
773  fy = pos.y();
774  SYSfastSplitFloat(fy, y);
775  fz = pos.z();
776  SYSfastSplitFloat(fz, z);
777  }
778  template <int AXIS2D>
779  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
780  float &fx, float &fy, float &fz) const
781  {
782  // Determine integer & fractional components.
783  if (AXIS2D != 0)
784  {
785  fx = pos.x();
786  SYSfastSplitFloat(fx, x);
787  }
788  else
789  {
790  fx = 0.0;
791  x = 0;
792  }
793  if (AXIS2D != 1)
794  {
795  fy = pos.y();
796  SYSfastSplitFloat(fy, y);
797  }
798  else
799  {
800  fy = 0.0;
801  y = 0;
802  }
803  if (AXIS2D != 2)
804  {
805  fz = pos.z();
806  SYSfastSplitFloat(fz, z);
807  }
808  else
809  {
810  fz = 0.0;
811  z = 0;
812  }
813  }
814 #if 0
815  T operator()(v4uf pos) const;
816 #endif
817 
818  /// Filtered evaluation of the voxel array. This operation should
819  /// exhibit the same behavior as IMG3D_Channel::evaluate.
820  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
821  fpreal radius, int clampaxis = -1) const;
822 
823  /// Fills this by resampling the given voxel array.
824  void resample(const UT_VoxelArray<T> &src,
825  UT_FilterType filtertype = UT_FILTER_POINT,
826  float filterwidthscale = 1.0f,
827  int clampaxis = -1);
828 
829  /// Flattens this into an array. Z major, then Y, then X.
830  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
831  THREADED_METHOD3_CONST(UT_VoxelArray<T>, numTiles() > 16,
832  flatten,
833  T *, flatarray,
834  exint, ystride,
835  exint, zstride)
836  void flattenPartial(T *flatarray, exint ystride, exint zstride,
837  const UT_JobInfo &info) const;
838 
839  /// Flattens this into an array suitable for a GL 8bit texture.
840  /// Z major, then Y, then X.
841  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
842  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
843  flattenGLFixed8,
844  uint8 *, flatarray,
845  exint, ystride,
846  exint, zstride,
847  T , dummy)
848  void flattenGLFixed8Partial(uint8 *flatarray,
849  exint ystride, exint zstride,
850  T dummy,
851  const UT_JobInfo &info) const;
852 
853  /// Flattens this into an array suitable for a GL 16bit FP texture.
854  /// Z major, then Y, then X.
855  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
856  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
857  flattenGL16F,
858  UT_Vector4H *, flatarray,
859  exint, ystride,
860  exint, zstride,
861  T , dummy)
862  void flattenGL16FPartial(UT_Vector4H *flatarray,
863  exint ystride, exint zstride,
864  T dummy,
865  const UT_JobInfo &info) const;
866 
867  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
868  /// this also works around an older Nvidia driver bug that caused very small
869  /// valued texels (<1e-9) to appear as huge random values in the texture.
870  /// Z major, then Y, then X.
871  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
872  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
873  flattenGL32F,
874  UT_Vector4F *, flatarray,
875  exint, ystride,
876  exint, zstride,
877  T , dummy)
878  void flattenGL32FPartial(UT_Vector4F *flatarray,
879  exint ystride, exint zstride,
880  T dummy,
881  const UT_JobInfo &info) const;
882 
883  /// Fills this from a flattened array. Z major, then Y, then X.
884  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride]);
885  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
886  extractFromFlattened,
887  const T *, flatarray,
888  exint, ystride,
889  exint, zstride)
890  void extractFromFlattenedPartial(const T *flatarray,
891  exint ystride, exint zstride,
892  const UT_JobInfo &info);
893 
894  /// Copies into this voxel array from the source array.
895  /// Conceptually,
896  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz));
897  void copyWithOffset(const UT_VoxelArray<T> &src,
898  int offx, int offy, int offz);
899  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
900  copyWithOffsetInternal,
901  const UT_VoxelArray<T> &, src,
902  int, offx,
903  int, offy,
904  int, offz)
905  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
906  int offx, int offy, int offz,
907  const UT_JobInfo &info);
908 
909  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
910  /// in T. Data order is in tile-order. So, sorted by tilelist, then
911  /// z, y, x within that tile.
912  template <typename S>
913  S *extractTiles(S *dstdata, int stride,
914  const UT_IntArray &tilelist) const;
915 
916  /// Overwrites our tiles with the given data. Does checking
917  /// for constant tiles. Input srcdata stream should match
918  /// that of extractTiles.
919  template <typename S>
920  const S *writeTiles(const S *srcdata, int srcstride,
921  const UT_IntArray &tilelist);
922 
923  /// Converts a 3d position in range [0..1]^3 into the closest
924  /// index value.
925  /// Returns false if the resulting index was out of range. The index
926  /// will still be set.
927  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
928  /// Converts a 3d position in [0..1]^3 into the equivalent in
929  /// the integer cell space. Does not clamp to the closest value.
930  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
931  /// Converts an index into a position.
932  /// Returns false if the source index was out of range, in which case
933  /// pos will be outside [0..1]^3
934  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
935  bool indexToPos(int x, int y, int z, UT_Vector3D &pos) const;
936  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
937  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
938 
939  /// Clamps the given x, y, and z values to lie inside the valid index
940  /// range.
941  void clampIndex(int &x, int &y, int &z) const
942  {
943  x = SYSclamp(x, 0, myRes[0]-1);
944  y = SYSclamp(y, 0, myRes[1]-1);
945  z = SYSclamp(z, 0, myRes[2]-1);
946  }
947 
948  /// Returns true if the given x, y, z values lie inside the valid index range.
949  bool isValidIndex(int x, int y, int z) const
950  {
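  // Bit trick: the first clause ORs the coordinates and tests the sign bit,
  // so it rejects any negative index; the second ANDs (coord - res) per
  // axis, which is negative only when every coordinate is strictly below
  // its resolution.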
951  return !((x | y | z) < 0) &&
952  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
953  }
954 
955  /// This allows you to read & write the raw data.
956  /// Out of bound reads are illegal.
957  T operator()(int x, int y, int z) const
958  {
959  UT_ASSERT_P(isValidIndex(x, y, z));
960  return (*getTile(x >> TILEBITS,
961  y >> TILEBITS,
962  z >> TILEBITS))
963  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
964  }
965  void setValue(int x, int y, int z, T t)
966  {
967  UT_ASSERT_P(isValidIndex(x, y, z));
968  getTile(x >> TILEBITS,
969  y >> TILEBITS,
970  z >> TILEBITS)->setValue(
971  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
972  }
973 
974  /// This will clamp the bounds to fit within the voxel array,
975  /// using the border type to resolve out of range values.
976  T getValue(int x, int y, int z) const
977  {
978  // First handle the most common case.
979  if (isValidIndex(x, y, z))
980  return (*this)(x, y, z);
981 
982  // Verify our voxel array is non-empty.
983  if (!myTiles)
984  return myBorderValue;
985 
986  // We now know we are out of range, adjust appropriately
987  switch (myBorderType)
988  {
989  case UT_VOXELBORDER_CONSTANT:
990  return myBorderValue;
991 
992  case UT_VOXELBORDER_REPEAT:
993  if (x < 0 || x >= myRes[0])
994  {
995  x %= myRes[0];
996  if (x < 0)
997  x += myRes[0];
998  }
999  if (y < 0 || y >= myRes[1])
1000  {
1001  y %= myRes[1];
1002  if (y < 0)
1003  y += myRes[1];
1004  }
1005  if (z < 0 || z >= myRes[2])
1006  {
1007  z %= myRes[2];
1008  if (z < 0)
1009  z += myRes[2];
1010  }
1011  break;
1012 
1013  case UT_VOXELBORDER_STREAK:
1014  clampIndex(x, y, z);
1015  break;
1016  case UT_VOXELBORDER_EXTRAP:
1017  {
1018  int cx, cy, cz;
1019  T result;
1020 
1021  cx = x; cy = y; cz = z;
1022  clampIndex(cx, cy, cz);
1023 
1024  result = (*this)(cx, cy, cz);
1025  result += (x - cx) * myBorderScale[0] +
1026  (y - cy) * myBorderScale[1] +
1027  (z - cz) * myBorderScale[2];
1028  return result;
1029  }
1030  }
1031 
1032  // It is now within bounds, do normal fetch.
1033  return (*this)(x, y, z);
1034  }
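  // For illustration (a sketch only; "vol" is an assumed 64^3 array using
  // UT_VOXELBORDER_STREAK): out-of-range reads clamp to the nearest edge
  // voxel, so the call below returns the same value as vol(0, 10, 10).
  //
  //     T edge = vol.getValue(-5, 10, 10);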
1035 
1036  void setBorder(UT_VoxelBorderType type, T t);
1037  void setBorderScale(T scalex, T scaley, T scalez);
1038  UT_VoxelBorderType getBorder() const { return myBorderType; }
1039  T getBorderValue() const { return myBorderValue; }
1040  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1041 
1042  /// This tries to compress or collapse each tile. This can
1043  /// be expensive (ie, converting a tile to constant), so
1044  /// should be saved until modifications are complete.
1045  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1046  collapseAllTiles)
1047  void collapseAllTilesPartial(const UT_JobInfo &info);
1048 
1049  /// Uncompresses all tiles into non-constant tiles. Useful
1050  /// if you have a multithreaded algorithm that may need to
1051  /// both read and write: if you write to a collapsed tile
1052  /// while someone else reads from it, bad stuff happens.
1053  /// Instead, you can expandAllTiles. This may have serious
1054  /// consequences in memory use, however.
1055  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1056  expandAllTiles)
1057  void expandAllTilesPartial(const UT_JobInfo &info);
1058 
1059  /// Uncompresses all tiles, but leaves constant tiles alone.
1060  /// Useful for cleaning out any non-standard compression algorithm
1061  /// that some external program can't handle.
1062  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1063  expandAllNonConstTiles)
1064  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1065 
1066  /// The direct tile access methods are to make TBF writing a bit
1067  /// more efficient.
1068  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1069  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1070  UT_VoxelTile<T> *getLinearTile(int idx) const
1071  { return &myTiles[idx]; }
1072  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1073  {
1074  x = idx % myTileRes[0];
1075  idx -= x;
1076  idx /= myTileRes[0];
1077  y = idx % myTileRes[1];
1078  idx -= y;
1079  idx /= myTileRes[1];
1080  z = idx;
1081  }
1082  int xyzTileToLinear(int x, int y, int z) const
1083  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1084 
1085  int indexToLinearTile(int x, int y, int z) const
1086  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1087 
1088  /// Number of tiles along that axis. Not to be confused with
1089  /// the resolution of the individual tiles.
1090  int getTileRes(int dim) const { return myTileRes[dim]; }
1091  int numTiles() const
1092  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1093  exint numVoxels() const
1094  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1095 
1097  { myCompressionOptions = options; }
1099  { return myCompressionOptions; }
1100 
1102  { myCompressionOptions.myConstantTol = tol; }
1104  { return myCompressionOptions.myConstantTol; }
1105 
1106  /// Saves only the data of this array to the given stream.
1107  /// To reload it you will have to have a matching array in tile
1108  /// dimensions and size.
1109  void saveData(std::ostream &os) const;
1110  bool saveData(UT_JSONWriter &w,
1111  const char *shared_mem_owner = 0) const;
1112 
1113  /// Load an array, requires you have already size()d this array.
1114  void loadData(UT_IStream &is);
1115  bool loadData(UT_JSONParser &p);
1116 
1117  /// Copy only the data from the source array.
1118  /// Note that it is an error to call this unless isMatching(src).
1120  copyData,
1121  const UT_VoxelArray<T> &, src)
1122 
1123  void copyDataPartial(const UT_VoxelArray<T> &src,
1124  const UT_JobInfo &info);
1125 
1126 private:
1128  resamplethread,
1129  const UT_VoxelArray<T> &, src,
1130  const UT_Filter *, filter,
1131  float, radius,
1132  int, clampaxis)
1133  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1134  const UT_Filter *filter,
1135  float radius,
1136  int clampaxis,
1137  const UT_JobInfo &info);
1138 
1139 
1140  void deleteVoxels();
1141 
1142  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1143  bool populateFromSharedMemory(const char *id);
1144 
1145 
1146  /// Number of elements in each dimension.
1147  int myRes[3];
1148 
1149  /// Inverse tile res, 1/myRes
1150  UT_Vector3 myInvRes;
1151 
1152  /// Number of tiles in each dimension.
1153  int myTileRes[3];
1154 
1155  /// Compression tolerance for lossy compression.
1156  UT_VoxelCompressOptions myCompressionOptions;
1157 
1158  /// Double dereferenced so we can theoretically resize easily.
1159  UT_VoxelTile<T> *myTiles;
1160 
1161  /// Outside values get this if constant borders are used
1162  T myBorderValue;
1163  /// Per axis scale factors for when extrapolating.
1164  T myBorderScale[3];
1165  UT_VoxelBorderType myBorderType;
1166 
1167  /// For initializing the tiles from shared memory.
1168  SYS_SharedMemory *mySharedMem;
1169  SYS_SharedMemoryView *mySharedMemView;
1170 };
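//
// A rough usage sketch (illustrative only): fill an array, then let the
// tiles recompress once the writes are finished. computeDensity() is a
// hypothetical helper, not part of this header.
//
//     UT_VoxelArray<fpreal32> dens;
//     dens.size(128, 128, 128);
//     for (int z = 0; z < dens.getZRes(); z++)
//         for (int y = 0; y < dens.getYRes(); y++)
//             for (int x = 0; x < dens.getXRes(); x++)
//                 dens.setValue(x, y, z, computeDensity(x, y, z));
//     dens.collapseAllTiles();   // compress / constant-collapse tiles
//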
1171 
1172 
1173 ///
1174 /// UT_VoxelMipMap
1175 ///
1176 /// This provides a mip-map type structure for a voxel array.
1177 /// It manages the different levels of voxels arrays that are needed.
1178 /// You can create different types of mip maps: average, maximum, etc,
1179 /// which can allow different tricks.
1180 /// Each level is one half the previous level, rounded up.
1181 /// Out of bound voxels are ignored from the lower levels.
1182 ///
1183 template <typename T>
1184 class UT_VoxelMipMap
1185 {
1186 public:
1187  /// The different types of functions that can be used for
1188  /// constructing a mip map.
1189  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1190 
1191  UT_VoxelMipMap();
1192  virtual ~UT_VoxelMipMap();
1193 
1194  /// Copy constructor.
1195  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1196 
1197  /// Assignment operator:
1198  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1199 
1200  /// Builds from a given voxel array. The ownership flag determines
1201  /// if we gain ownership of the voxel array and should delete it.
1202  /// In any case, the new levels are owned by us.
1203  void build(UT_VoxelArray<T> *baselevel,
1204  mipmaptype function);
1205 
1206  /// Same as above but construct mipmaps simultaneously for more than
1207  /// one function. The order of the functions will correspond to the
1208  /// order of the data values passed to the traversal callback.
1209  void build(UT_VoxelArray<T> *baselevel,
1210  const UT_Array<mipmaptype> &functions);
1211 
1212  /// This does a top down traversal of the implicit octree defined
1213  /// by the voxel array. Returning false will abort that
1214  /// branch of the octree.
1215  /// The bounding box given is in cell space and is an exclusive
1216  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1217  /// Note that each bounding box will not be square, unless you
1218  /// have the good fortune of starting with a power of 2 cube.
1219  /// The boolean goes true when the callback is invoked on a
1220  /// base level.
1221  typedef bool (*Callback)(const T *funcs,
1222  const UT_BoundingBox &box,
1223  bool baselevel, void *data);
1224  void traverseTopDown(Callback function,
1225  void *data) const;
1226 
1227  /// Top down traversal on op. OP is invoked with
1228  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1229  ///
1230  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1231  /// level 0 means the base level.
1232  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1233  /// gives the index to extract the value from that level.
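 ///
 /// A sketch of a compatible functor (illustrative only):
 /// @code
 ///     struct CountLeaves
 ///     {
 ///         CountLeaves() : myCount(0) {}
 ///         exint myCount;
 ///         bool operator()(const UT_BoundingBoxI &indexbox, int level)
 ///         {
 ///             if (level == 0)
 ///                 myCount++;      // reached a base-level box
 ///             return true;        // returning false prunes this branch
 ///         }
 ///     };
 /// @endcode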
1234  template <typename OP>
1235  void traverseTopDown(OP&op) const;
1236 
1237 
1238  /// Top down traversal, but which quad tree is visited first
1239  /// is controlled by
1240  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1241  /// Lower values are visited first.
1242  template <typename OP>
1243  void traverseTopDownSorted(OP&op) const;
1244 
1245 
1246  /// Return the amount of memory used by this mipmap.
1247  int64 getMemoryUsage(bool inclusive) const;
1248 
1249  int numLevels() const { return myNumLevels+1; }
1250 
1251  /// level 0 is the original grid, each level higher is a power
1252  /// of two smaller.
1253  const UT_VoxelArray<T> *level(int level, int function) const
1254  {
1255  if (level == 0)
1256  return myBaseLevel;
1257 
1258  return myLevels(function)[numLevels() - 1 - level];
1259  }
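 
  // A build/query sketch (illustrative only):
  //
  //     UT_VoxelArray<fpreal32> *base = new UT_VoxelArray<fpreal32>();
  //     base->size(64, 64, 64);
  //     UT_VoxelMipMap<fpreal32> mip;
  //     mip.build(base, UT_VoxelMipMap<fpreal32>::MIPMAP_MAXIMUM);
  //     // level 0 is the original grid; higher levels are coarser.
  //     const UT_VoxelArray<fpreal32> *coarsest =
  //         mip.level(mip.numLevels() - 1, 0);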
1260 
1261 private:
1262  void doTraverse(int x, int y, int z, int level,
1263  Callback function,
1264  void *data) const;
1265 
1266  /// Note: This variant of doTraverse has the opposite sense of level!
1267  template <typename OP>
1268  void doTraverse(int x, int y, int z, int level,
1269  OP &op) const;
1270  template <typename OP>
1271  void doTraverseSorted(int x, int y, int z, int level,
1272  OP &op) const;
1273 
1274  void initializePrivate();
1275  void destroyPrivate();
1276 
1277  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1278  downsample,
1279  UT_VoxelArray<T> &, dst,
1280  const UT_VoxelArray<T> &, src,
1281  mipmaptype, function)
1282  void downsamplePartial(UT_VoxelArray<T> &dst,
1283  const UT_VoxelArray<T> &src,
1284  mipmaptype function,
1285  const UT_JobInfo &info);
1286 
1287 protected:
1288  T mixValues(T t1, T t2, mipmaptype function) const
1289  {
1290  switch (function)
1291  {
1292  case MIPMAP_MAXIMUM:
1293  return SYSmax(t1, t2);
1294 
1295  case MIPMAP_AVERAGE:
1296  return (t1 + t2) / 2;
1297 
1298  case MIPMAP_MINIMUM:
1299  return SYSmin(t1, t2);
1300  }
1301 
1302  return t1;
1303  }
1304 
1305 
1306  /// This stores the base most level that was provided
1307  /// externally.
1308  UT_VoxelArray<T> *myBaseLevel;
1309  /// If true, we will delete the base level when we are done.
1311 
1312  /// Tracks the number of levels which we used to represent
1313  /// this hierarchy.
1314  int myNumLevels;
1315  /// The array of VoxelArrays, one per level.
1316  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1317  /// as big in each dimension. However, every layer is clamped
1318  /// against the resolution of the base layer.
1319  /// We own all these layers.
1321 };
1322 
1323 
1324 /// Iterator for Voxel Arrays
1325 ///
1326 /// This class eliminates the need for having
1327 /// for (z = 0; z < zres; z++)
1328 /// ...
1329 /// for (x = 0; x < xres; x++)
1330 /// loops everywhere.
1331 ///
1332 /// Note that the order of iteration is undefined! (The actual order is
1333 /// to complete each tile in turn, thereby hopefully improving cache
1334 /// coherency)
1335 ///
1336 /// It is safe to write to the voxel array while this iterator is active.
1337 /// It is *not* safe to resize the voxel array (or destroy it)
1338 ///
1339 /// The iterator is similar in principle to an STL iterator, but somewhat
1340 /// simpler. The classic STL loop
1341 /// for ( it = begin(); it != end(); ++it )
1342 /// is done using
1343 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1344 ///
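 /// A typical traversal sketch (illustrative only; "vol" is an assumed
 /// UT_VoxelArray<fpreal32>):
 /// @code
 ///     UT_VoxelArrayIterator<fpreal32> vit;
 ///     vit.setArray(&vol);
 ///     for (vit.rewind(); !vit.atEnd(); vit.advance())
 ///     {
 ///         if (vit.isTileConstant() && vit.getValue() == 0)
 ///         {
 ///             // nothing to do for an all-zero tile
 ///             vit.skipToEndOfTile();
 ///             continue;
 ///         }
 ///         vit.setValue(vit.getValue() * 2.0f);
 ///     }
 /// @endcode
 ///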
1345 template <typename T>
1346 class UT_VoxelArrayIterator
1347 {
1348 public:
1352  virtual ~UT_VoxelArrayIterator();
1353 
1354  void setArray(UT_VoxelArray<T> *vox)
1355  {
1356  myCurTile = -1;
1357  myHandle.resetHandle();
1358  myArray = vox;
1359  // Reset the range
1360  setPartialRange(0, 1);
1361  }
1362  void setConstArray(const UT_VoxelArray<T> *vox)
1363  {
1364  setArray((UT_VoxelArray<T> *) vox);
1365  }
1366 
1367  /// Iterates over the array pointed to by the handle. Only
1368  /// supports read access during the iteration as it does
1369  /// a read lock.
1371  {
1372  myHandle = handle;
1373  // Ideally we'd have a separate const iterator
1374  // from our non-const iterator so this would
1375  // only be exposed in the const version.
1376  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1377 
1378  // Reset our range.
1379  myCurTile = -1;
1380  setPartialRange(0, 1);
1381  }
1382 
1383 
1384  /// Restricts this iterator to only run over a subset
1385  /// of the tiles. The tiles will be divided into approximately
1386  /// numranges equal groups, this will be the idx'th.
1387  /// The resulting iterator may have zero tiles.
1388  void setPartialRange(int idx, int numranges);
1389 
1390  /// Ties this iterator to the given jobinfo so it will
1391  /// match the jobinfo's processing.
1392  void splitByTile(const UT_JobInfo &info);
1393 
1394  /// Assigns an interrupt handler. This will be tested whenever
1395  /// it advances to a new tile. If it is interrupted, the iterator
1396  /// will jump forward to atEnd()
1397  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1398  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1399 
1400  /// Restricts this iterator to the tiles that intersect
1401  /// the given bounding box of voxel coordinates.
1402  /// Note that this will not be a precise restriction as
1403  /// each tile is either included or not.
1404  /// You should setPartialRange() after setting the bbox range
1405  /// The bounding box is on the [0..1]^3 range.
1406  void restrictToBBox(const UT_BoundingBox &bbox);
1407  /// The [xmin, xmax] are inclusive and measured in voxels.
1408  void restrictToBBox(int xmin, int xmax,
1409  int ymin, int ymax,
1410  int zmin, int zmax);
1411 
1412  /// Resets the iterator to point to the first voxel.
1413  void rewind();
1414 
1415  /// Returns true if we have iterated over all of the voxels.
1416  bool atEnd() const
1417  { return myCurTile < 0; }
1418 
1419  /// Advances the iterator to point to the next voxel.
1420  void advance()
1421  {
1422  // We try to advance each axis, rolling over to the next.
1423  // If we exhaust this tile, we call advanceTile.
1424  myPos[0]++;
1425  myTileLocalPos[0]++;
1426  if (myTileLocalPos[0] >= myTileSize[0])
1427  {
1428  // Wrapped in X.
1429  myPos[0] -= myTileLocalPos[0];
1430  myTileLocalPos[0] = 0;
1431 
1432  myPos[1]++;
1433  myTileLocalPos[1]++;
1434  if (myTileLocalPos[1] >= myTileSize[1])
1435  {
1436  // Wrapped in Y.
1437  myPos[1] -= myTileLocalPos[1];
1438  myTileLocalPos[1] = 0;
1439 
1440  myPos[2]++;
1441  myTileLocalPos[2]++;
1442  if (myTileLocalPos[2] >= myTileSize[2])
1443  {
1444  // Wrapped in Z! Finished this tile!
1445  advanceTile();
1446  }
1447  }
1448  }
1449  }
1450 
1451  /// Retrieve the current location of the iterator.
1452  int x() const { return myPos[0]; }
1453  int y() const { return myPos[1]; }
1454  int z() const { return myPos[2]; }
1455  int idx(int idx) const { return myPos[idx]; }
1456 
1457  /// Retrieves the value that we are currently pointing at.
1458  /// This is faster than an operator(x,y,z) as we already know
1459  /// our current tile and that bounds checking isn't needed.
1460  T getValue() const
1461  {
1462  UT_ASSERT_P(myCurTile >= 0);
1463 
1464  UT_VoxelTile<T> *tile;
1465 
1466  tile = myArray->getLinearTile(myCurTile);
1467  return (*tile)(myTileLocalPos[0],
1468  myTileLocalPos[1],
1469  myTileLocalPos[2]);
1470  }
1471 
1472  /// Sets the voxel we are currently pointing to the given value.
1473  void setValue(T t) const
1474  {
1475  UT_ASSERT_P(myCurTile >= 0);
1476 
1477  UT_VoxelTile<T> *tile;
1478 
1479  tile = myArray->getLinearTile(myCurTile);
1480 
1481  tile->setValue(myTileLocalPos[0],
1482  myTileLocalPos[1],
1483  myTileLocalPos[2], t);
1484  }
1485 
1486  /// Returns true if the tile we are currently in is a constant tile.
1487  bool isTileConstant() const
1488  {
1489  UT_ASSERT_P(myCurTile >= 0);
1490 
1491  UT_VoxelTile<T> *tile;
1492 
1493  tile = myArray->getLinearTile(myCurTile);
1494  return tile->isConstant();
1495  }
1496 
1497  /// Returns the half-open range [start,end) of voxels covered by the current tile.
1498  void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
1499  {
1500  start.x() = myTilePos[0] * TILESIZE;
1501  start.y() = myTilePos[1] * TILESIZE;
1502  start.z() = myTilePos[2] * TILESIZE;
1503  end = start;
1504  end.x() += myTileSize[0];
1505  end.y() += myTileSize[1];
1506  end.z() += myTileSize[2];
1507  }
1508 
1509  /// Returns the *inclusive* bounding box of voxels covered by
1510  /// the current tile.
1511  UT_BoundingBoxI getTileBBox() const
1512  {
1513  UT_Vector3I start, end;
1514  getTileVoxels(start, end);
1515  return UT_BoundingBoxI(start, end);
1516  }
1517 
1518  /// Returns true if we are at the start of a new tile.
1519  bool isStartOfTile() const
1520  { return !(myTileLocalPos[0] ||
1521  myTileLocalPos[1] ||
1522  myTileLocalPos[2]); }
1523 
1524  /// Returns the VoxelTile we are currently processing
1525  UT_VoxelTile<T> *getTile() const
1526  {
1527  UT_ASSERT_P(myCurTile >= 0);
1528  return myArray->getLinearTile(myCurTile);
1529  }
1530  int getLinearTileNum() const
1531  {
1532  return myCurTile;
1533  }
1534 
1535  /// Advances the iterator to point to the next tile. Useful if the
1536  /// constant test showed that you didn't need to deal with this one.
1537  void advanceTile();
1538 
1539  /// Advances the iterator to point just before the next tile so
1540  /// the next advance() will be an advanceTile(). This is useful
1541  /// if you want a continue; to act as your break while the for loop
1542  /// is doing advance().
1543  /// Note the iterator is in a bad state until advance() is called.
1544  void skipToEndOfTile();
1545 
1546  /// Sets a flag which causes the iterator to tryCompress()
1547  /// tiles when it is done with them.
1548  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1549  void setCompressOnExit(bool shouldcompress)
1550  { myShouldCompressOnExit = shouldcompress; }
1551 
1552  /// These templated algorithms are designed to apply simple operations
1553  /// across all of the voxels with as little overhead as possible.
1554  /// The iterator should already point to a voxel array and if multithreaded
1555  /// had its partial range set. The source arrays must be matching size.
1556  /// The operator should support a () operator, and the result is
1557  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
1558  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1559  /// Note if both source and destination tiles are constant, only
1560  /// a single operation is invoked.
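 ///
 /// A sketch of a compatible operator (illustrative only; "vit" and "src"
 /// are assumed to be an iterator and a matching UT_VoxelArray<fpreal32>):
 /// @code
 ///     struct AddHalf
 ///     {
 ///         fpreal32 operator()(fpreal32 dst, fpreal32 s) const
 ///         { return dst + 0.5f * s; }
 ///     };
 ///     AddHalf op;
 ///     vit.applyOperation(op, src);
 /// @endcode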
1561  template <typename OP>
1562  void applyOperation(OP &op);
1563  template <typename OP, typename S>
1564  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1565  template <typename OP>
1566  void applyOperation(OP &op, T a);
1567  template <typename OP, typename S, typename R>
1568  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1569  const UT_VoxelArray<R> &b);
1570  template <typename OP, typename S, typename R, typename Q>
1571  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1572  const UT_VoxelArray<R> &b,
1573  const UT_VoxelArray<Q> &c);
1574  /// These variants will invoke op.isNoop(a, b, ...) which will return
1575  /// true if those values won't affect the destination. This allows
1576  /// constant source tiles to be skipped, for example when adding
1577  /// 0.
1578  template <typename OP, typename S>
1579  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1580  template <typename OP>
1581  void applyOperationCheckNoop(OP &op, T a);
1582 
1583  /// Assign operation works like apply operation, but *this is written
1584  /// to without reading, so there is one less parameter to the ()
1585  /// callback. This can optimize constant tile writes as the
1586  /// constant() status of the destination doesn't matter.
1587  template <typename OP, typename S>
1588  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1589  template <typename OP, typename S, typename R>
1590  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1591  const UT_VoxelArray<R> &b);
1592  template <typename OP, typename S, typename R, typename Q>
1593  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1594  const UT_VoxelArray<R> &b,
1595  const UT_VoxelArray<Q> &c);
1596 
1597  /// Reduction operators.
1598  /// op.reduce(T a) called for each voxel, *but*,
1599  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1600  template <typename OP>
1601  void reduceOperation(OP &op);
1602 
1603  UT_VoxelArray<T> *getArray() const { return myArray; }
1604 
1605 protected:
1606  /// The array we belong to.
1607  UT_VoxelArray<T> *myArray;
1608  /// The handle that we have locked to get our array. It is null
1609  /// by default which makes the lock/unlock nops.
1611 
1612  /// Absolute index into voxel array.
1613  int myPos[3];
1614 
1615  /// Flag determining if we should compress tiles whenever we
1616  /// advance out of them.
1617  bool myShouldCompressOnExit;
1618 
1621 
1622 public:
1623  /// Our current linear tile idx. A value of -1 implies at end.
1624  int myCurTile;
1625 
1626  /// Our current index into the tile list
1628 
1629  /// Our start & end tiles for processing a subrange.
1630  /// The tile range is half open [start, end)
1631  int myTileStart, myTileEnd;
1632 
1633  /// Which tile we are as per tx,ty,tz rather than linear index.
1634  int myTilePos[3];
1635 
1636  /// Our position within the current tile.
1637  int myTileLocalPos[3];
1638 
1639  /// The size of the current tile
1640  int myTileSize[3];
1641 
1642  /// The job info to use for tilefetching
1644 
1646 };
1647 
1648 /// Iterator for tiles inside Voxel Arrays
1649 ///
1650 /// This class eliminates the need for having
1651 /// for (z = 0; z < zres; z++)
1652 /// ...
1653 /// for (x = 0; x < xres; x++)
1654 /// loops everywhere.
1655 ///
1656 /// The iterator is similar in principle to an STL iterator, but somewhat
1657 /// simpler. The classic STL loop
1658 /// for ( it = begin(); it != end(); ++it )
1659 /// is done using
1660 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1661 ///
1662 template <typename T>
1663 class UT_VoxelTileIterator
1664 {
1665 public:
1668  template <typename S>
1669  UT_VoxelTileIterator(const UT_VoxelArrayIterator<S> &vit,
1670  UT_VoxelArray<T> *array);
1671  virtual ~UT_VoxelTileIterator();
1672 
1673  template <typename S>
1674  void setTile(const UT_VoxelArrayIterator<S> &vit,
1675  UT_VoxelArray<T> *array)
1676  {
1677  UT_ASSERT_P(vit.isStartOfTile());
1678  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1679  myArray = array;
1680  myTileStart[0] = vit.x();
1681  myTileStart[1] = vit.y();
1682  myTileStart[2] = vit.z();
1683  }
1684 
1685  void setTile(const UT_VoxelArrayIterator<T> &vit)
1686  {
1687  setTile(vit, vit.getArray());
1688  }
1689 
1690  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1691  {
1692  myCurTile = array->getLinearTile(lineartilenum);
1693  myArray = array;
1694 
1695  array->linearTileToXYZ(lineartilenum,
1696  myTileStart[0], myTileStart[1], myTileStart[2]);
1697  myTileStart[0] <<= TILEBITS;
1698  myTileStart[1] <<= TILEBITS;
1699  myTileStart[2] <<= TILEBITS;
1700  }
1701 
1702  /// Resets the iterator to point to the first voxel.
1703  void rewind();
1704 
1705  /// Returns true if we have iterated over all of the voxels.
1706  bool atEnd() const
1707  { return myCurTile == 0 || myAtEnd; }
1708 
1709  /// Advances the iterator to point to the next voxel.
1710  void advance()
1711  {
1712  // We try to advance each axis, rolling over to the next.
1713  // If we exhaust this tile, we call advanceTile.
1714  myPos[0]++;
1715  myTileLocalPos[0]++;
1716  if (myTileLocalPos[0] >= myTileSize[0])
1717  {
1718  // Wrapped in X.
1719  myPos[0] -= myTileLocalPos[0];
1720  myTileLocalPos[0] = 0;
1721 
1722  myPos[1]++;
1723  myTileLocalPos[1]++;
1724  if (myTileLocalPos[1] >= myTileSize[1])
1725  {
1726  // Wrapped in Y.
1727  myPos[1] -= myTileLocalPos[1];
1728  myTileLocalPos[1] = 0;
1729 
1730  myPos[2]++;
1731  myTileLocalPos[2]++;
1732  if (myTileLocalPos[2] >= myTileSize[2])
1733  {
1734  // Wrapped in Z! Finished this tile!
1735  advanceTile();
1736  }
1737  }
1738  }
1739  }
1740 
1741  /// Retrieve the current location of the iterator, in the
1742  /// containing voxel array, not in the tile.
1743  int x() const { return myPos[0]; }
1744  int y() const { return myPos[1]; }
1745  int z() const { return myPos[2]; }
1746  int idx(int idx) const { return myPos[idx]; }
1747 
1748  /// Retrieves the value that we are currently pointing at.
1749  /// This is faster than an operator(x,y,z) as we already know
1750  /// our current tile and that bounds checking isn't needed.
1751  T getValue() const
1752  {
1753  UT_ASSERT_P(myCurTile);
1754 
1755  return (*myCurTile)(myTileLocalPos[0],
1756  myTileLocalPos[1],
1757  myTileLocalPos[2]);
1758  }
1759 
1760  /// Sets the voxel we are currently pointing to the given value.
1761  void setValue(T t) const
1762  {
1763  UT_ASSERT_P(myCurTile);
1764 
1765  myCurTile->setValue(myTileLocalPos[0],
1766  myTileLocalPos[1],
1767  myTileLocalPos[2], t);
1768  }
1769 
1770  /// Returns true if the tile we are currently in is a constant tile.
1771  bool isTileConstant() const
1772  {
1773  UT_ASSERT_P(myCurTile);
1774 
1775  return myCurTile->isConstant();
1776  }
1777 
1778  /// Returns true if we are at the start of a new tile.
1779  bool isStartOfTile() const
1780  { return !(myTileLocalPos[0] ||
1781  myTileLocalPos[1] ||
1782  myTileLocalPos[2]); }
1783 
1784  /// Returns the VoxelTile we are currently processing
1785  UT_VoxelTile<T> *getTile() const
1786  {
1787  return myCurTile;
1788  }
1789 
1790  /// Advances the iterator to point to the next tile. Since
1791  /// we are restricted to one tile, effectively just ends the iterator.
1792  void advanceTile();
1793 
1794  /// Sets a flag which causes the iterator to tryCompress()
1795  /// tiles when it is done with them.
1796  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1797  void setCompressOnExit(bool shouldcompress)
1798  { myShouldCompressOnExit = shouldcompress; }
1799 
1800  /// Reduction operators.
1801  /// op.reduce(T a) called for each voxel, *but*,
1802  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1803  /// Early exits if op.reduce() returns false.
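 ///
 /// A sketch of a summing reducer (illustrative only):
 /// @code
 ///     struct SumOp
 ///     {
 ///         SumOp() : mySum(0) {}
 ///         fpreal64 mySum;
 ///         bool reduce(fpreal32 a)            { mySum += a; return true; }
 ///         bool reduceMany(fpreal32 a, int n) { mySum += (fpreal64)a * n; return true; }
 ///     };
 /// @endcode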
1804  template <typename OP>
1805  bool reduceOperation(OP &op);
1806 
1807 protected:
1808  /// Current processing tile
1809  UT_VoxelTile<T> *myCurTile;
1810  UT_VoxelArray<T> *myArray;
1811 
1812  /// Absolute index into voxel array.
1813  int myPos[3];
1814  /// Absolute index of start of tile
1815  int myTileStart[3];
1816 
1817  /// Flag determining if we should compress tiles whenever we
1818  /// advance out of them.
1819  bool myShouldCompressOnExit;
1820 
1821  /// Since we want to allow multiple passes, we can't
1822  /// clear out myCurTile when we hit the end.
1823  bool myAtEnd;
1824 
1825 public:
1826  /// Our position within the current tile.
1827  int myTileLocalPos[3];
1828 
1829  /// The size of the current tile
1830  int myTileSize[3];
1831 };
1832 
1833 /// Probe for Voxel Arrays
1834 ///
1835 /// This class is designed to allow for efficient evaluation
1836 /// of aligned indices of a voxel array, provided the voxels are iterated
1837  /// in a tile-by-tile, x-innermost, manner.
1838 ///
1839 /// This class will create a local copy of the voxel data where needed,
1840 /// uncompressing the information once for every 16 queries. It will
1841 /// also create an aligned buffer so you can safely use v4uf on fpreal32
1842 /// data.
1843 ///
1844 /// For queries where you need surrounding values, the prex and postx can
1845  /// specify padding on the probe. prex should be -1 to allow reading
1846  /// the -1 offset, and postx should be 1 to allow reading the +1 offset.
1847 ///
1848 
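// Illustrative sketch of a padded probe: a read probe with prex = -1 /
// postx = 1 for central differences in x, and a write probe kept in-step
// with advanceX() when setIndex() reports the cache line did not move.
// "density" and "result" are hypothetical UT_VoxelArrayF fields of
// matching resolution.
//
//   UT_VoxelProbeF   src;
//   UT_VoxelWOProbeF dst;
//   src.setConstArray(&density, -1, 1);
//   dst.setArray(&result);
//
//   UT_VoxelArrayIteratorF vit;
//   vit.setArray(&result);
//   for (vit.rewind(); !vit.atEnd(); vit.advance())
//   {
//       if (src.setIndex(vit))
//           dst.setIndex(vit);
//       else
//           dst.advanceX();
//       dst.setValue(0.5f * (src.getValue(1) - src.getValue(-1)));
//   }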
1849 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1850 class UT_VoxelProbe
1851 {
1852 public:
1853  UT_VoxelProbe();
1854  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1855  virtual ~UT_VoxelProbe();
1856 
1857  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
1858  void setConstArray(const UT_VoxelArray<T> *vox,
1859  int prex = 0, int postx = 0)
1860  {
1861  SYS_STATIC_ASSERT(DoWrite == false);
1862  setArray((UT_VoxelArray<T> *)vox, prex, postx);
1863  }
1864 
1865  UT_VoxelArray<T> *getArray() const { return myArray; }
1866 
1867  bool isValid() const { return myArray != 0; }
1868 
1869  inline T getValue() const
1870  {
1871  return *myCurLine;
1872  }
1873  inline T getValue(int offset) const
1874  {
1875  return myCurLine[myStride*offset];
1876  }
1877 
1878  inline void setValue(T value)
1879  {
1880  UT_ASSERT_P(DoWrite);
1881  *myCurLine = value;
1882  if (TestForWrites)
1883  myDirty = true;
1884  }
1885 
1886 
1887  /// Resets where we currently point to.
1888  /// Returns true if we had to reset our cache line. If we didn't,
1889  /// and you have multiple probes acting in-step, you can just
1890  /// advanceX() the other probes
1891  template <typename S>
1892  bool setIndex(UT_VoxelArrayIterator<S> &vit)
1893  { return setIndex(vit.x(), vit.y(), vit.z()); }
1894  template <typename S>
1895  bool setIndex(UT_VoxelTileIterator<S> &vit)
1896  { return setIndex(vit.x(), vit.y(), vit.z()); }
1897 
1898  bool setIndex(int x, int y, int z);
1899 
1900  /// Blindly advances our current pointer.
1901  inline void advanceX()
1902  {
1903  myCurLine += myStride;
1904  myX++;
1905  UT_ASSERT_P(myX < myMaxValidX);
1906  }
1907 
1908  /// Adjusts our current pointer to the given absolute location,
1909  /// assumes the new value is inside our valid range.
1910  inline void resetX(int x)
1911  {
1912  myCurLine += myStride * (x - myX);
1913  myX = x;
1914  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
1915  }
1916 
1917 protected:
1918  void reloadCache(int x, int y, int z);
1919 
1920  void writeCacheLine();
1921 
1922  void buildConstantCache(T value);
1923 
1924  T *myCurLine;
1925  /// myCacheLine[0] is the start of the cache line, so -1 would be
1926  /// the first pre-rolled value
1927  T *myCacheLine;
1928  /// Where we actually allocated our cache line, aligned to a 4x multiple
1929  /// to ensure SSE compatibility.
1930  T *myAllocCacheLine;
1931 
1932  int myX, myY, myZ;
1933  int myPreX, myPostX;
1936  /// Half inclusive [,) range of valid x queries for current cache.
1937  int myMinValidX, myMaxValidX;
1938 
1939  /// Determines if we have anything to write back, only
1940  /// valid if TestForWrites is enabled.
1941  bool myDirty;
1942 
1943  UT_VoxelArray<T> *myArray;
1944 
1945  friend class UT_VoxelProbeCube<T>;
1946  friend class UT_VoxelProbeFace<T>;
1947 };
1948 
1949 ///
1950 /// The vector probe is three normal probes into separate voxel arrays
1951 /// making it easier to read and write to aligned vector fields.
1952 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
1953 ///
1954 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
1956 {
1957 public:
1958  UT_VoxelVectorProbe()
1959  { }
1960  UT_VoxelVectorProbe(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
1961  { setArray(vx, vy, vz); }
1962  virtual ~UT_VoxelVectorProbe()
1963  {}
1964 
1965  void setArray(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
1966  {
1967  myLines[0].setArray(vx);
1968  myLines[1].setArray(vy);
1969  myLines[2].setArray(vz);
1970  }
1971  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
1972  {
1973  SYS_STATIC_ASSERT(DoWrite == false);
1974  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
1975  }
1976 
1977  inline UT_Vector3 getValue() const
1978  {
1979  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
1980  }
1981  inline T getValue(int axis) const
1982  {
1983  return myLines[axis].getValue();
1984  }
1985 
1986  inline void setValue(const UT_Vector3 &v)
1987  {
1988  myLines[0].setValue(v.x());
1989  myLines[1].setValue(v.y());
1990  myLines[2].setValue(v.z());
1991  }
1992 
1993  inline void setComponent(int axis, T val)
1994  {
1995  myLines[axis].setValue(val);
1996  }
1997 
1998  /// Resets where we currently point to.
1999  /// Returns true if we had to reset our cache line. If we didn't,
2000  /// and you have multiple probes acting in-step, you can just
2001  /// advanceX() the other probes
2002  template <typename S>
2003  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2004  { return setIndex(vit.x(), vit.y(), vit.z()); }
2005  template <typename S>
2006  bool setIndex(UT_VoxelTileIterator<S> &vit)
2007  { return setIndex(vit.x(), vit.y(), vit.z()); }
2008 
2009  bool setIndex(int x, int y, int z)
2010  {
2011  if (myLines[0].setIndex(x, y, z))
2012  {
2013  myLines[1].setIndex(x, y, z);
2014  myLines[2].setIndex(x, y, z);
2015  return true;
2016  }
2017  myLines[1].advanceX();
2018  myLines[2].advanceX();
2019  return false;
2020  }
2021 
2022  void advanceX()
2023  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2024 
2025 protected:
2026  UT_VoxelProbe<T, DoRead, DoWrite, TestForWrites> myLines[3];
2027 };
2028 
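// Illustrative sketch of reading an aligned vector field stored as three
// scalar arrays ("velx"/"vely"/"velz" are hypothetical UT_VoxelArrayF
// fields of matching resolution):
//
//   UT_VoxelVectorProbeF vel;
//   vel.setConstArray(&velx, &vely, &velz);
//
//   UT_VoxelArrayIteratorF vit;
//   vit.setArray(&velx);
//   for (vit.rewind(); !vit.atEnd(); vit.advance())
//   {
//       vel.setIndex(vit);
//       UT_Vector3 v = vel.getValue();
//       // ... use v ...
//   }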
2029 template <typename T>
2030 class
2031 UT_VoxelProbeCube
2032 {
2033 public:
2034  UT_VoxelProbeCube();
2035  virtual ~UT_VoxelProbeCube();
2036 
2037  void setCubeArray(const UT_VoxelArray<T> *vox);
2038  void setPlusArray(const UT_VoxelArray<T> *vox);
2039 
2040  /// Allows you to query +/-1 in each direction. In cube update,
2041  /// all are valid. In plus update, only one of x, y, and z may be
2042  /// non-zero.
2043  inline T getValue(int x, int y, int z) const
2044  { return myLines[y+1][z+1].getValue(x); }
2045 
2046  template <typename S>
2047  bool setIndexCube(UT_VoxelArrayIterator<S> &vit)
2048  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2049  template <typename S>
2050  bool setIndexCube(UT_VoxelTileIterator<S> &vit)
2051  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2052  bool setIndexCube(int x, int y, int z);
2053 
2054  template <typename S>
2055  bool setIndexPlus(UT_VoxelArrayIterator<S> &vit)
2056  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2057  template <typename S>
2058  bool setIndexPlus(UT_VoxelTileIterator<S> &vit)
2059  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2060  bool setIndexPlus(int x, int y, int z);
2061 
2062  /// Computes central difference gradient, does not scale
2063  /// by the step size (which is twice voxelsize)
2064  /// Requires PlusArray
2065  UT_Vector3 gradient() const
2066  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2067  getValue(0,1,0) - getValue(0,-1,0),
2068  getValue(0,0,1) - getValue(0,0,-1)); }
2069 
2070  /// Computes the central difference curvature using the given
2071  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2072  /// Requires CubeArray.
2073  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2074 
2075  /// Computes the laplacian, again with a given 1/voxelsize.
2076  /// Requires PlusArray
2077  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2078 
2079 protected:
2080  /// Does a rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2081  /// so further queries with y+1 will be cache hits for 2 out of 3.
2083 
2084  UT_VoxelProbe<T, true, false, false> myLines[3][3];
2085  /// Cached look up position. myValid stores if they are
2086  /// valid values or not
2087  bool myValid;
2088  int myX, myY, myZ;
2089  /// Half inclusive [,) range of valid x queries for current cache.
2090  int myMinValidX, myMaxValidX;
2091 };
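// Illustrative sketch of a plus-stencil gradient ("sdf" is a hypothetical
// UT_VoxelArrayF and "voxelsize" its uniform voxel size):
//
//   UT_VoxelProbeCubeF probe;
//   probe.setPlusArray(&sdf);
//
//   UT_VoxelArrayIteratorF vit;
//   vit.setArray(&sdf);
//   for (vit.rewind(); !vit.atEnd(); vit.advance())
//   {
//       probe.setIndexPlus(vit);
//       // gradient() is unscaled, so divide by the 2 * voxelsize step.
//       UT_Vector3 grad = probe.gradient() / (2.0f * voxelsize);
//   }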
2092 
2093 ///
2094 /// UT_VoxelProbeConstant
2095 ///
2096 /// Looks like a voxel probe but only returns a constant value.
2097 ///
2098 template <typename T>
2099 class
2100 UT_VoxelProbeConstant
2101 {
2102 public:
2103  UT_VoxelProbeConstant() {}
2104  virtual ~UT_VoxelProbeConstant() {}
2105 
2106  template <typename S>
2107  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2108  { return true; }
2109  template <typename S>
2110  bool setIndex(UT_VoxelTileIterator<S> &vit)
2111  { return true; }
2112  bool setIndex(int x, int y, int z)
2113  { return true; }
2114 
2115  void setValue(T val) { myValue = val; }
2116  T getValue() const { return myValue; }
2117 protected:
2118  T myValue;
2119 };
2120 
2121 ///
2122 /// UT_VoxelProbeAverage
2123 ///
2124  /// When working with MAC grids one often has slightly misaligned
2125  /// fields. Ie, one field is at the half-grid spacing of another field.
2126  /// The step values are 0 if the dimension is aligned, -1 for half a step
2127  /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2128 /// (ie, (val(0)+val(1))/2)
2129 ///
2130 template <typename T, int XStep, int YStep, int ZStep>
2131 class
2132 UT_VoxelProbeAverage
2133 {
2134 public:
2135  UT_VoxelProbeAverage() {}
2136  virtual ~UT_VoxelProbeAverage() {}
2137 
2138  void setArray(const UT_VoxelArray<T> *vox);
2139 
2140  template <typename S>
2141  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2142  { return setIndex(vit.x(), vit.y(), vit.z()); }
2143  template <typename S>
2144  bool setIndex(UT_VoxelTileIterator<S> &vit)
2145  { return setIndex(vit.x(), vit.y(), vit.z()); }
2146  bool setIndex(int x, int y, int z);
2147 
2148  /// Returns the velocity centered at this index, thus an average
2149  /// of the values in each of our internal probes.
2150  inline T getValue() const
2151  {
2152  if (ZStep)
2153  return (valueZ(1) + valueZ(0)) * 0.5;
2154  return valueZ(0);
2155  }
2156 
2157 protected:
2158  inline T valueZ(int z) const
2159  {
2160  if (YStep)
2161  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2162  return valueYZ(0, z);
2163  }
2164 
2165  inline T valueYZ(int y, int z) const
2166  {
2167  if (XStep > 0)
2168  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2169  if (XStep < 0)
2170  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2171  return myLines[y][z].getValue();
2172  }
2173 
2174  // Stores [Y][Z] lines.
2175  UT_VoxelProbe<T, true, false, false> myLines[2][2];
2176 };
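// Illustrative sketch of sampling a field stored at a half-voxel offset
// in x ("velx" is a hypothetical UT_VoxelArrayF). XStep = -1 averages
// val(-1) and val(0), ie samples half a step back:
//
//   UT_VoxelProbeAverage<fpreal32, -1, 0, 0> avgx;
//   avgx.setArray(&velx);
//
//   UT_VoxelArrayIteratorF vit;
//   vit.setArray(&velx);
//   for (vit.rewind(); !vit.atEnd(); vit.advance())
//   {
//       avgx.setIndex(vit);
//       fpreal32 centered = avgx.getValue();
//   }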
2177 
2178 
2179 ///
2180 /// UT_VoxelProbeFace is designed to walk over three velocity
2181 /// fields that store face-centered values. The indices refer
2182 /// to the centers of the voxels.
2183 ///
2184 template <typename T>
2185 class
2186 UT_VoxelProbeFace
2187 {
2188 public:
2189  UT_VoxelProbeFace();
2190  virtual ~UT_VoxelProbeFace();
2191 
2192  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2193  void setVoxelSize(const UT_Vector3 &voxelsize);
2194 
2195  template <typename S>
2196  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2197  { return setIndex(vit.x(), vit.y(), vit.z()); }
2198  template <typename S>
2199  bool setIndex(UT_VoxelTileIterator<S> &vit)
2200  { return setIndex(vit.x(), vit.y(), vit.z()); }
2201  bool setIndex(int x, int y, int z);
2202 
2203  /// Get the face values on each face component.
2204  /// Parameters are axis then side.
2205  /// 0 is the lower face, 1 the higher face.
2206  inline T face(int axis, int side) const
2207  {
2208  if (axis == 0)
2209  return myLines[0][0].getValue(side);
2210  else
2211  return myLines[axis][side].getValue();
2212  }
2213 
2214  /// Returns the velocity centered at this index, thus an average
2215  /// of the values in each of our internal probes.
2216  inline UT_Vector3 value() const
2217  {
2218  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2219  0.5f * (face(1, 0) + face(1, 1)),
2220  0.5f * (face(2, 0) + face(2, 1)));
2221  }
2222 
2223  /// Returns the divergence of this cell.
2224  inline T divergence() const
2225  {
2226  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2227  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2228  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2229 
2230  }
2231 
2232 protected:
2233 
2234  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2235  UT_VoxelProbe<T, true, false, false> &yp);
2236 
2237 
2238  UT_VoxelProbe<T, true, false, false> myLines[3][2];
2239 
2240  /// Cached look up position. myValid stores if they are
2241  /// valid values or not
2242  bool myValid;
2243  int myX, myY, myZ;
2244  /// Half inclusive [,) range of valid x queries for current cache.
2245  int myMinValidX, myMaxValidX;
2246 
2247  UT_Vector3 myVoxelSize, myInvVoxelSize;
2248 };
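// Illustrative sketch of walking a face-centered velocity field
// ("velx"/"vely"/"velz" are hypothetical UT_VoxelArrayF fields and
// "voxelsize" the voxel dimensions):
//
//   UT_VoxelProbeFace<fpreal32> face;
//   face.setArray(&velx, &vely, &velz);
//   face.setVoxelSize(voxelsize);
//
//   UT_VoxelArrayIteratorF vit;
//   vit.setArray(&velx);
//   for (vit.rewind(); !vit.atEnd(); vit.advance())
//   {
//       face.setIndex(vit);
//       UT_Vector3 vel = face.value();       // cell-centered average
//       fpreal32   div = face.divergence();  // face differences, as above
//   }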
2249 
2250 
2251 #if defined( WIN32 ) || defined( LINUX ) || defined( MBSD ) || defined(GAMEOS)
2252  #include "UT_VoxelArray.C"
2253 #endif
2254 
2255 
2256 // Typedefs for common voxel array types
2257 typedef UT_VoxelArray<fpreal32> UT_VoxelArrayF;
2258 typedef UT_VoxelArray<int64> UT_VoxelArrayI;
2259 typedef UT_VoxelArray<UT_Vector4> UT_VoxelArrayV4;
2260 
2261 typedef UT_VoxelMipMap<fpreal32> UT_VoxelMipMapF;
2262 typedef UT_VoxelArrayIterator<fpreal32> UT_VoxelArrayIteratorF;
2263 typedef UT_VoxelArrayIterator<int64> UT_VoxelArrayIteratorI;
2264 typedef UT_VoxelArrayIterator<UT_Vector4> UT_VoxelArrayIteratorV4;
2265 typedef UT_VoxelTileIterator<fpreal32> UT_VoxelTileIteratorF;
2266 typedef UT_VoxelTileIterator<int64> UT_VoxelTileIteratorI;
2267 typedef UT_VoxelTileIterator<UT_Vector4> UT_VoxelTileIteratorV4;
2268 // Read only probe
2269 typedef UT_VoxelProbe<fpreal32, true, false, false> UT_VoxelProbeF;
2270 typedef UT_VoxelProbe<UT_Vector4, true, false, false> UT_VoxelProbeV4;
2271 typedef UT_VoxelProbeCube<fpreal32> UT_VoxelProbeCubeF;
2272 // Write only
2273 typedef UT_VoxelProbe<fpreal32, false, true, false> UT_VoxelWOProbeF;
2274 typedef UT_VoxelProbe<UT_Vector4, false, true, false> UT_VoxelWOProbeV4;
2275 
2276 // Read/Write always writeback.
2277 typedef UT_VoxelProbe<fpreal32, true, true, false> UT_VoxelRWProbeF;
2278 typedef UT_VoxelProbe<UT_Vector4, true, true, false> UT_VoxelRWProbeV4;
2279 
2280 // Read/Write with testing
2281 typedef UT_VoxelProbe<fpreal32, true, true, true> UT_VoxelRWTProbeF;
2282 typedef UT_VoxelProbe<UT_Vector4, true, true, true> UT_VoxelRWTProbeV4;
2283 
2284 
2285 
2287 
2291 
2295 
2296 #endif
2297 