UT_VoxelArray.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_VoxelArray.h ( UT Library, C++)
7  *
8  * COMMENTS:
9  * This provides support for transparently tiled voxel arrays of data.
10  * The given type, T, should support normal arithmetic operations.
11  *
12  * The created array has elements indexed from 0, ie: [0..xdiv-1].
13  */
14 
15 #ifndef __UT_VoxelArray__
16 #define __UT_VoxelArray__
17 
18 #include "UT_API.h"
19 #include "UT_BoundingBox.h"
20 #include "UT_Vector2.h"
21 #include "UT_Vector3.h"
22 #include "UT_Vector4.h"
23 #include "UT_IntArray.h"
24 #include "UT_ValArray.h"
25 #include "UT_Array.h"
26 #include "UT_FilterType.h"
27 #include "UT_COW.h"
28 #include "UT_ThreadedAlgorithm.h"
29 #include "UT_Interrupt.h"
30 #include <VM/VM_SIMD.h>
31 
32 #include <SYS/SYS_SharedMemory.h>
33 #include <SYS/SYS_StaticAssert.h>
34 #include <SYS/SYS_Types.h>
35 
36 // In real-world tests, TBB alloc is 3-4% faster. Yay!
37 // But unfortunately it is less aggressive with fragmentation, so
38 // we effectively use 2x the memory. Boo.
39 
40 //#define VOXEL_USE_TBB_ALLOC
41 
42 #ifdef VOXEL_USE_TBB_ALLOC
43 
44 #include <tbb/scalable_allocator.h>
45 
46 #define UT_VOXEL_ALLOC(x) scalable_malloc(x)
47 #define UT_VOXEL_FREE(x) scalable_free(x)
48 
49 #else
50 
51 #define UT_VOXEL_ALLOC(x) SYSamalloc((x), 128)
52 #define UT_VOXEL_FREE(x) SYSafree(x)
53 
54 #endif
55 
56 class UT_Filter;
57 class UT_JSONWriter;
58 class UT_JSONParser;
59 
60 static const int TILEBITS = 4;
61 static const int TILESIZE = 1 << TILEBITS;
62 static const int TILEMASK = TILESIZE-1;
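// Example (illustrative sketch): a global voxel index splits into a tile
// index and an intra-tile offset with these constants, mirroring the
// arithmetic UT_VoxelArray::operator() uses below:
//
//     int tx = x >> TILEBITS;     // which 16^3 tile along X
//     int lx = x & TILEMASK;      // local offset within that tile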
63 
64 ///
65 /// Behaviour of out of bound reads.
66 ///
67 enum UT_VoxelBorderType
68 {
69  UT_VOXELBORDER_CONSTANT,
70  UT_VOXELBORDER_REPEAT,
71  UT_VOXELBORDER_STREAK,
72  UT_VOXELBORDER_EXTRAP
73 };
74 
75 template <typename T> class UT_VoxelTile;
76 template <typename T> class UT_VoxelArray;
77 template <typename T, bool DoRead, bool DoWrite, bool TestForWrite> class UT_VoxelProbe;
78 template <typename T> class UT_VoxelProbeCube;
79 template <typename T> class UT_VoxelProbeFace;
80 
81 struct UT_VoxelArrayTileDataDescr
82 {
83  int tileidx;
84  int numvoxel;
85 };
86 
87 class UT_VoxelCompressOptions
88 {
89 public:
90  UT_VoxelCompressOptions()
91  {
92  myConstantTol = 0;
93  myQuantizeTol = 0;
94  myAllowFP16 = false;
95  }
96 
97  // Used for quantization.
99  {
102  };
103 
104  /// Determines if compressTile should be run on this grid for
105  /// things other than constant compression. Used by writeTiles
106  /// to limit compression attempts.
107  bool compressionEnabled() const
108  {
109  return myAllowFP16 || myConstantTol > 0 || myQuantizeTol > 0;
110  }
111 
112  /// Tiles will be constant if within this range. This may
113  /// need to be tighter than quantization tolerance as
114  /// dithering can't recover partial values.
115  fpreal myConstantTol;
116  /// Tolerance for quantizing to reduced bit depth.
117  fpreal myQuantizeTol;
118 
120 
121  /// Conversion to fpreal16, only valid for scalar data.
122  bool myAllowFP16;
123 };
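// Example (sketch): requesting constant-tile collapsing and fp16 storage.
// tryCompress() is declared on UT_VoxelTile below; the tolerances here are
// arbitrary and 'tile' stands for some UT_VoxelTile<fpreal32>.
//
//     UT_VoxelCompressOptions opts;
//     opts.myConstantTol = 1e-5;     // collapse nearly-uniform tiles
//     opts.myAllowFP16   = true;     // permit lossy 16-bit float storage
//     tile.tryCompress(opts);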
124 
125 ///
126 /// UT_VoxelTileCompress
127 ///
128 /// A compression engine for UT_VoxelTiles of a specific type. This
129 /// is a verb class which is invoked from the voxeltile class.
130 ///
131 template <typename T>
132 class UT_VoxelTileCompress
133 {
134 public:
137 
138  /// Attempts to write data directly to the compressed tile.
139  /// Returns false if not possible.
140  virtual bool writeThrough(UT_VoxelTile<T> &tile,
141  int x, int y, int z, T t) const = 0;
142 
143  /// Reads directly from the compressed data.
144  /// Cannot alter the tile in any way because it must be threadsafe.
145  virtual T getValue(const UT_VoxelTile<T> &tile,
146  int x, int y, int z) const = 0;
147 
148  /// Attempts to compress the data according to the given tolerance.
149  /// If successful, returns true.
150  virtual bool tryCompress(UT_VoxelTile<T> &tile,
151  const UT_VoxelCompressOptions &options,
152  T min, T max) const = 0;
153 
154  /// Returns the length in bytes of the data in the tile.
155  /// It must be at least one byte long.
156  virtual int getDataLength(const UT_VoxelTile<T> &tile) const = 0;
157 
158  /// Returns true if the compression type is lossless
159  virtual bool isLossless() const { return false; }
160 
161  /// Determines the min & max values of the tile. A default
162  /// implementation uses getValue() on all voxels.
163  virtual void findMinMax(const UT_VoxelTile<T> &tile, T &min, T &max) const;
164 
165  /// Does this engine support saving and loading?
166  virtual bool canSave() const { return false; }
167  virtual void save(std::ostream &os, const UT_VoxelTile<T> &tile) const {}
168  virtual bool save(UT_JSONWriter &w, const UT_VoxelTile<T> &tile) const
169  { return false; }
170  virtual void load(UT_IStream &is, UT_VoxelTile<T> &tile) const {}
171  virtual bool load(UT_JSONParser &p, UT_VoxelTile<T> &tile) const
172  { return false; }
173 
174  /// Returns the unique name of this compression engine so
175  /// we can look up engines by name (the index of the compression
176  /// engine is assigned at load time so isn't constant)
177  virtual const char *getName() = 0;
178 };
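// Example (sketch): the minimal shape of a custom compression engine. The
// class name is hypothetical; a real engine would manage tile.myData in
// tryCompress() and be registered so lookupCompressionEngine() can find it.
//
//     template <typename T>
//     class MyRLECompress : public UT_VoxelTileCompress<T>   // hypothetical
//     {
//     public:
//         bool writeThrough(UT_VoxelTile<T> &tile,
//                           int x, int y, int z, T t) const override
//                             { return false; }   // always force decompression
//         T    getValue(const UT_VoxelTile<T> &tile,
//                       int x, int y, int z) const override;
//         bool tryCompress(UT_VoxelTile<T> &tile,
//                          const UT_VoxelCompressOptions &options,
//                          T min, T max) const override;
//         int  getDataLength(const UT_VoxelTile<T> &tile) const override;
//         const char *getName() override { return "myrle"; }
//     };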
179 
191 
192 #define DEFINE_STD_FUNC(TYPE) \
193 inline void \
194 UTvoxelTileExpandMinMax(TYPE v, TYPE &min, TYPE &max) \
195 { \
196  if (v < min) \
197  min = v; \
198  else if (v > max) \
199  max = v; \
200 } \
201  \
202 inline fpreal \
203 UTvoxelTileDist(TYPE a, TYPE b) \
204 { \
205  return (fpreal) SYSabs(a - b); \
206 }
207 
216 
217 #undef DEFINE_STD_FUNC
218 
219 inline void
220 UTvoxelTileExpandMinMax(UT_Vector2 v, UT_Vector2 &min, UT_Vector2 &max)
221 {
222  min.x() = SYSmin(v.x(), min.x());
223  max.x() = SYSmax(v.x(), max.x());
224 
225  min.y() = SYSmin(v.y(), min.y());
226  max.y() = SYSmax(v.y(), max.y());
227 }
228 
229 inline void
230 UTvoxelTileExpandMinMax(UT_Vector3 v, UT_Vector3 &min, UT_Vector3 &max)
231 {
232  min.x() = SYSmin(v.x(), min.x());
233  max.x() = SYSmax(v.x(), max.x());
234 
235  min.y() = SYSmin(v.y(), min.y());
236  max.y() = SYSmax(v.y(), max.y());
237 
238  min.z() = SYSmin(v.z(), min.z());
239  max.z() = SYSmax(v.z(), max.z());
240 }
241 
242 inline void
243 UTvoxelTileExpandMinMax(UT_Vector4 v, UT_Vector4 &min, UT_Vector4 &max)
244 {
245  min.x() = SYSmin(v.x(), min.x());
246  max.x() = SYSmax(v.x(), max.x());
247 
248  min.y() = SYSmin(v.y(), min.y());
249  max.y() = SYSmax(v.y(), max.y());
250 
251  min.z() = SYSmin(v.z(), min.z());
252  max.z() = SYSmax(v.z(), max.z());
253 
254  min.w() = SYSmin(v.w(), min.w());
255  max.w() = SYSmax(v.w(), max.w());
256 }
257 
258 inline fpreal
259 UTvoxelTileDist(UT_Vector2 a, UT_Vector2 b)
260 {
261  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y());
262 }
263 
264 inline fpreal
265 UTvoxelTileDist(UT_Vector3 a, UT_Vector3 b)
266 {
267  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
268  + SYSabs(a.z() - b.z());
269 }
270 
271 inline fpreal
272 UTvoxelTileDist(UT_Vector4 a, UT_Vector4 b)
273 {
274  return SYSabs(a.x() - b.x()) + SYSabs(a.y() - b.y())
275  + SYSabs(a.z() - b.z()) + SYSabs(a.w() - b.w());
276 }
277 
278 ///
279 /// UT_VoxelTile
280 ///
281 /// A UT_VoxelArray is composed of a number of these tiles. This is
282 /// done for two reasons:
283 /// 1) Increased memory locality when processing neighbouring points.
284 /// 2) Ability to compress or page out unneeded tiles.
285 /// Currently, the only special ability is the ability to create constant
286 /// tiles.
287 ///
288 /// To the end user of the UT_VoxelArray, the UT_VoxelTile should usually
289 /// be transparent. The only exception may be if they want to do
290 /// a FOR_ALL_TILES in order to ensure an optimal traversal order.
291 ///
292 template <typename T>
293 class UT_VoxelTile
294 {
295 public:
296  UT_VoxelTile();
297  ~UT_VoxelTile();
298 
299  // Copy constructor:
300  UT_VoxelTile(const UT_VoxelTile<T> &src);
301 
302 
303  // Assignment operator:
304  const UT_VoxelTile<T> &operator=(const UT_VoxelTile<T> &src);
305 
306  enum CompressionType
307  {
308  COMPRESS_RAW,
309  COMPRESS_RAWFULL,
310  COMPRESS_CONSTANT,
311  COMPRESS_FPREAL16,
312  COMPRESS_ENGINE
313  };
314 
315  /// Fetch a given local value. (x,y,z) should be local to
316  /// this tile.
317  SYS_FORCE_INLINE T operator()(int x, int y, int z) const
318  {
319  UT_ASSERT_P(x >= 0 && y >= 0 && z >= 0);
320  UT_ASSERT_P(x < myRes[0] && y < myRes[1] && z < myRes[2]);
321 
322  switch (myCompressionType)
323  {
324  case COMPRESS_RAW:
325  return ((T *)myData)[
326  ((z * myRes[1]) + y) * myRes[0] + x ];
327 
328  case COMPRESS_CONSTANT:
329  return rawConstVal();
330 
331  case COMPRESS_RAWFULL:
332  return ((T *)myData)[
333  ((z * TILESIZE) + y) * TILESIZE + x ];
334 
335  case COMPRESS_FPREAL16:
336  {
337  T result;
338  result = (((fpreal16 *)myData)[
339  ((z * myRes[1]) + y) * myRes[0] + x ]);
340  return result;
341  }
342  }
343 
344  // By default use the compression engine.
345  UT_VoxelTileCompress<T> *engine;
346 
347  engine = getCompressionEngine(myCompressionType);
348  return engine->getValue(*this, x, y, z);
349  }
350 
351  /// Lerps two numbers, templated to work with T.
352  static SYS_FORCE_INLINE T lerpValues(T v1, T v2, fpreal32 bias)
353  {
354  return v1 + (v2 - v1) * bias;
355  }
356 
357  /// Does a trilinear interpolation. x,y,z should be local to this
358  /// as should x+1, y+1, and z+1. fx-fz should be 0..1.
359  SYS_FORCE_INLINE T lerp(int x, int y, int z, float fx, float fy, float fz) const;
360 
361  template <int AXIS2D>
362  SYS_FORCE_INLINE T lerpAxis(int x, int y, int z, float fx, float fy, float fz) const;
363 
364  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
365  /// array should have 8 elements, x minor, z major.
366  /// Requires it is in bounds.
367  /// Returns true if all constant, in which case only a single
368  /// sample is filled, [0]
369  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
370  T *sample) const;
371  template <int AXIS2D>
372  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
373  T *sample) const;
374 
375  /// Extracts +/- dx, +/- dy, +/- dz and then the center into
376  /// 7 samples.
377  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
378  T *sample) const;
379  /// Extracts the full cube of +/- dx, dy, dz. xminor, zmajor, into
380  /// 27 elements.
381  bool extractSampleCube(int x, int y, int z,
382  T *sample) const;
383 #if 0
384  /// MSVC can't handle aligned parameters after the third so
385  /// frac must come first.
386  T lerp(v4uf frac, int x, int y, int z) const;
387 #endif
388 
389  /// Returns a cached line to our internal data, at local address x,y,z.
390  /// cacheline is a caller allocated structure to fill out if we have
391  /// to decompress. If forcecopy isn't set and we can, the result may
392  /// be an internal pointer. stride is set to the update for moving one
393  /// x position in the cache.
394  /// strideofone should be set to true if you want to prevent 0 stride
395  /// results for constant tiles.
396  T *fillCacheLine(T *cacheline, int &stride, int x, int y, int z, bool forcecopy, bool strideofone) const;
397 
398  /// Fills a cache line from an external buffer into our own data.
399  void writeCacheLine(T *cacheline, int y, int z);
400 
401  /// Copies between two tiles. The tiles' voxels match up, but don't
402  /// have the same offset. The maximal overlapping voxels are copied.
403  /// this->setValue(dstx, dsty, dstz, srctile(srcx, srcy, srcz));
404  void copyFragment(int dstx, int dsty, int dstz,
405  const UT_VoxelTile<T> &srctile,
406  int srcx, int srcy, int srcz);
407 
408  /// Flattens ourself into the given destination buffer.
409  template <typename S>
410  void flatten(S *dst, int dststride) const;
411 
412  /// Fills our values from the given dense flat buffer. Will
413  /// create a constant tile if the source is constant.
414  template <typename S>
415  void writeData(const S *src, int srcstride);
416 
417  /// The setValue() is intentionally separate so we can avoid
418  /// expanding constant data when we write the same value to it.
419  void setValue(int x, int y, int z, T t);
420 
421  /// Finds the minimum and maximum T values
422  void findMinMax(T &min, T &max) const;
423 
424  /// Determines the average value of the tile.
425  void findAverage(T &avg) const;
426 
427  /// Returns if this tile is constant.
428  bool isConstant() const
429  { return myCompressionType == COMPRESS_CONSTANT; }
430 
431  /// Returns true if any NANs are in this tile
432  bool hasNan() const;
433 
434  /// Returns if this tile is in raw format.
435  bool isRaw() const
436  { return myCompressionType == COMPRESS_RAW; }
437 
438  /// Returns if this tile is in raw full format.
439  bool isRawFull() const
440  { return myCompressionType == COMPRESS_RAWFULL; }
441 
442  /// Returns true if this is a simple form of compression, either
443  /// constant, raw, or a raw full that isn't padded
444  bool isSimpleCompression() const
445  {
446  if (isRaw()) return true;
447  if (isConstant()) return true;
448  if (isRawFull() && myRes[0] == TILESIZE && myRes[1] == TILESIZE)
449  return true;
450  return false;
451  }
452 
453  /// Attempts to compress this tile. Returns true if any
454  /// compression performed.
455  bool tryCompress(const UT_VoxelCompressOptions &options);
456 
457  /// Turns this tile into a constant tile of the given value.
458  void makeConstant(T t);
459 
460  /// Explicit compress to fpreal16. Lossy. No-op if already constant.
461  void makeFpreal16();
462 
463  /// Turns a compressed tile into a raw tile.
464  void uncompress();
465 
466  /// Turns a tile into a raw full tile.
467  void uncompressFull();
468 
469  /// Like uncompress() except it leaves the data uninitialized. Result
470  /// is either COMPRESS_RAW or COMPRESS_RAWFULL depending on the tile res.
471  /// @note USE WITH CAUTION!
472  void makeRawUninitialized();
473 
474  /// Returns the raw full data of the tile.
475  T *rawFullData()
476  {
477  uncompressFull();
478  return (T *)myData;
479  }
480 
481  /// This only makes sense for simple compression. Use with
482  /// extreme care.
483  T *rawData()
484  { if (inlineConstant() && isConstant())
485  { return (T *) &myData; }
486  return (T *)myData; }
487  const T *rawData() const
488  { if (inlineConstant() && isConstant())
489  { return (const T *) &myData; }
490  return (const T *)myData; }
491 
492  /// Read the current resolution.
493  int xres() const { return myRes[0]; }
494  int yres() const { return myRes[1]; }
495  int zres() const { return myRes[2]; }
496 
497  int getRes(int dim) const { return myRes[dim]; }
498 
499 
500  int numVoxels() const { return myRes[0] * myRes[1] * myRes[2]; }
501 
502  /// Returns the amount of memory used by this tile.
503  int64 getMemoryUsage(bool inclusive) const;
504 
505  /// Returns the amount of data used by the tile myData pointer.
506  exint getDataLength() const;
507 
508  /// A routine used by filtered evaluation to accumulate a partial
509  /// filtered sum in this tile.
510  /// pstart, pend - voxel bounds (in UT_VoxelArray coordinates)
511  /// weights - weight array
512  /// start - UT_VoxelArray coordinates at [0] in the weight array
513  void weightedSum(int pstart[3], int pend[3],
514  const float *weights[3], int start[3],
515  T &result);
516 
517  /// Designed to be specialized according to T
518 
519  /// Update min & max to encompass T itself.
520  static void expandMinMax(T v, T &min, T &max)
521  {
522  UTvoxelTileExpandMinMax(v, min, max);
523  }
524 
525  /// Return the "distance" of a & b. This is used for
526  /// tolerance checks on equality comparisons.
527  static fpreal dist(T a, T b)
528  {
529  return UTvoxelTileDist(a, b);
530  }
531 
533 
534  // Returns the index of the bound compression engine.
535  static int lookupCompressionEngine(const char *name);
536  // Given an index, gets the compression engine.
537  static UT_VoxelTileCompress<T> *getCompressionEngine(int index);
538 
539  /// Saves this tile's data, in compressed form.
540  /// May save in uncompressed form if the compression type does
541  /// not support saving.
542  void save(std::ostream &os) const;
543  bool save(UT_JSONWriter &w) const;
544 
545  /// Loads tile data. Uses the compression index to map the saved
546  /// compression types into the correct loading compression types.
547  void load(UT_IStream &is, const UT_IntArray &compression);
548  bool load(UT_JSONParser &p, const UT_IntArray &compression);
549 
550  /// Stores a list of compression engines to os.
551  static void saveCompressionTypes(std::ostream &os);
552  static bool saveCompressionTypes(UT_JSONWriter &w);
553 
554  /// Builds a translation table from the given stream's compression types
555  /// into our own valid compression types.
556  static void loadCompressionTypes(UT_IStream &is, UT_IntArray &compressions);
557  static bool loadCompressionTypes(UT_JSONParser &p, UT_IntArray &compressions);
558 
559 protected:
560  // Attempts to set the value to the native compressed format
561  // Some compression types allow some values to be written
562  // without decompression. Eg, you can write to a constant tile
563  // the tile's own value without decompression.
564  // If this returns true, t has been written.
565  bool writeThrough(int x, int y, int z, T t);
566 
567  /// Sets the local res of the tile. Does *not* resize the allocated
568  /// memory.
569  void setRes(int xr, int yr, int zr)
570  { myRes[0] = xr; myRes[1] = yr; myRes[2] = zr; }
571 
572  static bool inlineConstant()
573  {
574  return (sizeof(T) <= sizeof(T*));
575  }
576 
577  T rawConstVal() const
578  { if (inlineConstant()) { return *((const T *)&myData); }
579  return *((const T*)myData); }
581  { if (inlineConstant()) { return ((T *)&myData); }
582  return ((T*)myData); }
583 
584  void setForeignData(void *data, int8 compress_type)
585  {
586  freeData();
587  myCompressionType = compress_type;
588 
589  if (isConstant() && inlineConstant())
590  {
591  makeConstant(*(T *)data);
592  }
593  else
594  {
595  myData = data;
596  myForeignData = true;
597  }
598  }
599 
600 public:
601  /// Frees myData and sets it to zero. This is a bit tricky
602  /// as the constant tiles may be inlined.
603  /// This is only public for the compression engines.
604  void freeData()
605  {
606  if (inlineConstant() && isConstant())
607  {
608  // Do nothing!
609  }
610  else if (myData && !myForeignData)
611  {
612  UT_VOXEL_FREE(myData);
613  }
614  myData = 0;
615  myForeignData = false;
616  }
617 
618 public:
619  // This is only public so the compression engines can get to it.
620  // It is blind data, do not alter!
621  void *myData;
622 private:
623 
624  /// Resolutions.
625  int8 myRes[3];
626 
627  /// Am I a constant tile?
628  int8 myCompressionType;
629 
630  int8 myForeignData;
631 
632  static UT_ValArray<UT_VoxelTileCompress<T> *> &getCompressionEngines()
633  {
634  return UTvoxelTileGetCompressionEngines((T *) 0);
635  }
636 
637  friend class UT_VoxelTileCompress<T>;
638  friend class UT_VoxelArray<T>;
639  template <typename S, bool DoWrite, bool DoRead, bool TestForWrites>
640  friend class UT_VoxelProbe;
641 };
642 
643 ///
644 /// UT_VoxelArray
645 ///
646 /// This provides a data structure to hold a three-dimensional array
647 /// of data. The data should be some simple arithmetic type, such
648 /// as uint8, fpreal16, or UT_Vector3.
649 ///
650 /// Some operations, such as gradients, may make less sense with uint8.
651 ///
652 template <typename T>
653 class UT_VoxelArray
654 {
655 public:
656  UT_VoxelArray();
657  ~UT_VoxelArray();
658 
659  /// Copy constructor:
660  UT_VoxelArray(const UT_VoxelArray<T> &src);
661 
662  /// Assignment operator:
663  const UT_VoxelArray<T> &operator=(const UT_VoxelArray<T> &src);
664 
665  /// This sets the voxelarray to have the given resolution. If resolution is
666  /// changed, all elements will be set to 0. If resolution is already equal
667  /// to the arguments, all elements will be set to 0 only if reset is true;
668  /// otherwise, the voxel array will be left untouched.
669  void size(int xres, int yres, int zres, bool reset = true);
670 
671  /// This will ensure this voxel array matches the given voxel array
672  /// in terms of dimensions & border conditions. It may invoke
673  /// a size() and hence reset the field to 0.
674  void match(const UT_VoxelArray<T> &src);
675 
676  template <typename S>
677  bool isMatching(const UT_VoxelArray<S> &src) const
678  {
679  return src.getXRes() == getXRes() &&
680  src.getYRes() == getYRes() &&
681  src.getZRes() == getZRes();
682  }
683 
684  int getXRes() const { return myRes[0]; }
685  int getYRes() const { return myRes[1]; }
686  int getZRes() const { return myRes[2]; }
687  int getRes(int axis) const { return myRes[axis]; }
688 
690  {
691  return UT_Vector3I(myRes[0], myRes[1], myRes[2]);
692 
693  }
694 
695  /// Return the amount of memory used by this array.
696  int64 getMemoryUsage(bool inclusive) const;
697 
698  /// Sets this voxel array to the given constant value. All tiles
699  /// are turned into constant tiles.
700  THREADED_METHOD1(UT_VoxelArray<T>, numTiles() > 100,
701  constant,
702  T, t)
703  void constantPartial(T t, const UT_JobInfo &info);
704 
705  /// If this voxel array is all constant tiles, returns true.
706  /// The optional pointer is initialized to the constant value iff
707  /// the array is constant. (Note by constant we mean made of constant
708  /// tiles of the same value - if some tiles are uncompressed but
709  /// constant, it will still return false)
710  bool isConstant(T *cval = 0) const;
711 
712  /// Returns true if any element of the voxel array is NAN
713  bool hasNan() const;
714 
715  /// This convenience function lets you sample the voxel array.
716  /// pos is in the range [0..1]^3.
717  /// The returned T value is trilinearly interpolated. Edges are determined
718  /// by the border mode.
719  /// The cells are sampled at the center of the voxels.
720  T operator()(UT_Vector3D pos) const;
721  T operator()(UT_Vector3F pos) const;
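// Example (sketch): creating a small field and sampling it at a point in
// the unit cube. Samples outside [0..1]^3 are resolved by the border mode.
//
//     UT_VoxelArray<fpreal32> vol;
//     vol.size(32, 32, 32);                       // voxels reset to 0
//     vol.setValue(16, 16, 16, 1.0f);
//     fpreal32 v = vol(UT_Vector3F(0.5f, 0.5f, 0.5f));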
722 
723  /// This convenience function lets you sample the voxel array.
724  /// pos is in the range [0..1]^3.
725  /// The min/max is the range of the sampled values.
726  void evaluateMinMax(T &lerp, T &lmin, T &lmax,
727  UT_Vector3F pos) const;
728 
729  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
730  /// Allows out of range evaluation
732  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
733  /// Allows out of range evaluation
734  SYS_FORCE_INLINE T lerpVoxel(int x, int y, int z,
735  float fx, float fy, float fz) const;
736  template <int AXIS2D>
738  template <int AXIS2D>
739  SYS_FORCE_INLINE T lerpVoxelAxis(int x, int y, int z,
740  float fx, float fy, float fz) const;
741 
742  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
743  /// Allows out of range evaluation. Also computes min/max of
744  /// interpolated samples.
745  SYS_FORCE_INLINE void lerpVoxelCoordMinMax(T &lerp, T &lmin, T &lmax,
746  UT_Vector3F pos) const;
747  template <int AXIS2D>
748  SYS_FORCE_INLINE void lerpVoxelCoordMinMaxAxis(T &lerp, T &lmin, T &lmax,
749  UT_Vector3F pos) const;
750  /// Evaluate using voxel coords, from 0,0,0 to resx,resy,resz.
751  /// Allows out of range evaluation. Also computes min/max of
752  /// interpolated samples.
754  T &lerp, T &lmin, T &lmax,
755  int x, int y, int z,
756  float fx, float fy, float fz) const;
757  template <int AXIS2D>
759  T &lerp, T &lmin, T &lmax,
760  int x, int y, int z,
761  float fx, float fy, float fz) const;
762 
763  /// Extracts a sample of [x,y,z] to [x+1,y+1,z+1]. The sample
764  /// array should have 8 elements, x minor, z major.
765  SYS_FORCE_INLINE bool extractSample(int x, int y, int z,
766  T *sample) const;
767  template <int AXIS2D>
768  SYS_FORCE_INLINE bool extractSampleAxis(int x, int y, int z,
769  T *sample) const;
770 
771  /// Extracts a sample in a plus shape, dx, then dy, then dz, finally
772  /// the center into 7 voxels.
773  SYS_FORCE_INLINE bool extractSamplePlus(int x, int y, int z,
774  T *sample) const;
775  /// Extracts the dense 3x3x3 cube centered at x,y,z into 27 samples,
776  /// z major, x minor.
777  SYS_FORCE_INLINE bool extractSampleCube(int x, int y, int z,
778  T *sample) const;
779 
780  /// Lerps the given sample using trilinear interpolation
782  float fx, float fy, float fz) const;
783  template <int AXIS2D>
785  float fx, float fy, float fz) const;
786 
787  SYS_FORCE_INLINE void splitVoxelCoord(UT_Vector3F pos, int &x, int &y, int &z,
788  float &fx, float &fy, float &fz) const
789  {
790  // Determine integer & fractional components.
791  fx = pos.x();
792  SYSfastSplitFloat(fx, x);
793  fy = pos.y();
794  SYSfastSplitFloat(fy, y);
795  fz = pos.z();
796  SYSfastSplitFloat(fz, z);
797  }
798  template <int AXIS2D>
799  SYS_FORCE_INLINE void splitVoxelCoordAxis(UT_Vector3F pos, int &x, int &y, int &z,
800  float &fx, float &fy, float &fz) const
801  {
802  // Determine integer & fractional components.
803  if (AXIS2D != 0)
804  {
805  fx = pos.x();
806  SYSfastSplitFloat(fx, x);
807  }
808  else
809  {
810  fx = 0.0;
811  x = 0;
812  }
813  if (AXIS2D != 1)
814  {
815  fy = pos.y();
816  SYSfastSplitFloat(fy, y);
817  }
818  else
819  {
820  fy = 0.0;
821  y = 0;
822  }
823  if (AXIS2D != 2)
824  {
825  fz = pos.z();
826  SYSfastSplitFloat(fz, z);
827  }
828  else
829  {
830  fz = 0.0;
831  z = 0;
832  }
833  }
834 #if 0
835  T operator()(v4uf pos) const;
836 #endif
837 
838  /// Filtered evaluation of the voxel array. This operation should
839  /// exhibit the same behavior as IMG3D_Channel::evaluate.
840  T evaluate(const UT_Vector3 &pos, const UT_Filter &filter,
841  fpreal radius, int clampaxis = -1) const;
842 
843  /// Fills this by resampling the given voxel array.
844  void resample(const UT_VoxelArray<T> &src,
845  UT_FilterType filtertype = UT_FILTER_POINT,
846  float filterwidthscale = 1.0f,
847  int clampaxis = -1);
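// Example (sketch): filling a half-resolution copy of 'src'. UT_FILTER_POINT
// is the default; other UT_FilterType values trade speed for quality.
//
//     UT_VoxelArray<fpreal32> dst;
//     dst.size(src.getXRes() / 2, src.getYRes() / 2, src.getZRes() / 2);
//     dst.resample(src, UT_FILTER_POINT);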
848 
849  /// Flattens this into an array. Z major, then Y, then X.
850  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
851  THREADED_METHOD3_CONST(UT_VoxelArray<T>, numTiles() > 16,
852  flatten,
853  T *, flatarray,
854  exint, ystride,
855  exint, zstride)
856  void flattenPartial(T *flatarray, exint ystride, exint zstride,
857  const UT_JobInfo &info) const;
858 
859  /// Flattens this into an array. Z major, then Y, then X.
860  /// Flattens a 2d slice where AXIS2D is constant.
861  /// If AXIS2D == 2 (ie, z): flatarray[x + y * ystride] = getValue(x, y, 0);
862  /// Flattens by destination x-major stripes to avoid page collisions
863  /// on freshly allocated memory buffers.
864  template <int AXIS2D>
865  void flattenPartialAxis(T *flatarray, exint ystride,
866  const UT_JobInfo &info) const;
867 
868  /// Flattens this into an array suitable for a GL 8bit texture.
869  /// Z major, then Y, then X.
870  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
871  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
872  flattenGLFixed8,
873  uint8 *, flatarray,
874  exint, ystride,
875  exint, zstride,
876  T , dummy)
877  void flattenGLFixed8Partial(uint8 *flatarray,
878  exint ystride, exint zstride,
879  T dummy,
880  const UT_JobInfo &info) const;
881 
882  /// Flattens this into an array suitable for a GL 16bit FP texture.
883  /// Z major, then Y, then X.
884  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
885  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
886  flattenGL16F,
887  UT_Vector4H *, flatarray,
888  exint, ystride,
889  exint, zstride,
890  T , dummy)
891  void flattenGL16FPartial(UT_Vector4H *flatarray,
892  exint ystride, exint zstride,
893  T dummy,
894  const UT_JobInfo &info) const;
895 
896  /// Flattens this into an array suitable for a GL 32b FP texture. Note that
897  /// this also works around an older Nvidia driver bug that caused very small
898  /// valued texels (<1e-9) to appear as huge random values in the texture.
899  /// Z major, then Y, then X.
900  /// flatarray[x + y * ystride + z * zstride] = getValue(x, y, z);
901  THREADED_METHOD4_CONST(UT_VoxelArray<T>, numTiles() > 16,
902  flattenGL32F,
903  UT_Vector4F *, flatarray,
904  exint, ystride,
905  exint, zstride,
906  T , dummy)
907  void flattenGL32FPartial(UT_Vector4F *flatarray,
908  exint ystride, exint zstride,
909  T dummy,
910  const UT_JobInfo &info) const;
911 
912  /// Fills this from a flattened array. Z major, then Y, then X.
913  /// setValue(x,y,z, flatarray[x + y * ystride + z * zstride]);
914  THREADED_METHOD3(UT_VoxelArray<T>, numTiles() > 16,
915  extractFromFlattened,
916  const T *, flatarray,
917  exint, ystride,
918  exint, zstride)
919  void extractFromFlattenedPartial(const T *flatarray,
920  exint ystride, exint zstride,
921  const UT_JobInfo &info);
922 
923  /// Copies into this voxel array from the source array.
924  /// Conceptually,
925  /// this->setValue(x, y, z, src.getValue(x+offx, y+offy, z+offz));
926  void copyWithOffset(const UT_VoxelArray<T> &src,
927  int offx, int offy, int offz);
928  THREADED_METHOD4(UT_VoxelArray<T>, numTiles() > 4,
929  copyWithOffsetInternal,
930  const UT_VoxelArray<T> &, src,
931  int, offx,
932  int, offy,
933  int, offz)
934  void copyWithOffsetInternalPartial(const UT_VoxelArray<T> &src,
935  int offx, int offy, int offz,
936  const UT_JobInfo &info);
937 
938  /// Moves data from the source voxel array into this array. The offsets should
939  /// be in terms of tiles. Source may be modified as this array steals its data
940  /// buffers in such a way that no dynamic memory will leak when these arrays
941  /// are freed.
942  /// Conceptually, this function performs the same operation as copyWithOffset,
943  /// but with offsets specified in terms of tiles:
944  /// this->setValue(x, y, z, src.getValue(x+off_v_x, y+off_v_y, z+off_v_z))
945  /// where off_v_A=tileoffA*TILESIZE for A in {x, y, z}.
946  void moveTilesWithOffset(UT_VoxelArray<T> &src, int tileoffx, int tileoffy,
947  int tileoffz);
948 
949  /// Fills dstdata with the voxel data of listed tiles. Stride is measured
950  /// in T. Data order is in tile-order. So, sorted by tilelist, then
951  /// z, y, x within that tile.
952  /// The ix/iy/iz variant allows partial tiles. If the number of
953  /// voxels to write to a tile matches the tile size, however, the
954  /// ix/iy/iz is ignored and the tile is written in canonical order.
955  template <typename S>
956  S *extractTiles(S *dstdata, int stride,
957  const UT_IntArray &tilelist) const;
958  template <typename S, typename IDX>
959  S *extractTiles(S *dstdata, int stride,
960  const IDX *ix, const IDX *iy, const IDX *iz,
961  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist) const;
962 
963  /// Overwrites our tiles with the given data. Does checking
964  /// for constant tiles. Input srcdata stream should match
965  /// that of extractTiles.
966  template <typename S>
967  const S *writeTiles(const S *srcdata, int srcstride,
968  const UT_IntArray &tilelist);
969  template <typename S, typename IDX>
970  const S *writeTiles(const S *srcdata, int srcstride,
971  const IDX *ix, const IDX *iy, const IDX *iz,
972  const UT_Array<UT_VoxelArrayTileDataDescr> &tilelist);
973 
974  /// Converts a 3d position in range [0..1]^3 into the closest
975  /// index value.
976  /// Returns false if the resulting index was out of range. The index
977  /// will still be set.
978  bool posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const;
979  bool posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const;
980  /// Converts a 3d position in [0..1]^3 into the equivalent in
981  /// the integer cell space. Does not clamp to the closest value.
982  bool posToIndex(UT_Vector3 pos, UT_Vector3 &ipos) const;
983  bool posToIndex(UT_Vector3D pos, UT_Vector3D &ipos) const;
984  /// Converts an index into a position.
985  /// Returns false if the source index was out of range, in which case
986  /// pos will be outside [0..1]^3
987  bool indexToPos(int x, int y, int z, UT_Vector3F &pos) const;
988  bool indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const;
989  void findexToPos(UT_Vector3F ipos, UT_Vector3F &pos) const;
990  void findexToPos(UT_Vector3D ipos, UT_Vector3D &pos) const;
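// Example (sketch): mapping between the [0..1]^3 position space and voxel
// indices; posToIndex() returns false (but still writes the index) when the
// position falls outside the array. 'vol' stands for some UT_VoxelArray<fpreal32>.
//
//     int x, y, z;
//     if (vol.posToIndex(UT_Vector3(0.25, 0.5, 0.75), x, y, z))
//     {
//         UT_Vector3F pos;
//         vol.indexToPos(x, y, z, pos);   // voxel position back in [0..1]^3
//     }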
991 
992  /// Clamps the given x, y, and z values to lie inside the valid index
993  /// range.
994  void clampIndex(int &x, int &y, int &z) const
995  {
996  x = SYSclamp(x, 0, myRes[0]-1);
997  y = SYSclamp(y, 0, myRes[1]-1);
998  z = SYSclamp(z, 0, myRes[2]-1);
999  }
1000 
1001  /// Returns true if the given x, y, z values lie inside the valid index range.
1002  bool isValidIndex(int x, int y, int z) const
1003  {
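 // Branch-free bounds test: (x | y | z) < 0 iff any index is negative,
 // and (x - res) is negative exactly when x < res, so AND-ing the three
 // differences is negative only if every index is below its resolution.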
1004  return !((x | y | z) < 0) &&
1005  (((x - myRes[0]) & (y - myRes[1]) & (z - myRes[2])) < 0);
1006  }
1007 
1008  /// This allows you to read & write the raw data.
1009  /// Out of bound reads are illegal.
1010  T operator()(UT_Vector3I index) const
1011  {
1012  return (*this)(index[0], index[1], index[2]);
1013  }
1014  T operator()(int x, int y, int z) const
1015  {
1016  UT_ASSERT_P(isValidIndex(x, y, z));
1017  return (*getTile(x >> TILEBITS,
1018  y >> TILEBITS,
1019  z >> TILEBITS))
1020  (x & TILEMASK, y & TILEMASK, z & TILEMASK);
1021  }
1022 
1023  void setValue(UT_Vector3I index, T value)
1024  {
1025  setValue(index[0], index[1], index[2], value);
1026  }
1027 
1028  void setValue(int x, int y, int z, T t)
1029  {
1030  UT_ASSERT_P(isValidIndex(x, y, z));
1031  getTile(x >> TILEBITS,
1032  y >> TILEBITS,
1033  z >> TILEBITS)->setValue(
1034  x & TILEMASK, y & TILEMASK, z & TILEMASK, t);
1035  }
1036 
1037  /// This will clamp the bounds to fit within the voxel array,
1038  /// using the border type to resolve out of range values.
1039  T getValue(int x, int y, int z) const
1040  {
1041  // First handle the most common case.
1042  if (isValidIndex(x, y, z))
1043  return (*this)(x, y, z);
1044 
1045  // Verify our voxel array is non-empty.
1046  if (!myTiles)
1047  return myBorderValue;
1048 
1049  // We now know we are out of range, adjust appropriately
1050  switch (myBorderType)
1051  {
1052  case UT_VOXELBORDER_CONSTANT:
1053  return myBorderValue;
1054 
1055  case UT_VOXELBORDER_REPEAT:
1056  if (x < 0 || x >= myRes[0])
1057  {
1058  x %= myRes[0];
1059  if (x < 0)
1060  x += myRes[0];
1061  }
1062  if (y < 0 || y >= myRes[1])
1063  {
1064  y %= myRes[1];
1065  if (y < 0)
1066  y += myRes[1];
1067  }
1068  if (z < 0 || z >= myRes[2])
1069  {
1070  z %= myRes[2];
1071  if (z < 0)
1072  z += myRes[2];
1073  }
1074  break;
1075 
1076  case UT_VOXELBORDER_STREAK:
1077  clampIndex(x, y, z);
1078  break;
1079  case UT_VOXELBORDER_EXTRAP:
1080  {
1081  int cx, cy, cz;
1082  T result;
1083 
1084  cx = x; cy = y; cz = z;
1085  clampIndex(cx, cy, cz);
1086 
1087  result = (*this)(cx, cy, cz);
1088  result += (x - cx) * myBorderScale[0] +
1089  (y - cy) * myBorderScale[1] +
1090  (z - cz) * myBorderScale[2];
1091  return result;
1092  }
1093  }
1094 
1095  // It is now within bounds, do normal fetch.
1096  return (*this)(x, y, z);
1097  }
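// Example (sketch): out-of-range reads are resolved by the border logic
// above. With UT_VOXELBORDER_STREAK the nearest edge voxel is returned;
// with UT_VOXELBORDER_CONSTANT the border value is returned instead.
//
//     fpreal32 edge = vol.getValue(-1, 0, 0);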
1098 
1099  /// Gets values in the box [bbox.minvec(), bbox.maxvec())
1100  /// Values are stored in the array `values` of size `size`, which must be at least `bbox.volume()`.
1101  /// The order of values is given by: `i + bbox.xsize() * (j + bbox.ysize() * k)`
1102  ///
1103  /// If this returns true, the values in `bbox` are constant and only values[0] is guaranteed to be assigned.
1104  bool getValues(const UT_BoundingBoxI &bbox,
1105  T * values,
1106  const exint size) const
1107  {
1108  UT_ASSERT_P(bbox.volume() <= size);
1109 
1110  const UT_BoundingBoxI bounds = {0, 0, 0, getXRes(), getYRes(), getZRes()};
1111 
1112  const UT_BoundingBoxI tiles =
1113  {bbox.xmin() >> TILEBITS,
1114  bbox.ymin() >> TILEBITS,
1115  bbox.zmin() >> TILEBITS,
1116  ((bbox.xmax() - 1) >> TILEBITS) + 1,
1117  ((bbox.ymax() - 1) >> TILEBITS) + 1,
1118  ((bbox.zmax() - 1) >> TILEBITS) + 1};
1119 
1120  bool allconstant = true;
1121 
1122  UT_BoundingBoxI tilesamples;
1123 
1124  for (int kt = tiles.zmin(); kt < tiles.zmax(); kt++)
1125  {
1126  // zmin & zmax
1127  tilesamples.vals[2][0] = TILESIZE * kt;
1128  tilesamples.vals[2][1] = TILESIZE * (kt + 1);
1129  // clip bounds
1130  if (kt == tiles.zmin())
1131  tilesamples.vals[2][0] = bbox.zmin();
1132  if (kt == tiles.zmax() - 1)
1133  tilesamples.vals[2][1] = bbox.zmax();
1134 
1135  for (int jt = tiles.ymin(); jt < tiles.ymax(); jt++)
1136  {
1137  // ymin & ymax
1138  tilesamples.vals[1][0] = TILESIZE * jt;
1139  tilesamples.vals[1][1] = TILESIZE * (jt + 1);
1140  // clip bounds
1141  if (jt == tiles.ymin())
1142  tilesamples.vals[1][0] = bbox.ymin();
1143  if (jt == tiles.ymax() - 1)
1144  tilesamples.vals[1][1] = bbox.ymax();
1145 
1146  for (int it = tiles.xmin(); it < tiles.xmax(); it++)
1147  {
1148  // xmin & xmax
1149  tilesamples.vals[0][0] = TILESIZE * it;
1150  tilesamples.vals[0][1] = TILESIZE * (it + 1);
1151  // clip bounds
1152  if (it == tiles.xmin())
1153  tilesamples.vals[0][0] = bbox.xmin();
1154  if (it == tiles.xmax() - 1)
1155  tilesamples.vals[0][1] = bbox.xmax();
1156 
1157  const bool inbounds = tilesamples.isInside(bounds);
1158 
1159  if (inbounds)
1160  {
1161  const UT_VoxelTile<T> *tile = getTile(it, jt, kt);
1162 
1163  for (int k = tilesamples.zmin();
1164  k < tilesamples.zmax(); k++)
1165  {
1166  for (int j = tilesamples.ymin();
1167  j < tilesamples.ymax(); j++)
1168  {
1169  for (int i = tilesamples.xmin();
1170  i < tilesamples.xmax(); i++)
1171  {
1172  const UT_Vector3I localindex = {
1173  i - bbox.xmin(),
1174  j - bbox.ymin(),
1175  k - bbox.zmin()};
1176 
1177  const int locallinindex
1178  = localindex.x()
1179  + bbox.xsize() * (localindex.y()
1180  + bbox.ysize() * localindex.z());
1181 
1182  values[locallinindex] = (*tile)(
1183  i & TILEMASK,
1184  j & TILEMASK,
1185  k & TILEMASK);
1186 
1187  if (allconstant
1188  && (values[0] != values[locallinindex]))
1189  {
1190  allconstant = false;
1191  }
1192  }
1193  }
1194  }
1195  }
1196  else
1197  {
1198  for (int k = tilesamples.zmin(); k < tilesamples.zmax(); k++)
1199  {
1200  for (int j = tilesamples.ymin();
1201  j < tilesamples.ymax(); j++)
1202  {
1203  for (int i = tilesamples.xmin();
1204  i < tilesamples.xmax(); i++)
1205  {
1206  const UT_Vector3I localindex = {
1207  i - bbox.xmin(),
1208  j - bbox.ymin(),
1209  k - bbox.zmin()};
1210 
1211  const int locallinindex
1212  = localindex.x()
1213  + bbox.xsize() * (localindex.y()
1214  + bbox.ysize() * localindex.z());
1215 
1216  values[locallinindex] = getValue(i, j, k);
1217 
1218  if (allconstant
1219  && (values[0] != values[locallinindex]))
1220  {
1221  allconstant = false;
1222  }
1223  }
1224  }
1225  }
1226 
1227  }
1228  }
1229  }
1230  }
1231 
1232  return allconstant;
1233  }
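// Example (sketch): gathering a 4x4x4 block of voxels in one call. The
// return value reports whether the block was uniform, in which case only
// block[0] is meaningful.
//
//     UT_BoundingBoxI box(UT_Vector3I(0, 0, 0), UT_Vector3I(4, 4, 4));
//     fpreal32 block[64];
//     bool uniform = vol.getValues(box, block, 64);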
1234 
1235  void setBorder(UT_VoxelBorderType type, T t);
1236  void setBorderScale(T scalex, T scaley, T scalez);
1237  UT_VoxelBorderType getBorder() const { return myBorderType; }
1238  T getBorderValue() const { return myBorderValue; }
1239  T getBorderScale(int axis) const { return myBorderScale[axis]; }
1240 
1241  /// This tries to compress or collapse each tile. This can
1242  /// be expensive (ie, converting a tile to constant), so
1243  /// should be saved until modifications are complete.
1244  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1245  collapseAllTiles)
1246  void collapseAllTilesPartial(const UT_JobInfo &info);
1247 
1248  /// Uncompresses all tiles into non-constant tiles. Useful
1249  /// if you have a multithreaded algorithm that may need to
1250  /// both read and write: if you write to a collapsed tile
1251  /// while someone else reads from it, bad stuff happens.
1252  /// Instead, you can expandAllTiles. This may have serious
1253  /// consequences in memory use, however.
1254  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1255  expandAllTiles)
1256  void expandAllTilesPartial(const UT_JobInfo &info);
1257 
1258  /// Uncompresses all tiles, but leaves constant tiles alone.
1259  /// Useful for cleaning out any non-standard compression algorithm
1260  /// that some external program can't handle.
1261  THREADED_METHOD(UT_VoxelArray<T>, numTiles() > 100,
1262  expandAllNonConstTiles)
1263  void expandAllNonConstTilesPartial(const UT_JobInfo &info);
1264 
1265  /// The direct tile access methods are to make TBF writing a bit
1266  /// more efficient.
1267  UT_VoxelTile<T> *getTile(int tx, int ty, int tz) const
1268  { return &myTiles[xyzTileToLinear(tx, ty, tz)]; }
1269  UT_VoxelTile<T> *getLinearTile(int idx) const
1270  { return &myTiles[idx]; }
1271  void linearTileToXYZ(int idx, int &x, int &y, int &z) const
1272  {
1273  x = idx % myTileRes[0];
1274  idx -= x;
1275  idx /= myTileRes[0];
1276  y = idx % myTileRes[1];
1277  idx -= y;
1278  idx /= myTileRes[1];
1279  z = idx;
1280  }
1281  UT_Vector3I linearTileToXYZ(int idx) const
1282  {
1283  UT_Vector3I tileindex;
1284  tileindex[0] = idx % myTileRes[0];
1285  idx -= tileindex[0];
1286  idx /= myTileRes[0];
1287  tileindex[1] = idx % myTileRes[1];
1288  idx -= tileindex[1];
1289  idx /= myTileRes[1];
1290  tileindex[2] = idx;
1291 
1292  return tileindex;
1293  }
1294 
1295  int xyzTileToLinear(int x, int y, int z) const
1296  { return (z * myTileRes[1] + y) * myTileRes[0] + x; }
1297 
1298  int indexToLinearTile(int x, int y, int z) const
1299  { return ((z >> TILEBITS) * myTileRes[1] + (y >> TILEBITS)) * myTileRes[0] + (x >> TILEBITS); }
1300 
1301  /// idxth tile represents the voxels indexed [start,end).
1302  void getTileVoxels(int idx,
1303  UT_Vector3I &start, UT_Vector3I &end) const
1304  {
1305  int x, y, z;
1306  linearTileToXYZ(idx, x, y, z);
1307 
1308  start.x() = x * TILESIZE;
1309  start.y() = y * TILESIZE;
1310  start.z() = z * TILESIZE;
1311  end = start;
1312  end.x() += myTiles[idx].xres();
1313  end.y() += myTiles[idx].yres();
1314  end.z() += myTiles[idx].zres();
1315  }
1316 
1317  UT_BoundingBoxI getTileBBox(int idx) const
1318  {
1319  UT_Vector3I start, end;
1320  getTileVoxels(idx, start, end);
1321  return UT_BoundingBoxI(start, end);
1322  }
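// Example (sketch): visiting tiles directly to skip work on constant
// tiles; 'vol' stands for some UT_VoxelArray<fpreal32>.
//
//     for (int i = 0; i < vol.numTiles(); i++)
//     {
//         UT_VoxelTile<fpreal32> *tile = vol.getLinearTile(i);
//         if (tile->isConstant())
//             continue;               // nothing to do for uniform data
//         // ... operate on the tile's voxels ...
//     }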
1323 
1324  /// Number of tiles along that axis. Not to be confused with
1325  /// the resolution of the individual tiles.
1326  int getTileRes(int dim) const { return myTileRes[dim]; }
1327  int numTiles() const
1328  { return myTileRes[0] * myTileRes[1] * myTileRes[2]; }
1329  exint numVoxels() const
1330  { return ((exint)myRes[0]) * myRes[1] * myRes[2]; }
1331 
1332  void setCompressionOptions(const UT_VoxelCompressOptions &options)
1333  { myCompressionOptions = options; }
1334  const UT_VoxelCompressOptions &getCompressionOptions() const
1335  { return myCompressionOptions; }
1336 
1337  void setConstantTolerance(fpreal tol)
1338  { myCompressionOptions.myConstantTol = tol; }
1339  fpreal getConstantTolerance() const
1340  { return myCompressionOptions.myConstantTol; }
1341 
1342  /// Saves only the data of this array to the given stream.
1343  /// To reload it you will have to have a matching array in tiles
1344  /// dimensions and size.
1345  void saveData(std::ostream &os) const;
1346  bool saveData(UT_JSONWriter &w,
1347  const char *shared_mem_owner = 0) const;
1348 
1349  /// Load an array, requires you have already size()d this array.
1350  void loadData(UT_IStream &is);
1351  bool loadData(UT_JSONParser &p);
1352 
1353  /// Copy only the data from the source array.
1354  /// Note that it is an error to call this unless isMatching(src).
1356  copyData,
1357  const UT_VoxelArray<T> &, src)
1358 
1359  void copyDataPartial(const UT_VoxelArray<T> &src,
1360  const UT_JobInfo &info);
1361 
1362 private:
1364  resamplethread,
1365  const UT_VoxelArray<T> &, src,
1366  const UT_Filter *, filter,
1367  float, radius,
1368  int, clampaxis)
1369  void resamplethreadPartial(const UT_VoxelArray<T> &src,
1370  const UT_Filter *filter,
1371  float radius,
1372  int clampaxis,
1373  const UT_JobInfo &info);
1374 
1375 
1376  void deleteVoxels();
1377 
1378  SYS_SharedMemory *copyToSharedMemory(const char *shared_mem_owner) const;
1379  bool populateFromSharedMemory(const char *id);
1380 
1381 
1382  /// Number of elements in each dimension.
1383  int myRes[3];
1384 
1385  /// Inverse voxel resolution, 1/myRes.
1386  UT_Vector3 myInvRes;
1387 
1388  /// Number of tiles in each dimension.
1389  int myTileRes[3];
1390 
1391  /// Compression tolerance for lossy compression.
1392  UT_VoxelCompressOptions myCompressionOptions;
1393 
1394  /// Double dereferenced so we can theoretically resize easily.
1395  UT_VoxelTile<T> *myTiles;
1396 
1397  /// Outside values get this if constant borders are used
1398  T myBorderValue;
1399  /// Per axis scale factors for when extrapolating.
1400  T myBorderScale[3];
1401  UT_VoxelBorderType myBorderType;
1402 
1403  /// For initializing the tiles from shared memory.
1404  SYS_SharedMemory *mySharedMem;
1405  SYS_SharedMemoryView *mySharedMemView;
1406 };
1407 
1408 
1409 ///
1410 /// UT_VoxelMipMap
1411 ///
1412 /// This provides a mip-map type structure for a voxel array.
1413 /// It manages the different levels of voxels arrays that are needed.
1414 /// You can create different types of mip maps: average, maximum, etc,
1415 /// which can allow different tricks.
1416 /// Each level is one half the previous level, rounded up.
1417 /// Out of bound voxels are ignored from the lower levels.
1418 ///
1419 template <typename T>
1420 class UT_VoxelMipMap
1421 {
1422 public:
1423  /// The different types of functions that can be used for
1424  /// constructing a mip map.
1425  enum mipmaptype { MIPMAP_MAXIMUM=0, MIPMAP_AVERAGE=1, MIPMAP_MINIMUM=2 };
1426 
1427  UT_VoxelMipMap();
1428  ~UT_VoxelMipMap();
1429 
1430  /// Copy constructor.
1431  UT_VoxelMipMap(const UT_VoxelMipMap<T> &src);
1432 
1433  /// Assignment operator:
1434  const UT_VoxelMipMap<T> &operator=(const UT_VoxelMipMap<T> &src);
1435 
1436  /// Builds from a given voxel array. The ownership flag determines
1437  /// if we gain ownership of the voxel array and should delete it.
1438  /// In any case, the new levels are owned by us.
1439  void build(UT_VoxelArray<T> *baselevel,
1440  mipmaptype function);
1441 
1442  /// Same as above but construct mipmaps simultaneously for more than
1443  /// one function. The order of the functions will correspond to the
1444  /// order of the data values passed to the traversal callback.
1445  void build(UT_VoxelArray<T> *baselevel,
1446  const UT_Array<mipmaptype> &functions);
1447 
1448  /// This does a top down traversal of the implicit octree defined
1449  /// by the voxel array. Returning false will abort that
1450  /// branch of the octree.
1451  /// The bounding box given is in cell space and is an exclusive
1452  /// box of the included cells (ie: (0..1)^3 means just cell 0,0,0)
1453  /// Note that each bounding box will not be square, unless you
1454  /// have the good fortune of starting with a power of 2 cube.
1455  /// The boolean goes true when the callback is invoked on a
1456  /// base level.
1457  typedef bool (*Callback)(const T *funcs,
1458  const UT_BoundingBox &box,
1459  bool baselevel, void *data);
1460  void traverseTopDown(Callback function,
1461  void *data) const;
1462 
1463  /// Top down traversal on op. OP is invoked with
1464  /// bool op(const UT_BoundingBoxI &indexbox, int level)
1465  ///
1466  /// indexbox is half-inclusive (0..1)^3 means cell 0,0,0
1467  /// level 0 means the base level.
1468  /// (box.min.x()>>level, box.min.y()>>level, box.min.z()>>level)
1469  /// gives the index to extract the value from level..
1470  template <typename OP>
1471  void traverseTopDown(OP&op) const;
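// Example (sketch): a functor for the templated traversal above; the name
// is hypothetical. Returning false prunes that branch of the octree.
//
//     struct CountBaseCells
//     {
//         exint count = 0;
//         bool operator()(const UT_BoundingBoxI &indexbox, int level)
//         {
//             if (level == 0)
//                 count += indexbox.volume();   // base-level cells reached
//             return true;                      // keep descending
//         }
//     };
//     // CountBaseCells op; mipmap.traverseTopDown(op);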
1472 
1473 
1474  /// Top down traversal, but which quad tree is visited first
1475  /// is controlled by
1476  /// float op.sortValue(UT_BoundingBoxI &indexbox, int level);
1477  /// Lower values are visited first.
1478  template <typename OP>
1479  void traverseTopDownSorted(OP&op) const;
1480 
1481 
1482  /// Return the amount of memory used by this mipmap.
1483  int64 getMemoryUsage(bool inclusive) const;
1484 
1485  int numLevels() const { return myNumLevels+1; }
1486 
1487  /// level 0 is the original grid, each level higher is a power
1488  /// of two smaller.
1489  const UT_VoxelArray<T> *level(int level, int function) const
1490  {
1491  if (level == 0)
1492  return myBaseLevel;
1493 
1494  return myLevels(function)[numLevels() - 1 - level];
1495  }
1496 
1497 private:
1498  void doTraverse(int x, int y, int z, int level,
1499  Callback function,
1500  void *data) const;
1501 
1502  /// Note: This variant of doTraverse has the opposite sense of level!
1503  template <typename OP>
1504  void doTraverse(int x, int y, int z, int level,
1505  OP &op) const;
1506  template <typename OP>
1507  void doTraverseSorted(int x, int y, int z, int level,
1508  OP &op) const;
1509 
1510  void initializePrivate();
1511  void destroyPrivate();
1512 
1513  THREADED_METHOD3(UT_VoxelMipMap<T>, dst.numTiles() > 1,
1514  downsample,
1515  UT_VoxelArray<T> &, dst,
1516  const UT_VoxelArray<T> &, src,
1517  mipmaptype, function)
1518  void downsamplePartial(UT_VoxelArray<T> &dst,
1519  const UT_VoxelArray<T> &src,
1520  mipmaptype function,
1521  const UT_JobInfo &info);
1522 
1523 protected:
1524  T mixValues(T t1, T t2, mipmaptype function) const
1525  {
1526  switch (function)
1527  {
1528  case MIPMAP_MAXIMUM:
1529  return SYSmax(t1, t2);
1530 
1531  case MIPMAP_AVERAGE:
1532  return (t1 + t2) / 2;
1533 
1534  case MIPMAP_MINIMUM:
1535  return SYSmin(t1, t2);
1536  }
1537 
1538  return t1;
1539  }
1540 
1541 
1542  /// This stores the base most level that was provided
1543  /// externally.
1544  UT_VoxelArray<T> *myBaseLevel;
1545  /// If true, we will delete the base level when we are done.
1547 
1548  /// Tracks the number of levels which we used to represent
1549  /// this hierarchy.
1550  int myNumLevels;
1551  /// The array of VoxelArrays, one per level.
1552  /// myLevels[0] is a 1x1x1 array. Each successive layer is twice
1553  /// as big in each dimension. However, every layer is clamped
1554  /// against the resolution of the base layer.
1555  /// We own all these layers.
1557 };
1558 
1559 
1560 /// Iterator for Voxel Arrays
1561 ///
1562 /// This class eliminates the need for having
1563 /// for (z = 0; z < zres; z++)
1564 /// ...
1565 /// for (x = 0; x < xres; x++)
1566 /// loops everywhere.
1567 ///
1568 /// Note that the order of iteration is undefined! (The actual order is
1569 /// to complete each tile in turn, thereby hopefully improving cache
1570 /// coherency)
1571 ///
1572 /// It is safe to write to the voxel array while this iterator is active.
1573 /// It is *not* safe to resize the voxel array (or destroy it)
1574 ///
1575 /// The iterator is similar in principle to an STL iterator, but somewhat
1576 /// simpler. The classic STL loop
1577 /// for ( it = begin(); it != end(); ++it )
1578 /// is done using
1579 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1580 ///
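/// A minimal usage sketch (assuming 'vol' is a UT_VoxelArray<fpreal32>):
/// @code
///     UT_VoxelArrayIterator<fpreal32> vit;
///     vit.setArray(&vol);
///     for (vit.rewind(); !vit.atEnd(); vit.advance())
///         vit.setValue(vit.getValue() * 2.0f);
/// @endcode
///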
1581 template <typename T>
1582 class UT_VoxelArrayIterator
1583 {
1584 public:
1589 
1590  void setArray(UT_VoxelArray<T> *vox)
1591  {
1592  myCurTile = -1;
1593  myHandle.resetHandle();
1594  myArray = vox;
1595  // Reset the range
1596  setPartialRange(0, 1);
1597  }
1598  void setConstArray(const UT_VoxelArray<T> *vox)
1599  {
1600  setArray((UT_VoxelArray<T> *) vox);
1601  }
1602 
1603  /// Iterates over the array pointed to by the handle. Only
1604  /// supports read access during the iteration as it does
1605  /// a read lock.
1607  {
1608  myHandle = handle;
1609  // Ideally we'd have a separate const iterator
1610  // from our non-const iterator so this would
1611  // only be exposed in the const version.
1612  myArray = const_cast<UT_VoxelArray<T> *>(&*myHandle);
1613 
1614  // Reset our range.
1615  myCurTile = -1;
1616  setPartialRange(0, 1);
1617  }
1618 
1619 
1620  /// Restricts this iterator to only run over a subset
1621  /// of the tiles. The tiles will be divided into approximately
1622  /// numrange equal groups, this will be the idx'th.
1623  /// The resulting iterator may have zero tiles.
1624  void setPartialRange(int idx, int numranges);
1625 
1626  /// Ties this iterator to the given jobinfo so it will
1627  /// match the jobinfo's processing.
1628  void splitByTile(const UT_JobInfo &info);
1629 
1630  /// Assigns an interrupt handler. This will be tested whenever
1631  /// it advances to a new tile. If it is interrupted, the iterator
1632  /// will jump forward to atEnd()
1633  void setInterrupt(UT_Interrupt *interrupt) { myInterrupt = interrupt; }
1634  void detectInterrupts() { myInterrupt = UTgetInterrupt(); }
1635 
1636  /// Restricts this iterator to the tiles that intersect
1637  /// the given bounding box of voxel coordinates.
1638  /// Note that this will not be a precise restriction as
1639  /// each tile is either included or not.
1640  /// You should setPartialRange() after setting the bbox range
1641  /// The bounding box is on the [0..1]^3 range.
1642  void restrictToBBox(const UT_BoundingBox &bbox);
1643  /// The [xmin, xmax] are inclusive and measured in voxels.
1644  void restrictToBBox(int xmin, int xmax,
1645  int ymin, int ymax,
1646  int zmin, int zmax);
1647 
1648  /// Resets the iterator to point to the first voxel.
1649  void rewind();
1650 
1651  /// Returns true if we have iterated over all of the voxels.
1652  bool atEnd() const
1653  { return myCurTile < 0; }
1654 
1655  /// Advances the iterator to point to the next voxel.
1656  void advance()
1657  {
1658  // We try to advance each axis, rolling over to the next.
1659  // If we exhaust this tile, we call advanceTile.
1660  myPos[0]++;
1661  myTileLocalPos[0]++;
1662  if (myTileLocalPos[0] >= myTileSize[0])
1663  {
1664  // Wrapped in X.
1665  myPos[0] -= myTileLocalPos[0];
1666  myTileLocalPos[0] = 0;
1667 
1668  myPos[1]++;
1669  myTileLocalPos[1]++;
1670  if (myTileLocalPos[1] >= myTileSize[1])
1671  {
1672  // Wrapped in Y.
1673  myPos[1] -= myTileLocalPos[1];
1674  myTileLocalPos[1] = 0;
1675 
1676  myPos[2]++;
1677  myTileLocalPos[2]++;
1678  if (myTileLocalPos[2] >= myTileSize[2])
1679  {
1680  // Wrapped in Z! Finished this tile!
1681  advanceTile();
1682  }
1683  }
1684  }
1685  }
1686 
1687  /// Retrieve the current location of the iterator.
1688  int x() const { return myPos[0]; }
1689  int y() const { return myPos[1]; }
1690  int z() const { return myPos[2]; }
1691  int idx(int idx) const { return myPos[idx]; }
1692 
1693  /// Retrieves the value that we are currently pointing at.
1694  /// This is faster than an operator(x,y,z) as we already know
1695  /// our current tile and that bounds checking isn't needed.
1696  T getValue() const
1697  {
1698  UT_ASSERT_P(myCurTile >= 0);
1699 
1700  UT_VoxelTile<T> *tile;
1701 
1702  tile = myArray->getLinearTile(myCurTile);
1703  return (*tile)(myTileLocalPos[0],
1704  myTileLocalPos[1],
1705  myTileLocalPos[2]);
1706  }
1707 
1708  /// Sets the voxel we are currently pointing to the given value.
1709  void setValue(T t) const
1710  {
1711  UT_ASSERT_P(myCurTile >= 0);
1712 
1713  UT_VoxelTile<T> *tile;
1714 
1715  tile = myArray->getLinearTile(myCurTile);
1716 
1717  tile->setValue(myTileLocalPos[0],
1718  myTileLocalPos[1],
1719  myTileLocalPos[2], t);
1720  }
1721 
1722  /// Returns true if the tile we are currently in is a constant tile.
1723  bool isTileConstant() const
1724  {
1725  UT_ASSERT_P(myCurTile >= 0);
1726 
1727  UT_VoxelTile<T> *tile;
1728 
1729  tile = myArray->getLinearTile(myCurTile);
1730  return tile->isConstant();
1731  }
1732 
1733  /// This tile will iterate over the voxels indexed [start,end).
1734  void getTileVoxels(UT_Vector3I &start, UT_Vector3I &end) const
1735  {
1736  start.x() = myTilePos[0] * TILESIZE;
1737  start.y() = myTilePos[1] * TILESIZE;
1738  start.z() = myTilePos[2] * TILESIZE;
1739  end = start;
1740  end.x() += myTileSize[0];
1741  end.y() += myTileSize[1];
1742  end.z() += myTileSize[2];
1743  }
1744 
1745  /// This tile will iterate over the *inclusive* voxels indexed
1746  /// in the returned bounding box.
1747  UT_BoundingBoxI getTileBBox() const
1748  {
1749  UT_Vector3I start, end;
1750  getTileVoxels(start, end);
1751  return UT_BoundingBoxI(start, end);
1752  }
1753 
1754  /// Returns true if we are at the start of a new tile.
1755  bool isStartOfTile() const
1756  { return !(myTileLocalPos[0] ||
1757  myTileLocalPos[1] ||
1758  myTileLocalPos[2]); }
1759 
1760  /// Returns the VoxelTile we are currently processing
1761  UT_VoxelTile<T> *getTile() const
1762  {
1763  UT_ASSERT_P(myCurTile >= 0);
1764  return myArray->getLinearTile(myCurTile);
1765  }
1766  int getLinearTileNum() const
1767  {
1768  return myCurTile;
1769  }
1770 
1771  /// Advances the iterator to point to the next tile. Useful if the
1772  /// constant test showed that you didn't need to deal with this one.
1773  void advanceTile();
1774 
1775  /// Advances the iterator to point just before the next tile so
1776  /// the next advance() will be an advanceTile(). This is useful
1777  /// if you want a continue; to act like a break out of the current tile
1778  /// while the for loop's increment is still advance().
1779  /// Note the iterator is in a bad state until advance() is called.
1780  void skipToEndOfTile();
1781 
1782  /// Sets a flag which causes the iterator to tryCompress()
1783  /// tiles when it is done with them.
1784  bool getCompressOnExit() const { return myShouldCompressOnExit; }
1785  void setCompressOnExit(bool shouldcompress)
1786  { myShouldCompressOnExit = shouldcompress; }
1787 
1788  /// These templated algorithms are designed to apply simple operations
1789  /// across all of the voxels with as little overhead as possible.
1790  /// The iterator should already point to a voxel array and if multithreaded
1791  /// had its partial range set. The source arrays must be matching size.
1792  /// The operator should support a () operator, and the result is
1793  /// vit.setValue( op(vit.getValue(), a->getValue(vit), ...) );
1794  /// Passing T instead of UT_VoxelArray will treat it as a constant source
1795  /// Note if both source and destination tiles are constant, only
1796  /// a single operation is invoked.
1797  template <typename OP>
1798  void applyOperation(OP &op);
1799  template <typename OP, typename S>
1800  void applyOperation(OP &op, const UT_VoxelArray<S> &a);
1801  template <typename OP>
1802  void applyOperation(OP &op, T a);
1803  template <typename OP, typename S, typename R>
1804  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1805  const UT_VoxelArray<R> &b);
1806  template <typename OP, typename S, typename R, typename Q>
1807  void applyOperation(OP &op, const UT_VoxelArray<S> &a,
1808  const UT_VoxelArray<R> &b,
1809  const UT_VoxelArray<Q> &c);
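// Example (sketch): a () functor for the one-source applyOperation()
// overload above; the name is hypothetical. Constant tiles are combined
// with a single invocation.
//
//     struct AddOp
//     {
//         fpreal32 operator()(fpreal32 dst, fpreal32 a) const
//             { return dst + a; }
//     };
//     // AddOp op; vit.applyOperation(op, aArray);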
1810  /// These variants will invoke op.isNoop(a, b, ...) which will return
1811  /// true if those values won't affect the destination. This allows
1812  /// constant source tiles to be skipped, for example when adding
1813  /// 0.
1814  template <typename OP, typename S>
1815  void applyOperationCheckNoop(OP &op, const UT_VoxelArray<S> &a);
1816  template <typename OP>
1817  void applyOperationCheckNoop(OP &op, T a);
1818 
1819  /// These variants of apply operation also accept a mask array. The
1820  /// operation is applied only where the mask is greater than 0.5.
1821  template <typename OP, typename M>
1822  void maskedApplyOperation(OP &op,
1823  const UT_VoxelArray<M> &mask);
1824  template <typename OP, typename S, typename M>
1825  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1826  const UT_VoxelArray<M> &mask);
1827  template <typename OP, typename S, typename R, typename M>
1828  void maskedApplyOperation(OP &op, const UT_VoxelArray<S> &a,
1829  const UT_VoxelArray<R>& b,
1830  const UT_VoxelArray<M> &mask);
1831  template <typename OP, typename S, typename R, typename Q, typename M>
1832  void maskedApplyOperation(OP& op, const UT_VoxelArray<S> &a,
1833  const UT_VoxelArray<R>& b,
1834  const UT_VoxelArray<Q>& c,
1835  const UT_VoxelArray<M> &mask);
1836 
1837  /// Assign operation works like apply operation, but *this is written
1838  /// to without reading, so there is one less parameter to the ()
1839  /// callback. This can optimize constant tile writes as the
1840  /// constant() status of the destination doesn't matter.
1841  template <typename OP, typename S>
1842  void assignOperation(OP &op, const UT_VoxelArray<S> &a);
1843  template <typename OP, typename S, typename R>
1844  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1845  const UT_VoxelArray<R> &b);
1846  template <typename OP, typename S, typename R, typename Q>
1847  void assignOperation(OP &op, const UT_VoxelArray<S> &a,
1848  const UT_VoxelArray<R> &b,
1849  const UT_VoxelArray<Q> &c);
1850 
1851  /// These variants of assign operation also accept a mask array. The
1852  /// assignment operation is performed only where the mask is greater
1853  /// than 0.5.
1854  template <typename OP, typename S, typename M>
1855  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1856  const UT_VoxelArray<M>& mask);
1857  template <typename OP, typename S, typename R, typename M>
1858  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1859  const UT_VoxelArray<R>& b,
1860  const UT_VoxelArray<M>& mask);
1861  template <typename OP, typename S, typename R, typename Q, typename M>
1862  void maskedAssignOperation(OP& op, const UT_VoxelArray<S>& a,
1863  const UT_VoxelArray<R>& b,
1864  const UT_VoxelArray<Q>& c,
1865  const UT_VoxelArray<M>& mask);
1866 
1867  /// Reduction operators.
1868  /// op.reduce(T a) called for each voxel, *but*,
1869  /// op.reduceMany(T a, int n) called to reduce constant blocks.
1870  template <typename OP>
1871  void reduceOperation(OP &op);
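 /// Editorial example (not part of the original header): a sketch of a
 /// reduction functor that sums all voxels of an fpreal32 array; the name
 /// SumOp is hypothetical.
 /// @code
 /// struct SumOp
 /// {
 ///     fpreal64 total = 0;
 ///     void reduce(fpreal32 v)            { total += v; }
 ///     void reduceMany(fpreal32 v, int n) { total += fpreal64(v) * n; }
 /// };
 /// SumOp op;
 /// vit.reduceOperation(op);
 /// @endcode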
1872 
1873  UT_VoxelArray<T> *getArray() const { return myArray; }
1874 
1875 protected:
1876  /// The array we belong to.
1877  UT_VoxelArray<T> *myArray;
1878  /// The handle that we have locked to get our array.  It is null
1879  /// by default which makes the lock/unlock nops.
1880  UT_COWReadHandle<UT_VoxelArray<T> > myHandle;
1881 
1882  /// Absolute index into voxel array.
1883  int myPos[3];
1884 
1885  /// Flag determining if we should compress tiles whenever we
1886  /// advance out of them.
1887  bool myShouldCompressOnExit;
1888 
1891 
1892 public:
1893  /// Our current linear tile idx. A value of -1 implies at end.
1894  int myCurTile;
1895 
1896  /// Our current index into the tile list
1897  int myCurTileListIdx;
1898 
1899  /// Our start & end tiles for processing a subrange.
1900  /// The tile range is half open [start, end)
1901  int myTileStart, myTileEnd;
1902 
1903  /// Which tile we are as per tx,ty,tz rather than linear index.
1904  int myTilePos[3];
1905 
1906  /// Our position within the current tile.
1907  int myTileLocalPos[3];
1908 
1909  /// The size of the current tile
1910  int myTileSize[3];
1911 
1912  /// The job info to use for tilefetching
1913  const UT_JobInfo *myJobInfo;
1914 
1915  UT_Interrupt *myInterrupt;
1916 };
1917 
1918 /// Iterator for tiles inside Voxel Arrays
1919 ///
1920 /// This class eliminates the need for having
1921 /// for (z = 0; z < zres; z++)
1922 /// ...
1923 /// for (x = 0; x < xres; x++)
1924 /// loops everywhere.
1925 ///
1926 /// The iterator is similar in principle to an STL iterator, but somewhat
1927 /// simpler. The classic STL loop
1928 /// for ( it = begin(); it != end(); ++it )
1929 /// is done using
1930 /// for ( it.rewind(); !it.atEnd(); it.advance() )
1931 ///
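/// Editorial example (not part of the original header): a sketch of
/// processing a single tile by its linear index, assuming arr is a
/// UT_VoxelArrayF and tilenum is a valid linear tile number.
/// @code
/// UT_VoxelTileIteratorF it;
/// it.setLinearTile(tilenum, &arr);
/// for (it.rewind(); !it.atEnd(); it.advance())
///     it.setValue(it.getValue() * 0.5f);
/// @endcode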
1932 template <typename T>
1933 class UT_VoxelTileIterator
1934 {
1935 public:
1938  template <typename S>
1942 
1943  template <typename S>
1944  void setTile(const UT_VoxelArrayIterator<S> &vit,
1945  UT_VoxelArray<T> *array)
1946  {
1947  UT_ASSERT_P(vit.isStartOfTile());
1948  myCurTile = array->getLinearTile(vit.getLinearTileNum());
1949  myArray = array;
1950  myTileStart[0] = vit.x();
1951  myTileStart[1] = vit.y();
1952  myTileStart[2] = vit.z();
1953  }
1954 
1955  void setTile(const UT_VoxelArrayIterator<T> &vit)
1956  {
1957  setTile(vit, vit.getArray());
1958  }
1959 
1960  void setLinearTile(exint lineartilenum, UT_VoxelArray<T> *array)
1961  {
1962  myCurTile = array->getLinearTile(lineartilenum);
1963  myArray = array;
1964 
1965  array->linearTileToXYZ(lineartilenum,
1966  myTileStart[0], myTileStart[1], myTileStart[2]);
1967  myTileStart[0] <<= TILEBITS;
1968  myTileStart[1] <<= TILEBITS;
1969  myTileStart[2] <<= TILEBITS;
1970  }
1971 
1972  /// Resets the iterator to point to the first voxel.
1973  void rewind();
1974 
1975  /// Returns true if we have iterated over all of the voxels.
1976  bool atEnd() const
1977  { return myCurTile == 0 || myAtEnd; }
1978 
1979  /// Advances the iterator to point to the next voxel.
1980  void advance()
1981  {
1982  // We try to advance each axis, rolling over to the next.
1983  // If we exhaust this tile, we call advanceTile.
1984  myPos[0]++;
1985  myTileLocalPos[0]++;
1986  if (myTileLocalPos[0] >= myTileSize[0])
1987  {
1988  // Wrapped in X.
1989  myPos[0] -= myTileLocalPos[0];
1990  myTileLocalPos[0] = 0;
1991 
1992  myPos[1]++;
1993  myTileLocalPos[1]++;
1994  if (myTileLocalPos[1] >= myTileSize[1])
1995  {
1996  // Wrapped in Y.
1997  myPos[1] -= myTileLocalPos[1];
1998  myTileLocalPos[1] = 0;
1999 
2000  myPos[2]++;
2001  myTileLocalPos[2]++;
2002  if (myTileLocalPos[2] >= myTileSize[2])
2003  {
2004  // Wrapped in Z! Finished this tile!
2005  advanceTile();
2006  }
2007  }
2008  }
2009  }
2010 
2011  /// Retrieve the current location of the iterator, in the
2012  /// containing voxel array, not in the tile.
2013  int x() const { return myPos[0]; }
2014  int y() const { return myPos[1]; }
2015  int z() const { return myPos[2]; }
2016  int idx(int idx) const { return myPos[idx]; }
2017 
2018  /// Retrieves the value that we are currently pointing at.
2019  /// This is faster than an operator(x,y,z) as we already know
2020  /// our current tile and that bounds checking isn't needed.
2021  T getValue() const
2022  {
2023  UT_ASSERT_P(myCurTile);
2024 
2025  return (*myCurTile)(myTileLocalPos[0],
2026  myTileLocalPos[1],
2027  myTileLocalPos[2]);
2028  }
2029 
2030  /// Sets the voxel we are currently pointing to the given value.
2031  void setValue(T t) const
2032  {
2033  UT_ASSERT_P(myCurTile);
2034 
2035  myCurTile->setValue(myTileLocalPos[0],
2036  myTileLocalPos[1],
2037  myTileLocalPos[2], t);
2038  }
2039 
2040  /// Returns true if the tile we are currently in is a constant tile.
2041  bool isTileConstant() const
2042  {
2043  UT_ASSERT_P(myCurTile);
2044 
2045  return myCurTile->isConstant();
2046  }
2047 
2048  /// Returns true if we are at the start of a new tile.
2049  bool isStartOfTile() const
2050  { return !(myTileLocalPos[0] ||
2051  myTileLocalPos[1] ||
2052  myTileLocalPos[2]); }
2053 
2054  /// Returns the VoxelTile we are currently processing
2055  UT_VoxelTile<T> *getTile() const
2056  {
2057  return myCurTile;
2058  }
2059 
2060  /// Advances the iterator to point to the next tile. Since
2061  /// we are restricted to one tile, effectively just ends the iterator.
2062  void advanceTile();
2063 
2064  /// Sets a flag which causes the iterator to tryCompress()
2065  /// tiles when it is done with them.
2066  bool getCompressOnExit() const { return myShouldCompressOnExit; }
2067  void setCompressOnExit(bool shouldcompress)
2068  { myShouldCompressOnExit = shouldcompress; }
2069 
2070  /// Reduction operators.
2071  /// op.reduce(T a) called for each voxel, *but*,
2072  /// op.reduceMany(T a, int n) called to reduce constant blocks.
2073  /// Early exits if op.reduce() returns false.
2074  template <typename OP>
2075  bool reduceOperation(OP &op);
2076 
2077 protected:
2078  /// Current processing tile
2079  UT_VoxelTile<T> *myCurTile;
2080  UT_VoxelArray<T> *myArray;
2081 
2082  /// Absolute index into voxel array.
2083  int myPos[3];
2084  /// Absolute index of start of tile
2085  int myTileStart[3];
2086 
2087  /// Flag determining if we should compress tiles whenever we
2088  /// advance out of them.
2089  bool myShouldCompressOnExit;
2090 
2091  /// Since we want to allow multiple passes, we can't
2092  /// clear out myCurTile when we hit the end.
2093  bool myAtEnd;
2094 
2095 public:
2096  /// Our position within the current tile.
2097  int myTileLocalPos[3];
2098 
2099  /// The size of the current tile
2100  int myTileSize[3];
2101 };
2102 
2103 /// Probe for Voxel Arrays
2104 ///
2105 /// This class is designed to allow for efficient evaluation
2106 /// of aligned indices of a voxel array, provided the voxels are iterated
2107 /// in a tile-by-tile, x-innermost, manner.
2108 ///
2109 /// This class will create a local copy of the voxel data where needed,
2110 /// uncompressing the information once for every 16 queries. It will
2111 /// also create an aligned buffer so you can safely use v4uf on fpreal32
2112 /// data.
2113 ///
2114 /// For queries where you need surrounding values, the prex and postx can
2115 /// specify padding on the probe. prex should be -1 to allow reading
2116 /// -1 offset, postx 1 to allow reading a 1 offset.
2117 ///
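 /// Editorial example (not part of the original header): a sketch of a
 /// read-only probe with -1/+1 x padding driven by an iterator over a
 /// destination array; src, dst and vit are hypothetical, and vit is assumed
 /// to already be attached to dst.
 /// @code
 /// UT_VoxelProbeF probe;
 /// probe.setConstArray(&src, -1, 1);
 /// for (vit.rewind(); !vit.atEnd(); vit.advance())
 /// {
 ///     probe.setIndex(vit);
 ///     vit.setValue(0.5f * (probe.getValue(-1) + probe.getValue(1)));
 /// }
 /// @endcode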
2118 
2119 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2120 class UT_VoxelProbe
2121 {
2122 public:
2123  UT_VoxelProbe();
2124  UT_VoxelProbe(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2125  ~UT_VoxelProbe();
2126 
2127  void setArray(UT_VoxelArray<T> *vox, int prex = 0, int postx = 0);
2128  void setConstArray(const UT_VoxelArray<T> *vox,
2129  int prex = 0, int postx = 0)
2130  {
2131  SYS_STATIC_ASSERT(DoWrite == false);
2132  setArray((UT_VoxelArray<T> *)vox, prex, postx);
2133  }
2134 
2135  UT_VoxelArray<T> *getArray() const { return myArray; }
2136 
2137  bool isValid() const { return myArray != 0; }
2138 
2139  inline T getValue() const
2140  {
2141  return *myCurLine;
2142  }
2143  inline T getValue(int offset) const
2144  {
2145  return myCurLine[myStride*offset];
2146  }
2147 
2148  inline void setValue(T value)
2149  {
2150  UT_ASSERT_P(DoWrite);
2151  *myCurLine = value;
2152  if (TestForWrites)
2153  myDirty = true;
2154  }
2155 
2156 
2157  /// Resets where we currently point to.
2158  /// Returns true if we had to reset our cache line. If we didn't,
2159  /// and you have multiple probes acting in-step, you can just
2160  /// advanceX() the other probes
2161  template <typename S>
2162  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2163  { return setIndex(vit.x(), vit.y(), vit.z()); }
2164  template <typename S>
2165  bool setIndex(UT_VoxelTileIterator<S> &vit)
2166  { return setIndex(vit.x(), vit.y(), vit.z()); }
2167 
2168  bool setIndex(int x, int y, int z);
2169 
2170  /// Blindly advances our current pointer.
2171  inline void advanceX()
2172  {
2173  myCurLine += myStride;
2174  myX++;
2175  UT_ASSERT_P(myX < myMaxValidX);
2176  }
2177 
2178  /// Adjusts our current pointer to the given absolute location,
2179  /// assumes the new value is inside our valid range.
2180  inline void resetX(int x)
2181  {
2182  myCurLine += myStride * (x - myX);
2183  myX = x;
2184  UT_ASSERT_P(myX < myMaxValidX && myX >= myMinValidX);
2185  }
2186 
2187 protected:
2188  void reloadCache(int x, int y, int z);
2189 
2190  void writeCacheLine();
2191 
2192  void buildConstantCache(T value);
2193 
2195  /// myCacheLine[0] is the start of the cache line, so -1 would be
2196  /// the first pre-rolled value
2198  /// Where we actually allocated our cache line, aligned to 4x multiple
2199  /// to ensure SSE compatible.
2201 
2202  int myX, myY, myZ;
2203  int myPreX, myPostX;
2206  /// Half inclusive [,) range of valid x queries for current cache.
2207  int myMinValidX, myMaxValidX;
2208 
2209  /// Determines if we have anything to write back, only
2210  /// valid if TestForWrites is enabled.
2211  bool myDirty;
2212 
2213  UT_VoxelArray<T> *myArray;
2214 
2215  friend class UT_VoxelProbeCube<T>;
2216  friend class UT_VoxelProbeFace<T>;
2217 };
2218 
2219 ///
2220 /// The vector probe is three normal probes into separate voxel arrays
2221 /// making it easier to read and write to aligned vector fields.
2222 /// If the vector field is face-centered, see the UT_VoxelProbeFace.
2223 ///
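 /// Editorial example (not part of the original header): a sketch of reading
 /// a centered vector value from three matching scalar fields; vx, vy, vz and
 /// the index are hypothetical.
 /// @code
 /// UT_VoxelVectorProbeF vel;
 /// vel.setConstArray(&vx, &vy, &vz);
 /// vel.setIndex(x, y, z);
 /// UT_Vector3 v = vel.getValue();
 /// @endcode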
2224 template <typename T, bool DoRead, bool DoWrite, bool TestForWrites>
2225 class UT_VoxelVectorProbe
2226 {
2227 public:
2228  UT_VoxelVectorProbe()
2229  { }
2230  UT_VoxelVectorProbe(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2231  { setArray(vx, vy, vz); }
2232  ~UT_VoxelVectorProbe()
2233  {}
2234 
2235  void setArray(UT_VoxelArray<T> *vx, UT_VoxelArray<T> *vy, UT_VoxelArray<T> *vz)
2236  {
2237  myLines[0].setArray(vx);
2238  myLines[1].setArray(vy);
2239  myLines[2].setArray(vz);
2240  }
2241  void setConstArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz)
2242  {
2243  SYS_STATIC_ASSERT(DoWrite == false);
2244  setArray((UT_VoxelArray<T> *)vx, (UT_VoxelArray<T> *)vy, (UT_VoxelArray<T> *)vz);
2245  }
2246 
2247  inline UT_Vector3 getValue() const
2248  {
2249  return UT_Vector3(myLines[0].getValue(), myLines[1].getValue(), myLines[2].getValue());
2250  }
2251  inline T getValue(int axis) const
2252  {
2253  return myLines[axis].getValue();
2254  }
2255 
2256  inline void setValue(const UT_Vector3 &v)
2257  {
2258  myLines[0].setValue(v.x());
2259  myLines[1].setValue(v.y());
2260  myLines[2].setValue(v.z());
2261  }
2262 
2263  inline void setComponent(int axis, T val)
2264  {
2265  myLines[axis].setValue(val);
2266  }
2267 
2268  /// Resets where we currently point to.
2269  /// Returns true if we had to reset our cache line. If we didn't,
2270  /// and you have multiple probes acting in-step, you can just
2271  /// advanceX() the other probes
2272  template <typename S>
2273  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2274  { return setIndex(vit.x(), vit.y(), vit.z()); }
2275  template <typename S>
2276  bool setIndex(UT_VoxelTileIterator<S> &vit)
2277  { return setIndex(vit.x(), vit.y(), vit.z()); }
2278 
2279  bool setIndex(int x, int y, int z)
2280  {
2281  if (myLines[0].setIndex(x, y, z))
2282  {
2283  myLines[1].setIndex(x, y, z);
2284  myLines[2].setIndex(x, y, z);
2285  return true;
2286  }
2287  myLines[1].advanceX();
2288  myLines[2].advanceX();
2289  return false;
2290  }
2291 
2292  void advanceX()
2293  { myLines[0].advanceX(); myLines[1].advanceX(); myLines[2].advanceX(); }
2294 
2295 protected:
2296  UT_VoxelProbe<T, DoRead, DoWrite, TestForWrites> myLines[3];
2297 };
2298 
2299 template <typename T>
2300 class
2301 UT_VoxelProbeCube
2302 {
2303 public:
2304  UT_VoxelProbeCube();
2305  ~UT_VoxelProbeCube();
2306 
2307  void setConstCubeArray(const UT_VoxelArray<T> *vox);
2308  void setConstPlusArray(const UT_VoxelArray<T> *vox);
2309 
2310  /// Allows you to query +/-1 in each direction. In cube update,
2311  /// all are valid.  In plus update, only one of x, y and z may be
2312  /// non-zero.
2313  SYS_FORCE_INLINE
2314  T
2315  getValue(int x, int y, int z) const
2316  {
2317  UT_ASSERT_P(x >= -1 && x <= 1 &&
2318  y >= -1 && y <= 1 &&
2319  z >= -1 && z <= 1);
2320 
2321  return myLines[y+1][z+1].getValue(x);
2322  }
2323 
2324  SYS_FORCE_INLINE
2325  T
2326  getValue(const UT_Vector3I &offset) const
2327  {
2328  return getValue(offset[0], offset[1], offset[2]);
2329  }
2330 
2331  template <typename S>
2332  bool setIndexCube(UT_VoxelArrayIterator<S> &vit)
2333  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2334  template <typename S>
2335  bool setIndexCube(UT_VoxelTileIterator<S> &vit)
2336  { return setIndexCube(vit.x(), vit.y(), vit.z()); }
2337  bool setIndexCube(int x, int y, int z);
2338 
2339  template <typename S>
2340  bool setIndexPlus(UT_VoxelArrayIterator<S> &vit)
2341  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2342  template <typename S>
2343  bool setIndexPlus(UT_VoxelTileIterator<S> &vit)
2344  { return setIndexPlus(vit.x(), vit.y(), vit.z()); }
2345  bool setIndexPlus(int x, int y, int z);
2346 
2347  /// Computes central difference gradient, does not scale
2348  /// by the step size (which is twice voxelsize)
2349  /// Requires PlusArray
2350  UT_Vector3 gradient() const
2351  { return UT_Vector3(getValue(1,0,0) - getValue(-1,0,0),
2352  getValue(0,1,0) - getValue(0,-1,0),
2353  getValue(0,0,1) - getValue(0,0,-1)); }
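 /// Editorial example (not part of the original header): a sketch of a
 /// central-difference gradient; density is a hypothetical UT_VoxelArrayF and
 /// the result must still be divided by twice the voxel size.
 /// @code
 /// UT_VoxelProbeCube<fpreal32> cube;
 /// cube.setConstPlusArray(&density);
 /// cube.setIndexPlus(x, y, z);
 /// UT_Vector3 grad = cube.gradient();   // unscaled central difference
 /// @endcode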
2354 
2355  /// Computes the central difference curvature using the given
2356  /// inverse voxelsize (ie, 1/voxelsize) at this point.
2357  /// Requires CubeArray.
2358  fpreal64 curvature(const UT_Vector3 &invvoxelsize) const;
2359 
2360  /// Computes the laplacian, again with a given 1/voxelsize.
2361  /// Requires PlusArray
2362  fpreal64 laplacian(const UT_Vector3 &invvoxelsize) const;
2363 
2364 protected:
2365  /// Does a rotation of our cache lines, ym becomes y0 and y0 becomes yp,
2366  /// so further queries with y+1 will be cache hits for 2 out of 3.
2367  static void rotateLines(UT_VoxelProbe<T, true, false, false> &ym,
2368  UT_VoxelProbe<T, true, false, false> &y0,
2369  UT_VoxelProbe<T, true, false, false> &yp);
2370 
2371  UT_VoxelProbe<T, true, false, false> myLines[3][3];
2372  /// Cached look up position. myValid stores if they are
2373  /// valid values or not
2374  bool myValid;
2375  int myX, myY, myZ;
2376  /// Half inclusive [,) range of valid x queries for current cache.
2377  int myMinValidX, myMaxValidX;
2378 };
2379 
2380 ///
2381 /// UT_VoxelProbeConstant
2382 ///
2383 /// Looks like a voxel probe but only returns a constant value.
2384 ///
2385 template <typename T>
2386 class
2387 UT_VoxelProbeConstant
2388 {
2389 public:
2392 
2393  template <typename S>
2394  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2395  { return true; }
2396  template <typename S>
2397  bool setIndex(UT_VoxelTileIterator<S> &vit)
2398  { return true; }
2399  bool setIndex(int x, int y, int z)
2400  { return true; }
2401 
2402  void setValue(T val) { myValue = val; }
2403  T getValue() const { return myValue; }
2404 protected:
2405  T myValue;
2406 };
2407 
2408 ///
2409 /// UT_VoxelProbeAverage
2410 ///
2411  /// When working with MAC grids one often has slightly misaligned
2412  /// fields.  Ie, one field is at the half-grid spacing of another field.
2413  /// The step values are 0 if the dimension is aligned, -1 for half a step
2414 /// back (ie, (val(-1)+val(0))/2) and 1 for half a step forward
2415 /// (ie, (val(0)+val(1))/2)
2416 ///
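 /// Editorial example (not part of the original header): a sketch of
 /// averaging a face-sampled x-velocity onto voxel centers; velx is a
 /// hypothetical UT_VoxelArrayF whose samples sit half a step back in x.
 /// @code
 /// UT_VoxelProbeAverage<fpreal32, -1, 0, 0> xvel;
 /// xvel.setArray(&velx);
 /// xvel.setIndex(x, y, z);
 /// fpreal32 centered = xvel.getValue();  // (val(-1) + val(0)) / 2 in x
 /// @endcode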
2417 template <typename T, int XStep, int YStep, int ZStep>
2418 class
2419 UT_VoxelProbeAverage
2420 {
2421 public:
2424 
2425  void setArray(const UT_VoxelArray<T> *vox);
2426 
2427  template <typename S>
2428  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2429  { return setIndex(vit.x(), vit.y(), vit.z()); }
2430  template <typename S>
2431  bool setIndex(UT_VoxelTileIterator<S> &vit)
2432  { return setIndex(vit.x(), vit.y(), vit.z()); }
2433  bool setIndex(int x, int y, int z);
2434 
2435  /// Returns the velocity centered at this index, thus an average
2436  /// of the values in each of our internal probes.
2437  inline T getValue() const
2438  {
2439  if (ZStep)
2440  return (valueZ(1) + valueZ(0)) * 0.5;
2441  return valueZ(0);
2442  }
2443 
2444 protected:
2445  inline T valueZ(int z) const
2446  {
2447  if (YStep)
2448  return (valueYZ(1, z) + valueYZ(0, z)) * 0.5;
2449  return valueYZ(0, z);
2450  }
2451 
2452  inline T valueYZ(int y, int z) const
2453  {
2454  if (XStep > 0)
2455  return (myLines[y][z].getValue(1) + myLines[y][z].getValue(0)) * 0.5;
2456  if (XStep < 0)
2457  return (myLines[y][z].getValue(-1) + myLines[y][z].getValue(0)) * 0.5;
2458  return myLines[y][z].getValue();
2459  }
2460 
2461  // Stores [Y][Z] lines.
2462  UT_VoxelProbe<T, true, false, false> myLines[2][2];
2463 };
2464 
2465 
2466 ///
2467 /// UT_VoxelProbeFace is designed to walk over three velocity
2468 /// fields that store face-centered values. The indices refer
2469 /// to the centers of the voxels.
2470 ///
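 /// Editorial example (not part of the original header): a sketch of
 /// evaluating per-cell divergence of a face-centered velocity field;
 /// velx, vely, velz and voxelsize are hypothetical.
 /// @code
 /// UT_VoxelProbeFace<fpreal32> face;
 /// face.setArray(&velx, &vely, &velz);
 /// face.setVoxelSize(voxelsize);
 /// face.setIndex(x, y, z);
 /// fpreal32 div = face.divergence();
 /// @endcode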
2471 template <typename T>
2472 class
2473 UT_VoxelProbeFace
2474 {
2475 public:
2476  UT_VoxelProbeFace();
2477  ~UT_VoxelProbeFace();
2478 
2479  void setArray(const UT_VoxelArray<T> *vx, const UT_VoxelArray<T> *vy, const UT_VoxelArray<T> *vz);
2480  void setVoxelSize(const UT_Vector3 &voxelsize);
2481 
2482  template <typename S>
2483  bool setIndex(UT_VoxelArrayIterator<S> &vit)
2484  { return setIndex(vit.x(), vit.y(), vit.z()); }
2485  template <typename S>
2486  bool setIndex(UT_VoxelTileIterator<S> &vit)
2487  { return setIndex(vit.x(), vit.y(), vit.z()); }
2488  bool setIndex(int x, int y, int z);
2489 
2490  /// Get the face values on each face component.
2491  /// Parameters are axis then side.
2492  /// 0 is the lower face, 1 the higher face.
2493  inline T face(int axis, int side) const
2494  {
2495  if (axis == 0)
2496  return myLines[0][0].getValue(side);
2497  else
2498  return myLines[axis][side].getValue();
2499  }
2500 
2501  /// Returns the velocity centered at this index, thus an average
2502  /// of the values in each of our internal probes.
2503  inline UT_Vector3 value() const
2504  {
2505  return UT_Vector3(0.5f * (face(0, 0) + face(0, 1)),
2506  0.5f * (face(1, 0) + face(1, 1)),
2507  0.5f * (face(2, 0) + face(2, 1)));
2508  }
2509 
2510  /// Returns the divergence of this cell.
2511  inline T divergence() const
2512  {
2513  return (face(0,1)-face(0,0)) * myVoxelSize.x()
2514  + (face(1,1)-face(1,0)) * myVoxelSize.y()
2515  + (face(2,1)-face(2,0)) * myVoxelSize.z();
2516 
2517  }
2518 
2519 protected:
2520 
2521  static void swapLines(UT_VoxelProbe<T, true, false, false> &ym,
2522  UT_VoxelProbe<T, true, false, false> &yp);
2523 
2524 
2525  UT_VoxelProbe<T, true, false, false> myLines[3][2];
2526 
2527  /// Cached look up position. myValid stores if they are
2528  /// valid values or not
2529  bool myValid;
2530  int myX, myY, myZ;
2531  /// Half inclusive [,) range of valid x queries for current cache.
2532  int myMinValidX, myMaxValidX;
2533 
2534  UT_Vector3 myVoxelSize, myInvVoxelSize;
2535 };
2536 
2537 
2538 #include "UT_VoxelArray.C"
2539 
2540 
2541 // Typedefs for common voxel array types
2542 typedef UT_VoxelArray<fpreal32> UT_VoxelArrayF;
2543 typedef UT_VoxelArray<int64> UT_VoxelArrayI;
2544 typedef UT_VoxelArray<UT_Vector4> UT_VoxelArrayV4;
2545 
2546 typedef UT_VoxelMipMap<fpreal32> UT_VoxelMipMapF;
2547 typedef UT_VoxelArrayIterator<fpreal32> UT_VoxelArrayIteratorF;
2548 typedef UT_VoxelArrayIterator<int64> UT_VoxelArrayIteratorI;
2549 typedef UT_VoxelArrayIterator<UT_Vector4> UT_VoxelArrayIteratorV4;
2550 typedef UT_VoxelTileIterator<fpreal32> UT_VoxelTileIteratorF;
2551 typedef UT_VoxelTileIterator<int64> UT_VoxelTileIteratorI;
2552 typedef UT_VoxelTileIterator<UT_Vector4> UT_VoxelTileIteratorV4;
2553 // Read only probe
2554 typedef UT_VoxelProbe<fpreal32, true, false, false> UT_VoxelProbeF;
2555 typedef UT_VoxelProbe<UT_Vector4, true, false, false> UT_VoxelProbeV4;
2556 typedef UT_VoxelVectorProbe<fpreal32, true, false, false> UT_VoxelVectorProbeF;
2557 // Write only
2558 typedef UT_VoxelProbe<fpreal32, false, true, false> UT_VoxelWOProbeF;
2559 typedef UT_VoxelProbe<UT_Vector4, false, true, false> UT_VoxelWOProbeV4;
2560 typedef UT_VoxelVectorProbe<fpreal32, false, true, false> UT_VoxelVectorWOProbeF;
2561 // Read/Write always writeback.
2562 typedef UT_VoxelProbe<fpreal32, true, true, false> UT_VoxelRWProbeF;
2563 typedef UT_VoxelProbe<UT_Vector4, true, true, false> UT_VoxelRWProbeV4;
2564 typedef UT_VoxelVectorProbe<fpreal32, true, true, false> UT_VoxelVectorRWProbeF;
2565 // Read/Write with testing
2566 typedef UT_VoxelProbe<fpreal32, true, true, true> UT_VoxelRWTProbeF;
2567 typedef UT_VoxelProbe<UT_Vector4, true, true, true> UT_VoxelRWTProbeV4;
2568 typedef UT_VoxelVectorProbe<fpreal32, true, true, true> UT_VoxelVectorRWTProbeF;
2569 
2570 // TODO: add support for read-write probe cube
2571 typedef UT_VoxelProbeCube<fpreal32> UT_VoxelROProbeCubeF;
2572 
2573 typedef UT_COWHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayHandleF;
2574 typedef UT_COWReadHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayReadHandleF;
2575 typedef UT_COWWriteHandle<UT_VoxelArray<fpreal32> > UT_VoxelArrayWriteHandleF;
2576 
2577 typedef UT_COWHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayHandleV4;
2578 typedef UT_COWReadHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayReadHandleV4;
2579 typedef UT_COWWriteHandle<UT_VoxelArray<UT_Vector4> > UT_VoxelArrayWriteHandleV4;
2580 
2581 #endif
2582 