GridOperators.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file tools/GridOperators.h
///
/// @brief Apply an operator to an input grid to produce an output grid
/// with the same active voxel topology but a potentially different value type.

#ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED

#include <openvdb/Grid.h>
#include <openvdb/math/Operators.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/ValueAccessor.h>
#include "ValueTransformer.h" // for tools::foreach()
#include <tbb/parallel_for.h>


namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
/// having the same tree configuration as VectorGridType but a scalar value type, T,
/// where T is the type of the original vector components.
/// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
template<typename VectorGridType> struct VectorToScalarConverter {
    typedef typename VectorGridType::ValueType::value_type VecComponentValueT;
    typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type;
};

/// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
/// having the same tree configuration as ScalarGridType but value type Vec3<T>,
/// where T is ScalarGridType::ValueType.
/// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
template<typename ScalarGridType> struct ScalarToVectorConverter {
    typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT;
    typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
};

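
// A compile-time sketch of the two converters above, assuming the standard grid
// typedefs DoubleGrid and Vec3DGrid from openvdb.h and <type_traits>:
//
//     static_assert(std::is_same<
//         openvdb::tools::ScalarToVectorConverter<openvdb::DoubleGrid>::Type,
//         openvdb::Vec3DGrid>::value, "scalar -> vector");
//     static_assert(std::is_same<
//         openvdb::tools::VectorToScalarConverter<openvdb::Vec3DGrid>::Type,
//         openvdb::DoubleGrid>::value, "vector -> scalar");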

/// @brief Compute the Closest-Point Transform (CPT) from a distance field.
/// @return a new vector-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded = true)
{
    return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
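
// Usage sketch for the cpt() functions above, assuming an existing narrow-band
// level set stored in a FloatGrid named "sdf" (the variable name is illustrative):
//
//     // Each active voxel of the Vec3SGrid result holds the world-space
//     // position of the closest point on the zero isosurface.
//     openvdb::Vec3SGrid::Ptr closest = openvdb::tools::cpt(*sdf);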


/// @brief Compute the curl of the given vector-valued grid.
/// @return a new vector-valued grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded = true)
{
    return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
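
// Usage sketch for the curl() functions above, assuming a Vec3SGrid velocity
// field named "vel" (illustrative):
//
//     // The curl of a vector grid is another vector grid of the same type.
//     openvdb::Vec3SGrid::Ptr vorticity = openvdb::tools::curl(*vel);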


/// @brief Compute the divergence of the given vector-valued grid.
/// @return a new scalar-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded = true)
{
    return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
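
// Usage sketch for the divergence() functions above, assuming a Vec3SGrid
// velocity field named "vel" (illustrative):
//
//     // The divergence of a Vec3SGrid is a FloatGrid with the same topology.
//     openvdb::FloatGrid::Ptr div = openvdb::tools::divergence(*vel);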


/// @brief Compute the gradient of the given scalar grid.
/// @return a new vector-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded = true)
{
    return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
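
// Usage sketch for the gradient() functions above, assuming a FloatGrid "sdf"
// and a BoolGrid "mask" that bounds the region of interest (both illustrative):
//
//     // The gradient of a FloatGrid is a Vec3SGrid with the same active topology.
//     openvdb::Vec3SGrid::Ptr grad = openvdb::tools::gradient(*sdf);
//
//     // Restrict the result to the intersection of the input's and the mask's
//     // active topology.
//     openvdb::Vec3SGrid::Ptr maskedGrad = openvdb::tools::gradient(*sdf, *mask);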


/// @brief Compute the Laplacian of the given scalar grid.
/// @return a new scalar grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded = true)
{
    return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
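
// Usage sketch for the laplacian() functions above, assuming a scalar
// FloatGrid named "density" (illustrative):
//
//     // The Laplacian of a scalar grid is a scalar grid of the same type.
//     openvdb::FloatGrid::Ptr lap = openvdb::tools::laplacian(*density);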


/// @brief Compute the mean curvature of the given grid.
/// @return a new grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded = true)
{
    return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
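
// Usage sketch for the meanCurvature() functions above, assuming a level-set
// FloatGrid named "sdf" (illustrative):
//
//     // Mean curvature is returned in a grid of the same type as the input.
//     openvdb::FloatGrid::Ptr curvature = openvdb::tools::meanCurvature(*sdf);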


/// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
/// @return a new scalar-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded = true)
{
    return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
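
// Usage sketch for the magnitude() functions above, assuming a Vec3SGrid
// velocity field named "vel" (illustrative):
//
//     // Vector magnitudes are returned as a FloatGrid (the component type).
//     openvdb::FloatGrid::Ptr speed = openvdb::tools::magnitude(*vel);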


/// @brief Normalize the vectors of the given vector-valued grid.
/// @return a new vector-valued grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask's active topology and the input's active topology,
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

template<typename GridType> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded = true)
{
    return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

template<typename GridType, typename MaskT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
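
// Usage sketch for the normalize() functions above, assuming a Vec3SGrid
// named "grad" holding gradient vectors (illustrative):
//
//     // Each active vector is rescaled to unit length; vectors that cannot be
//     // normalized are set to zero.
//     openvdb::Vec3SGrid::Ptr unitGrad = openvdb::tools::normalize(*grad);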


////////////////////////////////////////


namespace gridop {

/// @brief ToMaskGrid<T>::Type is the type of a grid having the same
/// tree hierarchy as grid type T but a value equal to its active state.
/// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
template<typename GridType>
struct ToMaskGrid {
    typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type;
};


/// @brief Apply an operator to an input grid to produce an output grid
/// with the same active voxel topology but a potentially different value type.
/// @details To facilitate inlining, this class is also templated on a Map type.
///
/// @note This is a helper class and should never be used directly.
template<
    typename InGridT,
    typename MaskGridType,
    typename OutGridT,
    typename MapT,
    typename OperatorT,
    typename InterruptT = util::NullInterrupter>
class GridOperator
{
public:
    typedef typename OutGridT::TreeType OutTreeT;
    typedef typename OutTreeT::LeafNodeType OutLeafT;
    typedef tree::LeafManager<OutTreeT> LeafManagerT;

    GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
        InterruptT* interrupt = nullptr, bool densify = true)
        : mAcc(grid.getConstAccessor())
        , mMap(map)
        , mInterrupt(interrupt)
        , mMask(mask)
        , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
    {
    }
    GridOperator(const GridOperator&) = default;
    GridOperator& operator=(const GridOperator&) = default;
    virtual ~GridOperator() = default;

    typename OutGridT::Ptr process(bool threaded = true)
    {
        if (mInterrupt) mInterrupt->start("Processing grid");

        // Derive the background value of the output grid.
        typename InGridT::TreeType tmp(mAcc.tree().background());
        typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));

        // The output tree is a topology copy, optionally densified, of the input tree.
        // (Densification is necessary for some operators because applying the operator to
        // a constant tile produces distinct output values, particularly along tile borders.)
        /// @todo Can tiles be handled correctly without densification, or by densifying
        /// only to the width of the operator stencil?
        typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
        if (mDensify) tree->voxelizeActiveTiles();

        // Create a grid with the output tree and a unit transform.
        typename OutGridT::Ptr result(new OutGridT(tree));

        // Modify the solution area if a mask was supplied.
        if (mMask) {
            result->topologyIntersection(*mMask);
        }

        // The transform of the output grid equals the transform of the input grid.
        result->setTransform(math::Transform::Ptr(new math::Transform(mMap.copy())));

        LeafManagerT leafManager(*tree);

        if (threaded) {
            tbb::parallel_for(leafManager.leafRange(), *this);
        } else {
            (*this)(leafManager.leafRange());
        }

        // If the tree wasn't densified, it might have active tiles that need to be processed.
        if (!mDensify) {
            using TileIter = typename OutTreeT::ValueOnIter;

            TileIter tileIter = tree->beginValueOn();
            tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)

            AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
            auto tileOp = [this, inAcc](const TileIter& it) {
                // Apply the operator to the input grid's tile value at the iterator's
                // current coordinates, and set the output tile's value to the result.
                it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
            };

            // Apply the operator to tile values, optionally in parallel.
            // (But don't share the functor; each thread needs its own accessor.)
            tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
        }

        if (mDensify) tree->prune();

        if (mInterrupt) mInterrupt->end();
        return result;
    }

    /// @brief Iterate sequentially over LeafNodes and voxels in the output
    /// grid and apply the operator using a value accessor for the input grid.
    ///
    /// @note Never call this public method directly: it is called by
    /// TBB threads only!
    void operator()(const typename LeafManagerT::LeafRange& range) const
    {
        if (util::wasInterrupted(mInterrupt)) tbb::task::self().cancel_group_execution();

        for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
            for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
                value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
            }
        }
    }

protected:
    typedef typename InGridT::ConstAccessor AccessorT;
    mutable AccessorT mAcc;
    const MapT& mMap;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
    const bool mDensify;
}; // end of GridOperator class

} // namespace gridop


////////////////////////////////////////


/// @brief Compute the closest-point transform of a scalar grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Cpt
{
public:
    typedef InGridT InGridType;
    typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;

    Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
    {
        Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
        return functor.mOutputGrid;
    }

private:
    struct IsOpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT& map, const AccT& acc, const Coord& xyz)
        {
            return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
        }
    };
    struct WsOpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT& map, const AccT& acc, const Coord& xyz)
        {
            return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
        }
    };
    struct Functor
    {
        Functor(const InGridType& grid, const MaskGridType* mask,
            bool threaded, bool worldspace, InterruptT* interrupt)
            : mThreaded(threaded)
            , mWorldSpace(worldspace)
            , mInputGrid(grid)
            , mInterrupt(interrupt)
            , mMask(mask)
        {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            if (mWorldSpace) {
                gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
                    op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
                mOutputGrid = op.process(mThreaded); // cache the result
            } else {
                gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
                    op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
                mOutputGrid = op.process(mThreaded); // cache the result
            }
        }
        const bool mThreaded;
        const bool mWorldSpace;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    };
    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Cpt class
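
// Usage sketch for the Cpt class, which exposes options that the cpt()
// convenience functions do not. "sdf" is an illustrative FloatGrid and the
// interrupter is the library's no-op placeholder:
//
//     openvdb::util::NullInterrupter interrupter;
//     openvdb::tools::Cpt<openvdb::FloatGrid> op(*sdf, &interrupter);
//     // Pass useWorldTransform = false for index-space closest points.
//     openvdb::Vec3SGrid::Ptr closest =
//         op.process(/*threaded=*/true, /*useWorldTransform=*/true);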


////////////////////////////////////////


/// @brief Compute the curl of a vector grid.
template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Curl
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    Curl(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

private:
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Curl<MapT, math::CD_2ND> OpT;
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Curl class


////////////////////////////////////////


/// @brief Compute the divergence of a vector grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Divergence
{
public:
    typedef InGridT InGridType;
    typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;

    Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename OutGridType::Ptr process(bool threaded = true)
    {
        if (mInputGrid.getGridClass() == GRID_STAGGERED) {
            Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
            processTypedMap(mInputGrid.transform(), functor);
            return functor.mOutputGrid;
        } else {
            Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
            processTypedMap(mInputGrid.transform(), functor);
            return functor.mOutputGrid;
        }
    }

protected:
    template<math::DScheme DiffScheme>
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Divergence<MapT, DiffScheme> OpT;
            gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Divergence class


////////////////////////////////////////


/// @brief Compute the gradient of a scalar grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Gradient
{
public:
    typedef InGridT InGridType;
    typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;

    Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename OutGridType::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Gradient<MapT, math::CD_2ND> OpT;
            gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridT& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Gradient class


////////////////////////////////////////


template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Laplacian
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Laplacian class


////////////////////////////////////////


template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class MeanCurvature
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of MeanCurvature class


////////////////////////////////////////


template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Magnitude
{
public:
    typedef InGridT InGridType;
    typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;

    Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename OutGridType::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        return functor.mOutputGrid;
    }

protected:
    struct OpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length(); }
    };
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Magnitude class


////////////////////////////////////////


template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Normalize
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
            const VecType vecType = mInputGrid.getVectorType();
            if (vecType == VEC_COVARIANT) {
                outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
            } else {
                outGrid->setVectorType(vecType);
            }
        }
        return functor.mOutputGrid;
    }

protected:
    struct OpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT&, const AccT& acc, const Coord& xyz)
        {
            typename OutGridType::ValueType vec = acc.getValue(xyz);
            if (!vec.normalize()) vec.setZero();
            return vec;
        }
    };
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Normalize class


////////////////////////////////////////


template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
        op(grid, interrupt);
    return op.process(threaded);
}

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
    return op.process(threaded);
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED