HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
GridOperators.h
Go to the documentation of this file.
1 ///////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (c) 2012-2018 DreamWorks Animation LLC
4 //
5 // All rights reserved. This software is distributed under the
6 // Mozilla Public License 2.0 ( http://www.mozilla.org/MPL/2.0/ )
7 //
8 // Redistributions of source code must retain the above copyright
9 // and license notice and the following restrictions and disclaimer.
10 //
11 // * Neither the name of DreamWorks Animation nor the names of
12 // its contributors may be used to endorse or promote products derived
13 // from this software without specific prior written permission.
14 //
15 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY INDIRECT, INCIDENTAL,
20 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 // IN NO EVENT SHALL THE COPYRIGHT HOLDERS' AND CONTRIBUTORS' AGGREGATE
27 // LIABILITY FOR ALL CLAIMS REGARDLESS OF THEIR BASIS EXCEED US$250.00.
28 //
29 ///////////////////////////////////////////////////////////////////////////
30 
31 /// @file tools/GridOperators.h
32 ///
33 /// @brief Apply an operator to an input grid to produce an output grid
34 /// with the same active voxel topology but a potentially different value type.
35 
36 #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
37 #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
38 
39 #include <openvdb/Grid.h>
40 #include <openvdb/math/Operators.h>
44 #include "ValueTransformer.h" // for tools::foreach()
45 #include <tbb/parallel_for.h>
46 
47 
48 namespace openvdb {
50 namespace OPENVDB_VERSION_NAME {
51 namespace tools {
52 
53 /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
54 /// having the same tree configuration as VectorGridType but a scalar value type, T,
55 /// where T is the type of the original vector components.
56 /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
template<typename VectorGridType> struct VectorToScalarConverter {
    // The scalar type is the component type of the input grid's vector values
    // (e.g., double for Vec3d). [Restored: this typedef was lost in extraction
    // but is required by the ValueConverter line below.]
    typedef typename VectorGridType::ValueType::value_type VecComponentValueT;
    typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type;
};
61 
62 /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
63 /// having the same tree configuration as ScalarGridType but value type Vec3<T>
64 /// where T is ScalarGridType::ValueType.
65 /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
66 template<typename ScalarGridType> struct ScalarToVectorConverter {
68  typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
69 };
70 
71 
72 /// @brief Compute the Closest-Point Transform (CPT) from a distance field.
73 /// @return a new vector-valued grid with the same numerical precision as the input grid
74 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
75 /// @details When a mask grid is specified, the solution is calculated only in
76 /// the intersection of the mask active topology and the input active topology
77 /// independent of the transforms associated with either grid.
78 template<typename GridType, typename InterruptT> inline
80 cpt(const GridType& grid, bool threaded, InterruptT* interrupt);
81 
82 template<typename GridType, typename MaskT, typename InterruptT> inline
84 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
85 
86 template<typename GridType> inline
88 cpt(const GridType& grid, bool threaded = true)
89 {
90  return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
91 }
92 
// Convenience overload: masked CPT with no interrupter; forwards to the
// interruptible variant with a null interrupter.
template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
99 
100 
101 /// @brief Compute the curl of the given vector-valued grid.
102 /// @return a new vector-valued grid
103 /// @details When a mask grid is specified, the solution is calculated only in
104 /// the intersection of the mask active topology and the input active topology
105 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded = true)
{
    return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
127 
128 
129 /// @brief Compute the divergence of the given vector-valued grid.
130 /// @return a new scalar-valued grid with the same numerical precision as the input grid
131 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
132 /// @details When a mask grid is specified, the solution is calculated only in
133 /// the intersection of the mask active topology and the input active topology
134 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded = true)
{
    return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
156 
157 
158 /// @brief Compute the gradient of the given scalar grid.
159 /// @return a new vector-valued grid with the same numerical precision as the input grid
160 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
161 /// @details When a mask grid is specified, the solution is calculated only in
162 /// the intersection of the mask active topology and the input active topology
163 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded = true)
{
    return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
185 
186 
187 /// @brief Compute the Laplacian of the given scalar grid.
188 /// @return a new scalar grid
189 /// @details When a mask grid is specified, the solution is calculated only in
190 /// the intersection of the mask active topology and the input active topology
191 /// independent of the transforms associated with either grid.
192 template<typename GridType, typename InterruptT> inline
193 typename GridType::Ptr
194 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);
195 
196 template<typename GridType, typename MaskT, typename InterruptT> inline
197 typename GridType::Ptr
198 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
199 
200 template<typename GridType> inline
201 typename GridType::Ptr
202 laplacian(const GridType& grid, bool threaded = true)
203 {
204  return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
205 }
206 
207 template<typename GridType, typename MaskT> inline
208 typename GridType::Ptr
209 laplacian(const GridType& grid, const MaskT mask, bool threaded = true)
210 {
211  return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
212 }
213 
214 
215 /// @brief Compute the mean curvature of the given grid.
216 /// @return a new grid
217 /// @details When a mask grid is specified, the solution is calculated only in
218 /// the intersection of the mask active topology and the input active topology
219 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded = true)
{
    return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
241 
242 
243 /// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
244 /// @return a new scalar-valued grid with the same numerical precision as the input grid
245 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
246 /// @details When a mask grid is specified, the solution is calculated only in
247 /// the intersection of the mask active topology and the input active topology
248 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded = true)
{
    return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
270 
271 
272 /// @brief Normalize the vectors of the given vector-valued grid.
273 /// @return a new vector-valued grid
274 /// @details When a mask grid is specified, the solution is calculated only in
275 /// the intersection of the mask active topology and the input active topology
276 /// independent of the transforms associated with either grid.
// Forward declaration: full control over threading and interruption.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt);

// Forward declaration: masked variant; the solution is restricted to the
// intersection of the mask's and the input grid's active topology.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

// Convenience overload: no interrupter.
template<typename GridType> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded = true)
{
    return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

// Convenience overload: masked, no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
298 
299 
300 ////////////////////////////////////////
301 
302 
303 namespace gridop {
304 
305 /// @brief ToMaskGrid<T>::Type is the type of a grid having the same
306 /// tree hierarchy as grid type T but a value equal to its active state.
307 /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
308 template<typename GridType>
309 struct ToMaskGrid {
311 };
312 
313 
314 /// @brief Apply an operator to an input grid to produce an output grid
315 /// with the same active voxel topology but a potentially different value type.
316 /// @details To facilitate inlining, this class is also templated on a Map type.
317 ///
318 /// @note This is a helper class and should never be used directly.
319 template<
320  typename InGridT,
321  typename MaskGridType,
322  typename OutGridT,
323  typename MapT,
324  typename OperatorT,
325  typename InterruptT = util::NullInterrupter>
327 {
328 public:
329  typedef typename OutGridT::TreeType OutTreeT;
330  typedef typename OutTreeT::LeafNodeType OutLeafT;
332 
333  GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
334  InterruptT* interrupt = nullptr, bool densify = true)
335  : mAcc(grid.getConstAccessor())
336  , mMap(map)
337  , mInterrupt(interrupt)
338  , mMask(mask)
339  , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
340  {
341  }
342  GridOperator(const GridOperator&) = default;
343  GridOperator& operator=(const GridOperator&) = default;
344  virtual ~GridOperator() = default;
345 
346  typename OutGridT::Ptr process(bool threaded = true)
347  {
348  if (mInterrupt) mInterrupt->start("Processing grid");
349 
350  // Derive background value of the output grid
351  typename InGridT::TreeType tmp(mAcc.tree().background());
352  typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));
353 
354  // The output tree is topology copy, optionally densified, of the input tree.
355  // (Densification is necessary for some operators because applying the operator to
356  // a constant tile produces distinct output values, particularly along tile borders.)
357  /// @todo Can tiles be handled correctly without densification, or by densifying
358  /// only to the width of the operator stencil?
359  typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
360  if (mDensify) tree->voxelizeActiveTiles();
361 
362  // create grid with output tree and unit transform
363  typename OutGridT::Ptr result(new OutGridT(tree));
364 
365  // Modify the solution area if a mask was supplied.
366  if (mMask) {
367  result->topologyIntersection(*mMask);
368  }
369 
370  // transform of output grid = transform of input grid
371  result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));
372 
373  LeafManagerT leafManager(*tree);
374 
375  if (threaded) {
376  tbb::parallel_for(leafManager.leafRange(), *this);
377  } else {
378  (*this)(leafManager.leafRange());
379  }
380 
381  // If the tree wasn't densified, it might have active tiles that need to be processed.
382  if (!mDensify) {
383  using TileIter = typename OutTreeT::ValueOnIter;
384 
385  TileIter tileIter = tree->beginValueOn();
386  tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)
387 
388  AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
389  auto tileOp = [this, inAcc](const TileIter& it) {
390  // Apply the operator to the input grid's tile value at the iterator's
391  // current coordinates, and set the output tile's value to the result.
392  it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
393  };
394 
395  // Apply the operator to tile values, optionally in parallel.
396  // (But don't share the functor; each thread needs its own accessor.)
397  tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
398  }
399 
400  if (mDensify) tree->prune();
401 
402  if (mInterrupt) mInterrupt->end();
403  return result;
404  }
405 
406  /// @brief Iterate sequentially over LeafNodes and voxels in the output
407  /// grid and apply the operator using a value accessor for the input grid.
408  ///
409  /// @note Never call this public method directly - it is called by
410  /// TBB threads only!
411  void operator()(const typename LeafManagerT::LeafRange& range) const
412  {
413  if (util::wasInterrupted(mInterrupt)) tbb::task::self().cancel_group_execution();
414 
415  for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
416  for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
417  value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
418  }
419  }
420  }
421 
422 protected:
423  typedef typename InGridT::ConstAccessor AccessorT;
424  mutable AccessorT mAcc;
425  const MapT& mMap;
426  InterruptT* mInterrupt;
427  const MaskGridType* mMask;
428  const bool mDensify;
429 }; // end of GridOperator class
430 
431 } // namespace gridop
432 
433 
434 ////////////////////////////////////////
435 
436 
437 /// @brief Compute the closest-point transform of a scalar grid.
438 template<
439  typename InGridT,
440  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
441  typename InterruptT = util::NullInterrupter>
442 class Cpt
443 {
444 public:
445  typedef InGridT InGridType;
447 
448  Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
449  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
450  {
451  }
452 
453  Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
454  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
455  {
456  }
457 
458  typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
459  {
460  Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
461  processTypedMap(mInputGrid.transform(), functor);
462  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
463  return functor.mOutputGrid;
464  }
465 
466 private:
467  struct IsOpT
468  {
469  template<typename MapT, typename AccT>
470  static typename OutGridType::ValueType
471  result(const MapT& map, const AccT& acc, const Coord& xyz)
472  {
473  return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
474  }
475  };
476  struct WsOpT
477  {
478  template<typename MapT, typename AccT>
479  static typename OutGridType::ValueType
480  result(const MapT& map, const AccT& acc, const Coord& xyz)
481  {
482  return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
483  }
484  };
485  struct Functor
486  {
487  Functor(const InGridType& grid, const MaskGridType* mask,
488  bool threaded, bool worldspace, InterruptT* interrupt)
489  : mThreaded(threaded)
490  , mWorldSpace(worldspace)
491  , mInputGrid(grid)
492  , mInterrupt(interrupt)
493  , mMask(mask)
494  {}
495 
496  template<typename MapT>
497  void operator()(const MapT& map)
498  {
499  if (mWorldSpace) {
500  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
501  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
502  mOutputGrid = op.process(mThreaded); // cache the result
503  } else {
504  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
505  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
506  mOutputGrid = op.process(mThreaded); // cache the result
507  }
508  }
509  const bool mThreaded;
510  const bool mWorldSpace;
511  const InGridType& mInputGrid;
512  typename OutGridType::Ptr mOutputGrid;
513  InterruptT* mInterrupt;
514  const MaskGridType* mMask;
515  };
516  const InGridType& mInputGrid;
517  InterruptT* mInterrupt;
518  const MaskGridType* mMask;
519 }; // end of Cpt class
520 
521 
522 ////////////////////////////////////////
523 
524 
525 /// @brief Compute the curl of a vector grid.
526 template<
527  typename GridT,
528  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
529  typename InterruptT = util::NullInterrupter>
530 class Curl
531 {
532 public:
533  typedef GridT InGridType;
534  typedef GridT OutGridType;
535 
536  Curl(const GridT& grid, InterruptT* interrupt = nullptr):
537  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
538  {
539  }
540 
541  Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
542  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
543  {
544  }
545 
546  typename GridT::Ptr process(bool threaded = true)
547  {
548  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
549  processTypedMap(mInputGrid.transform(), functor);
550  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
551  return functor.mOutputGrid;
552  }
553 
554 private:
555  struct Functor
556  {
557  Functor(const GridT& grid, const MaskGridType* mask,
558  bool threaded, InterruptT* interrupt):
559  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
560 
561  template<typename MapT>
562  void operator()(const MapT& map)
563  {
564  typedef math::Curl<MapT, math::CD_2ND> OpT;
565  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
566  op(mInputGrid, mMask, map, mInterrupt);
567  mOutputGrid = op.process(mThreaded); // cache the result
568  }
569 
570  const bool mThreaded;
571  const GridT& mInputGrid;
572  typename GridT::Ptr mOutputGrid;
573  InterruptT* mInterrupt;
574  const MaskGridType* mMask;
575  }; // Private Functor
576 
577  const GridT& mInputGrid;
578  InterruptT* mInterrupt;
579  const MaskGridType* mMask;
580 }; // end of Curl class
581 
582 
583 ////////////////////////////////////////
584 
585 
586 /// @brief Compute the divergence of a vector grid.
587 template<
588  typename InGridT,
589  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
590  typename InterruptT = util::NullInterrupter>
592 {
593 public:
594  typedef InGridT InGridType;
596 
597  Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
598  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
599  {
600  }
601 
602  Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
603  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
604  {
605  }
606 
607  typename OutGridType::Ptr process(bool threaded = true)
608  {
609  if (mInputGrid.getGridClass() == GRID_STAGGERED) {
610  Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
611  processTypedMap(mInputGrid.transform(), functor);
612  return functor.mOutputGrid;
613  } else {
614  Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
615  processTypedMap(mInputGrid.transform(), functor);
616  return functor.mOutputGrid;
617  }
618  }
619 
620 protected:
621  template<math::DScheme DiffScheme>
622  struct Functor
623  {
624  Functor(const InGridT& grid, const MaskGridType* mask,
625  bool threaded, InterruptT* interrupt):
626  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
627 
628  template<typename MapT>
629  void operator()(const MapT& map)
630  {
633  op(mInputGrid, mMask, map, mInterrupt);
634  mOutputGrid = op.process(mThreaded); // cache the result
635  }
636 
637  const bool mThreaded;
639  typename OutGridType::Ptr mOutputGrid;
640  InterruptT* mInterrupt;
641  const MaskGridType* mMask;
642  }; // Private Functor
643 
645  InterruptT* mInterrupt;
646  const MaskGridType* mMask;
647 }; // end of Divergence class
648 
649 
650 ////////////////////////////////////////
651 
652 
653 /// @brief Compute the gradient of a scalar grid.
654 template<
655  typename InGridT,
656  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
657  typename InterruptT = util::NullInterrupter>
658 class Gradient
659 {
660 public:
661  typedef InGridT InGridType;
663 
664  Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
665  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
666  {
667  }
668 
669  Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
670  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
671  {
672  }
673 
674  typename OutGridType::Ptr process(bool threaded = true)
675  {
676  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
677  processTypedMap(mInputGrid.transform(), functor);
678  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
679  return functor.mOutputGrid;
680  }
681 
682 protected:
683  struct Functor
684  {
685  Functor(const InGridT& grid, const MaskGridType* mask,
686  bool threaded, InterruptT* interrupt):
687  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
688 
689  template<typename MapT>
690  void operator()(const MapT& map)
691  {
694  op(mInputGrid, mMask, map, mInterrupt);
695  mOutputGrid = op.process(mThreaded); // cache the result
696  }
697 
698  const bool mThreaded;
699  const InGridT& mInputGrid;
700  typename OutGridType::Ptr mOutputGrid;
701  InterruptT* mInterrupt;
702  const MaskGridType* mMask;
703  }; // Private Functor
704 
705  const InGridT& mInputGrid;
706  InterruptT* mInterrupt;
707  const MaskGridType* mMask;
708 }; // end of Gradient class
709 
710 
711 ////////////////////////////////////////
712 
713 
714 template<
715  typename GridT,
716  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
717  typename InterruptT = util::NullInterrupter>
719 {
720 public:
721  typedef GridT InGridType;
722  typedef GridT OutGridType;
723 
724  Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
725  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
726  {
727  }
728 
729  Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
730  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
731  {
732  }
733 
734  typename GridT::Ptr process(bool threaded = true)
735  {
736  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
737  processTypedMap(mInputGrid.transform(), functor);
738  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
739  return functor.mOutputGrid;
740  }
741 
742 protected:
743  struct Functor
744  {
745  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
746  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
747 
748  template<typename MapT>
749  void operator()(const MapT& map)
750  {
753  op(mInputGrid, mMask, map, mInterrupt);
754  mOutputGrid = op.process(mThreaded); // cache the result
755  }
756 
757  const bool mThreaded;
758  const GridT& mInputGrid;
759  typename GridT::Ptr mOutputGrid;
760  InterruptT* mInterrupt;
761  const MaskGridType* mMask;
762  }; // Private Functor
763 
764  const GridT& mInputGrid;
765  InterruptT* mInterrupt;
766  const MaskGridType* mMask;
767 }; // end of Laplacian class
768 
769 
770 ////////////////////////////////////////
771 
772 
773 template<
774  typename GridT,
775  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
776  typename InterruptT = util::NullInterrupter>
778 {
779 public:
780  typedef GridT InGridType;
781  typedef GridT OutGridType;
782 
783  MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
784  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
785  {
786  }
787 
788  MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
789  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
790  {
791  }
792 
793  typename GridT::Ptr process(bool threaded = true)
794  {
795  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
796  processTypedMap(mInputGrid.transform(), functor);
797  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
798  return functor.mOutputGrid;
799  }
800 
801 protected:
802  struct Functor
803  {
804  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
805  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
806 
807  template<typename MapT>
808  void operator()(const MapT& map)
809  {
812  op(mInputGrid, mMask, map, mInterrupt);
813  mOutputGrid = op.process(mThreaded); // cache the result
814  }
815 
816  const bool mThreaded;
817  const GridT& mInputGrid;
818  typename GridT::Ptr mOutputGrid;
819  InterruptT* mInterrupt;
820  const MaskGridType* mMask;
821  }; // Private Functor
822 
823  const GridT& mInputGrid;
824  InterruptT* mInterrupt;
825  const MaskGridType* mMask;
826 }; // end of MeanCurvature class
827 
828 
829 ////////////////////////////////////////
830 
831 
832 template<
833  typename InGridT,
834  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
835  typename InterruptT = util::NullInterrupter>
837 {
838 public:
839  typedef InGridT InGridType;
841 
842  Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
843  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
844  {
845  }
846 
847  Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
848  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
849  {
850  }
851 
852  typename OutGridType::Ptr process(bool threaded = true)
853  {
854  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
855  processTypedMap(mInputGrid.transform(), functor);
856  return functor.mOutputGrid;
857  }
858 
859 protected:
860  struct OpT
861  {
862  template<typename MapT, typename AccT>
863  static typename OutGridType::ValueType
864  result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
865  };
866  struct Functor
867  {
868  Functor(const InGridT& grid, const MaskGridType* mask,
869  bool threaded, InterruptT* interrupt):
870  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
871 
872  template<typename MapT>
873  void operator()(const MapT& map)
874  {
876  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
877  mOutputGrid = op.process(mThreaded); // cache the result
878  }
879 
880  const bool mThreaded;
882  typename OutGridType::Ptr mOutputGrid;
883  InterruptT* mInterrupt;
884  const MaskGridType* mMask;
885  }; // Private Functor
886 
888  InterruptT* mInterrupt;
889  const MaskGridType* mMask;
890 }; // end of Magnitude class
891 
892 
893 ////////////////////////////////////////
894 
895 
896 template<
897  typename GridT,
898  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
899  typename InterruptT = util::NullInterrupter>
901 {
902 public:
903  typedef GridT InGridType;
904  typedef GridT OutGridType;
905 
906  Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
907  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
908  {
909  }
910 
911  Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
912  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
913  {
914  }
915 
916  typename GridT::Ptr process(bool threaded = true)
917  {
918  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
919  processTypedMap(mInputGrid.transform(), functor);
920  if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
921  const VecType vecType = mInputGrid.getVectorType();
922  if (vecType == VEC_COVARIANT) {
923  outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
924  } else {
925  outGrid->setVectorType(vecType);
926  }
927  }
928  return functor.mOutputGrid;
929  }
930 
931 protected:
932  struct OpT
933  {
934  template<typename MapT, typename AccT>
935  static typename OutGridType::ValueType
936  result(const MapT&, const AccT& acc, const Coord& xyz)
937  {
938  typename OutGridType::ValueType vec = acc.getValue(xyz);
939  if ( !vec.normalize() ) vec.setZero();
940  return vec;
941  }
942  };
943  struct Functor
944  {
945  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
946  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
947 
948  template<typename MapT>
949  void operator()(const MapT& map)
950  {
952  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
953  mOutputGrid = op.process(mThreaded); // cache the result
954  }
955 
956  const bool mThreaded;
957  const GridT& mInputGrid;
958  typename GridT::Ptr mOutputGrid;
959  InterruptT* mInterrupt;
960  const MaskGridType* mMask;
961  }; // Private Functor
962 
963  const GridT& mInputGrid;
964  InterruptT* mInterrupt;
965  const MaskGridType* mMask;
966 }; // end of Normalize class
967 
968 
969 ////////////////////////////////////////
970 
971 
972 template<typename GridType, typename InterruptT> inline
974 cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
975 {
976  Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
977  return op.process(threaded);
978 }
979 
980 template<typename GridType, typename MaskT, typename InterruptT> inline
981 typename ScalarToVectorConverter<GridType>::Type::Ptr
982 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
983 {
984  Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
985  return op.process(threaded);
986 }
987 
988 template<typename GridType, typename InterruptT> inline
989 typename GridType::Ptr
990 curl(const GridType& grid, bool threaded, InterruptT* interrupt)
991 {
992  Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
993  return op.process(threaded);
994 }
995 
996 template<typename GridType, typename MaskT, typename InterruptT> inline
997 typename GridType::Ptr
998 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
999 {
1000  Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1001  return op.process(threaded);
1002 }
1003 
1004 template<typename GridType, typename InterruptT> inline
1005 typename VectorToScalarConverter<GridType>::Type::Ptr
1006 divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
1007 {
1009  op(grid, interrupt);
1010  return op.process(threaded);
1011 }
1012 
1013 template<typename GridType, typename MaskT, typename InterruptT> inline
1014 typename VectorToScalarConverter<GridType>::Type::Ptr
1015 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1016 {
1017  Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1018  return op.process(threaded);
1019 }
1020 
1021 template<typename GridType, typename InterruptT> inline
1022 typename ScalarToVectorConverter<GridType>::Type::Ptr
1023 gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
1024 {
1026  op(grid, interrupt);
1027  return op.process(threaded);
1028 }
1029 
1030 template<typename GridType, typename MaskT, typename InterruptT> inline
1031 typename ScalarToVectorConverter<GridType>::Type::Ptr
1032 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1033 {
1034  Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1035  return op.process(threaded);
1036 }
1037 
1038 template<typename GridType, typename InterruptT> inline
1039 typename GridType::Ptr
1040 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
1041 {
1043  op(grid, interrupt);
1044  return op.process(threaded);
1045 }
1046 
1047 template<typename GridType, typename MaskT, typename InterruptT> inline
1048 typename GridType::Ptr
1049 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1050 {
1051  Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1052  return op.process(threaded);
1053 }
1054 
1055 template<typename GridType, typename InterruptT> inline
1056 typename GridType::Ptr
1057 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
1058 {
1060  op(grid, interrupt);
1061  return op.process(threaded);
1062 }
1063 
1064 template<typename GridType, typename MaskT, typename InterruptT> inline
1065 typename GridType::Ptr
1066 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1067 {
1068  MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1069  return op.process(threaded);
1070 }
1071 
1072 template<typename GridType, typename InterruptT> inline
1073 typename VectorToScalarConverter<GridType>::Type::Ptr
1074 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
1075 {
1077  op(grid, interrupt);
1078  return op.process(threaded);
1079 }
1080 
1081 template<typename GridType, typename MaskT, typename InterruptT> inline
1082 typename VectorToScalarConverter<GridType>::Type::Ptr
1083 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1084 {
1085  Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1086  return op.process(threaded);
1087 }
1088 
1089 template<typename GridType, typename InterruptT> inline
1090 typename GridType::Ptr
1091 normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
1092 {
1094  op(grid, interrupt);
1095  return op.process(threaded);
1096 }
1097 
1098 template<typename GridType, typename MaskT, typename InterruptT> inline
1099 typename GridType::Ptr
1100 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1101 {
1102  Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1103  return op.process(threaded);
1104 }
1105 
1106 } // namespace tools
1107 } // namespace OPENVDB_VERSION_NAME
1108 } // namespace openvdb
1109 
1110 #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
1111 
1112 // Copyright (c) 2012-2018 DreamWorks Animation LLC
1113 // All rights reserved. This software is distributed under the
1114 // Mozilla Public License 2.0 ( http://www.mozilla.org/MPL/2.0/ )
Compute the closest-point transform of a scalar grid.
Compute the gradient of a scalar grid.
Functor(const InGridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)
OutGridType::Ptr process(bool threaded=true)
OutGridType::Ptr process(bool threaded=true, bool useWorldTransform=true)
GLenum GLint * range
Definition: glcorearb.h:1924
Normalize(const GridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
LeafRange leafRange(size_t grainsize=1) const
Return a TBB-compatible LeafRange.
Definition: LeafManager.h:386
OutGridType::Ptr process(bool threaded=true)
static OutGridType::ValueType result(const MapT &, const AccT &acc, const Coord &xyz)
VectorGridType::template ValueConverter< VecComponentValueT >::Type Type
Definition: GridOperators.h:59
VectorToScalarConverter<VectorGridType>::Type is the type of a grid having the same tree configuration as VectorGridType, but whose value type is the scalar component type of VectorGridType's vectors (VectorGridType::ValueType::value_type).
Definition: GridOperators.h:57
GridOperator(const InGridT &grid, const MaskGridType *mask, const MapT &map, InterruptT *interrupt=nullptr, bool densify=true)
MeanCurvature(const GridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
Compute the divergence of a vector grid.
GridT::Ptr process(bool threaded=true)
Cpt(const InGridType &grid, InterruptT *interrupt=nullptr)
GLint GLuint mask
Definition: glcorearb.h:123
#define OPENVDB_USE_VERSION_NAMESPACE
Definition: version.h:189
MeanCurvature(const GridT &grid, InterruptT *interrupt=nullptr)
ToMaskGrid<T>::Type is the type of a grid having the same tree hierarchy as grid type T, but with a ValueMask value type — i.e. it stores only active-state topology, no values.
ScalarToVectorConverter< InGridT >::Type OutGridType
Signed (x, y, z) 32-bit integer coordinates.
Definition: Coord.h:51
Gradient(const InGridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
Dummy NOOP interrupter class defining interface.
ScalarToVectorConverter< InGridT >::Type OutGridType
uint64 value_type
Definition: GA_PrimCompat.h:29
Laplacian(const GridT &grid, InterruptT *interrupt=nullptr)
GridOperator & operator=(const GridOperator &)=default
VectorToScalarConverter< GridType >::Type::Ptr magnitude(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the magnitudes of the vectors of the given vector-valued grid.
Grid< typename GridType::TreeType::template ValueConverter< ValueMask >::Type > Type
void foreach(const IterT &iter, XformOp &op, bool threaded=true, bool shareOp=true)
Cpt(const InGridType &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
Curl(const GridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid having the same tree configuration as ScalarGridType, but whose value type is math::Vec3<ScalarGridType::ValueType>.
Definition: GridOperators.h:66
Functor(const InGridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)
Divergence(const InGridT &grid, InterruptT *interrupt=nullptr)
Compute the Laplacian at a given location in a grid using finite differencing of various orders...
Definition: Operators.h:1423
Apply an operator to an input grid to produce an output grid with the same active voxel topology but ...
Functor(const InGridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)
GridT::Ptr process(bool threaded=true)
static math::Vec3< typename Accessor::ValueType > result(const MapType &map, const Accessor &grid, const Coord &ijk)
Definition: Operators.h:1661
Functor(const GridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)
ScalarGridType::template ValueConverter< VectorValueT >::Type Type
Definition: GridOperators.h:68
This class manages a linear array of pointers to a given tree's leaf nodes, as well as optional auxil...
Definition: LeafManager.h:110
Curl(const GridT &grid, InterruptT *interrupt=nullptr)
VectorToScalarConverter< InGridT >::Type OutGridType
void operator()(const typename LeafManagerT::LeafRange &range) const
Iterate sequentially over LeafNodes and voxels in the output grid and apply the operator using a valu...
GridType::Ptr normalize(const GridType &grid, bool threaded, InterruptT *interrupt)
Normalize the vectors of the given vector-valued grid.
Center difference gradient operators, defined with respect to the range-space of the map...
Definition: Operators.h:643
Container class that associates a tree with a transform and metadata.
Definition: Grid.h:55
VectorToScalarConverter< InGridT >::Type OutGridType
Compute the curl of a vector-valued grid using differencing of various orders in the space defined by...
Definition: Operators.h:1278
static Vec3< typename Accessor::ValueType > result(const MapType &map, const Accessor &grid, const Coord &ijk)
Definition: Operators.h:1714
GridType::Ptr laplacian(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the Laplacian of the given scalar grid.
GLsizei const GLfloat * value
Definition: glcorearb.h:823
Magnitude(const InGridType &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
OutGridType::Ptr process(bool threaded=true)
VectorGridType::ValueType::value_type VecComponentValueT
Definition: GridOperators.h:58
ScalarToVectorConverter< GridType >::Type::Ptr gradient(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the gradient of the given scalar grid.
Gradient(const InGridT &grid, InterruptT *interrupt=nullptr)
Divergence(const InGridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
Magnitude(const InGridType &grid, InterruptT *interrupt=nullptr)
Compute the curl of a vector grid.
Functor(const GridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)
math::Vec3< typename ScalarGridType::ValueType > VectorValueT
Definition: GridOperators.h:67
Laplacian(const GridT &grid, const MaskGridType &mask, InterruptT *interrupt=nullptr)
Tag dispatch class that distinguishes topology copy constructors from deep copy constructors.
Definition: Types.h:518
VectorToScalarConverter< GridType >::Type::Ptr divergence(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the divergence of the given vector-valued grid.
A LeafManager manages a linear array of pointers to a given tree's leaf nodes, as well as optional au...
Compute the divergence of a vector-valued grid using differencing of various orders, the result defined with respect to the range-space of the map.
Definition: Operators.h:953
GridType::Ptr meanCurvature(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the mean curvature of the given grid.
Normalize(const GridT &grid, InterruptT *interrupt=nullptr)
GridType::Ptr curl(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the curl of the given vector-valued grid.
bool wasInterrupted(T *i, int percent=-1)
ScalarToVectorConverter< GridType >::Type::Ptr cpt(const GridType &grid, bool threaded, InterruptT *interrupt)
Compute the Closest-Point Transform (CPT) from a distance field.
#define OPENVDB_VERSION_NAME
The version namespace name for this library version.
Definition: version.h:135
GridT::Ptr process(bool threaded=true)
static OutGridType::ValueType result(const MapT &, const AccT &acc, const Coord &xyz)
bool processTypedMap(TransformType &transform, OpType &op)
Utility function that, given a generic map pointer, calls a functor on the fully-resoved map...
Definition: Transform.h:268
Functor(const GridT &grid, const MaskGridType *mask, bool threaded, InterruptT *interrupt)