GridOperators.h
1 ///////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (c) 2012-2017 DreamWorks Animation LLC
4 //
5 // All rights reserved. This software is distributed under the
6 // Mozilla Public License 2.0 ( http://www.mozilla.org/MPL/2.0/ )
7 //
8 // Redistributions of source code must retain the above copyright
9 // and license notice and the following restrictions and disclaimer.
10 //
11 // * Neither the name of DreamWorks Animation nor the names of
12 // its contributors may be used to endorse or promote products derived
13 // from this software without specific prior written permission.
14 //
15 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY INDIRECT, INCIDENTAL,
20 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 // IN NO EVENT SHALL THE COPYRIGHT HOLDERS' AND CONTRIBUTORS' AGGREGATE
27 // LIABILITY FOR ALL CLAIMS REGARDLESS OF THEIR BASIS EXCEED US$250.00.
28 //
29 ///////////////////////////////////////////////////////////////////////////
30 
31 /// @file GridOperators.h
32 ///
33 /// @brief Applies an operator on an input grid to produce an output
34 /// grid with the same topology but a potentially different value type.
35 
36 #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
37 #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
38 
39 #include <openvdb/Grid.h>
40 #include <openvdb/math/Operators.h>
41 #include <openvdb/tree/LeafManager.h>
42 #include <openvdb/tree/ValueAccessor.h>
43 #include <openvdb/util/NullInterrupter.h>
44 #include <tbb/parallel_for.h>
45 
46 
47 namespace openvdb {
48 OPENVDB_USE_VERSION_NAMESPACE
49 namespace OPENVDB_VERSION_NAME {
50 namespace tools {
51 
52 /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
53 /// having the same tree configuration as VectorGridType but a scalar value type, T,
54 /// where T is the type of the original vector components.
55 /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
56 template<typename VectorGridType> struct VectorToScalarConverter {
57  typedef typename VectorGridType::ValueType::value_type VecComponentValueT;
58  typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type;
59 };
60 
61 /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
62 /// having the same tree configuration as ScalarGridType but value type Vec3<T>
63 /// where T is ScalarGridType::ValueType.
64 /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
65 template<typename ScalarGridType> struct ScalarToVectorConverter {
66  typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT;
67  typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
68 };
69 
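// An illustrative (non-normative) sketch of how the two converters relate for the
// built-in grid types documented above; std::is_same comes from the C++11
// <type_traits> header and is the only assumption beyond this file:
//
//   static_assert(std::is_same<
//       openvdb::tools::ScalarToVectorConverter<openvdb::DoubleGrid>::Type,
//       openvdb::Vec3DGrid>::value, "DoubleGrid -> Vec3DGrid");
//   static_assert(std::is_same<
//       openvdb::tools::VectorToScalarConverter<openvdb::Vec3DGrid>::Type,
//       openvdb::DoubleGrid>::value, "Vec3DGrid -> DoubleGrid");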
70 
71 /// @brief Compute the Closest-Point Transform (CPT) from a distance field.
72 /// @return a new vector-valued grid with the same numerical precision as the input grid
73 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
74 /// @details When a mask grid is specified, the solution is calculated only in
75 /// the intersection of the mask active topology and the input active topology
76 /// independent of the transforms associated with either grid.
77 /// @note The current implementation assumes all the input distance values
78 /// are represented by leaf voxels and not tiles. This is true for all
79 /// narrow-band level sets, which this class was originally developed for.
80 /// In the future we will expand this class to also handle tile values.
81 template<typename GridType, typename InterruptT> inline
82 typename ScalarToVectorConverter<GridType>::Type::Ptr
83 cpt(const GridType& grid, bool threaded, InterruptT* interrupt);
84 
85 template<typename GridType, typename MaskT, typename InterruptT> inline
86 typename ScalarToVectorConverter<GridType>::Type::Ptr
87 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
88 
89 template<typename GridType> inline
90 typename ScalarToVectorConverter<GridType>::Type::Ptr
91 cpt(const GridType& grid, bool threaded = true)
92 {
93  return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
94 }
95 
96 template<typename GridType, typename MaskT> inline
97 typename ScalarToVectorConverter<GridType>::Type::Ptr
98 cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
99 {
100  return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
101 }
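// Usage sketch (assumes a caller-provided narrow-band level set "sdf"):
//
//   openvdb::DoubleGrid::Ptr sdf = ...;                         // signed distance field
//   openvdb::Vec3DGrid::Ptr cptGrid = openvdb::tools::cpt(*sdf);
//   // By default each active voxel now holds the world-space position of the
//   // closest point on the zero isosurface of sdf.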
102 
103 
104 /// @brief Compute the curl of the given vector-valued grid.
105 /// @return a new vector-valued grid
106 /// @details When a mask grid is specified, the solution is calculated only in
107 /// the intersection of the mask active topology and the input active topology
108 /// independent of the transforms associated with either grid.
109 template<typename GridType, typename InterruptT> inline
110 typename GridType::Ptr
111 curl(const GridType& grid, bool threaded, InterruptT* interrupt);
112 
113 template<typename GridType, typename MaskT, typename InterruptT> inline
114 typename GridType::Ptr
115 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
116 
117 template<typename GridType> inline
118 typename GridType::Ptr
119 curl(const GridType& grid, bool threaded = true)
120 {
121  return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
122 }
123 
124 template<typename GridType, typename MaskT> inline
125 typename GridType::Ptr
126 curl(const GridType& grid, const MaskT& mask, bool threaded = true)
127 {
128  return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
129 }
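// Usage sketch (assumes a caller-provided vector field "velocity"):
//
//   openvdb::Vec3SGrid::Ptr velocity = ...;                     // e.g. a fluid velocity field
//   openvdb::Vec3SGrid::Ptr vorticity = openvdb::tools::curl(*velocity);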
130 
131 
132 /// @brief Compute the divergence of the given vector-valued grid.
133 /// @return a new scalar-valued grid with the same numerical precision as the input grid
134 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
135 /// @details When a mask grid is specified, the solution is calculated only in
136 /// the intersection of the mask active topology and the input active topology
137 /// independent of the transforms associated with either grid.
138 template<typename GridType, typename InterruptT> inline
139 typename VectorToScalarConverter<GridType>::Type::Ptr
140 divergence(const GridType& grid, bool threaded, InterruptT* interrupt);
141 
142 template<typename GridType, typename MaskT, typename InterruptT> inline
143 typename VectorToScalarConverter<GridType>::Type::Ptr
144 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
145 
146 template<typename GridType> inline
147 typename VectorToScalarConverter<GridType>::Type::Ptr
148 divergence(const GridType& grid, bool threaded = true)
149 {
150  return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
151 }
152 
153 template<typename GridType, typename MaskT> inline
154 typename VectorToScalarConverter<GridType>::Type::Ptr
155 divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
156 {
157  return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
158 }
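// Usage sketch (assumes a caller-provided vector field "velocity"; the scalar
// result matches the component precision, so a Vec3SGrid yields a FloatGrid):
//
//   openvdb::Vec3SGrid::Ptr velocity = ...;
//   openvdb::FloatGrid::Ptr div = openvdb::tools::divergence(*velocity);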
159 
160 
161 /// @brief Compute the gradient of the given scalar grid.
162 /// @return a new vector-valued grid with the same numerical precision as the input grid
163 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
164 /// @details When a mask grid is specified, the solution is calculated only in
165 /// the intersection of the mask active topology and the input active topology
166 /// independent of the transforms associated with either grid.
167 template<typename GridType, typename InterruptT> inline
168 typename ScalarToVectorConverter<GridType>::Type::Ptr
169 gradient(const GridType& grid, bool threaded, InterruptT* interrupt);
170 
171 template<typename GridType, typename MaskT, typename InterruptT> inline
172 typename ScalarToVectorConverter<GridType>::Type::Ptr
173 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
174 
175 template<typename GridType> inline
176 typename ScalarToVectorConverter<GridType>::Type::Ptr
177 gradient(const GridType& grid, bool threaded = true)
178 {
179  return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
180 }
181 
182 template<typename GridType, typename MaskT> inline
183 typename ScalarToVectorConverter<GridType>::Type::Ptr
184 gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
185 {
186  return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
187 }
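// Usage sketch (assumes a caller-provided scalar grid "density" and, for the
// masked overload, a caller-provided "mask" grid whose active topology limits
// where the gradient is computed):
//
//   openvdb::FloatGrid::Ptr density = ...;
//   openvdb::Vec3SGrid::Ptr grad = openvdb::tools::gradient(*density);
//
//   openvdb::BoolGrid::Ptr mask = ...;                          // restrict the solve
//   openvdb::Vec3SGrid::Ptr maskedGrad = openvdb::tools::gradient(*density, *mask);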
188 
189 
190 /// @brief Compute the Laplacian of the given scalar grid.
191 /// @return a new scalar grid
192 /// @details When a mask grid is specified, the solution is calculated only in
193 /// the intersection of the mask active topology and the input active topology
194 /// independent of the transforms associated with either grid.
195 template<typename GridType, typename InterruptT> inline
196 typename GridType::Ptr
197 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);
198 
199 template<typename GridType, typename MaskT, typename InterruptT> inline
200 typename GridType::Ptr
201 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
202 
203 template<typename GridType> inline
204 typename GridType::Ptr
205 laplacian(const GridType& grid, bool threaded = true)
206 {
207  return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
208 }
209 
210 template<typename GridType, typename MaskT> inline
211 typename GridType::Ptr
212 laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
213 {
214  return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
215 }
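// Usage sketch (assumes a caller-provided scalar grid "sdf"):
//
//   openvdb::FloatGrid::Ptr sdf = ...;
//   openvdb::FloatGrid::Ptr lap = openvdb::tools::laplacian(*sdf);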
216 
217 
218 /// @brief Compute the mean curvature of the given grid.
219 /// @return a new grid
220 /// @details When a mask grid is specified, the solution is calculated only in
221 /// the intersection of the mask active topology and the input active topology
222 /// independent of the transforms associated with either grid.
223 template<typename GridType, typename InterruptT> inline
224 typename GridType::Ptr
225 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);
226 
227 template<typename GridType, typename MaskT, typename InterruptT> inline
228 typename GridType::Ptr
229 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
230 
231 template<typename GridType> inline
232 typename GridType::Ptr
233 meanCurvature(const GridType& grid, bool threaded = true)
234 {
235  return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
236 }
237 
238 template<typename GridType, typename MaskT> inline
239 typename GridType::Ptr
240 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
241 {
242  return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
243 }
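// Usage sketch (assumes a caller-provided level set "sdf"):
//
//   openvdb::FloatGrid::Ptr sdf = ...;
//   openvdb::FloatGrid::Ptr curvature = openvdb::tools::meanCurvature(*sdf);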
244 
245 
246 /// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
247 /// @return a new scalar-valued grid with the same numerical precision as the input grid
248 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
249 /// @details When a mask grid is specified, the solution is calculated only in
250 /// the intersection of the mask active topology and the input active topology
251 /// independent of the transforms associated with either grid.
252 template<typename GridType, typename InterruptT> inline
253 typename VectorToScalarConverter<GridType>::Type::Ptr
254 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);
255 
256 template<typename GridType, typename MaskT, typename InterruptT> inline
257 typename VectorToScalarConverter<GridType>::Type::Ptr
258 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
259 
260 template<typename GridType> inline
261 typename VectorToScalarConverter<GridType>::Type::Ptr
262 magnitude(const GridType& grid, bool threaded = true)
263 {
264  return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
265 }
266 
267 template<typename GridType, typename MaskT> inline
268 typename VectorToScalarConverter<GridType>::Type::Ptr
269 magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
270 {
271  return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
272 }
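// Usage sketch (assumes a caller-provided vector field "velocity"):
//
//   openvdb::Vec3SGrid::Ptr velocity = ...;
//   openvdb::FloatGrid::Ptr speed = openvdb::tools::magnitude(*velocity);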
273 
274 
275 /// @brief Normalize the vectors of the given vector-valued grid.
276 /// @return a new vector-valued grid
277 /// @details When a mask grid is specified, the solution is calculated only in
278 /// the intersection of the mask active topology and the input active topology
279 /// independent of the transforms associated with either grid.
280 template<typename GridType, typename InterruptT> inline
281 typename GridType::Ptr
282 normalize(const GridType& grid, bool threaded, InterruptT* interrupt);
283 
284 template<typename GridType, typename MaskT, typename InterruptT> inline
285 typename GridType::Ptr
286 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
287 
288 template<typename GridType> inline
289 typename GridType::Ptr
290 normalize(const GridType& grid, bool threaded = true)
291 {
292  return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
293 }
294 
295 template<typename GridType, typename MaskT> inline
296 typename GridType::Ptr
297 normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
298 {
299  return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
300 }
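// Usage sketch (assumes a caller-provided vector field "grad"; vectors that
// cannot be normalized are set to zero, as implemented further below):
//
//   openvdb::Vec3SGrid::Ptr grad = ...;
//   openvdb::Vec3SGrid::Ptr unitGrad = openvdb::tools::normalize(*grad);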
301 
302 
303 ////////////////////////////////////////
304 
305 
306 namespace gridop {
307 
308 /// @brief ToMaskGrid<T>::Type is the type of a grid having the same
309 /// tree hierarchy as grid type T but a value equal to its active state.
310 /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
311 template<typename GridType>
312 struct ToMaskGrid {
313  typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type;
314 };
315 
316 
317 /// @brief Apply an operator on an input grid to produce an output grid
318 /// with the same topology but a possibly different value type.
319 /// @details To facilitate inlining, this class is also templated on a Map type.
320 ///
321 /// @note This is a helper class and should never be used directly.
322 ///
323 /// @note The current implementation assumes all the input
324 /// values are represented by leaf voxels and not tiles. In the
325 /// future we will expand this class to also handle tile values.
326 template<
327  typename InGridT,
328  typename MaskGridType,
329  typename OutGridT,
330  typename MapT,
331  typename OperatorT,
332  typename InterruptT = util::NullInterrupter>
333 class GridOperator
334 {
335 public:
336  typedef typename OutGridT::TreeType OutTreeT;
337  typedef typename OutTreeT::LeafNodeType OutLeafT;
338  typedef tree::LeafManager<OutTreeT> LeafManagerT;
339 
340  GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
341  InterruptT* interrupt = nullptr):
342  mAcc(grid.getConstAccessor()), mMap(map), mInterrupt(interrupt), mMask(mask)
343  {
344  }
345  GridOperator(const GridOperator&) = default;
346  GridOperator& operator=(const GridOperator&) = default;
347  virtual ~GridOperator() = default;
348 
349  typename OutGridT::Ptr process(bool threaded = true)
350  {
351  if (mInterrupt) mInterrupt->start("Processing grid");
352 
353  // Derive background value of the output grid
354  typename InGridT::TreeType tmp(mAcc.tree().background());
355  typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));
356 
357  // output tree = topology copy of input tree!
358  typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
359 
360 
361  // create grid with output tree and unit transform
362  typename OutGridT::Ptr result(new OutGridT(tree));
363 
364  // Modify the solution area if a mask was supplied.
365  if (mMask) {
366  result->topologyIntersection(*mMask);
367  }
368 
369  // transform of output grid = transform of input grid
370  result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));
371 
372  LeafManagerT leafManager(*tree);
373 
374  if (threaded) {
375  tbb::parallel_for(leafManager.leafRange(), *this);
376  } else {
377  (*this)(leafManager.leafRange());
378  }
379 
380  if (mInterrupt) mInterrupt->end();
381  return result;
382  }
383 
384  /// @brief Iterate sequentially over LeafNodes and voxels in the output
385  /// grid and apply the operator to each active voxel, using a value
386  /// accessor into the input grid.
387  ///
388  /// @note Never call this public method directly - it is called by
389  /// TBB threads only!
390  void operator()(const typename LeafManagerT::LeafRange& range) const
391  {
392  if (util::wasInterrupted(mInterrupt)) tbb::task::self().cancel_group_execution();
393 
394  for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
395  for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
396  value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
397  }
398  }
399  }
400 
401 protected:
402  typedef typename InGridT::ConstAccessor AccessorT;
403  mutable AccessorT mAcc;
404  const MapT& mMap;
405  InterruptT* mInterrupt;
406  const MaskGridType* mMask;
407 }; // end of GridOperator class
408 
409 } // namespace gridop
410 
411 
412 ////////////////////////////////////////
413 
414 
415 /// @brief Compute the closest-point transform of a scalar grid.
416 template<
417  typename InGridT,
418  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
419  typename InterruptT = util::NullInterrupter>
420 class Cpt
421 {
422 public:
423  typedef InGridT InGridType;
424  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
425 
426  Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
427  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
428  {
429  }
430 
431  Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
432  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
433  {
434  }
435 
436  typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
437  {
438  Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
439  processTypedMap(mInputGrid.transform(), functor);
440  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
441  return functor.mOutputGrid;
442  }
443 
444 private:
445  struct IsOpT
446  {
447  template<typename MapT, typename AccT>
448  static typename OutGridType::ValueType
449  result(const MapT& map, const AccT& acc, const Coord& xyz)
450  {
451  return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
452  }
453  };
454  struct WsOpT
455  {
456  template<typename MapT, typename AccT>
457  static typename OutGridType::ValueType
458  result(const MapT& map, const AccT& acc, const Coord& xyz)
459  {
460  return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
461  }
462  };
463  struct Functor
464  {
465  Functor(const InGridType& grid, const MaskGridType* mask,
466  bool threaded, bool worldspace, InterruptT* interrupt)
467  : mThreaded(threaded)
468  , mWorldSpace(worldspace)
469  , mInputGrid(grid)
470  , mInterrupt(interrupt)
471  , mMask(mask)
472  {}
473 
474  template<typename MapT>
475  void operator()(const MapT& map)
476  {
477  if (mWorldSpace) {
478  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
479  op(mInputGrid, mMask, map, mInterrupt);
480  mOutputGrid = op.process(mThreaded); // cache the result
481  } else {
482  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
483  op(mInputGrid, mMask, map, mInterrupt);
484  mOutputGrid = op.process(mThreaded); // cache the result
485  }
486  }
487  const bool mThreaded;
488  const bool mWorldSpace;
489  const InGridType& mInputGrid;
490  typename OutGridType::Ptr mOutputGrid;
491  InterruptT* mInterrupt;
492  const MaskGridType* mMask;
493  };
494  const InGridType& mInputGrid;
495  InterruptT* mInterrupt;
496  const MaskGridType* mMask;
497 }; // end of Cpt class
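// Usage sketch for the functor form (assumes a caller-provided level set "sdf";
// the interrupter is optional and util::NullInterrupter is the default):
//
//   openvdb::util::NullInterrupter interrupter;
//   openvdb::tools::Cpt<openvdb::DoubleGrid> op(*sdf, &interrupter);
//   // Pass useWorldTransform = false to get closest points in index space
//   // (the IsOpT branch above) instead of world space.
//   openvdb::Vec3DGrid::Ptr cptGrid = op.process(/*threaded=*/true, /*useWorldTransform=*/false);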
498 
499 
500 ////////////////////////////////////////
501 
502 
503 /// @brief Compute the curl of a vector grid.
504 template<
505  typename GridT,
506  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
507  typename InterruptT = util::NullInterrupter>
508 class Curl
509 {
510 public:
511  typedef GridT InGridType;
512  typedef GridT OutGridType;
513 
514  Curl(const GridT& grid, InterruptT* interrupt = nullptr):
515  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
516  {
517  }
518 
519  Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
520  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
521  {
522  }
523 
524  typename GridT::Ptr process(bool threaded = true)
525  {
526  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
527  processTypedMap(mInputGrid.transform(), functor);
528  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
529  return functor.mOutputGrid;
530  }
531 
532 private:
533  struct Functor
534  {
535  Functor(const GridT& grid, const MaskGridType* mask,
536  bool threaded, InterruptT* interrupt):
537  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
538 
539  template<typename MapT>
540  void operator()(const MapT& map)
541  {
542  typedef math::Curl<MapT, math::CD_2ND> OpT;
543  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
544  op(mInputGrid, mMask, map, mInterrupt);
545  mOutputGrid = op.process(mThreaded); // cache the result
546  }
547 
548  const bool mThreaded;
549  const GridT& mInputGrid;
550  typename GridT::Ptr mOutputGrid;
551  InterruptT* mInterrupt;
552  const MaskGridType* mMask;
553  }; // Private Functor
554 
555  const GridT& mInputGrid;
556  InterruptT* mInterrupt;
557  const MaskGridType* mMask;
558 }; // end of Curl class
559 
560 
561 ////////////////////////////////////////
562 
563 
564 /// @brief Compute the divergence of a vector grid.
565 template<
566  typename InGridT,
567  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
568  typename InterruptT = util::NullInterrupter>
569 class Divergence
570 {
571 public:
572  typedef InGridT InGridType;
573  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
574 
575  Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
576  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
577  {
578  }
579 
580  Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
581  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
582  {
583  }
584 
585  typename OutGridType::Ptr process(bool threaded = true)
586  {
587  if (mInputGrid.getGridClass() == GRID_STAGGERED) {
588  Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
589  processTypedMap(mInputGrid.transform(), functor);
590  return functor.mOutputGrid;
591  } else {
592  Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
593  processTypedMap(mInputGrid.transform(), functor);
594  return functor.mOutputGrid;
595  }
596  }
597 
598 protected:
599  template<math::DScheme DiffScheme>
600  struct Functor
601  {
602  Functor(const InGridT& grid, const MaskGridType* mask,
603  bool threaded, InterruptT* interrupt):
604  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
605 
606  template<typename MapT>
607  void operator()(const MapT& map)
608  {
609  typedef math::Divergence<MapT, DiffScheme> OpT;
610  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
611  op(mInputGrid, mMask, map, mInterrupt);
612  mOutputGrid = op.process(mThreaded); // cache the result
613  }
614 
615  const bool mThreaded;
616  const InGridT& mInputGrid;
617  typename OutGridType::Ptr mOutputGrid;
618  InterruptT* mInterrupt;
619  const MaskGridType* mMask;
620  }; // Private Functor
621 
622  const InGridT& mInputGrid;
623  InterruptT* mInterrupt;
624  const MaskGridType* mMask;
625 }; // end of Divergence class
626 
627 
628 ////////////////////////////////////////
629 
630 
631 /// @brief Compute the gradient of a scalar grid.
632 template<
633  typename InGridT,
634  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
635  typename InterruptT = util::NullInterrupter>
636 class Gradient
637 {
638 public:
639  typedef InGridT InGridType;
640  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
641 
642  Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
643  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
644  {
645  }
646 
647  Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
648  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
649  {
650  }
651 
652  typename OutGridType::Ptr process(bool threaded = true)
653  {
654  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
655  processTypedMap(mInputGrid.transform(), functor);
656  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
657  return functor.mOutputGrid;
658  }
659 
660 protected:
661  struct Functor
662  {
663  Functor(const InGridT& grid, const MaskGridType* mask,
664  bool threaded, InterruptT* interrupt):
665  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
666 
667  template<typename MapT>
668  void operator()(const MapT& map)
669  {
670  typedef math::Gradient<MapT, math::CD_2ND> OpT;
671  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
672  op(mInputGrid, mMask, map, mInterrupt);
673  mOutputGrid = op.process(mThreaded); // cache the result
674  }
675 
676  const bool mThreaded;
677  const InGridT& mInputGrid;
678  typename OutGridType::Ptr mOutputGrid;
679  InterruptT* mInterrupt;
680  const MaskGridType* mMask;
681  }; // Private Functor
682 
683  const InGridT& mInputGrid;
684  InterruptT* mInterrupt;
685  const MaskGridType* mMask;
686 }; // end of Gradient class
687 
688 
689 ////////////////////////////////////////
690 
691 
692 template<
693  typename GridT,
694  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
695  typename InterruptT = util::NullInterrupter>
696 class Laplacian
697 {
698 public:
699  typedef GridT InGridType;
700  typedef GridT OutGridType;
701 
702  Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
703  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
704  {
705  }
706 
707  Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
708  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
709  {
710  }
711 
712  typename GridT::Ptr process(bool threaded = true)
713  {
714  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
715  processTypedMap(mInputGrid.transform(), functor);
716  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
717  return functor.mOutputGrid;
718  }
719 
720 protected:
721  struct Functor
722  {
723  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
724  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
725 
726  template<typename MapT>
727  void operator()(const MapT& map)
728  {
729  typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
730  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
731  op(mInputGrid, mMask, map);
732  mOutputGrid = op.process(mThreaded); // cache the result
733  }
734 
735  const bool mThreaded;
736  const GridT& mInputGrid;
737  typename GridT::Ptr mOutputGrid;
738  InterruptT* mInterrupt;
739  const MaskGridType* mMask;
740  }; // Private Functor
741 
742  const GridT& mInputGrid;
743  InterruptT* mInterrupt;
744  const MaskGridType* mMask;
745 }; // end of Laplacian class
746 
747 
748 ////////////////////////////////////////
749 
750 
751 template<
752  typename GridT,
753  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
754  typename InterruptT = util::NullInterrupter>
755 class MeanCurvature
756 {
757 public:
758  typedef GridT InGridType;
759  typedef GridT OutGridType;
760 
761  MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
762  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
763  {
764  }
765 
766  MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
767  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
768  {
769  }
770 
771  typename GridT::Ptr process(bool threaded = true)
772  {
773  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
774  processTypedMap(mInputGrid.transform(), functor);
775  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
776  return functor.mOutputGrid;
777  }
778 
779 protected:
780  struct Functor
781  {
782  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
783  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
784 
785  template<typename MapT>
786  void operator()(const MapT& map)
787  {
788  typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
789  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
790  op(mInputGrid, mMask, map);
791  mOutputGrid = op.process(mThreaded); // cache the result
792  }
793 
794  const bool mThreaded;
795  const GridT& mInputGrid;
796  typename GridT::Ptr mOutputGrid;
797  InterruptT* mInterrupt;
798  const MaskGridType* mMask;
799  }; // Private Functor
800 
801  const GridT& mInputGrid;
802  InterruptT* mInterrupt;
803  const MaskGridType* mMask;
804 }; // end of MeanCurvature class
805 
806 
807 ////////////////////////////////////////
808 
809 
810 template<
811  typename InGridT,
812  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
813  typename InterruptT = util::NullInterrupter>
814 class Magnitude
815 {
816 public:
817  typedef InGridT InGridType;
818  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
819 
820  Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
821  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
822  {
823  }
824 
825  Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
826  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
827  {
828  }
829 
830  typename OutGridType::Ptr process(bool threaded = true)
831  {
832  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
833  processTypedMap(mInputGrid.transform(), functor);
834  return functor.mOutputGrid;
835  }
836 
837 protected:
838  struct OpT
839  {
840  template<typename MapT, typename AccT>
841  static typename OutGridType::ValueType
842  result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
843  };
844  struct Functor
845  {
846  Functor(const InGridT& grid, const MaskGridType* mask,
847  bool threaded, InterruptT* interrupt):
848  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
849 
850  template<typename MapT>
851  void operator()(const MapT& map)
852  {
853  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
854  op(mInputGrid, mMask, map);
855  mOutputGrid = op.process(mThreaded); // cache the result
856  }
857 
858  const bool mThreaded;
859  const InGridT& mInputGrid;
860  typename OutGridType::Ptr mOutputGrid;
861  InterruptT* mInterrupt;
862  const MaskGridType* mMask;
863  }; // Private Functor
864 
865  const InGridType& mInputGrid;
866  InterruptT* mInterrupt;
867  const MaskGridType* mMask;
868 }; // end of Magnitude class
869 
870 
871 ////////////////////////////////////////
872 
873 
874 template<
875  typename GridT,
876  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
877  typename InterruptT = util::NullInterrupter>
878 class Normalize
879 {
880 public:
881  typedef GridT InGridType;
882  typedef GridT OutGridType;
883 
884  Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
885  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
886  {
887  }
888 
889  Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
890  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
891  {
892  }
893 
894  typename GridT::Ptr process(bool threaded = true)
895  {
896  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
897  processTypedMap(mInputGrid.transform(), functor);
898  if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
899  const VecType vecType = mInputGrid.getVectorType();
900  if (vecType == VEC_COVARIANT) {
901  outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
902  } else {
903  outGrid->setVectorType(vecType);
904  }
905  }
906  return functor.mOutputGrid;
907  }
908 
909 protected:
910  struct OpT
911  {
912  template<typename MapT, typename AccT>
913  static typename OutGridType::ValueType
914  result(const MapT&, const AccT& acc, const Coord& xyz)
915  {
916  typename OutGridType::ValueType vec = acc.getValue(xyz);
917  if ( !vec.normalize() ) vec.setZero();
918  return vec;
919  }
920  };
921  struct Functor
922  {
923  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
924  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
925 
926  template<typename MapT>
927  void operator()(const MapT& map)
928  {
929  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
930  op(mInputGrid, mMask, map);
931  mOutputGrid = op.process(mThreaded); // cache the result
932  }
933 
934  const bool mThreaded;
935  const GridT& mInputGrid;
936  typename GridT::Ptr mOutputGrid;
937  InterruptT* mInterrupt;
938  const MaskGridType* mMask;
939  }; // Private Functor
940 
941  const GridT& mInputGrid;
942  InterruptT* mInterrupt;
943  const MaskGridType* mMask;
944 }; // end of Normalize class
945 
946 
947 ////////////////////////////////////////
948 
949 
950 template<typename GridType, typename InterruptT> inline
951 typename ScalarToVectorConverter<GridType>::Type::Ptr
952 cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
953 {
954  Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
955  return op.process(threaded);
956 }
957 
958 template<typename GridType, typename MaskT, typename InterruptT> inline
959 typename ScalarToVectorConverter<GridType>::Type::Ptr
960 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
961 {
962  Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
963  return op.process(threaded);
964 }
965 
966 template<typename GridType, typename InterruptT> inline
967 typename GridType::Ptr
968 curl(const GridType& grid, bool threaded, InterruptT* interrupt)
969 {
970  Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
971  return op.process(threaded);
972 }
973 
974 template<typename GridType, typename MaskT, typename InterruptT> inline
975 typename GridType::Ptr
976 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
977 {
978  Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
979  return op.process(threaded);
980 }
981 
982 template<typename GridType, typename InterruptT> inline
983 typename VectorToScalarConverter<GridType>::Type::Ptr
984 divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
985 {
986  Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
987  op(grid, interrupt);
988  return op.process(threaded);
989 }
990 
991 template<typename GridType, typename MaskT, typename InterruptT> inline
992 typename VectorToScalarConverter<GridType>::Type::Ptr
993 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
994 {
995  Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
996  return op.process(threaded);
997 }
998 
999 template<typename GridType, typename InterruptT> inline
1000 typename ScalarToVectorConverter<GridType>::Type::Ptr
1001 gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
1002 {
1003  Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1004  op(grid, interrupt);
1005  return op.process(threaded);
1006 }
1007 
1008 template<typename GridType, typename MaskT, typename InterruptT> inline
1009 typename ScalarToVectorConverter<GridType>::Type::Ptr
1010 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1011 {
1012  Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1013  return op.process(threaded);
1014 }
1015 
1016 template<typename GridType, typename InterruptT> inline
1017 typename GridType::Ptr
1018 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
1019 {
1020  Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1021  op(grid, interrupt);
1022  return op.process(threaded);
1023 }
1024 
1025 template<typename GridType, typename MaskT, typename InterruptT> inline
1026 typename GridType::Ptr
1027 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1028 {
1029  Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1030  return op.process(threaded);
1031 }
1032 
1033 template<typename GridType, typename InterruptT> inline
1034 typename GridType::Ptr
1035 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
1036 {
1037  MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1038  op(grid, interrupt);
1039  return op.process(threaded);
1040 }
1041 
1042 template<typename GridType, typename MaskT, typename InterruptT> inline
1043 typename GridType::Ptr
1044 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1045 {
1046  MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1047  return op.process(threaded);
1048 }
1049 
1050 template<typename GridType, typename InterruptT> inline
1051 typename VectorToScalarConverter<GridType>::Type::Ptr
1052 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
1053 {
1054  Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1055  op(grid, interrupt);
1056  return op.process(threaded);
1057 }
1058 
1059 template<typename GridType, typename MaskT, typename InterruptT> inline
1060 typename VectorToScalarConverter<GridType>::Type::Ptr
1061 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1062 {
1063  Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1064  return op.process(threaded);
1065 }
1066 
1067 template<typename GridType, typename InterruptT> inline
1068 typename GridType::Ptr
1069 normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
1070 {
1071  Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1072  op(grid, interrupt);
1073  return op.process(threaded);
1074 }
1075 
1076 template<typename GridType, typename MaskT, typename InterruptT> inline
1077 typename GridType::Ptr
1078 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1079 {
1080  Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1081  return op.process(threaded);
1082 }
1083 
1084 } // namespace tools
1085 } // namespace OPENVDB_VERSION_NAME
1086 } // namespace openvdb
1087 
1088 #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
1089 
1090 // Copyright (c) 2012-2017 DreamWorks Animation LLC
1091 // All rights reserved. This software is distributed under the
1092 // Mozilla Public License 2.0 ( http://www.mozilla.org/MPL/2.0/ )