GU_Copy2.C
1 /*
2  * Copyright (c) 2022
3  * Side Effects Software Inc. All rights reserved.
4  *
5  * Redistribution and use of Houdini Development Kit samples in source and
6  * binary forms, with or without modification, are permitted provided that the
7  * following conditions are met:
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  * 2. The name of Side Effects Software may not be used to endorse or
11  * promote products derived from this software without specific prior
12  * written permission.
13  *
14  * THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE `AS IS' AND ANY EXPRESS
15  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17  * NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
19  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
20  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
23  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  *----------------------------------------------------------------------------
26  * Definitions of functions and structures for copying geometry.
27  */
28 
29 #include "GU_Copy2.h"
30 
31 #include "GEO_BuildPrimitives.h"
32 
33 #include <GU/GU_Detail.h>
34 #include <GU/GU_DetailHandle.h>
35 #include <GU/GU_PackedGeometry.h>
36 #include <GU/GU_PrimPacked.h>
37 #include <GEO/GEO_Normal.h>
38 #include <GEO/GEO_PackedTypes.h>
40 #include <GA/GA_ATINumeric.h>
41 #include <GA/GA_ATITopology.h>
42 #include <GA/GA_Attribute.h>
43 #include <GA/GA_AttributeDict.h>
44 #include <GA/GA_AttributeSet.h>
46 #include <GA/GA_Edge.h>
47 #include <GA/GA_EdgeGroup.h>
48 #include <GA/GA_ElementGroup.h>
50 #include <GA/GA_Handle.h>
51 #include <GA/GA_Iterator.h>
52 #include <GA/GA_OffsetList.h>
53 #include <GA/GA_PageArray.h>
54 #include <GA/GA_PolyCounts.h>
55 #include <GA/GA_Primitive.h>
56 #include <GA/GA_PrimitiveTypes.h>
57 #include <GA/GA_Range.h>
58 #include <GA/GA_RTIOffsetList.h>
59 #include <GA/GA_SplittableRange.h>
60 #include <GA/GA_Types.h>
61 #include <UT/UT_Array.h>
62 #include <UT/UT_ArrayStringMap.h>
63 #include <UT/UT_Assert.h>
64 #include <UT/UT_Matrix3.h>
65 #include <UT/UT_Matrix4.h>
66 #include <UT/UT_PageArray.h>
67 #include <UT/UT_PageArrayImpl.h>
68 #include <UT/UT_Quaternion.h>
69 #include <UT/UT_Vector3.h>
70 #include <UT/UT_ParallelUtil.h>
71 #include <UT/UT_SmallArray.h>
72 #include <UT/UT_StringHolder.h>
73 #include <UT/UT_TaskGroup.h>
74 #include <UT/UT_UniquePtr.h>
75 #include <UT/UT_VectorTypes.h>
76 #include <SYS/SYS_StaticAssert.h>
77 #include <SYS/SYS_Types.h>
78 
79 #include <algorithm> // For std::upper_bound
80 #include <utility> // For std::pair
81 
82 namespace HDK_Sample {
83 
84 namespace GU_Copy {
85 
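// Point and vertex attributes share a name space, so a point attribute can
// conflict with a vertex attribute of the same name (and vice versa); this
// returns the owner to check for that conflict, or GA_ATTRIB_INVALID.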
87 static GA_AttributeOwner
88 guConflictAttribOwner(GA_AttributeOwner owner)
89 {
90  if (owner == GA_ATTRIB_POINT)
91  return GA_ATTRIB_VERTEX;
92  if (owner == GA_ATTRIB_VERTEX)
93  return GA_ATTRIB_POINT;
94  return GA_ATTRIB_INVALID;
95 }
96 
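// Returns the type info to transform the attribute as, or GA_TYPE_VOID if it
// doesn't need transforming, e.g. when the copies are only translated and the
// attribute's type ignores translation.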
97 static GA_TypeInfo
98 guGetTransformTypeInfo(const GA_ATINumeric *attrib, const bool has_transform_matrices)
99 {
100  int tuple_size = attrib->getTupleSize();
101  if (tuple_size < 3 || !attrib->needsTransform())
102  return GA_TYPE_VOID;
103 
104  GA_TypeInfo attrib_type_info = attrib->getTypeInfo();
105  if (tuple_size == 3)
106  {
107  // Vectors and normals don't react to translations.
108  if (attrib_type_info == GA_TYPE_POINT ||
109  (has_transform_matrices && (attrib_type_info == GA_TYPE_VECTOR || attrib_type_info == GA_TYPE_NORMAL)))
110  {
111  return attrib_type_info;
112  }
113  }
114  else if (tuple_size == 4)
115  {
116  // Quaternions don't react to translations.
117  if ((has_transform_matrices && attrib_type_info == GA_TYPE_QUATERNION) || attrib_type_info == GA_TYPE_HPOINT)
118  return attrib_type_info;
119  }
120  else if (tuple_size == 9)
121  {
122  // 3x3 matrices don't react to translations.
123  if (has_transform_matrices && attrib_type_info == GA_TYPE_TRANSFORM)
124  return attrib_type_info;
125  }
126  else if (tuple_size == 16)
127  {
128  if (attrib_type_info == GA_TYPE_TRANSFORM)
129  return attrib_type_info;
130  }
131  return GA_TYPE_VOID;
132 }
133 
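// Deletes attributes, groups, and edge groups left in output_geo by a previous
// cook when they are no longer present in source (or requested from target),
// or when their storage no longer matches, and clears their cached data IDs.
// P can't be deleted, so a storage mismatch on P is fixed with setStorage() instead.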
134 void
136  GU_Detail *output_geo,
137  const GU_Detail *source,
138  const GU_Detail *target,
139  GU_CopyToPointsCache *cache,
140  const GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info,
141  const GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info)
142 {
143  // Remove attributes from previous cook that are not present in source
144  // or that mismatch the type in source.
145  UT_SmallArray<GA_Attribute*> attribs_to_delete;
146  for (int owneri = 0; owneri < GA_ATTRIB_OWNER_N; ++owneri)
147  {
148  attribs_to_delete.clear();
149  bool pos_storage_mismatch = false;
150  GA_AttributeOwner owner = GA_AttributeOwner(owneri);
151  output_geo->getAttributeDict(owner).forEachAttribute([source,owner,
152  &attribs_to_delete,output_geo,&pos_storage_mismatch,
153  target_group_info,target_attrib_info,target,cache](GA_Attribute *attrib)
154  {
155  const GA_AttributeScope scope = attrib->getScope();
156  const bool is_group = (scope == GA_SCOPE_GROUP);
157  if (scope == GA_SCOPE_PRIVATE ||
158  (is_group && UTverify_cast<GA_ElementGroup*>(attrib)->isInternal()))
159  {
160  // Don't delete topology attributes.
161  if (!GA_ATITopology::isType(attrib))
162  attribs_to_delete.append(attrib);
163  return;
164  }
165  const UT_StringHolder &name = attrib->getName();
166  const GA_Attribute *source_attrib = source ? source->findAttribute(owner, scope, name) : nullptr;
167  const GU_CopyToPointsCache::TargetAttribInfoMap *target_info = is_group ? target_group_info : target_attrib_info;
168  GU_CopyToPointsCache::TargetAttribInfoMap::const_iterator it;
169  if (target_info)
170  it = target_info->find(name);
171  if (!target_info || it.atEnd() || it->second.myCopyTo != owner)
172  {
173  if (!source_attrib || !source_attrib->matchesStorage(attrib))
174  {
175  // Be careful with P, since we can't delete it.
176  if (owner == GA_ATTRIB_POINT && attrib == output_geo->getP())
177  pos_storage_mismatch = (source_attrib != nullptr);
178  else
179  attribs_to_delete.append(attrib);
180  }
181  else // if (source) This is a redundant check, since if !source, source_attrib is null.
182  {
183  // If there was previously not a source attribute, this was
184  // a target attribute, so to reduce the risk of data ID
185  // havoc, since this is an uncommon case, we just delete.
186  // NOTE: The check for non-null source is because the source
187  // data IDs are irrelevant if they were just copied
188  // into a packed primitive.
189  // NOTE: Don't delete P. (It won't be in the cache on the first cook.)
190  auto *source_dataids = is_group ? cache->mySourceGroupDataIDs : cache->mySourceAttribDataIDs;
191  if (!source_dataids[owner].contains(name) && attrib != output_geo->getP())
192  {
193  attribs_to_delete.append(attrib);
194  }
195  }
196  }
197  else if (it->second.myCombineMethod == GU_CopyToPointsCache::AttribCombineMethod::COPY || !source_attrib)
198  {
199  // NOTE: P is never applied from target, so we don't need the
200  // special case here or in the next case.
201  const GA_Attribute *target_attrib = target->findAttribute(GA_ATTRIB_POINT, scope, name);
202  UT_ASSERT(target_attrib);
203  if (!target_attrib->matchesStorage(attrib))
204  attribs_to_delete.append(attrib);
205  else if (source)
206  {
207  // If we previously cloned from a source attribute,
208  // (we're now going to be cloning from a target attribute),
209  // to avoid data ID havoc, we delete.
210  // Without this case, there were problems where a target
211  // attribute being combined with a source attribute
212  // wasn't getting its data ID bumped or updating
213  // properly when the source attribute was no longer
214  // in the source on the next cook.
215  // NOTE: The check for non-null source is because the source
216  // data IDs are irrelevant if they were just copied
217  // into a packed primitive.
218  auto *source_dataids = is_group ? cache->mySourceGroupDataIDs : cache->mySourceAttribDataIDs;
219  if (source_dataids[owner].contains(name))
220  {
221  attribs_to_delete.append(attrib);
222  }
223  }
224  }
225  else if (!source_attrib->matchesStorage(attrib))
226  {
227  attribs_to_delete.append(attrib);
228  }
229  else // if (source) This is a redundant check, since if !source, source_attrib is null.
230  {
231  // If there was previously not a source attribute, this was
232  // a target attribute, so to reduce the risk of data ID
233  // havoc, since this is an uncommon case, we just delete.
234  // NOTE: The check for non-null source is because the source
235  // data IDs are irrelevant if they were just copied
236  // into a packed primitive.
237  auto *source_dataids = is_group ? cache->mySourceGroupDataIDs : cache->mySourceAttribDataIDs;
238  if (!source_dataids[owner].contains(name))
239  {
240  attribs_to_delete.append(attrib);
241  }
242  }
243  });
244 
245  for (exint i = 0, n = attribs_to_delete.size(); i < n; ++i)
246  {
247  const UT_StringHolder &name = attribs_to_delete[i]->getName();
248 
249  // Remove it from any data ID caches before deleting it,
250  // else we'll need to get a full UT_StringHolder to keep
251  // the name in scope.
252  if (attribs_to_delete[i]->getScope() == GA_SCOPE_GROUP)
253  {
254  cache->mySourceGroupDataIDs[owneri].erase(name);
255  cache->myTargetGroupInfo.erase(name);
256  output_geo->destroyElementGroup(owner, name);
257  }
258  else
259  {
260  cache->mySourceAttribDataIDs[owneri].erase(name);
261  cache->myTargetAttribInfo.erase(name);
262  output_geo->destroyAttribute(owner, name);
263  }
264 
265  }
266 
267  if (pos_storage_mismatch)
268  {
269  // Separate handling for P, since we can't delete it.
270  UTverify_cast<GA_ATINumeric*>(output_geo->getP())->setStorage(
271  UTverify_cast<const GA_ATINumeric*>(source->getP())->getStorage());
272 
274  }
275  }
276 
277  // Remove edge groups from previous cook that are not present in source.
278  UT_SmallArray<GA_EdgeGroup*> edgegroups_to_delete;
279  for (auto it = output_geo->edgeGroups().beginTraverse(); !it.atEnd(); ++it)
280  {
281  GA_EdgeGroup *edgegroup = it.group();
282  if (edgegroup->isInternal())
283  {
284  edgegroups_to_delete.append(edgegroup);
285  continue;
286  }
287  const GA_EdgeGroup *source_edgegroup = source ? source->findEdgeGroup(edgegroup->getName()) : nullptr;
288  if (!source_edgegroup || source_edgegroup->isInternal())
289  {
290  edgegroups_to_delete.append(edgegroup);
291  }
292  }
293  for (exint i = 0, n = edgegroups_to_delete.size(); i < n; ++i)
294  {
295  const UT_StringHolder &name = edgegroups_to_delete[i]->getName();
296  output_geo->destroyEdgeGroup(name);
297  cache->mySourceEdgeGroupDataIDs.erase(name);
298  }
299 }
300 
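// Caches a transform for each target point: just a translate when only P is
// used, otherwise a 3x3 matrix plus translate built from the point instancing
// attributes (optionally using an implicit N computed from the target
// primitives). The cache is double-precision and is only recomputed when the
// relevant data IDs indicate that the target attributes have changed.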
301 void
303  GU_PointTransformCache *cache,
304  const GA_OffsetListRef &target_point_list,
305  const GU_Detail *target,
306  const bool transform_using_more_than_P,
307  const bool allow_implicit_N,
308  bool &transforms_changed)
309 {
310  const exint num_target_points = target_point_list.size();
311  if (cache->myTransformCacheSize > 0 && num_target_points != cache->myTransformCacheSize)
312  {
313  UT_ASSERT(transforms_changed);
314  transforms_changed = true;
315  cache->clearTransformArrays();
316  }
317  if (num_target_points == 0)
318  {
319  UT_ASSERT(cache->myTransformCacheSize == 0);
320  return;
321  }
322 
323  GA_AttributeInstanceMatrix target_transform_attribs;
324  bool using_implicit_N = false;
325  if (transform_using_more_than_P)
326  {
327  target_transform_attribs.initialize(target->pointAttribs());
328  if (!target_transform_attribs.getN().isValid() && allow_implicit_N && target->getNumPrimitives() != 0)
329  {
330  using_implicit_N = true;
331  }
332  }
333  transforms_changed |= (using_implicit_N != cache->myTargetUsingImplicitN);
334  cache->myTargetUsingImplicitN = using_implicit_N;
335 
336  if (using_implicit_N)
337  {
338  // Implicit normals depend on the primitive list and the topology,
339  // (also depend on P, checked below), so check data IDs.
340  GA_DataId primlist_dataid = target->getPrimitiveList().getDataId();
341  GA_DataId topology_dataid = target->getTopology().getDataId();
342  transforms_changed |=
343  primlist_dataid != cache->myTargetPrimListDataID ||
344  topology_dataid != cache->myTargetTopologyDataID;
345  cache->myTargetPrimListDataID = primlist_dataid;
346  cache->myTargetTopologyDataID = topology_dataid;
347  }
348  else
349  {
352  }
353 
354  GA_DataId new_transform_data_ids[GA_AttributeInstanceMatrix::theNumAttribs];
355  target_transform_attribs.getDataIds(new_transform_data_ids);
356  transforms_changed |=
357  target->getP()->getDataId() == GA_INVALID_DATAID ||
359  target->getP()->getDataId() != cache->myTargetPDataID;
360  if (!transforms_changed)
361  {
362  for (int i = 0; i < GA_AttributeInstanceMatrix::theNumAttribs; ++i)
363  {
364  // NOTE: GA_AttributeInstanceMatrix uses a different value to
365  // indicate that an attribute is not present, so this
366  // check still supports missing attributes.
367  if (new_transform_data_ids[i] == GA_INVALID_DATAID ||
369  new_transform_data_ids[i] != cache->myTargetTransformDataIDs[i])
370  {
371  transforms_changed = true;
372  break;
373  }
374  }
375  }
376  if (transforms_changed)
377  {
378  //cache->myTransforming = true;
379  memcpy(cache->myTargetTransformDataIDs, new_transform_data_ids, GA_AttributeInstanceMatrix::theNumAttribs*sizeof(GA_DataId));
380  cache->myTargetPDataID = target->getP()->getDataId();
381 
382  // We always cache the full transform in double-precision,
383  // in case on future cooks, it's needed for new source attributes,
384  // when !transforms_changed.
385  bool onlyP = !target_transform_attribs.hasAnyAttribs() && !using_implicit_N;
386  if (!onlyP && !cache->myTransformMatrices3D)
387  cache->myTransformMatrices3D.reset(new UT_Matrix3D[num_target_points]);
388  if (!cache->myTransformTranslates3D)
389  cache->myTransformTranslates3D.reset(new UT_Vector3D[num_target_points]);
390  cache->myTransformCacheSize = num_target_points;
391 
392  // Recompute and cache needed transforms
393  const GA_ROHandleV3D targetP(target->getP());
394  if (onlyP)
395  {
396  cache->myTransformMatrices3D.reset();
397  UT_Vector3D *translates = cache->myTransformTranslates3D.get();
398  UT_ASSERT(targetP.isValid());
399  auto &&functor = [&target_point_list,&targetP,translates](const UT_BlockedRange<exint> &r)
400  {
401  for (exint i = r.begin(), end = r.end(); i < end; ++i)
402  {
403  GA_Offset target_ptoff = target_point_list[i];
404  translates[i] = targetP.get(target_ptoff);
405  }
406  };
407  if (num_target_points > 1024)
408  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 512);
409  else
410  functor(UT_BlockedRange<exint>(0, num_target_points));
411  }
412  else
413  {
414  // Implicit N doesn't need to be in cache, since we cache
415  // the transforms themselves.
416  // If things like pscale are changing and not P or the primitive
417  // list or topology, it might be worth caching the implicit N,
418  // but that's probably an uncommon edge case to optimize for.
419  GA_AttributeUPtr implicitN;
420  if (using_implicit_N)
421  {
422  implicitN = target->createDetachedTupleAttribute(GA_ATTRIB_POINT, GA_STORE_REAL32, 3);
423  GA_RWHandleV3 implicitN_h(implicitN.get());
424 
425  // Compute implicit normals based on P and the primitives in target.
426  GEOcomputeNormals(*target, implicitN_h);
427  target_transform_attribs.setN(implicitN.get());
428  }
429 
430  UT_Matrix3D *matrices = cache->myTransformMatrices3D.get();
431  UT_Vector3D *translates = cache->myTransformTranslates3D.get();
432  UT_ASSERT(targetP.isValid());
433  auto &&functor = [&target_transform_attribs,&target_point_list,&targetP,matrices,translates](const UT_BlockedRange<exint> &r)
434  {
435  for (exint i = r.begin(), end = r.end(); i < end; ++i)
436  {
437  GA_Offset target_ptoff = target_point_list[i];
438  UT_Matrix4D transform;
439  target_transform_attribs.getMatrix(transform, targetP.get(target_ptoff), target_ptoff);
440 
441  // Save transform in matrices and translates
442  matrices[i] = UT_Matrix3D(transform);
443  transform.getTranslates(translates[i]);
444  }
445  };
446  if (num_target_points > 512)
447  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 256);
448  else
449  functor(UT_BlockedRange<exint>(0, num_target_points));
450  }
451  }
452 }
453 
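// Creates attributes, element groups, and edge groups on output_geo, cloned
// from source and (where requested by target_attrib_info/target_group_info)
// from target, counting them per owner and recording which cached transform
// variants will be needed to transform the numeric attributes copied from source.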
454 void
456  GU_Detail *output_geo,
457  const GU_Detail *source,
458  exint *num_source_attribs,
459  bool has_transform_matrices,
460  bool *needed_transforms,
461  const GU_Detail *target,
462  GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info,
463  GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info,
464  exint *num_target_attribs)
465 {
466  using AttribCombineMethod = GU_CopyToPointsCache::AttribCombineMethod;
467 
468  for (int owneri = source ? 0 : GA_ATTRIB_OWNER_N; owneri < GA_ATTRIB_OWNER_N; ++owneri)
469  {
470  GA_AttributeOwner owner = GA_AttributeOwner(owneri);
471  source->getAttributeDict(owner).forEachAttribute(
472  [owner,output_geo,num_source_attribs,
473  needed_transforms,has_transform_matrices,
474  target_attrib_info](const GA_Attribute *source_attrib)
475  {
476  const UT_StringHolder &name = source_attrib->getName();
477  if (target_attrib_info)
478  {
479  // If copying from target, skip, since it'll be added from target.
480  // Target attributes take precedence over source attributes,
481  // because users can always remove the target attributes from
482  // the pattern parameters, but they don't have control over the
483  // source attributes in this node.
484  // NOTE: Point and vertex attributes of the same name are not allowed,
485  // so we check for both.
486  auto it = target_attrib_info->find(name);
487  if (!it.atEnd() && (it->second.myCopyTo == owner || it->second.myCopyTo == guConflictAttribOwner(owner)) &&
488  it->second.myCombineMethod == AttribCombineMethod::COPY)
489  return;
490  }
491 
492  GA_Attribute *dest_attrib = output_geo->findAttribute(owner, name);
493  UT_ASSERT(!dest_attrib || dest_attrib->matchesStorage(source_attrib));
494  if (!dest_attrib)
495  {
496  dest_attrib = output_geo->getAttributes().cloneAttribute(owner, name, GA_AttributeSet::namevalidcertificate(), *source_attrib,
497  true, (owner == GA_ATTRIB_DETAIL) ? GA_DATA_ID_CLONE : GA_DATA_ID_BUMP);
498  }
499 
500  // Copy detail attributes immediately
501  if (owner == GA_ATTRIB_DETAIL)
502  {
503  dest_attrib->replace(*source_attrib);
504  }
505  else
506  {
507  if (num_source_attribs)
508  ++num_source_attribs[owner];
509 
510  // Just copy non-storage metadata for the rest; (storage type already matches).
511  dest_attrib->copyNonStorageMetadata(source_attrib);
512 
513  if (!needed_transforms)
514  return;
515 
516  GA_ATINumeric *dest_numeric = GA_ATINumeric::cast(dest_attrib);
517  if (dest_numeric)
518  {
519  GA_TypeInfo transform_type = guGetTransformTypeInfo(dest_numeric, has_transform_matrices);
520  if (transform_type != GA_TYPE_VOID)
521  {
522  using namespace NeededTransforms;
523  bool double_precision = (dest_numeric->getStorage() == GA_STORE_REAL64);
524  if (transform_type == GA_TYPE_POINT || transform_type == GA_TYPE_HPOINT)
525  {
526  needed_transforms[matrix3f] |= !double_precision;
527  needed_transforms[translate3f] |= !double_precision;
528  }
529  else if (transform_type == GA_TYPE_VECTOR)
530  {
531  needed_transforms[matrix3f] |= !double_precision;
532  }
533  else if (transform_type == GA_TYPE_NORMAL)
534  {
535  needed_transforms[inverse3d] |= double_precision;
536  needed_transforms[inverse3f] |= !double_precision;
537  }
538  else if (transform_type == GA_TYPE_QUATERNION)
539  {
540  needed_transforms[quaterniond] |= double_precision;
541  needed_transforms[quaternionf] |= !double_precision;
542  }
543  else if (transform_type == GA_TYPE_TRANSFORM)
544  {
545  needed_transforms[matrix3f] |= !double_precision;
546  needed_transforms[translate3f] |= !double_precision && (dest_numeric->getTupleSize() == 16);
547  }
548  }
549  }
550  }
551  });
552 
553  if (owner != GA_ATTRIB_DETAIL)
554  {
555  // Now for the element groups
556  for (auto it = source->getElementGroupTable(owner).beginTraverse(); !it.atEnd(); ++it)
557  {
558  const GA_ElementGroup *source_group = it.group();
559  if (source_group->isInternal())
560  continue;
561 
562  if (target_group_info)
563  {
564  // If copying from target, skip, since it'll be added from target.
565  auto it = target_group_info->find(source_group->getName());
566  if (!it.atEnd() && it->second.myCopyTo == owner)
567  continue;
568  }
569 
570  GA_ElementGroup *dest_group = output_geo->findElementGroup(owner, source_group->getName());
571  if (!dest_group)
572  {
573  dest_group = UTverify_cast<GA_ElementGroup *>(output_geo->getElementGroupTable(owner).newGroup(source_group->getName()));
574  UT_ASSERT_MSG(!dest_group->isOrdered(), "Writing to groups in parallel requires unordered groups, and ordering isn't as useful for copied geometry");
575  }
576  else if (dest_group->isOrdered())
577  {
578  dest_group->clearOrdered();
579  }
580  if (num_source_attribs)
581  ++num_source_attribs[owner];
582  }
583  }
584  }
585 
586  // Add edge groups from source that are not in output_geo.
587  if (source)
588  {
589  for (auto it = source->edgeGroups().beginTraverse(); !it.atEnd(); ++it)
590  {
591  const GA_EdgeGroup *source_group = it.group();
592  if (source_group->isInternal())
593  continue;
594  GA_EdgeGroup *dest_group = output_geo->findEdgeGroup(source_group->getName());
595  if (!dest_group)
596  {
597  dest_group = UTverify_cast<GA_EdgeGroup *>(output_geo->edgeGroups().newGroup(source_group->getName()));
598  }
599  }
600  }
601 
602  if (!target || !target_attrib_info || !target_group_info)
603  return;
604 
605  for (auto it = target_attrib_info->begin(); !it.atEnd(); ++it)
606  {
607  const UT_StringHolder &name = it->first;
608  GA_AttributeOwner output_owner = it->second.myCopyTo;
609  AttribCombineMethod method = it->second.myCombineMethod;
610  if (source && method != AttribCombineMethod::COPY)
611  {
612  // If source has the attribute and the method isn't copying from target,
613  // we've already cloned the attribute above, so skip.
614  const GA_Attribute *source_attrib = source->findAttribute(output_owner, name);
615  if (source_attrib)
616  {
617  if (num_target_attribs)
618  ++num_target_attribs[output_owner];
619  continue;
620  }
621  // NOTE: Point and vertex attributes of the same name are not allowed,
622  // so we check for both.
623  GA_AttributeOwner conflict_owner = guConflictAttribOwner(output_owner);
624  if (conflict_owner != GA_ATTRIB_INVALID)
625  {
626  source_attrib = source->findAttribute(conflict_owner, name);
627  if (source_attrib)
628  {
629  // For simplicity, instead of trying to promote to the specified target type,
630  // just stick with the type in source.
631  // TODO: Maybe for completeness in the future we should promote,
632  // but the partial cooking case checking gets very complicated,
633  // so I'm not adding that right now.
634  it->second.myCopyTo = conflict_owner;
635  if (num_target_attribs)
636  ++num_target_attribs[conflict_owner];
637  continue;
638  }
639  }
640  }
641 
642  const GA_Attribute *target_attrib = target->findAttribute(GA_ATTRIB_POINT, name);
643  GA_Attribute *dest_attrib = output_geo->findAttribute(output_owner, name);
644  UT_ASSERT(!dest_attrib || dest_attrib->matchesStorage(target_attrib));
645  if (!dest_attrib)
646  {
647  dest_attrib = output_geo->getAttributes().cloneAttribute(output_owner, name, GA_AttributeSet::namevalidcertificate(), *target_attrib,
648  true, GA_DATA_ID_BUMP);
649 
650  // We want multiplying with no attribute to be equivalent to copying,
651  // and adding to no attribute is automatically equivalent to copying,
652  // so it's easiest to just change it to copy here.
653  // Subtracting is different, but equivalent to subtracting from zero,
654  // so we can leave it as is.
655  if (method == AttribCombineMethod::MULTIPLY ||
656  method == AttribCombineMethod::ADD)
657  {
658  it->second.myCombineMethod = AttribCombineMethod::COPY;
659  }
660  }
661  UT_ASSERT(dest_attrib != nullptr);
662 
663  // Just copy non-storage metadata for the rest; (storage type already matches).
664  dest_attrib->copyNonStorageMetadata(target_attrib);
665 
666  if (num_target_attribs)
667  ++num_target_attribs[output_owner];
668  }
669 
670  for (auto it = target_group_info->begin(); !it.atEnd(); ++it)
671  {
672  const UT_StringHolder &name = it->first;
673  GA_AttributeOwner output_owner = it->second.myCopyTo;
674  AttribCombineMethod method = it->second.myCombineMethod;
675  if (source && method != AttribCombineMethod::COPY)
676  {
677  // If source has the group and the method isn't copying from target,
678  // we've already cloned the group above, so skip.
679  const GA_ElementGroup *source_group = source->findElementGroup(output_owner, name);
680  if (source_group)
681  {
682  if (num_target_attribs)
683  ++num_target_attribs[output_owner];
684  continue;
685  }
686  // NOTE: Point and vertex attributes of the same name are not allowed,
687  // so we check for both.
688  GA_AttributeOwner conflict_owner = guConflictAttribOwner(output_owner);
689  if (conflict_owner != GA_ATTRIB_INVALID)
690  {
691  source_group = source->findElementGroup(conflict_owner, name);
692  if (source_group)
693  {
694  // For simplicity, instead of trying to promote to the specified target type,
695  // just stick with the type in source.
696  // TODO: Maybe for completeness in the future we should promote,
697  // but the partial cooking case checking gets very complicated,
698  // so I'm not adding that right now.
699  it->second.myCopyTo = conflict_owner;
700  if (num_target_attribs)
701  ++num_target_attribs[conflict_owner];
702  continue;
703  }
704  }
705  }
706 
707  UT_ASSERT(target->findPointGroup(name) != nullptr);
708  GA_ElementGroup *dest_group = output_geo->findElementGroup(output_owner, name);
709  if (!dest_group)
710  {
711  dest_group = UTverify_cast<GA_ElementGroup *>(output_geo->getElementGroupTable(output_owner).newGroup(name));
712 
713  // We want intersecting with no group to be equivalent to copying,
714  // and unioning with no group is automatically equivalent to copying,
715  // so it's easiest to just change it to copy here.
716  if (method == AttribCombineMethod::MULTIPLY ||
717  method == AttribCombineMethod::ADD)
718  {
719  it->second.myCombineMethod = AttribCombineMethod::COPY;
720  }
721  // Subtracting from no group is equivalent to doing nothing,
722  // (apart from ensuring that the group exists).
723  if (method == AttribCombineMethod::SUBTRACT)
724  {
725  it->second.myCombineMethod = AttribCombineMethod::NONE;
726  }
727  }
728  UT_ASSERT_MSG(!dest_group->isOrdered(), "Writing to groups in parallel requires unordered groups, and ordering isn't as useful for copied geometry");
729 
730  if (num_target_attribs)
731  ++num_target_attribs[output_owner];
732  }
733 }
734 
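// Fills in the derived transform arrays requested in needed_transforms
// (single-precision translates and matrices, matrix inverses for normals, and
// quaternions), recomputing an array only when the transforms changed or it
// hasn't been cached yet.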
735 void
737  GU_PointTransformCache *cache,
738  exint num_target_points,
739  bool transforms_changed,
740  const bool needed_transforms[NeededTransforms::num_needed_transforms])
741 {
742  using namespace NeededTransforms;
743 
744  bool has_transform_matrices = (cache->myTransformMatrices3D.get() != nullptr);
745  if (needed_transforms[translate3f] && num_target_points > 0)
746  {
747  bool compute = transforms_changed;
748  if (!cache->myTransformTranslates3F)
749  {
750  cache->myTransformTranslates3F.reset(new UT_Vector3F[num_target_points]);
751  compute = true;
752  }
753  if (compute)
754  {
755  const UT_Vector3D *vector3d = cache->myTransformTranslates3D.get();
756  UT_Vector3F *vector3f = cache->myTransformTranslates3F.get();
757  auto &&functor = [vector3d,vector3f](const UT_BlockedRange<exint> &r)
758  {
759  for (exint i = r.begin(), end = r.end(); i < end; ++i)
760  {
761  vector3f[i] = UT_Vector3F(vector3d[i]);
762  }
763  };
764  if (num_target_points > 1024)
765  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 512);
766  else
767  functor(UT_BlockedRange<exint>(0, num_target_points));
768  }
769  }
770 
771  if (!has_transform_matrices)
772  {
773  // Only translates, so no matrices, inverses, or quaternions
774  cache->myTransformMatrices3F.reset();
775  cache->myTransformInverse3F.reset();
776  cache->myTransformInverse3D.reset();
777  cache->myTransformQuaternionsF.reset();
778  cache->myTransformQuaternionsD.reset();
779  return;
780  }
781  if (num_target_points <= 0)
782  return;
783 
784  if (needed_transforms[matrix3f])
785  {
786  bool compute = transforms_changed;
787  if (!cache->myTransformMatrices3F)
788  {
789  cache->myTransformMatrices3F.reset(new UT_Matrix3F[num_target_points]);
790  compute = true;
791  }
792  if (compute)
793  {
794  const UT_Matrix3D *matrices3d = cache->myTransformMatrices3D.get();
795  UT_Matrix3F *matrices3f = cache->myTransformMatrices3F.get();
796  auto &&functor = [matrices3d,matrices3f](const UT_BlockedRange<exint> &r)
797  {
798  for (exint i = r.begin(), end = r.end(); i < end; ++i)
799  {
800  matrices3f[i] = UT_Matrix3F(matrices3d[i]);
801  }
802  };
803  if (num_target_points > 1024)
804  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 512);
805  else
806  functor(UT_BlockedRange<exint>(0, num_target_points));
807  }
808  }
809  if (needed_transforms[inverse3f] || needed_transforms[inverse3d])
810  {
811  bool compute = transforms_changed;
812  if (needed_transforms[inverse3f] && !cache->myTransformInverse3F)
813  {
814  cache->myTransformInverse3F.reset(new UT_Matrix3F[num_target_points]);
815  compute = true;
816  }
817  if (needed_transforms[inverse3d] && !cache->myTransformInverse3D)
818  {
819  cache->myTransformInverse3D.reset(new UT_Matrix3D[num_target_points]);
820  compute = true;
821  }
822  if (compute)
823  {
824  const UT_Matrix3D *matrices3d = cache->myTransformMatrices3D.get();
825  UT_Matrix3D *inverses3d = cache->myTransformInverse3D.get();
826  UT_Matrix3F *inverses3f = cache->myTransformInverse3F.get();
827  auto &&functor = [matrices3d,inverses3d,inverses3f](const UT_BlockedRange<exint> &r)
828  {
829  for (exint i = r.begin(), end = r.end(); i < end; ++i)
830  {
831  UT_Matrix3D inverse;
832  auto singular = matrices3d[i].invert(inverse);
833  if (singular)
834  {
835  // FIXME: Check if 1, 2, or 3 zero dimensions!!!
836  inverse.identity();
837  }
838 
839  // This determinant check and scale are from GA_AttributeTransformer.
840  // They're presumably so that normals get flipped when applying
841  // a negative scale. I'm not sure whether that's ideal behaviour,
842  // but it's consistent with previous behaviour, so I'm sticking with it.
843  if (matrices3d[i].determinant() < 0)
844  inverse.scale(-1, -1, -1);
845 
846  if (inverses3d)
847  inverses3d[i] = inverse;
848  if (inverses3f)
849  inverses3f[i] = UT_Matrix3F(inverse);
850  }
851  };
852  if (num_target_points > 512)
853  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 256);
854  else
855  functor(UT_BlockedRange<exint>(0, num_target_points));
856  }
857  }
858  if (needed_transforms[quaternionf] || needed_transforms[quaterniond])
859  {
860  bool compute = transforms_changed;
861  if (needed_transforms[quaternionf] && !cache->myTransformQuaternionsF)
862  {
863  cache->myTransformQuaternionsF.reset(new UT_QuaternionF[num_target_points]);
864  compute = true;
865  }
866  if (needed_transforms[quaterniond] && !cache->myTransformQuaternionsD)
867  {
868  cache->myTransformQuaternionsD.reset(new UT_QuaternionD[num_target_points]);
869  compute = true;
870  }
871  if (compute)
872  {
873  const UT_Matrix3D *matrices3d = cache->myTransformMatrices3D.get();
874  UT_QuaternionD *quaternionsd = cache->myTransformQuaternionsD.get();
875  UT_QuaternionF *quaternionsf = cache->myTransformQuaternionsF.get();
876  auto &&functor = [matrices3d,quaternionsd,quaternionsf](const UT_BlockedRange<exint> &r)
877  {
878  for (exint i = r.begin(), end = r.end(); i < end; ++i)
879  {
880  UT_QuaternionD quaternion;
881  quaternion.updateFromArbitraryMatrix(matrices3d[i]);
882 
883  if (quaternionsd)
884  quaternionsd[i] = quaternion;
885  if (quaternionsf)
886  quaternionsf[i] = UT_QuaternionF(quaternion);
887  }
888  };
889  if (num_target_points > 512)
890  UTparallelFor(UT_BlockedRange<exint>(0, num_target_points), functor, 2, 256);
891  else
892  functor(UT_BlockedRange<exint>(0, num_target_points));
893  }
894  }
895 }
896 
897 static void
898 guFindStartInTarget(
899  const GA_Offset start,
900  exint &targeti,
901  exint &piece_elementi,
902  exint &piece_element_count,
903  const GA_OffsetList **piece_offset_list,
904  const GA_Offset start_offset,
905  const exint *const piece_offset_starts,
906  const exint *const piece_offset_starts_end,
907  const exint num_target_points,
908  const exint *const target_to_piecei,
909  const GU_CopyToPointsCache::PieceData *const piece_data,
910  const int owneri,
911  const GA_OffsetList *const source_offset_list,
912  const exint source_offset_list_size)
913 {
914  const exint output_primi = start - start_offset;
915  if (piece_offset_starts)
916  {
917  // Find the first entry in piece_offset_starts where the next entry is greater than output_primi,
918  // (in other words, the last entry whose value is less than or equal to output_primi,
919  // so output_primi is in that piece.) The -1 is to go back one.
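 // For example, with piece_offset_starts = {0, 4, 4, 9} and output_primi = 6,
 // upper_bound points at the entry 9 (index 3), so targeti = 2 and
 // piece_elementi = 6 - 4 = 2; an empty piece (like index 1 here) is never selected.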
920  targeti = (std::upper_bound(
921  piece_offset_starts,
922  piece_offset_starts_end,
923  output_primi) - piece_offset_starts) - 1;
924  UT_ASSERT_P(targeti >= 0 && targeti < num_target_points);
925  piece_elementi = output_primi - piece_offset_starts[targeti];
926  const exint piecei = target_to_piecei[targeti];
927  const GU_CopyToPointsCache::PieceData &current_piece = piece_data[piecei];
928  const GA_OffsetList &local_piece_offset_list = current_piece.mySourceOffsetLists[owneri];
929  piece_element_count = local_piece_offset_list.size();
930  if (piece_offset_list != nullptr)
931  *piece_offset_list = &local_piece_offset_list;
932  }
933  else
934  {
935  piece_element_count = source_offset_list_size;
936  if (piece_offset_list != nullptr)
937  {
938  UT_ASSERT_P(source_offset_list_size == source_offset_list->size());
939  *piece_offset_list = source_offset_list;
940  }
941  targeti = output_primi / piece_element_count;
942  piece_elementi = output_primi % piece_element_count;
943  }
944 }
945 
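/// Advances to the next element of the current piece, moving on to the next
/// target point (and that point's piece) once the current piece's source
/// offset list is exhausted.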
946 static inline void
947 guIteratePieceElement(
948  exint &piece_elementi,
949  exint &piece_element_count,
950  exint &targeti,
951  const exint *const piece_offset_starts,
952  const exint num_target_points,
953  const exint *const target_to_piecei,
954  const GU_CopyToPointsCache::PieceData *const piece_data,
955  const int owneri,
956  const GA_OffsetList *&piece_offset_list)
957 {
958  ++piece_elementi;
959  // NOTE: This must be while instead of if, because there can be zero primitives in a piece.
960  while (piece_elementi >= piece_element_count)
961  {
962  piece_elementi = 0;
963  ++targeti;
964 
965  if (targeti >= num_target_points)
966  break;
967 
968  if (piece_offset_starts != nullptr)
969  {
970  exint piecei = target_to_piecei[targeti];
971  const GU_CopyToPointsCache::PieceData &current_piece = piece_data[piecei];
972  piece_offset_list = &current_piece.mySourceOffsetLists[owneri];
973  piece_element_count = piece_offset_list->size();
974  }
975  }
976 }
977 
978 /// This is the same as guIteratePieceElement, but also looking up the target offset,
979 /// and not returning the piece_offset_list.
980 static inline void
981 guIteratePieceElementOff(
982  exint &piece_elementi,
983  exint &piece_element_count,
984  exint &targeti,
985  const exint *const piece_offset_starts,
986  const exint num_target_points,
987  const exint *const target_to_piecei,
988  const GU_CopyToPointsCache::PieceData *const piece_data,
989  const int owneri,
990  GA_Offset &target_off,
991  const GA_OffsetListRef &target_point_list)
992 {
993  ++piece_elementi;
994  // NOTE: This must be while instead of if, because there can be zero primitives in a piece.
995  while (piece_elementi >= piece_element_count)
996  {
997  piece_elementi = 0;
998  ++targeti;
999 
1000  if (targeti >= num_target_points)
1001  break;
1002 
1003  target_off = target_point_list[targeti];
1004 
1005  if (piece_offset_starts != nullptr)
1006  {
1007  exint piecei = target_to_piecei[targeti];
1008  const GU_CopyToPointsCache::PieceData &current_piece = piece_data[piecei];
1009  const GA_OffsetList &piece_offset_list = current_piece.mySourceOffsetLists[owneri];
1010  piece_element_count = piece_offset_list.size();
1011  }
1012  }
1013 }
1014 
1015 /// This is the same as guIteratePieceElement, but also looking up whether
1016 /// the target offset is in the given group, and not returning the piece_offset_list.
1017 static inline void
1018 guIteratePieceElementGroup(
1019  exint &piece_elementi,
1020  exint &piece_element_count,
1021  exint &targeti,
1022  const exint *const piece_offset_starts,
1023  const exint num_target_points,
1024  const exint *const target_to_piecei,
1025  const GU_CopyToPointsCache::PieceData *const piece_data,
1026  const int owneri,
1027  bool &target_in_group,
1028  const GA_OffsetListRef &target_point_list,
1029  const GA_PointGroup *const target_group)
1030 {
1031  ++piece_elementi;
1032  // NOTE: This must be while instead of if, because there can be zero primitives in a piece.
1033  while (piece_elementi >= piece_element_count)
1034  {
1035  piece_elementi = 0;
1036  ++targeti;
1037 
1038  if (targeti >= num_target_points)
1039  break;
1040 
1041  const GA_Offset target_off = target_point_list[targeti];
1042  target_in_group = target_group->contains(target_off);
1043 
1044  if (piece_offset_starts != nullptr)
1045  {
1046  exint piecei = target_to_piecei[targeti];
1047  const GU_CopyToPointsCache::PieceData &current_piece = piece_data[piecei];
1048  const GA_OffsetList &piece_offset_list = current_piece.mySourceOffsetLists[owneri];
1049  piece_element_count = piece_offset_list.size();
1050  }
1051  }
1052 }
1053 
1054 static void
1055 guApplyTransformToAttribute(
1056  const GU_CopyToPointsCache *const cache,
1057  GA_TypeInfo transform_type,
1058  GA_ATINumeric *output_numeric,
1059  const GA_ATINumeric *source_numeric,
1060  const GA_OffsetList &source_offset_list,
1061  const bool copy_source_attribs_in_parallel,
1062  UT_TaskGroup &task_group,
1063  const GA_SplittableRange &output_splittable_range,
1064  const GA_Offset start_offset,
1065  const exint *target_to_piecei,
1066  const exint num_target_points,
1067  const exint *piece_offset_starts,
1068  const exint *piece_offset_starts_end,
1069  const GU_CopyToPointsCache::PieceData *piece_data)
1070 {
1071  const GA_IndexMap &index_map = output_numeric->getIndexMap();
1072  if (index_map.indexSize() == 0)
1073  return;
1074 
1075  int owneri = output_numeric->getOwner();
1076 
1077  const UT_Matrix3F *const transform_matrices_3f = cache->myTransformMatrices3F.get();
1078  const UT_Matrix3D *const transform_matrices_3d = cache->myTransformMatrices3D.get();
1079  const UT_Vector3F *const transform_translates_3f = cache->myTransformTranslates3F.get();
1080  const UT_Vector3D *const transform_translates_3d = cache->myTransformTranslates3D.get();
1081  if (transform_type == GA_TYPE_POINT)
1082  {
1083  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1084  transform_matrices_3f,transform_matrices_3d,transform_translates_3f,transform_translates_3d,
1085  start_offset,piece_offset_starts,piece_offset_starts_end,
1086  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1087  {
1088  GA_Offset start, end;
1089  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1090  {
1091  exint targeti;
1092  exint piece_elementi;
1093  exint piece_element_count;
1094  const GA_OffsetList *piece_offset_list;
1095  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1096  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1097  num_target_points, target_to_piecei, piece_data, owneri,
1098  &source_offset_list, source_offset_list.size());
1099 
1100  if (output_numeric->getStorage() == GA_STORE_REAL32)
1101  {
1102  GA_PageArray<fpreal32,3> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1103  const GA_PageArray<fpreal32,3> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1104  // FIXME: Find longer contiguous spans to transform, for better performance.
1105  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1106  {
1107  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1108  UT_Vector3F pos = source_data.getVector(source_off);
1109  if (transform_matrices_3f)
1110  pos = (pos * transform_matrices_3f[targeti]) + transform_translates_3f[targeti];
1111  else
1112  pos += transform_translates_3f[targeti];
1113  output_data.setVector(dest_off, pos);
1114 
1115  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1116  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1117  }
1118  }
1119  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1120  {
1121  GA_PageArray<fpreal64,3> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1122  const GA_PageArray<fpreal64,3> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1123  // FIXME: Find longer contiguous spans to transform, for better performance.
1124  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1125  {
1126  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1127  UT_Vector3D pos = source_data.getVector(source_off);
1128  if (transform_matrices_3d)
1129  pos = (pos * transform_matrices_3d[targeti]) + transform_translates_3d[targeti];
1130  else
1131  pos += transform_translates_3d[targeti];
1132  output_data.setVector(dest_off, pos);
1133 
1134  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1135  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1136  }
1137  }
1138  else
1139  {
1140  GA_RWHandleV3 output_data(output_numeric);
1141  GA_ROHandleV3 source_data(source_numeric);
1142  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1143  // FIXME: Find longer contiguous spans to transform, for better performance.
1144  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1145  {
1146  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1147  UT_Vector3F pos = source_data.get(source_off);
1148  if (transform_matrices_3f)
1149  pos = (pos * transform_matrices_3f[targeti]) + transform_translates_3f[targeti];
1150  else
1151  pos += transform_translates_3f[targeti];
1152  output_data.set(dest_off, pos);
1153 
1154  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1155  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1156  }
1157  }
1158  }
1159  };
1160  if (copy_source_attribs_in_parallel)
1161  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1162  else
1163  functor(output_splittable_range);
1164  }
1165  else if (transform_type == GA_TYPE_VECTOR)
1166  {
1167  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1168  transform_matrices_3f,transform_matrices_3d,
1169  start_offset,piece_offset_starts,piece_offset_starts_end,
1170  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1171  {
1172  GA_Offset start, end;
1173  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1174  {
1175  exint targeti;
1176  exint piece_elementi;
1177  exint piece_element_count;
1178  const GA_OffsetList *piece_offset_list;
1179  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1180  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1181  num_target_points, target_to_piecei, piece_data, owneri,
1182  &source_offset_list, source_offset_list.size());
1183 
1184  if (output_numeric->getStorage() == GA_STORE_REAL32)
1185  {
1186  UT_ASSERT_P(transform_matrices_3f);
1187  GA_PageArray<fpreal32,3> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1188  const GA_PageArray<fpreal32,3> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1189  // FIXME: Find longer contiguous spans to transform, for better performance.
1190  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1191  {
1192  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1193  UT_Vector3F vec = source_data.getVector(source_off);
1194  vec.rowVecMult(transform_matrices_3f[targeti]);
1195  output_data.setVector(dest_off, vec);
1196 
1197  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1198  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1199  }
1200  }
1201  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1202  {
1203  UT_ASSERT_P(transform_matrices_3d);
1204  GA_PageArray<fpreal64,3> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1205  const GA_PageArray<fpreal64,3> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1206  // FIXME: Find longer contiguous spans to transform, for better performance.
1207  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1208  {
1209  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1210  UT_Vector3D vec = source_data.getVector(source_off);
1211  vec.rowVecMult(transform_matrices_3d[targeti]);
1212  output_data.setVector(dest_off, vec);
1213 
1214  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1215  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1216  }
1217  }
1218  else
1219  {
1220  UT_ASSERT_P(transform_matrices_3f);
1221  GA_RWHandleV3 output_data(output_numeric);
1222  GA_ROHandleV3 source_data(source_numeric);
1223  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1224  // FIXME: Find longer contiguous spans to transform, for better performance.
1225  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1226  {
1227  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1228  UT_Vector3F vec = source_data.get(source_off);
1229  vec.rowVecMult(transform_matrices_3f[targeti]);
1230  output_data.set(dest_off, vec);
1231 
1232  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1233  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1234  }
1235  }
1236  }
1237  };
1238  if (copy_source_attribs_in_parallel)
1239  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1240  else
1241  functor(output_splittable_range);
1242  }
1243  else if (transform_type == GA_TYPE_NORMAL)
1244  {
1245  const UT_Matrix3F *transform_inverse_3f = cache->myTransformInverse3F.get();
1246  const UT_Matrix3D *transform_inverse_3d = cache->myTransformInverse3D.get();
1247  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1248  transform_inverse_3f,transform_inverse_3d,
1249  start_offset,piece_offset_starts,piece_offset_starts_end,
1250  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1251  {
1252  GA_Offset start, end;
1253  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1254  {
1255  exint targeti;
1256  exint piece_elementi;
1257  exint piece_element_count;
1258  const GA_OffsetList *piece_offset_list;
1259  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1260  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1261  num_target_points, target_to_piecei, piece_data, owneri,
1262  &source_offset_list, source_offset_list.size());
1263 
1264  if (output_numeric->getStorage() == GA_STORE_REAL32)
1265  {
1266  UT_ASSERT_P(transform_inverse_3f);
1267  GA_PageArray<fpreal32,3> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1268  const GA_PageArray<fpreal32,3> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<3>();
1269  // FIXME: Find longer contiguous spans to transform, for better performance.
1270  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1271  {
1272  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1273  UT_Vector3F nml = source_data.getVector(source_off);
1274  float orig_length2 = nml.length2();
1275  nml.colVecMult(transform_inverse_3f[targeti]);
1276  float new_length2 = nml.length2();
1277  // Preserve normal length
1278  if (new_length2 != 0)
1279  nml *= SYSsqrt(orig_length2/new_length2);
1280  output_data.setVector(dest_off, nml);
1281 
1282  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1283  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1284  }
1285  }
1286  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1287  {
1288  UT_ASSERT_P(transform_inverse_3d);
1289  GA_PageArray<fpreal64,3> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1290  const GA_PageArray<fpreal64,3> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<3>();
1291  // FIXME: Find longer contiguous spans to transform, for better performance.
1292  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1293  {
1294  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1295  UT_Vector3D nml = source_data.getVector(source_off);
1296  fpreal64 orig_length2 = nml.length2();
1297  nml.colVecMult(transform_inverse_3d[targeti]);
1298  fpreal64 new_length2 = nml.length2();
1299  // Preserve normal length
1300  if (new_length2 != 0)
1301  nml *= SYSsqrt(orig_length2/new_length2);
1302  output_data.setVector(dest_off, nml);
1303 
1304  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1305  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1306  }
1307  }
1308  else
1309  {
1310  UT_ASSERT_P(transform_inverse_3f);
1311  GA_RWHandleV3 output_data(output_numeric);
1312  GA_ROHandleV3 source_data(source_numeric);
1313  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1314  // FIXME: Find longer contiguous spans to transform, for better performance.
1315  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1316  {
1317  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1318  UT_Vector3F nml = source_data.get(source_off);
1319  float orig_length2 = nml.length2();
1320  nml.colVecMult(transform_inverse_3f[targeti]);
1321  float new_length2 = nml.length2();
1322  // Preserve normal length
1323  if (new_length2 != 0)
1324  nml *= SYSsqrt(orig_length2/new_length2);
1325  output_data.set(dest_off, nml);
1326 
1327  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1328  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1329  }
1330  }
1331  }
1332  };
1333  if (copy_source_attribs_in_parallel)
1334  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1335  else
1336  functor(output_splittable_range);
1337  }
1338  else if (transform_type == GA_TYPE_QUATERNION)
1339  {
1340  const UT_QuaternionF *transform_quaternions_3f = cache->myTransformQuaternionsF.get();
1341  const UT_QuaternionD *transform_quaternions_3d = cache->myTransformQuaternionsD.get();
1342  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1343  transform_quaternions_3f,transform_quaternions_3d,
1344  start_offset,piece_offset_starts,piece_offset_starts_end,
1345  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1346  {
1347  GA_Offset start, end;
1348  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1349  {
1350  exint targeti;
1351  exint piece_elementi;
1352  exint piece_element_count;
1353  const GA_OffsetList *piece_offset_list;
1354  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1355  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1356  num_target_points, target_to_piecei, piece_data, owneri,
1357  &source_offset_list, source_offset_list.size());
1358 
1359  if (output_numeric->getStorage() == GA_STORE_REAL32)
1360  {
1361  UT_ASSERT_P(transform_quaternions_3f);
1362  GA_PageArray<fpreal32,4> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<4>();
1363  const GA_PageArray<fpreal32,4> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<4>();
1364  // FIXME: Find longer contiguous spans to transform, for better performance.
1365  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1366  {
1367  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1368  UT_QuaternionF q(source_data.getVector(source_off));
1369  q = transform_quaternions_3f[targeti] * q;
1370  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal32,4>*)&q);
1371 
1372  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1373  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1374  }
1375  }
1376  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1377  {
1378  UT_ASSERT_P(transform_quaternions_3d);
1379  GA_PageArray<fpreal64,4> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<4>();
1380  const GA_PageArray<fpreal64,4> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<4>();
1381  // FIXME: Find longer contiguous spans to transform, for better performance.
1382  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1383  {
1384  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1385  UT_QuaternionD q(source_data.getVector(source_off));
1386  q = transform_quaternions_3d[targeti] * q;
1387  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal64,4>*)&q);
1388 
1389  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1390  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1391  }
1392  }
1393  else
1394  {
1395  UT_ASSERT_P(transform_quaternions_3f);
1396  GA_RWHandleQ output_data(output_numeric);
1397  GA_ROHandleQ source_data(source_numeric);
1398  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1399  // FIXME: Find longer contiguous spans to transform, for better performance.
1400  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1401  {
1402  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1403  UT_QuaternionF q = source_data.get(source_off);
1404  q = transform_quaternions_3f[targeti] * q;
1405  output_data.set(dest_off, q);
1406 
1407  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1408  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1409  }
1410  }
1411  }
1412  };
1413  if (copy_source_attribs_in_parallel)
1414  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1415  else
1416  functor(output_splittable_range);
1417  }
1418  else if (transform_type == GA_TYPE_HPOINT)
1419  {
1420  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1421  transform_matrices_3f,transform_matrices_3d,transform_translates_3f,transform_translates_3d,
1422  start_offset,piece_offset_starts,piece_offset_starts_end,
1423  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1424  {
1425  UT_Matrix3F identity3f(1);
1426  UT_Matrix3D identity3d(1);
1427  GA_Offset start, end;
1428  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1429  {
1430  exint targeti;
1431  exint piece_elementi;
1432  exint piece_element_count;
1433  const GA_OffsetList *piece_offset_list;
1434  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1435  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1436  num_target_points, target_to_piecei, piece_data, owneri,
1437  &source_offset_list, source_offset_list.size());
1438 
1439  if (output_numeric->getStorage() == GA_STORE_REAL32)
1440  {
1441  GA_PageArray<fpreal32,4> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<4>();
1442  const GA_PageArray<fpreal32,4> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<4>();
1443  // FIXME: Find longer contiguous spans to transform, for better performance.
1444  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1445  {
1446  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1447  UT_Vector4F hp(source_data.getVector(source_off));
1448  hp.homogenize();
1449  UT_Matrix4F transform(transform_matrices_3f ? transform_matrices_3f[targeti] : identity3f);
1450  transform.setTranslates(transform_translates_3f[targeti]);
1451  hp *= transform;
1452  hp.dehomogenize();
1453  output_data.setVector(dest_off, hp);
1454 
1455  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1456  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1457  }
1458  }
1459  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1460  {
1461  GA_PageArray<fpreal64,4> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<4>();
1462  const GA_PageArray<fpreal64,4> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<4>();
1463  // FIXME: Find longer contiguous spans to transform, for better performance.
1464  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1465  {
1466  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1467  UT_Vector4D hp(source_data.getVector(source_off));
1468  hp.homogenize();
1469  UT_Matrix4D transform(transform_matrices_3d ? transform_matrices_3d[targeti] : identity3d);
1470  transform.setTranslates(transform_translates_3d[targeti]);
1471  hp *= transform;
1472  hp.dehomogenize();
1473  output_data.setVector(dest_off, hp);
1474 
1475  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1476  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1477  }
1478  }
1479  else
1480  {
1481  GA_RWHandleV4 output_data(output_numeric);
1482  GA_ROHandleV4 source_data(source_numeric);
1483  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1484  // FIXME: Find longer contiguous spans to transform, for better performance.
1485  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1486  {
1487  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1488  UT_Vector4F hp = source_data.get(source_off);
1489  hp.homogenize();
1490  UT_Matrix4F transform(transform_matrices_3f ? transform_matrices_3f[targeti] : identity3f);
1491  transform.setTranslates(transform_translates_3f[targeti]);
1492  hp *= transform;
1493  hp.dehomogenize();
1494  output_data.set(dest_off, hp);
1495 
1496  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1497  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1498  }
1499  }
1500  }
1501  };
1502  if (copy_source_attribs_in_parallel)
1503  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1504  else
1505  functor(output_splittable_range);
1506  }
1507  else if (output_numeric->getTupleSize() == 9) // transform_type == GA_TYPE_TRANSFORM
1508  {
1509  UT_ASSERT(transform_type == GA_TYPE_TRANSFORM);
1510  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1511  transform_matrices_3f,transform_matrices_3d,
1512  start_offset,piece_offset_starts,piece_offset_starts_end,
1513  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1514  {
1515  GA_Offset start, end;
1516  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1517  {
1518  exint targeti;
1519  exint piece_elementi;
1520  exint piece_element_count;
1521  const GA_OffsetList *piece_offset_list;
1522  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1523  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1524  num_target_points, target_to_piecei, piece_data, owneri,
1525  &source_offset_list, source_offset_list.size());
1526 
1527  if (output_numeric->getStorage() == GA_STORE_REAL32)
1528  {
1529  UT_ASSERT_P(transform_matrices_3f);
1530  GA_PageArray<fpreal32,9> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<9>();
1531  const GA_PageArray<fpreal32,9> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<9>();
1532  // FIXME: Find longer contiguous spans to transform, for better performance.
1533  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1534  {
1535  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1536  UT_Matrix3F mat(source_data.getVector(source_off));
1537  mat *= transform_matrices_3f[targeti];
1538  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal32,9>*)&mat);
1539 
1540  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1541  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1542  }
1543  }
1544  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1545  {
1546  UT_ASSERT_P(transform_matrices_3d);
1547  GA_PageArray<fpreal64,9> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<9>();
1548  const GA_PageArray<fpreal64,9> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<9>();
1549  // FIXME: Find longer contiguous spans to transform, for better performance.
1550  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1551  {
1552  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1553  UT_Matrix3D mat(source_data.getVector(source_off));
1554  mat *= transform_matrices_3d[targeti];
1555  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal64,9>*)&mat);
1556 
1557  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1558  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1559  }
1560  }
1561  else
1562  {
1563  UT_ASSERT_P(transform_matrices_3f);
1564  GA_RWHandleM3 output_data(output_numeric);
1565  GA_ROHandleM3 source_data(source_numeric);
1566  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1567  // FIXME: Find longer contiguous spans to transform, for better performance.
1568  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1569  {
1570  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1571  UT_Matrix3F mat = source_data.get(source_off);
1572  mat *= transform_matrices_3f[targeti];
1573  output_data.set(dest_off, mat);
1574 
1575  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1576  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1577  }
1578  }
1579  }
1580  };
1581  if (copy_source_attribs_in_parallel)
1582  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1583  else
1584  functor(output_splittable_range);
1585  }
1586  else
1587  {
1588  UT_ASSERT(transform_type == GA_TYPE_TRANSFORM);
1589  UT_ASSERT(output_numeric->getTupleSize() == 16);
1590  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
1591  transform_matrices_3f,transform_matrices_3d,transform_translates_3f,transform_translates_3d,
1592  start_offset,piece_offset_starts,piece_offset_starts_end,
1593  num_target_points,target_to_piecei,piece_data,owneri](const GA_SplittableRange &r)
1594  {
1595  GA_Offset start, end;
1596  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
1597  {
1598  exint targeti;
1599  exint piece_elementi;
1600  exint piece_element_count;
1601  const GA_OffsetList *piece_offset_list;
1602  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
1603  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
1604  num_target_points, target_to_piecei, piece_data, owneri,
1605  &source_offset_list, source_offset_list.size());
1606 
1607  if (output_numeric->getStorage() == GA_STORE_REAL32)
1608  {
1609  GA_PageArray<fpreal32,16> &output_data = output_numeric->getData().castType<fpreal32>().castTupleSize<16>();
1610  const GA_PageArray<fpreal32,16> &source_data = source_numeric->getData().castType<fpreal32>().castTupleSize<16>();
1611  // FIXME: Find longer contiguous spans to transform, for better performance.
1612  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1613  {
1614  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1615  UT_Matrix4F mat(source_data.getVector(source_off));
1616  if (transform_matrices_3f)
1617  {
1618  UT_Matrix4F transform(transform_matrices_3f[targeti]);
1619  transform.setTranslates(transform_translates_3f[targeti]);
1620  mat *= transform;
1621  }
1622  else
1623  mat.translate(transform_translates_3f[targeti]);
1624  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal32,16>*)&mat);
1625 
1626  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1627  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1628  }
1629  }
1630  else if (output_numeric->getStorage() == GA_STORE_REAL64)
1631  {
1632  GA_PageArray<fpreal64,16> &output_data = output_numeric->getData().castType<fpreal64>().castTupleSize<16>();
1633  const GA_PageArray<fpreal64,16> &source_data = source_numeric->getData().castType<fpreal64>().castTupleSize<16>();
1634  // FIXME: Find longer contiguous spans to transform, for better performance.
1635  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1636  {
1637  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1638  UT_Matrix4D mat(source_data.getVector(source_off));
1639  if (transform_matrices_3d)
1640  {
1641  UT_Matrix4D transform(transform_matrices_3d[targeti]);
1642  transform.setTranslates(transform_translates_3d[targeti]);
1643  mat *= transform;
1644  }
1645  else
1646  mat.translate(transform_translates_3d[targeti]);
1647  output_data.setVector(dest_off, *(const UT_FixedVector<fpreal64,16>*)&mat);
1648 
1649  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1650  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1651  }
1652  }
1653  else
1654  {
1655  GA_RWHandleM4 output_data(output_numeric);
1656  GA_ROHandleM4 source_data(source_numeric);
1657  UT_ASSERT_P(output_data.isValid() && source_data.isValid());
1658  // FIXME: Find longer contiguous spans to transform, for better performance.
1659  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
1660  {
1661  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
1662  UT_Matrix4F mat = source_data.get(source_off);
1663  if (transform_matrices_3f)
1664  {
1665  UT_Matrix4F transform(transform_matrices_3f[targeti]);
1666  transform.setTranslates(transform_translates_3f[targeti]);
1667  mat *= transform;
1668  }
1669  else
1670  mat.translate(transform_translates_3f[targeti]);
1671  output_data.set(dest_off, mat);
1672 
1673  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
1674  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
1675  }
1676  }
1677  }
1678  };
1679  if (copy_source_attribs_in_parallel)
1680  UTparallelForRunInTaskGroup(task_group, output_splittable_range, functor);
1681  else
1682  functor(output_splittable_range);
1683  }
1684 }
1685 
1686 void
1688  GU_Detail *output_geo,
1689  const GU_Detail *const source,
1690  const GA_OffsetList &source_point_list_cache,
1691  const GA_OffsetList &source_vertex_list_cache,
1692  const GA_OffsetList &source_prim_list_cache,
1693  const exint ncopies)
1694 {
1695  exint source_point_count = source_point_list_cache.size();
1696  exint source_vertex_count = source_vertex_list_cache.size();
1697  exint source_prim_count = source_prim_list_cache.size();
1698 
1699  GA_Size totalnpoints = source_point_count * ncopies;
1700  GA_Offset startpt = output_geo->appendPointBlock(totalnpoints);
1701 
1702  if (source_prim_count <= 0)
1703  return;
1704 
1705  UT_SmallArray<std::pair<int,exint>, 2*sizeof(std::pair<int,exint>)> prim_type_count_pairs;
1706  bool all_source_prims = source_prim_list_cache.isSame(source->getPrimitiveMap().getOffsetFromIndexList());
1707  GA_Range source_primrange;
1708  if (!all_source_prims)
1709  source_primrange = GA_RTIOffsetList(source->getPrimitiveMap(), source_prim_list_cache);
1710  source->getPrimitiveList().getPrimitiveTypeCounts(prim_type_count_pairs, !all_source_prims ? &source_primrange : nullptr);
1711  UT_ASSERT(!prim_type_count_pairs.isEmpty());
1712 
1713  // Compute a ptoff to point-within-a-copy index structure if we're not copying all points.
1714  GA_PageArray<exint,1> source_ptoff_to_pointi;
1715  bool all_source_points = source_point_list_cache.isSame(source->getPointMap().getOffsetFromIndexList());
1716  bool have_reverse_point_map = !all_source_points && !source_point_list_cache.isTrivial();
1717  if (have_reverse_point_map)
1718  {
1719  source_ptoff_to_pointi.setSize(source->getNumPointOffsets(), exint(-1));
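 // For example (hypothetical offsets): if the copied points sit at source offsets {4, 7, 9},
 // the loop below ends up mapping 4 -> 0, 7 -> 1, 9 -> 2, leaving every other entry at -1.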
1720  // TODO: Parallelize this if it's worthwhile.
1721  for (exint pointi = 0; pointi < source_point_count; ++pointi)
1722  {
1723  source_ptoff_to_pointi.set(source_point_list_cache[pointi], pointi);
1724  }
1725  }
1726 
1727  // It's safe for hassharedpoints to be true when there are no shared points,
1728  // but not vice versa.
1729  bool hassharedpoints = true;
1730  // In this case, "contiguous" means the vertices use the copied points in order, one at a time, starting from the first copied point (which maps to startpt).
1731  bool hascontiguouspoints = false;
1732  if (source_point_count >= source_vertex_count)
1733  {
1734  // If there is at least one point per vertex, there's a
1735  // decent chance that none of the source points are shared,
1736  // which can make building the primitives much faster,
1737  // so we check.
1738 
1739  hascontiguouspoints = true;
1740  hassharedpoints = false;
1741 
1742  // TODO: Parallelize this.
1743  GA_Offset last_point = GA_INVALID_OFFSET;
1744  for (exint primi = 0; primi < source_prim_count; ++primi)
1745  {
1746  GA_Offset primoff = source_prim_list_cache[primi];
1747  const GA_OffsetListRef vertices = source->getPrimitiveVertexList(primoff);
1748  if (vertices.size() == 0)
1749  continue;
1750  if (!GAisValid(last_point))
1751  {
1752  last_point = source->vertexPoint(vertices[0]);
1753  if (last_point != source_point_list_cache[0])
1754  {
1755  hascontiguouspoints = false;
1756  }
1757  }
1758  else
1759  {
1760  GA_Offset current_point = source->vertexPoint(vertices[0]);
1761  hascontiguouspoints &= (current_point == last_point+1);
1762 
1763  // This isn't a perfect check for sharing, but since
1764  // we don't want to do a full sort, we just check whether
1765  // the point offsets are strictly increasing.
1766  // If points are shared, this check will make hassharedpoints true.
1767  // If no points are shared, this can also end up being true sometimes.
1768  hassharedpoints |= (current_point <= last_point);
1769  if (hassharedpoints)
1770  break;
1771  last_point = current_point;
1772  }
1773  for (exint i = 1, n = vertices.size(); i < n; ++i)
1774  {
1775  GA_Offset current_point = source->vertexPoint(vertices[i]);
1776  hascontiguouspoints &= (current_point == last_point+1);
1777 
1778  // See comment above about how this isn't a perfect check.
1779  hassharedpoints |= (current_point <= last_point);
1780  if (hassharedpoints)
1781  break;
1782  last_point = current_point;
1783  }
1784  if (hassharedpoints)
1785  break;
1786  }
1787  }
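 // To illustrate the heuristic above: vertices referencing points at offsets 3,4,5,6 (strictly
 // increasing by one from the first cached point) keep hascontiguouspoints true and hassharedpoints
 // false, whereas a sequence like 3,7,4 marks hassharedpoints conservatively, even if no point is
 // actually reused.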
1788 
1789  UT_UniquePtr<exint[]> vertexpointnumbers_deleter(!hascontiguouspoints ? new exint[source_vertex_count] : nullptr);
1790  GA_PolyCounts vertexlistsizelist;
1791  UT_SmallArray<exint, 2*sizeof(exint)> closed_span_lengths;
1792  closed_span_lengths.append(0);
1793  exint *vertexpointnumbers = vertexpointnumbers_deleter.get();
1794  // TODO: Parallelize this if it's worthwhile.
1795  exint vertexi = 0;
1796  for (exint primi = 0; primi < source_prim_count; ++primi)
1797  {
1798  GA_Offset primoff = source_prim_list_cache[primi];
1799  const GA_OffsetListRef vertices = source->getPrimitiveVertexList(primoff);
1800  GA_Size n = vertices.size();
1801 
1802  vertexlistsizelist.append(n);
1803 
1804  bool closed = vertices.getExtraFlag();
1805  // Index 0 (size 1) always represents open, and so does every even index (odd size).
1806  // Every odd index (even size) always represents closed.
1807  // This condition checks if we're switching between open and closed.
1808  if ((closed_span_lengths.size()&1) == exint(closed))
1809  closed_span_lengths.append(1);
1810  else
1811  ++(closed_span_lengths.last());
1812 
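 // Worked example of the encoding above: primitives in the order open, open, closed, closed,
 // closed, open produce closed_span_lengths == {2, 3, 1}.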
1813  if (!hascontiguouspoints)
1814  {
1815  if (have_reverse_point_map)
1816  {
1817  for (exint i = 0; i < n; ++i)
1818  {
1819  GA_Offset source_ptoff = source->vertexPoint(vertices(i));
1820  exint pointi = source_ptoff_to_pointi.get(source_ptoff);
1821  vertexpointnumbers[vertexi] = pointi;
1822  ++vertexi;
1823  }
1824  }
1825  else if (source_point_list_cache.isTrivial())
1826  {
1827  for (exint i = 0; i < n; ++i)
1828  {
1829  GA_Offset source_ptoff = source->vertexPoint(vertices(i));
1830  exint pointi = source_point_list_cache.find(source_ptoff);
1831  UT_ASSERT_P(pointi >= 0);
1832  vertexpointnumbers[vertexi] = pointi;
1833  ++vertexi;
1834  }
1835  }
1836  else
1837  {
1838  for (exint i = 0; i < n; ++i)
1839  {
1840  GA_Offset source_ptoff = source->vertexPoint(vertices(i));
1841  exint pointi = exint(source->pointIndex(source_ptoff));
1842  vertexpointnumbers[vertexi] = pointi;
1843  ++vertexi;
1844  }
1845  }
1846  }
1847  }
1848 
1849  GA_Offset start_primoff = GEObuildPrimitives(
1850  output_geo,
1851  prim_type_count_pairs.getArray(),
1852  startpt,
1853  source_point_count,
1854  vertexlistsizelist,
1855  vertexpointnumbers,
1856  hassharedpoints,
1857  closed_span_lengths.getArray(),
1858  ncopies);
1859 
1860 
1861  // Early exit if only polygons and tetrahedra,
1862  // since they have no member data outside of GA_Primitive,
1863  // and might be stored compressed in GA_PrimitiveList.
1864  exint num_polys_and_tets =
1865  output_geo->countPrimitiveType(GA_PRIMPOLY) +
1866  output_geo->countPrimitiveType(GA_PRIMTETRAHEDRON);
1867  if (output_geo->getNumPrimitives() == num_polys_and_tets)
1868  return;
1869 
1870  // Copy primitive subclass data for types other than polygons and tetrahedra.
1871  UT_BlockedRange<GA_Offset> primrange(start_primoff, start_primoff+output_geo->getNumPrimitives());
1872  auto &&functor = [output_geo,source,&source_prim_list_cache,source_prim_count](const UT_BlockedRange<GA_Offset> &r)
1873  {
1874  // FIXME: Find longer contiguous spans to copy, for better performance.
1875  for (GA_Offset dest_off = r.begin(), end = r.end(); dest_off < end; ++dest_off)
1876  {
1877  exint sourcei = exint(dest_off) % source_prim_count;
1878  GA_Offset source_off = source_prim_list_cache[sourcei];
1879  const GA_Primitive *source_prim = source->getPrimitive(source_off);
1880  GA_Primitive *output_prim = output_geo->getPrimitive(dest_off);
1881  output_prim->copySubclassData(source_prim);
1882  }
1883  };
1884  if (output_geo->getNumPrimitives() >= 1024)
1885  {
1886  UTparallelForLightItems(primrange, functor);
1887  }
1888  else
1889  {
1890  functor(primrange);
1891  }
1892 }
1893 
1894 void
1896  GA_OffsetList &offset_list,
1897  const GU_Detail *const detail,
1898  const GA_ElementGroup *const group,
1899  const GA_AttributeOwner owner)
1900 {
1901  if (group == nullptr)
1902  {
1903  offset_list = detail->getIndexMap(owner).getOffsetFromIndexList();
1904  }
1905  else
1906  {
1907  offset_list.clear();
1908  GA_Offset start;
1909  GA_Offset end;
1910  GA_Range range(*group);
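 // fullBlockAdvance() below hands back maximal contiguous runs of offsets in the group, so each
 // run can be appended as a single trivial (start, length) span instead of one offset at a time.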
1911  for (GA_Iterator it(range); it.fullBlockAdvance(start, end); )
1912  {
1913  offset_list.setTrivialRange(offset_list.size(), start, end-start);
1914  }
1915  }
1916 }
1917 
1918 void
1920  GU_Detail *output_geo,
1921  const GU_Detail *const source,
1922  const exint source_point_count,
1923  const exint source_vertex_count,
1924  const exint source_prim_count,
1925  const GA_OffsetList &source_point_list_cache,
1926  GA_OffsetList &source_vertex_list_cache,
1927  const GA_OffsetList &source_prim_list_cache,
1928  const GA_PointGroup *const source_pointgroup,
1929  const GA_PrimitiveGroup *const source_primgroup,
1930  const exint ncopies)
1931 {
1932  if (ncopies <= 0)
1933  {
1934  source_vertex_list_cache.clear();
1935  return;
1936  }
1937 
1938  if (source_prim_count <= 0)
1939  {
1940  source_vertex_list_cache.clear();
1941 
1942  GA_Size totalnpoints = source_point_count * ncopies;
1943  output_geo->appendPointBlock(totalnpoints);
1944  return;
1945  }
1946 
1947  // We need to build and cache a structure for quickly looking up
1948  // vertex offsets in order to easily copy vertex attributes
1949  // in parallel.
1950  {
1951  // TODO: Parallelize this if it's worthwhile.
1952  source_vertex_list_cache.clear();
1953  GA_Offset start;
1954  GA_Offset end;
1955  for (GA_Iterator it(source->getPrimitiveRange(source_primgroup)); it.fullBlockAdvance(start, end); )
1956  {
1957  for (GA_Offset primoff = start; primoff < end; ++primoff)
1958  {
1959  const GA_OffsetListRef vertices = source->getPrimitiveVertexList(primoff);
1960  source_vertex_list_cache.append(vertices);
1961  }
1962  }
1963  }
1964  UT_ASSERT(source_vertex_list_cache.size() == source_vertex_count);
1965 
1967  output_geo,
1968  source,
1969  source_point_list_cache,
1970  source_vertex_list_cache,
1971  source_prim_list_cache,
1972  ncopies);
1973 }
1974 
1975 void
1977  GU_Detail *const output_geo,
1978  const exint num_packed_prims)
1979 {
1980  if (num_packed_prims <= 0)
1981  return;
1982 
1983  // Create points
1984  GA_Offset start_ptoff = output_geo->appendPointBlock(num_packed_prims);
1985  GA_Offset end_ptoff = start_ptoff+num_packed_prims;
1986 
1987  // Create primitives and vertices
1988  GA_Offset start_vtxoff;
1989  output_geo->appendPrimitivesAndVertices(
1990  GU_PackedGeometry::typeId(), num_packed_prims, 1, start_vtxoff);
1991  GA_Offset end_vtxoff = start_vtxoff+num_packed_prims;
1992 
1993  // Wire the vertices to the points.
1994  // There are no shared points, so it's relatively easy.
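 // Each packed primitive was created with exactly one vertex, so the wiring below pairs
 // vertex start_vtxoff+i with point start_ptoff+i, one to one.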
1995  GA_ATITopology *const vertexToPoint = output_geo->getTopology().getPointRef();
1996  GA_ATITopology *const pointToVertex = output_geo->getTopology().getVertexRef();
1997  if (vertexToPoint)
1998  {
1999  UTparallelForLightItems(GA_SplittableRange(GA_Range(output_geo->getVertexMap(), start_vtxoff, end_vtxoff)),
2000  geo_SetTopoMappedParallel<int>(vertexToPoint, start_ptoff, start_vtxoff, nullptr));
2001  }
2002  if (pointToVertex)
2003  {
2004  UTparallelForLightItems(GA_SplittableRange(GA_Range(output_geo->getPointMap(), start_ptoff, end_ptoff)),
2005  geo_SetTopoMappedParallel<int>(pointToVertex, start_vtxoff, start_ptoff, nullptr));
2006  }
2007 }
2008 
2009 void
2010 GUcopyAttributesFromSource(
2011  GU_Detail *const output_geo,
2012  const GA_SplittableRange *const output_splittable_ranges,
2013  const GU_Detail *const source,
2014  const exint num_target_points,
2015  GU_CopyToPointsCache *const cache,
2016  const GA_OffsetList *const source_offset_lists,
2017  const exint *const num_source_attribs,
2018  const bool no_transforms,
2019  const bool had_transform_matrices,
2020  const bool has_transform_matrices,
2021  const bool topology_changed,
2022  const bool transforms_changed,
2023  const GU_Detail *const target,
2024  const GU_CopyToPointsCache::TargetAttribInfoMap *const target_attrib_info,
2025  const GU_CopyToPointsCache::TargetAttribInfoMap *const target_group_info,
2026  const exint *const target_to_piecei,
2027  const UT_Array<exint> *const owner_piece_offset_starts,
2028  const GU_CopyToPointsCache::PieceData *const piece_data)
2029 {
2030  if (num_target_points <= 0)
2031  return;
2032 
2033  using AttribCombineMethod = GU_CopyToPointsCache::AttribCombineMethod;
2034 
2035  UT_ASSERT(source_offset_lists != nullptr);
2036  // If target is present or transforms are applied, cache is required.
2037  UT_ASSERT((target == nullptr && no_transforms) || cache != nullptr);
2038  const exint num_output_elements =
2039  output_geo->getNumVertices() * num_source_attribs[GA_ATTRIB_VERTEX] +
2040  output_geo->getNumPoints() * num_source_attribs[GA_ATTRIB_POINT] +
2041  output_geo->getNumPrimitives() * num_source_attribs[GA_ATTRIB_PRIMITIVE];
2042  const bool copy_source_attribs_in_parallel = num_output_elements >= 4096;
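 // Heuristic threshold: only dispatch parallel tasks when there are at least ~4096
 // element-attribute pairs to process; below that, the task overhead tends to outweigh
 // the copies themselves.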
2043  UT_TaskGroup task_group;
2044  for (int owneri = 0; owneri < 3; ++owneri)
2045  {
2046  const exint *piece_offset_starts = nullptr;
2047  const exint *piece_offset_starts_end = nullptr;
2048  if (owner_piece_offset_starts)
2049  {
2050  auto &array = owner_piece_offset_starts[owneri];
2051  piece_offset_starts = array.getArray();
2052  piece_offset_starts_end = array.getArray() + array.size();
2053  }
2054 
2055  const GA_OffsetList &source_offset_list = source_offset_lists[owneri];
2056  GA_AttributeOwner owner = GA_AttributeOwner(owneri);
2057  const GA_IndexMap &index_map = output_geo->getIndexMap(owner);
2058  if (index_map.indexSize() == 0)
2059  continue;
2060 
2061  // To get the start offset of the copied geometry without the overhead of using a GA_Iterator
2062  // (which copies the GA_Range), we just use iterateCreate and iterateRewind directly.
2063  GA_IteratorState iterator_state;
2064  output_splittable_ranges[owneri].iterateCreate(iterator_state);
2065  GA_Offset start_offset;
2066  GA_Offset first_block_end;
2067  output_splittable_ranges[owneri].iterateRewind(iterator_state, start_offset, first_block_end);
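 // A rough (unused) sketch of the heavier alternative this avoids, assuming the usual
 // GA_Iterator interface:
 //     GA_Iterator tmp(output_splittable_ranges[owneri]); // copies the underlying GA_Range
 //     GA_Offset start_offset = *tmp;                      // first offset in the range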
2068 
2069  for (auto it = output_geo->getAttributeDict(owner).begin(GA_SCOPE_PUBLIC); !it.atEnd(); ++it)
2070  {
2071  GA_Attribute *output_attrib = it.attrib();
2072  const UT_StringHolder &name = output_attrib->getName();
2073  const GA_Attribute *source_attrib = source->findAttribute(owner, name);
2074  if (!source_attrib)
2075  continue;
2076 
2077  // Check for interactions with target attributes.
2078  AttribCombineMethod target_method = AttribCombineMethod::NONE;
2079  if (target)
2080  {
2081  auto target_it = target_attrib_info->find(name);
2082  if (!target_it.atEnd())
2083  {
2084  if (target_it->second.myCopyTo == owner)
2085  {
2086  target_method = target_it->second.myCombineMethod;
2087 
2088  // Target attributes take precedence if copying.
2089  if (target_method == AttribCombineMethod::COPY)
2090  continue;
2091  }
2092  else if (target_it->second.myCombineMethod == AttribCombineMethod::COPY &&
2093  target_it->second.myCopyTo == guConflictAttribOwner(owner))
2094  {
2095  // Target attributes take precedence if copying,
2096  // including if destination type differs between point and vertex.
2097  // NOTE: We don't have to check point vs. vertex for
2098  // mult/add/sub, because we already forced target type
2099  // to match source type in those situations.
2100  continue;
2101  }
2102  }
2103  }
2104 
2105  AttribCombineMethod prev_target_method = AttribCombineMethod::NONE;
2106  GU_CopyToPointsCache::TargetAttribInfoMap::iterator prev_target_it;
2107  bool target_changed = false;
2108  if (target)
2109  {
2110  prev_target_it = cache->myTargetAttribInfo.find(name);
2111  if (!prev_target_it.atEnd())
2112  {
2113  if (prev_target_it->second.myCopyTo == owner)
2114  {
2115  prev_target_method = prev_target_it->second.myCombineMethod;
2116 
2117  if (prev_target_method != AttribCombineMethod::NONE)
2118  {
2119  const GA_Attribute *target_attrib = target->findPointAttribute(name);
2120  GA_DataId prev_target_dataid = prev_target_it->second.myDataID;
2121  target_changed = !target_attrib ||
2122  !GAisValid(prev_target_dataid) ||
2123  !GAisValid(target_attrib->getDataId()) ||
2124  target_attrib->getDataId() != prev_target_dataid;
2125  }
2126  }
2127  }
2128  if (target_method != prev_target_method)
2129  {
2130  target_changed = true;
2131  }
2132  }
2133 
2134  GA_ATINumeric *output_numeric = GA_ATINumeric::cast(output_attrib);
2135  if (output_numeric)
2136  {
2137  const GA_ATINumeric *source_numeric = UTverify_cast<const GA_ATINumeric*>(source_attrib);
2138 
2139  GA_TypeInfo transform_type = no_transforms ? GA_TYPE_VOID : guGetTransformTypeInfo(output_numeric, has_transform_matrices);
2140 
2141  if (transform_type != GA_TYPE_VOID)
2142  {
2143  UT_ASSERT(cache != nullptr);
2144  // If !topology_changed, and !transforms_changed, and the
2145  // source_attrib data ID hasn't changed, don't re-transform.
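 // Data IDs are bumped whenever an attribute's contents change, so an unchanged data ID here
 // (together with unchanged topology and transforms) means the transformed values already in
 // output_geo are still valid.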
2146  if (!topology_changed && !transforms_changed && !target_changed)
2147  {
2148  auto it = cache->mySourceAttribDataIDs[owneri].find(name);
2149  if (!it.atEnd() && it->second != GA_INVALID_DATAID && it->second == source_attrib->getDataId())
2150  {
2151  continue;
2152  }
2153  }
2154  cache->mySourceAttribDataIDs[owneri][name] = source_attrib->getDataId();
2155  output_attrib->bumpDataId();
2156 
2157  // Remove any entry from the target cache, so that the code for
2158  // applying target attributes is forced to re-apply the operation.
2159  if (target && !prev_target_it.atEnd())
2160  cache->myTargetAttribInfo.erase(prev_target_it);
2161 
2162  guApplyTransformToAttribute(
2163  cache,
2164  transform_type,
2165  output_numeric,
2166  source_numeric,
2167  source_offset_list,
2168  copy_source_attribs_in_parallel,
2169  task_group,
2170  output_splittable_ranges[owneri],
2171  start_offset,
2172  target_to_piecei,
2173  num_target_points,
2174  piece_offset_starts,
2175  piece_offset_starts_end,
2176  piece_data);
2177  }
2178  else
2179  {
2180  if (cache != nullptr)
2181  {
2182  // If !topology_changed and the source_attrib data ID hasn't changed since last time, don't re-copy.
2183  if (!topology_changed && !target_changed)
2184  {
2185  auto it = cache->mySourceAttribDataIDs[owneri].find(name);
2186  if (!it.atEnd() && it->second != GA_INVALID_DATAID && it->second == source_attrib->getDataId())
2187  {
2188  // NOTE: no_transforms should only be true when copying for a packed primitive,
2189  // in which case, there are never transforms, so we don't have to worry
2190  // about it changing whether an attribute is transformed or copied.
2191  if (no_transforms || has_transform_matrices || !had_transform_matrices)
2192  continue;
2193 
2194  // If this attribute was transforming last time and is no longer transforming,
2195  // it needs to be copied, even though it hasn't changed.
2196  GA_TypeInfo old_transform_type = guGetTransformTypeInfo(output_numeric, had_transform_matrices);
2197  if (old_transform_type == GA_TYPE_VOID)
2198  {
2199  // Wasn't transforming last time either,
2200  // and the attribute hasn't changed, so we can skip it.
2201  continue;
2202  }
2203  }
2204  }
2205  cache->mySourceAttribDataIDs[owneri][name] = source_attrib->getDataId();
2206 
2207  // Remove any entry from the target cache, so that the code for
2208  // applying target attributes is forced to re-apply the operation.
2209  if (target && !prev_target_it.atEnd())
2210  cache->myTargetAttribInfo.erase(prev_target_it);
2211  }
2212 
2213  output_attrib->bumpDataId();
2214 
2215  auto &&functor = [output_numeric,source_numeric,&source_offset_list,
2216  piece_offset_starts,piece_offset_starts_end,num_target_points,
2217  target_to_piecei,piece_data,owneri,start_offset](const GA_SplittableRange &r)
2218  {
2219  auto &output_data = output_numeric->getData();
2220  const auto &source_data = source_numeric->getData();
2221 
2222  GA_Offset start;
2223  GA_Offset end;
2224  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2225  {
2226  exint targeti;
2227  exint piece_elementi;
2228  exint piece_element_count;
2229  const GA_OffsetList *piece_offset_list;
2230  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2231  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
2232  num_target_points, target_to_piecei, piece_data, owneri,
2233  &source_offset_list, source_offset_list.size());
2234 
2235  // FIXME: Consider switching on type and tuple size outside the loop.
2236  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2237  {
2238  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
2239  output_data.moveRange(source_data, source_off, dest_off, GA_Offset(1));
2240 
2241  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2242  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
2243  }
2244  }
2245  };
2246  if (copy_source_attribs_in_parallel)
2247  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owneri], functor);
2248  else
2249  functor(output_splittable_ranges[owneri]);
2250  }
2251 
2252  }
2253  else
2254  {
2255  if (cache != nullptr)
2256  {
2257  // If !topology_changed and the source_attrib data ID hasn't changed since last time, don't re-copy.
2258  if (!topology_changed && !target_changed)
2259  {
2260  auto it = cache->mySourceAttribDataIDs[owneri].find(name);
2261  if (!it.atEnd() && it->second != GA_INVALID_DATAID && it->second == source_attrib->getDataId())
2262  continue;
2263  }
2264  cache->mySourceAttribDataIDs[owneri][name] = source_attrib->getDataId();
2265 
2266  // Remove any entry from the target cache, so that the code for
2267  // applying target attributes is forced to re-apply the operation.
2268  if (target && !prev_target_it.atEnd())
2269  cache->myTargetAttribInfo.erase(prev_target_it);
2270  }
2271 
2272  output_attrib->bumpDataId();
2273 
2274  GA_ATIString *output_string = GA_ATIString::cast(output_attrib);
2275  if (output_string)
2276  {
2277  // Special case for string attributes for batch adding of string references.
2278  const GA_ATIString *source_string = UTverify_cast<const GA_ATIString *>(source_attrib);
2279  auto &&functor = [output_string,source_string,&source_offset_list,
2280  piece_offset_starts,piece_offset_starts_end,num_target_points,
2281  target_to_piecei,piece_data,start_offset,owneri](const GA_SplittableRange &r)
2282  {
2283  GA_RWBatchHandleS output_string_handle(output_string);
2284  UT_ASSERT_P(output_string_handle.isValid());
2285  GA_ROHandleS source_string_handle(source_string);
2286  GA_Offset start;
2287  GA_Offset end;
2288  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2289  {
2290  exint targeti;
2291  exint piece_elementi;
2292  exint piece_element_count;
2293  const GA_OffsetList *piece_offset_list;
2294  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2295  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
2296  num_target_points, target_to_piecei, piece_data, owneri,
2297  &source_offset_list, source_offset_list.size());
2298 
2299  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2300  {
2301  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
2302  output_string_handle.set(dest_off, source_string_handle.get(source_off));
2303 
2304  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2305  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
2306  }
2307  }
2308  };
2309  if (copy_source_attribs_in_parallel)
2310  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owneri], functor);
2311  else
2312  functor(output_splittable_ranges[owneri]);
2313  }
2314  else
2315  {
2316  auto &&functor = [output_attrib,source_attrib,&source_offset_list,
2317  piece_offset_starts,piece_offset_starts_end,num_target_points,
2318  target_to_piecei,piece_data,start_offset,owneri](const GA_SplittableRange &r)
2319  {
2320  GA_Offset start;
2321  GA_Offset end;
2322  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2323  {
2324  exint targeti;
2325  exint piece_elementi;
2326  exint piece_element_count;
2327  const GA_OffsetList *piece_offset_list;
2328  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2329  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
2330  num_target_points, target_to_piecei, piece_data, owneri,
2331  &source_offset_list, source_offset_list.size());
2332 
2333  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2334  {
2335  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
2336  output_attrib->copy(dest_off, *source_attrib, source_off);
2337 
2338  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2339  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
2340  }
2341  }
2342  };
2343  if (copy_source_attribs_in_parallel)
2344  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owneri], functor);
2345  else
2346  functor(output_splittable_ranges[owneri]);
2347  }
2348  }
2349  }
2350  }
2351 
2352  // Don't forget the element groups
2353  for (int owneri = 0; owneri < 3; ++owneri)
2354  {
2355  const exint *piece_offset_starts = nullptr;
2356  const exint *piece_offset_starts_end = nullptr;
2357  if (owner_piece_offset_starts)
2358  {
2359  auto &array = owner_piece_offset_starts[owneri];
2360  piece_offset_starts = array.getArray();
2361  piece_offset_starts_end = array.getArray() + array.size();
2362  }
2363 
2364  const GA_OffsetList &source_offset_list = source_offset_lists[owneri];
2365  GA_AttributeOwner owner = GA_AttributeOwner(owneri);
2366  const GA_IndexMap &index_map = output_geo->getIndexMap(owner);
2367  if (index_map.indexSize() == 0)
2368  continue;
2369 
2370  // To get the start offset of the copied geometry without the overhead of using a GA_Iterator
2371  // (which copies the GA_Range), we just use iterateCreate and iterateRewind directly.
2372  GA_IteratorState iterator_state;
2373  output_splittable_ranges[owner].iterateCreate(iterator_state);
2374  GA_Offset start_offset;
2375  GA_Offset first_block_end;
2376  output_splittable_ranges[owner].iterateRewind(iterator_state, start_offset, first_block_end);
2377 
2378  for (auto it = output_geo->getElementGroupTable(owner).beginTraverse(); !it.atEnd(); ++it)
2379  {
2380  GA_ElementGroup *output_group = it.group();
2381  UT_ASSERT_MSG(!output_group->isInternal(),
2382  "GUremoveUnnecessaryAttribs removes internal groups and "
2383  "GUaddAttributesFromSourceOrTarget doesn't add them");
2384 
2385  const UT_StringHolder &name = output_group->getName();
2386 
2387  // Check for interactions with target groups.
2388  AttribCombineMethod target_method = AttribCombineMethod::NONE;
2389  if (target)
2390  {
2391  auto target_it = target_group_info->find(name);
2392  if (!target_it.atEnd())
2393  {
2394  if (target_it->second.myCopyTo == owner)
2395  {
2396  target_method = target_it->second.myCombineMethod;
2397 
2398  // Target groups take precedence if copying.
2399  if (target_method == AttribCombineMethod::COPY)
2400  continue;
2401  }
2402  else if (target_it->second.myCombineMethod == AttribCombineMethod::COPY &&
2403  target_it->second.myCopyTo == guConflictAttribOwner(owner))
2404  {
2405  // Target groups take precedence if copying,
2406  // including if destination type differs between point and vertex.
2407  // NOTE: We don't have to check point vs. vertex for
2408  // mult/add/sub, because we already forced target type
2409  // to match source type in those situations.
2410  continue;
2411  }
2412  }
2413  }
2414 
2415  AttribCombineMethod prev_target_method = AttribCombineMethod::NONE;
2416  GU_CopyToPointsCache::TargetAttribInfoMap::iterator prev_target_it;
2417  bool target_changed = false;
2418  if (target)
2419  {
2420  prev_target_it = cache->myTargetGroupInfo.find(name);
2421  if (!prev_target_it.atEnd())
2422  {
2423  if (prev_target_it->second.myCopyTo == owner)
2424  {
2425  prev_target_method = prev_target_it->second.myCombineMethod;
2426 
2427  if (prev_target_method != AttribCombineMethod::NONE)
2428  {
2429  const GA_Attribute *target_group = target->findPointGroup(name);
2430  GA_DataId prev_target_dataid = prev_target_it->second.myDataID;
2431  target_changed = !target_group ||
2432  !GAisValid(prev_target_dataid) ||
2433  !GAisValid(target_group->getDataId()) ||
2434  target_group->getDataId() != prev_target_dataid;
2435  }
2436  }
2437  }
2438  if (target_method != prev_target_method)
2439  {
2440  target_changed = true;
2441  }
2442  }
2443 
2444  const GA_ElementGroup *source_group = source->getElementGroupTable(owner).find(name);
2445  UT_ASSERT(source_group);
2446 
2447  if (cache != nullptr)
2448  {
2449  // If !topology_changed and the source_group data ID hasn't changed since last time, don't re-copy.
2450  if (!topology_changed && !target_changed)
2451  {
2452  auto it = cache->mySourceGroupDataIDs[owneri].find(name);
2453  if (!it.atEnd() && it->second != GA_INVALID_DATAID && it->second == source_group->getDataId())
2454  continue;
2455  }
2456  cache->mySourceGroupDataIDs[owneri][name] = source_group->getDataId();
2457 
2458  // Remove any entry from the target cache, so that the code for
2459  // applying target groups is forced to re-apply the operation.
2460  if (target && !prev_target_it.atEnd())
2461  cache->myTargetGroupInfo.erase(prev_target_it);
2462  }
2463 
2464  output_group->bumpDataId();
2465  output_group->invalidateGroupEntries();
2466 
2467  auto &&functor = [output_group,source_group,&source_offset_list,
2468  piece_offset_starts,piece_offset_starts_end,owneri,target_to_piecei,piece_data,
2469  num_target_points,start_offset](const GA_SplittableRange &r)
2470  {
2471  GA_Offset start;
2472  GA_Offset end;
2473  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2474  {
2475  exint targeti;
2476  exint piece_elementi;
2477  exint piece_element_count;
2478  const GA_OffsetList *piece_offset_list;
2479  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2480  &piece_offset_list, start_offset, piece_offset_starts, piece_offset_starts_end,
2481  num_target_points, target_to_piecei, piece_data, owneri,
2482  &source_offset_list, source_offset_list.size());
2483 
2484  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2485  {
2486  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
2487  output_group->setElement(dest_off, source_group->contains(source_off));
2488 
2489  guIteratePieceElement(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2490  num_target_points, target_to_piecei, piece_data, owneri, piece_offset_list);
2491  }
2492  }
2493  };
2494  if (copy_source_attribs_in_parallel)
2495  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owneri], functor);
2496  else
2497  functor(output_splittable_ranges[owneri]);
2498  }
2499  }
2500 
2501  // Don't forget the edge groups, either.
2502  // FIXME: This doesn't work for the piece attribute case!!!
2503  GA_PageArray<exint,1> source_to_sourcei;
2504  bool all_source_points;
2505  if (output_geo->edgeGroups().entries() > 0 && piece_data == nullptr)
2506  {
2507  // We need a source-point-offset-to-source-point-in-group mapping.
2508  const GA_OffsetList &source_point_list = source_offset_lists[GA_ATTRIB_POINT];
2509  all_source_points = source_point_list.isSame(source->getPointMap().getOffsetFromIndexList());
2510  if (!all_source_points)
2511  {
2512  source_to_sourcei.setSize(source->getNumPointOffsets(), exint(-1));
2513  for (exint i = 0, n = source_point_list.size(); i < n; ++i)
2514  {
2515  source_to_sourcei.set(source_point_list[i], i);
2516  }
2517  }
2518  }
2519  for (auto it = output_geo->edgeGroups().beginTraverse(); piece_data == nullptr && !it.atEnd(); ++it)
2520  {
2521  GA_EdgeGroup *output_group = it.group();
2522  UT_ASSERT_MSG(!output_group->isInternal(),
2523  "GUremoveUnnecessaryAttribs removes internal groups and "
2524  "GUaddAttributesFromSourceOrTarget doesn't add them");
2525  const GA_EdgeGroup *source_group = source->findEdgeGroup(output_group->getName());
2526  UT_ASSERT(source_group);
2527 
2528  if (cache != nullptr)
2529  {
2530  // If !topology_changed and the source_group data ID hasn't changed since last time, don't re-copy.
2531  if (!topology_changed)
2532  {
2533  auto it = cache->mySourceEdgeGroupDataIDs.find(source_group->getName());
2534  if (!it.atEnd() && it->second != GA_INVALID_DATAID && it->second == source_group->getDataId())
2535  continue;
2536  }
2537  cache->mySourceEdgeGroupDataIDs[source_group->getName()] = source_group->getDataId();
2538  }
2539  output_group->bumpDataId();
2540 
2541  // There are two main possible approaches:
2542  // A) Iterate through source_group, checking whether both points are in source_pointgroup (if source_pointgroup is non-null)
2543  // B) Iterate through all edges of one copy in output_geo, checking whether corresponding edges are in source_group
2544  // After either, the results from the first copy could be easily duplicated for other copies,
2545  // but which one is more efficient depends on the portion of source_group that's in the output
2546  // and the portion of output_geo that is not in source_group.
2547  // I've semi-arbitrarily chosen A, under the assumption that most edge groups are a small
2548  // portion of the total edges in the geometry and that the most common case is to copy
2549  // all of the input geometry.
2550 
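 // Informally, the cost of A is proportional to the number of edges in source_group (times
 // ncopies for the duplication below), while B would be proportional to the number of edges in
 // a single output copy, so A wins when the group is sparse relative to the copied geometry.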
2551  const exint source_point_count = source_offset_lists[GA_ATTRIB_POINT].size();
2552 
2553  for (auto it = source_group->begin(); !it.atEnd(); ++it)
2554  {
2555  const GA_Edge source_edge = it.getEdge();
2556  GA_Index index0;
2557  GA_Index index1;
2558  if (all_source_points)
2559  {
2560  index0 = source->pointIndex(source_edge.p0());
2561  index1 = source->pointIndex(source_edge.p1());
2562  }
2563  else
2564  {
2565  index0 = GA_Index(source_to_sourcei.get(source_edge.p0()));
2566  index1 = GA_Index(source_to_sourcei.get(source_edge.p1()));
2567 
2568  // Both of the points must be in the source point list.
2569  if (index0 == -1 || index1 == -1)
2570  continue;
2571  }
2572  GA_Edge output_edge(
2573  output_geo->pointOffset(index0),
2574  output_geo->pointOffset(index1));
2575 
2576  UT_ASSERT(num_target_points > 0);
2577  output_group->add(output_edge);
2578 
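 // The duplication below relies on each copy's points having been appended as one contiguous
 // block, so the matching edge in copy c is the first copy's edge shifted by c*source_point_count.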
2579  for (exint copy = 1; copy < num_target_points; ++copy)
2580  {
2581  output_edge.p0() += source_point_count;
2582  output_edge.p1() += source_point_count;
2583  output_group->add(output_edge);
2584  }
2585  }
2586  }
2587 
2588  // If we're transforming, apply the transform to transforming primitives like in GEO_Detail::transform
2589  if (!no_transforms && output_geo->hasTransformingPrimitives() && (topology_changed || transforms_changed))
2590  {
2591  output_geo->getPrimitiveList().bumpDataId();
2592 
2593  UT_ASSERT(cache != nullptr);
2594 
2595  const exint *piece_offset_starts = nullptr;
2596  const exint *piece_offset_starts_end = nullptr;
2597  if (owner_piece_offset_starts)
2598  {
2599  auto &array = owner_piece_offset_starts[GA_ATTRIB_PRIMITIVE];
2600  piece_offset_starts = array.getArray();
2601  piece_offset_starts_end = array.getArray() + array.size();
2602  }
2603 
2604  const GA_OffsetList &source_prim_list = source_offset_lists[GA_ATTRIB_PRIMITIVE];
2605  const UT_Matrix3D *transform_matrices_3d = cache->myTransformMatrices3D.get();
2606  const UT_Vector3D *transform_translates_3d = cache->myTransformTranslates3D.get();
2607  auto &&functor = [transform_matrices_3d,transform_translates_3d,output_geo,
2608  piece_offset_starts,piece_offset_starts_end,target_to_piecei,piece_data,
2609  num_target_points,topology_changed,source,&source_prim_list](const GA_SplittableRange &r)
2610  {
2611  const exint source_prim_count = source_prim_list.size();
2612  GA_Offset start_primoff = output_geo->primitiveOffset(GA_Index(0));
2613  GA_Offset start;
2614  GA_Offset end;
2615  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2616  {
2617  exint targeti;
2618  exint piece_elementi;
2619  exint piece_element_count;
2620  const GA_OffsetList *piece_offset_list;
2621  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2622  &piece_offset_list, start_primoff, piece_offset_starts, piece_offset_starts_end,
2623  num_target_points, target_to_piecei, piece_data, GA_ATTRIB_PRIMITIVE,
2624  &source_prim_list, source_prim_count);
2625 
2627  if (transform_matrices_3d)
2628  transform = UT_Matrix4D(transform_matrices_3d[targeti]);
2629  else
2630  transform.identity();
2631  transform.setTranslates(transform_translates_3d[targeti]);
2632 
2633  // TODO: Remove the cast to UT_Matrix4 once
2634  // GEO_Primitive::transform accepts UT_Matrix4D.
2635  UT_Matrix4 transformf(transform);
2636 
2637  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2638  {
2639  GEO_Primitive *prim = output_geo->getGEOPrimitive(dest_off);
2640 
2641  if (!topology_changed)
2642  {
2643  // We haven't re-copied the untransformed primitive,
2644  // so we need to copy it now. Note that VDB primitives
2645  // sometimes transform actual voxel data when transformed,
2646  // so we can't just call setLocalTransform.
2647  GA_Offset source_off = (*piece_offset_list)[piece_elementi];
2648  const GA_Primitive *source_prim = source->getPrimitive(source_off);
2649  prim->copySubclassData(source_prim);
2650  }
2651 
2652  prim->transform(transformf);
2653 
2654  // The iteration below is similar to guIteratePieceElement above,
2655  // but also assembling target transforms.
2656  ++piece_elementi;
2657  // NOTE: This must be while instead of if, because there can be zero primitives in a piece.
2658  while (piece_elementi >= piece_element_count)
2659  {
2660  piece_elementi = 0;
2661  ++targeti;
2662 
2663  if (targeti >= num_target_points)
2664  break;
2665 
2666  if (transform_matrices_3d)
2667  transform = UT_Matrix4D(transform_matrices_3d[targeti]);
2668  else
2669  transform.identity();
2670  transform.setTranslates(transform_translates_3d[targeti]);
2671  // TODO: Remove the cast to UT_Matrix4 once
2672  // GEO_Primitive::transform accepts UT_Matrix4D.
2673  transformf = UT_Matrix4(transform);
2674 
2675  if (piece_offset_starts != nullptr)
2676  {
2677  exint piecei = target_to_piecei[targeti];
2678  const GU_CopyToPointsCache::PieceData &current_piece = piece_data[piecei];
2679  piece_offset_list = &current_piece.mySourceOffsetLists[GA_ATTRIB_PRIMITIVE];
2680  piece_element_count = piece_offset_list->size();
2681  }
2682  }
2683  }
2684  }
2685  };
2686  if (copy_source_attribs_in_parallel)
2687  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[GA_ATTRIB_PRIMITIVE], functor);
2688  else
2689  functor(output_splittable_ranges[GA_ATTRIB_PRIMITIVE]);
2690  }
2691 
2692  if (copy_source_attribs_in_parallel)
2693  task_group.wait();
2694 }
2695 
2696 void
2698  GU_Detail *const output_geo,
2699  const GA_SplittableRange *const output_splittable_ranges,
2700  const exint ncopies,
2701  GU_CopyToPointsCache *const cache,
2702  const exint source_point_count,
2703  const exint source_vertex_count,
2704  const exint source_prim_count,
2705  const exint *const num_target_attribs,
2706  const GA_OffsetListRef &target_point_list,
2707  const GU_Detail *const target,
2708  GU_CopyToPointsCache::TargetAttribInfoMap &target_attrib_info,
2709  GU_CopyToPointsCache::TargetAttribInfoMap &target_group_info,
2710  const bool topology_changed,
2711  const exint *const target_to_piecei,
2712  const UT_Array<exint> *const owner_piece_offset_starts,
2713  const GU_CopyToPointsCache::PieceData *const piece_data)
2714 {
2715  if (ncopies <= 0)
2716  return;
2717 
2718  using AttribCombineMethod = GU_CopyToPointsCache::AttribCombineMethod;
2719 
2720  const exint num_target_output_elements =
2721  output_geo->getNumVertices() * num_target_attribs[GA_ATTRIB_VERTEX] +
2722  output_geo->getNumPoints() * num_target_attribs[GA_ATTRIB_POINT] +
2723  output_geo->getNumPrimitives() * num_target_attribs[GA_ATTRIB_PRIMITIVE];
2724  const bool copy_target_attribs_in_parallel = num_target_output_elements >= 4096;
2725 
2726  if (num_target_output_elements <= 0)
2727  return;
2728 
2729  exint source_element_counts[3] =
2730  {
2731  source_vertex_count,
2732  source_point_count,
2733  source_prim_count
2734  };
2735  SYS_STATIC_ASSERT_MSG(GA_ATTRIB_VERTEX == 0 && GA_ATTRIB_POINT == 1 && GA_ATTRIB_PRIMITIVE == 2,
2736  "Arrays above and loop below are assuming the order of GA_AttributeOwner enum");
2737 
2738  const exint num_target_points = target_point_list.size();
2739 
2740  UT_TaskGroup task_group;
2741  for (auto it = target_attrib_info.begin(); !it.atEnd(); ++it)
2742  {
2743  const UT_StringHolder &name = it->first;
2744  const GA_Attribute *target_attrib = target->findPointAttribute(name);
2745  UT_ASSERT(target_attrib != nullptr);
2746 
2747  const GA_AttributeOwner owner = it->second.myCopyTo;
2748  GA_Attribute *output_attrib = output_geo->findAttribute(owner, name);
2749  UT_ASSERT(output_attrib != nullptr);
2750 
2751  const GA_IndexMap &index_map = output_geo->getIndexMap(owner);
2752  if (index_map.indexSize() == 0)
2753  continue;
2754 
2755  // To get the start offset of the copied geometry without the overhead of using a GA_Iterator
2756  // (which copies the GA_Range), we just use iterateCreate and iterateRewind directly.
2757  GA_IteratorState iterator_state;
2758  output_splittable_ranges[owner].iterateCreate(iterator_state);
2759  GA_Offset start_offset;
2760  GA_Offset first_block_end;
2761  output_splittable_ranges[owner].iterateRewind(iterator_state, start_offset, first_block_end);
2762 
2763  exint num_elements_per_copy = source_element_counts[owner];
2764  const AttribCombineMethod method = it->second.myCombineMethod;
2765  if (method == AttribCombineMethod::NONE)
2766  continue;
2767 
2768  if (!topology_changed)
2769  {
2770  // If unchanged since last cook, no need to re-apply operation.
2771  // NOTE: GUcopyAttributesFromSource removed cache entries for
2772  // attributes that were re-copied from source.
2773  auto prev_it = cache->myTargetAttribInfo.find(name);
2774  if (!prev_it.atEnd() &&
2775  prev_it->second.myDataID == it->second.myDataID &&
2776  prev_it->second.myCopyTo == it->second.myCopyTo &&
2777  prev_it->second.myCombineMethod == it->second.myCombineMethod)
2778  {
2779  continue;
2780  }
2781  }
2782 
2783  const exint *piece_offset_starts = nullptr;
2784  const exint *piece_offset_starts_end = nullptr;
2785  if (owner_piece_offset_starts)
2786  {
2787  auto &array = owner_piece_offset_starts[owner];
2788  piece_offset_starts = array.getArray();
2789  piece_offset_starts_end = array.getArray() + array.size();
2790  }
2791 
2792  output_attrib->bumpDataId();
2793 
2794  if (method == AttribCombineMethod::COPY)
2795  {
2796  // Copy attribute values from target to output_geo
2797  // FIXME: Specialize for numeric and string attributes to reduce
2798  // performance impact of the GA_Attribute::copy() virtual function calls.
2799  auto &&functor = [output_attrib,target_attrib,&target_point_list,num_elements_per_copy,
2800  start_offset,piece_offset_starts,piece_offset_starts_end,
2801  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
2802  {
2803  GA_Offset start;
2804  GA_Offset end;
2805  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2806  {
2807  exint targeti;
2808  exint piece_elementi;
2809  exint piece_element_count;
2810  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2811  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
2812  num_target_points, target_to_piecei, piece_data, owner,
2813  nullptr, num_elements_per_copy);
2814 
2815  GA_Offset target_off = target_point_list[targeti];
2816 
2817  // FIXME: Find longer contiguous spans to copy, for better performance.
2818  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2819  {
2820  output_attrib->copy(dest_off, *target_attrib, target_off);
2821 
2822  guIteratePieceElementOff(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2823  num_target_points, target_to_piecei, piece_data, owner, target_off, target_point_list);
2824  }
2825  }
2826  };
2827  if (copy_target_attribs_in_parallel)
2828  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
2829  else
2830  functor(output_splittable_ranges[owner]);
2831  }
2832  else
2833  {
2834  GA_ATINumeric *output_numeric = UTverify_cast<GA_ATINumeric *>(output_attrib);
2835  const GA_ATINumeric *target_numeric = UTverify_cast<const GA_ATINumeric *>(target_attrib);
2836  const exint min_tuple_size = SYSmin(output_numeric->getTupleSize(), target_numeric->getTupleSize());
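 // Only the components both attributes have are combined; any extra components in the wider
 // attribute keep the values already copied from the source.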
2837  GA_PageArray<void, -1> &output_data = output_numeric->getData();
2838  const GA_PageArray<void, -1> &target_data = target_numeric->getData();
2839  if (method == AttribCombineMethod::MULTIPLY)
2840  {
2841  // Multiply by target attribute values: output_geo (i.e. source) * target.
2842  auto &&functor = [&output_data,&target_data,&target_point_list,num_elements_per_copy,min_tuple_size,
2843  start_offset,piece_offset_starts,piece_offset_starts_end,
2844  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
2845  {
2846  GA_Offset start;
2847  GA_Offset end;
2848  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2849  {
2850  exint targeti;
2851  exint piece_elementi;
2852  exint piece_element_count;
2853  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2854  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
2855  num_target_points, target_to_piecei, piece_data, owner,
2856  nullptr, num_elements_per_copy);
2857 
2858  GA_Offset target_off = target_point_list[targeti];
2859 
2860  // FIXME: Find longer contiguous spans to copy, for better performance.
2861  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2862  {
2863  for (exint component = 0; component < min_tuple_size; ++component)
2864  {
2865  // FIXME: This is just mimicking what the old code did, but
2866  // it doesn't maintain full precision for 64-bit integers,
2867  // so we can do better.
2868  // The performance probably isn't great, either.
2869  // TODO: Maybe we should handle transform matrices and quaternions
2870  // in a special way, too.
2871  fpreal64 existing_value = output_data.get<fpreal64>(dest_off, component);
2872  fpreal64 target_value = target_data.get<fpreal64>(target_off, component);
2873  fpreal64 product = existing_value*target_value;
2874  output_data.set(dest_off, component, product);
2875  }
2876 
2877  guIteratePieceElementOff(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2878  num_target_points, target_to_piecei, piece_data, owner, target_off, target_point_list);
2879  }
2880  }
2881  };
2882  if (copy_target_attribs_in_parallel)
2883  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
2884  else
2885  functor(output_splittable_ranges[owner]);
2886  }
2887  else if (method == AttribCombineMethod::ADD)
2888  {
2889  // Add target attribute values: output_geo (i.e. source) + target.
2890  auto &&functor = [&output_data,&target_data,&target_point_list,num_elements_per_copy,min_tuple_size,
2891  start_offset,piece_offset_starts,piece_offset_starts_end,
2892  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
2893  {
2894  GA_Offset start;
2895  GA_Offset end;
2896  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2897  {
2898  exint targeti;
2899  exint piece_elementi;
2900  exint piece_element_count;
2901  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2902  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
2903  num_target_points, target_to_piecei, piece_data, owner,
2904  nullptr, num_elements_per_copy);
2905 
2906  GA_Offset target_off = target_point_list[targeti];
2907 
2908  // FIXME: Find longer contiguous spans to copy, for better performance.
2909  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2910  {
2911  for (exint component = 0; component < min_tuple_size; ++component)
2912  {
2913  // FIXME: This is just mimicking what the old code did, but
2914  // it doesn't maintain full precision for 64-bit integers,
2915  // so we can do better.
2916  // The performance probably isn't great, either.
2917  fpreal64 existing_value = output_data.get<fpreal64>(dest_off, component);
2918  fpreal64 target_value = target_data.get<fpreal64>(target_off, component);
2919  fpreal64 sum = existing_value + target_value;
2920  output_data.set(dest_off, component, sum);
2921  }
2922 
2923  guIteratePieceElementOff(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2924  num_target_points, target_to_piecei, piece_data, owner, target_off, target_point_list);
2925  }
2926  }
2927  };
2928  if (copy_target_attribs_in_parallel)
2929  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
2930  else
2931  functor(output_splittable_ranges[owner]);
2932  }
2933  else // (method == AttribCombineMethod::SUBTRACT)
2934  {
2935  // Subtract target attribute values: output_geo (i.e. source or zero) - target.
2936  auto &&functor = [&output_data,&target_data,&target_point_list,num_elements_per_copy,min_tuple_size,
2937  start_offset,piece_offset_starts,piece_offset_starts_end,
2938  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
2939  {
2940  GA_Offset start;
2941  GA_Offset end;
2942  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
2943  {
2944  exint targeti;
2945  exint piece_elementi;
2946  exint piece_element_count;
2947  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
2948  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
2949  num_target_points, target_to_piecei, piece_data, owner,
2950  nullptr, num_elements_per_copy);
2951 
2952  GA_Offset target_off = target_point_list[targeti];
2953 
2954  // FIXME: Find longer contiguous spans to copy, for better performance.
2955  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
2956  {
2957  for (exint component = 0; component < min_tuple_size; ++component)
2958  {
2959  // FIXME: This is just mimicking what the old code did, but
2960  // it doesn't maintain full precision for 64-bit integers,
2961  // so we can do better.
2962  // The performance probably isn't great, either.
2963  fpreal64 existing_value = output_data.get<fpreal64>(dest_off, component);
2964  fpreal64 target_value = target_data.get<fpreal64>(target_off, component);
2965  fpreal64 difference = existing_value - target_value;
2966  output_data.set(dest_off, component, difference);
2967  }
2968 
2969  guIteratePieceElementOff(piece_elementi, piece_element_count, targeti, piece_offset_starts,
2970  num_target_points, target_to_piecei, piece_data, owner, target_off, target_point_list);
2971  }
2972  }
2973  };
2974  if (copy_target_attribs_in_parallel)
2975  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
2976  else
2977  functor(output_splittable_ranges[owner]);
2978  }
2979  }
2980  }
2981  for (auto it = target_group_info.begin(); !it.atEnd(); ++it)
2982  {
2983  const UT_StringHolder &name = it->first;
2984  const GA_PointGroup *target_group = target->findPointGroup(name);
2985  UT_ASSERT(target_group != nullptr);
2986 
2987  const GA_AttributeOwner owner = it->second.myCopyTo;
2988  GA_ElementGroup *output_group = output_geo->findElementGroup(owner, name);
2989  UT_ASSERT(output_group != nullptr);
2990 
2991  const GA_IndexMap &index_map = output_geo->getIndexMap(owner);
2992  if (index_map.indexSize() == 0)
2993  continue;
2994 
2995  // To get the start offset of the copied geometry without the overhead of using a GA_Iterator
2996  // (which copies the GA_Range), we just use iterateCreate and iterateRewind directly.
2997  GA_IteratorState iterator_state;
2998  output_splittable_ranges[owner].iterateCreate(iterator_state);
2999  GA_Offset start_offset;
3000  GA_Offset first_block_end;
3001  output_splittable_ranges[owner].iterateRewind(iterator_state, start_offset, first_block_end);
3002 
3003  const AttribCombineMethod method = it->second.myCombineMethod;
3004  if (method == AttribCombineMethod::NONE)
3005  continue;
3006 
3007  if (!topology_changed)
3008  {
3009  // If unchanged since last cook, no need to re-apply operation.
3010  // NOTE: GUcopyAttributesFromSource removed cache entries for
3011  // attributes that were re-copied from source.
3012  auto prev_it = cache->myTargetAttribInfo.find(name);
3013  if (!prev_it.atEnd() &&
3014  prev_it->second.myDataID == it->second.myDataID &&
3015  prev_it->second.myCopyTo == it->second.myCopyTo &&
3016  prev_it->second.myCombineMethod == it->second.myCombineMethod)
3017  {
3018  continue;
3019  }
3020  }
3021 
3022  const exint *piece_offset_starts = nullptr;
3023  const exint *piece_offset_starts_end = nullptr;
3024  if (owner_piece_offset_starts)
3025  {
3026  auto &array = owner_piece_offset_starts[owner];
3027  piece_offset_starts = array.getArray();
3028  piece_offset_starts_end = array.getArray() + array.size();
3029  }
3030 
3031  output_group->bumpDataId();
3032  output_group->invalidateGroupEntries();
3033 
3034  if (method == AttribCombineMethod::COPY)
3035  {
3036  // Copy group membership from target to output_geo
3037  exint num_elements_per_copy = source_element_counts[owner];
3038  auto &&functor = [output_group,target_group,&target_point_list,num_elements_per_copy,
3039  start_offset,piece_offset_starts,piece_offset_starts_end,
3040  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
3041  {
3042  GA_Offset start;
3043  GA_Offset end;
3044  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
3045  {
3046  exint targeti;
3047  exint piece_elementi;
3048  exint piece_element_count;
3049  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
3050  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
3051  num_target_points, target_to_piecei, piece_data, owner,
3052  nullptr, num_elements_per_copy);
3053 
3054  GA_Offset target_off = target_point_list[targeti];
3055  bool target_in_group = target_group->contains(target_off);
3056 
3057  // FIXME: Find longer contiguous spans to copy, for better performance.
3058  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
3059  {
3060  output_group->setElement(dest_off, target_in_group);
3061 
3062  guIteratePieceElementGroup(piece_elementi, piece_element_count, targeti, piece_offset_starts,
3063  num_target_points, target_to_piecei, piece_data, owner, target_in_group, target_point_list, target_group);
3064  }
3065  }
3066  };
3067  if (copy_target_attribs_in_parallel)
3068  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
3069  else
3070  functor(output_splittable_ranges[owner]);
3071  }
3072  else if (method == AttribCombineMethod::MULTIPLY)
3073  {
3074  // Intersect group membership from target to output_geo
3075  exint num_elements_per_copy = source_element_counts[owner];
3076  auto &&functor = [output_group,target_group,&target_point_list,num_elements_per_copy,
3077  start_offset,piece_offset_starts,piece_offset_starts_end,
3078  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
3079  {
3080  GA_Offset start;
3081  GA_Offset end;
3082  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
3083  {
3084  exint targeti;
3085  exint piece_elementi;
3086  exint piece_element_count;
3087  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
3088  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
3089  num_target_points, target_to_piecei, piece_data, owner,
3090  nullptr, num_elements_per_copy);
3091 
3092  GA_Offset target_off = target_point_list[targeti];
3093  bool target_in_group = target_group->contains(target_off);
3094 
3095  // FIXME: Find longer contiguous spans to copy, for better performance.
3096  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
3097  {
3098  if (!target_in_group)
3099  output_group->setElement(dest_off, false);
3100 
3101  guIteratePieceElementGroup(piece_elementi, piece_element_count, targeti, piece_offset_starts,
3102  num_target_points, target_to_piecei, piece_data, owner, target_in_group, target_point_list, target_group);
3103  }
3104  }
3105  };
3106  if (copy_target_attribs_in_parallel)
3107  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
3108  else
3109  functor(output_splittable_ranges[owner]);
3110  }
3111  else if (method == AttribCombineMethod::ADD)
3112  {
3113  // Union group membership from target to output_geo
3114  exint num_elements_per_copy = source_element_counts[owner];
3115  auto &&functor = [output_group,target_group,&target_point_list,num_elements_per_copy,
3116  start_offset,piece_offset_starts,piece_offset_starts_end,
3117  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
3118  {
3119  GA_Offset start;
3120  GA_Offset end;
3121  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
3122  {
3123  exint targeti;
3124  exint piece_elementi;
3125  exint piece_element_count;
3126  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
3127  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
3128  num_target_points, target_to_piecei, piece_data, owner,
3129  nullptr, num_elements_per_copy);
3130 
3131  GA_Offset target_off = target_point_list[targeti];
3132  bool target_in_group = target_group->contains(target_off);
3133 
3134  // FIXME: Find longer contiguous spans to copy, for better performance.
3135  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
3136  {
3137  if (target_in_group)
3138  output_group->setElement(dest_off, true);
3139 
3140  guIteratePieceElementGroup(piece_elementi, piece_element_count, targeti, piece_offset_starts,
3141  num_target_points, target_to_piecei, piece_data, owner, target_in_group, target_point_list, target_group);
3142  }
3143  }
3144  };
3145  if (copy_target_attribs_in_parallel)
3146  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
3147  else
3148  functor(output_splittable_ranges[owner]);
3149  }
3150  else // if (method == AttribCombineMethod::SUBTRACT)
3151  {
3152  // Subtract group membership of target from output_geo
3153  exint num_elements_per_copy = source_element_counts[owner];
3154  auto &&functor = [output_group,target_group,&target_point_list,num_elements_per_copy,
3155  start_offset,piece_offset_starts,piece_offset_starts_end,
3156  num_target_points,target_to_piecei,piece_data,owner](const GA_SplittableRange &r)
3157  {
3158  GA_Offset start;
3159  GA_Offset end;
3160  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
3161  {
3162  exint targeti;
3163  exint piece_elementi;
3164  exint piece_element_count;
3165  guFindStartInTarget(start, targeti, piece_elementi, piece_element_count,
3166  nullptr, start_offset, piece_offset_starts, piece_offset_starts_end,
3167  num_target_points, target_to_piecei, piece_data, owner,
3168  nullptr, num_elements_per_copy);
3169 
3170  GA_Offset target_off = target_point_list[targeti];
3171  bool target_in_group = target_group->contains(target_off);
3172 
3173  // FIXME: Find longer contiguous spans to copy, for better performance.
3174  for (GA_Offset dest_off = start; dest_off < end; ++dest_off)
3175  {
3176  if (target_in_group)
3177  output_group->setElement(dest_off, false);
3178 
3179  guIteratePieceElementGroup(piece_elementi, piece_element_count, targeti, piece_offset_starts,
3180  num_target_points, target_to_piecei, piece_data, owner, target_in_group, target_point_list, target_group);
3181  }
3182  }
3183  };
3184  if (copy_target_attribs_in_parallel)
3185  UTparallelForRunInTaskGroup(task_group, output_splittable_ranges[owner], functor);
3186  else
3187  functor(output_splittable_ranges[owner]);
3188  }
3189  }
3190 
3191  if (copy_target_attribs_in_parallel)
3192  task_group.wait();
3193 
3194  cache->myTargetAttribInfo.swap(target_attrib_info);
3195  cache->myTargetGroupInfo.swap(target_group_info);
3196  target_attrib_info.clear();
3197  target_group_info.clear();
3198 }
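// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original GU_Copy2.C): what the numeric
// combine loops above amount to per component. The fpreal64 round-trip and
// min_tuple_size bound match the code above; the attribute values in the
// worked example are assumptions for illustration only.
//
//   // MULTIPLY, e.g. source Cd (1, 0.5, 0.25) * target Cd (2, 2, 2) -> (2, 1, 0.5)
//   // ADD and SUBTRACT follow the same pattern with + and -.
//   for (exint c = 0; c < min_tuple_size; ++c)
//   {
//       fpreal64 v = output_data.get<fpreal64>(dest_off, c);   // value already copied from source
//       fpreal64 t = target_data.get<fpreal64>(target_off, c); // value on the target point
//       output_data.set(dest_off, c, v * t);
//   }
//
// Groups reuse the same enum: COPY overwrites membership, MULTIPLY keeps only
// elements whose target point is in the group (intersection), ADD adds them
// (union), and SUBTRACT removes them.
// ---------------------------------------------------------------------------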
3199 
3200 void
3201 GUupdatePackedPrimTransforms(
3202  GU_Detail *output_geo,
3203  GU_CopyToPointsCache *cache,
3204  const bool had_transform_matrices,
3205  const exint num_packed_prims,
3206  const UT_Vector3 *const constant_pivot)
3207 {
3208  UT_ASSERT(num_packed_prims == output_geo->getNumPoints());
3209  UT_ASSERT(num_packed_prims == output_geo->getNumPrimitives());
3210  if (num_packed_prims <= 0)
3211  return;
3212 
3213  const UT_Matrix3D *const transform_matrices_3d = cache->myTransformMatrices3D.get();
3214  const UT_Vector3D *const transform_translates_3d = cache->myTransformTranslates3D.get();
3215  GA_RWHandleV3 output_pos3f(output_geo->getP());
3216  GA_RWHandleV3D output_pos3d(output_geo->getP());
3217  GA_Offset start_ptoff = output_geo->pointOffset(GA_Index(0));
3218  GA_Offset start_primoff = output_geo->primitiveOffset(GA_Index(0));
3219  auto &&functor = [output_geo,
3220  start_ptoff,start_primoff,transform_translates_3d,transform_matrices_3d,
3221  output_pos3f,output_pos3d,had_transform_matrices,constant_pivot](const GA_Range &r)
3222  {
3223  GA_Offset start;
3224  GA_Offset end;
3225  for (GA_Iterator it(r); it.fullBlockAdvance(start, end); )
3226  {
3227  if (transform_matrices_3d || had_transform_matrices)
3228  {
3229  UT_Matrix3D identity;
3230  if (!transform_matrices_3d)
3231  identity.identity();
3232 
3233  exint transformi = start - start_primoff;
3234  for (GA_Offset primoff = start; primoff < end; ++primoff, ++transformi)
3235  {
3236  GA_Primitive *prim = output_geo->getPrimitive(primoff);
3237  GU_PrimPacked *packed_prim = UTverify_cast<GU_PrimPacked *>(prim);
3238  if (transform_matrices_3d)
3239  {
3240  const UT_Matrix3D &transform = transform_matrices_3d[transformi];
3241  packed_prim->setLocalTransform(transform);
3242  }
3243  else // had_transform_matrices
3244  {
3245  packed_prim->setLocalTransform(identity);
3246  }
3247  }
3248  }
3249 
3250  if (!constant_pivot)
3251  {
3252  // Apply pivots from primitives to P.
3253  exint transformi = start - start_primoff;
3254  for (GA_Offset primoff = start; primoff < end; ++primoff, ++transformi)
3255  {
3256  GA_Primitive *prim = output_geo->getPrimitive(primoff);
3257  GU_PrimPacked *packed_prim = UTverify_cast<GU_PrimPacked *>(prim);
3258  GA_Offset ptoff = start_ptoff + transformi;
3259  UT_Vector3 pivot;
3260  packed_prim->getPivot(pivot);
3261  // Need to transform pivot position before adding it to P
3262  UT_Vector3D transformed_pivot(pivot);
3263  if (transform_matrices_3d)
3264  transformed_pivot = transformed_pivot*transform_matrices_3d[transformi];
3265  if (transform_translates_3d)
3266  transformed_pivot += transform_translates_3d[transformi];
3267  output_pos3d.set(ptoff, transformed_pivot);
3268  }
3269  continue;
3270  }
3271 
3272  // NOTE: This conversion to ptoff relies on there being one point per primitive,
3273  // on both being contiguous, and on both being in the same order.
3274  exint transformi = start - start_primoff;
3275  GA_Offset local_start_ptoff = start_ptoff + transformi;
3276  GA_Offset local_end_ptoff = start_ptoff + (end-start_primoff);
3277  if (!transform_translates_3d)
3278  {
3279  for (GA_Offset ptoff = local_start_ptoff; ptoff < local_end_ptoff; ++ptoff, ++transformi)
3280  {
3281  // Need to transform pivot position before adding it to P
3282  UT_Vector3D transformed_pivot(*constant_pivot);
3283  if (transform_matrices_3d)
3284  transformed_pivot = transformed_pivot*transform_matrices_3d[transformi];
3285 
3286  output_pos3d.set(ptoff, transformed_pivot);
3287  }
3288  }
3289  else if (constant_pivot->isZero())
3290  {
3291  if (output_pos3f->getStorage() == GA_STORE_REAL64)
3292  {
3293  for (GA_Offset ptoff = local_start_ptoff; ptoff < local_end_ptoff; ++ptoff, ++transformi)
3294  {
3295  output_pos3d.set(ptoff, transform_translates_3d[transformi]);
3296  }
3297  }
3298  else
3299  {
3300  for (GA_Offset ptoff = local_start_ptoff; ptoff < local_end_ptoff; ++ptoff, ++transformi)
3301  {
3302  output_pos3f.set(ptoff, UT_Vector3F(transform_translates_3d[transformi]));
3303  }
3304  }
3305  }
3306  else
3307  {
3308  for (GA_Offset ptoff = local_start_ptoff; ptoff < local_end_ptoff; ++ptoff, ++transformi)
3309  {
3310  // Need to transform pivot position before adding it to P
3311  UT_Vector3D transformed_pivot(*constant_pivot);
3312  if (transform_matrices_3d)
3313  transformed_pivot = transformed_pivot*transform_matrices_3d[transformi];
3314 
3315  output_pos3d.set(ptoff, transform_translates_3d[transformi] + transformed_pivot);
3316  }
3317  }
3318  }
3319  };
3320 
3321  constexpr exint PARALLEL_THRESHOLD = 2048;
3322  if (num_packed_prims >= PARALLEL_THRESHOLD)
3323  UTparallelForLightItems(GA_SplittableRange(output_geo->getPrimitiveRange()), functor);
3324  else
3325  functor(output_geo->getPrimitiveRange());
3326 }
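// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original GU_Copy2.C): the position
// written for each packed primitive's point above is the pivot pushed through
// the per-instance transform, using the row-vector convention of the code
// (transformed_pivot = transformed_pivot * matrix):
//
//   P[i] = pivot * transform_matrices_3d[i] + transform_translates_3d[i]
//
// with the matrix treated as identity and the translate as zero when the
// corresponding array is null. For example, a pivot of (1, 2, 3) under a
// uniform scale of 2 with a translate of (0, 0, 5) yields P = (2, 4, 11).
// ---------------------------------------------------------------------------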
3327 
3328 void
3329 GUhandleTargetAttribsForPackedPrims(
3330  GU_Detail *output_geo,
3331  GU_CopyToPointsCache *cache,
3332  const bool topology_changed,
3333  const bool had_transform_matrices,
3334  const GU_Detail *const target,
3335  const GA_OffsetListRef &target_point_list,
3336  GU_CopyToPointsCache::TargetAttribInfoMap &target_attrib_info,
3337  GU_CopyToPointsCache::TargetAttribInfoMap &target_group_info,
3338  const UT_Vector3 *const constant_pivot)
3339 {
3340  // *** Transform ***
3341 
3342  exint num_target_points = target_point_list.size();
3343  GUupdatePackedPrimTransforms(output_geo, cache, had_transform_matrices, num_target_points, constant_pivot);
3344 
3345  // Remove any attributes from output_geo that are not being applied from target.
3346  // (They've already all been removed if topology_changed.)
3347  if (!topology_changed)
3348  {
3349  GUremoveUnnecessaryAttribs(
3350  output_geo,
3351  nullptr,
3352  target,
3353  cache,
3354  &target_attrib_info,
3355  &target_group_info);
3356  }
3357 
3358  // Add attributes from target that are not in output_geo.
3359  exint num_target_attribs[3] = {0,0,0};
3361  "Array above is assuming the order of GA_AttributeOwner enum");
3362 
3363  GUaddAttributesFromSourceOrTarget(
3364  output_geo,
3365  nullptr,
3366  nullptr,
3367  false,
3368  nullptr,
3369  target,
3370  &target_attrib_info,
3371  &target_group_info,
3372  num_target_attribs);
3373 
3374  // Copy attributes from target points
3375  GA_SplittableRange output_splittable_ranges[3] =
3376  {
3377  GA_SplittableRange(output_geo->getVertexRange()),
3378  GA_SplittableRange(output_geo->getPointRange()),
3379  GA_SplittableRange(output_geo->getPrimitiveRange())
3380  };
3382  "Array above is assuming the order of GA_AttributeOwner enum");
3383 
3384  GUcopyAttributesFromTarget(
3385  output_geo,
3386  output_splittable_ranges,
3387  num_target_points,
3388  cache,
3389  1, 1, 1, // 1 point, 1 vertex, 1 primitive
3390  num_target_attribs,
3391  target_point_list,
3392  target,
3393  target_attrib_info,
3394  target_group_info,
3395  topology_changed);
3396 }
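// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original GU_Copy2.C): in the packed
// case each copy contributes exactly one point, one vertex and one primitive,
// which is why GUcopyAttributesFromTarget is called with counts of 1 above.
// Every output element maps directly back to its target point; for a target
// point attribute destined for points with the COPY method, the effect is
// roughly:
//
//   for (exint i = 0; i < num_target_points; ++i)
//       output_value[point_of_copy(i)] = target_value[target_point_list[i]];
//
// point_of_copy() is purely illustrative; the real work iterates GA_Offset
// blocks through GA_SplittableRange as shown in GUcopyAttributesFromTarget.
// ---------------------------------------------------------------------------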
3397 
3398 void
3399 GUcopyPackAllSame(
3400  GU_Detail *output_geo,
3401  const GEO_ViewportLOD lod,
3402  const GU_CopyToPointsCache::PackedPivot pivot_type,
3403  GU_CopyToPointsCache *cache,
3404  const GU_ConstDetailHandle source_handle,
3405  const GU_Detail *source,
3406  const GA_PointGroup *source_pointgroup,
3407  const GA_PrimitiveGroup *source_primgroup,
3408  bool source_topology_changed,
3409  bool had_transform_matrices,
3410  bool transforms_changed,
3411  const exint num_packed_prims,
3412  const GU_Detail *target,
3413  const GA_OffsetListRef *target_point_list,
3414  GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info,
3415  GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info)
3416 {
3417  const bool topology_changed =
3418  !cache->myPrevPack ||
3419  num_packed_prims != cache->myPrevTargetPtCount ||
3420  output_geo->getNumPoints() != num_packed_prims ||
3421  output_geo->getUniqueId() != cache->myPrevOutputDetailID;
3422  const bool source_changed =
3423  !cache->myPrevPack ||
3424  source->getUniqueId() != cache->myPrevSourceUniqueID ||
3425  source->getMetaCacheCount() != cache->myPrevSourceMetaCacheCount ||
3426  source_topology_changed;
3427  const bool lod_changed = (lod != cache->myPrevViewportLOD);
3428  const bool source_intrinsic_changed =
3429  source_changed ||
3430  pivot_type != cache->myPrevPivotEnum ||
3431  lod_changed;
3432 
3433  // *** Creating Packed Primitives ***
3434 
3435  if (topology_changed)
3436  {
3437  output_geo->clearAndDestroy();
3438 
3439  GUcreateEmptyPackedGeometryPrims(output_geo, num_packed_prims);
3440  }
3441  GA_Offset start_primoff = (output_geo->getNumPrimitives() > 0) ? output_geo->primitiveOffset(GA_Index(0)) : GA_INVALID_OFFSET;
3442 
3443  // *** Updating Content ***
3444 
3445  bool centroid_pivot = (pivot_type == GU_CopyToPointsCache::PackedPivot::CENTROID);
3446 
3447  UT_Vector3 pivot(0,0,0);
3448  if ((source_intrinsic_changed || source_changed || topology_changed) && num_packed_prims > 0)
3449  {
3450  // Create a packed geometry implementation
3451  const GU_PackedGeometry *packed_geo = nullptr;
3452  GU_PackedGeometry *packed_geo_nc = nullptr;
3453  const bool impl_changed = (source_changed || topology_changed);
3454  if (impl_changed)
3455  {
3456  packed_geo_nc = new GU_PackedGeometry();
3457  if (!source_primgroup && !source_pointgroup)
3458  {
3459  // Easy case: just instance the source input geometry.
3460  packed_geo_nc->setDetailPtr(source_handle);
3461  }
3462  else
3463  {
3464  // Hard case: copy the source geometry in the groups.
3465  GU_DetailHandle detail_handle;
3466  GU_Detail *packed_detail = new GU_Detail();
3467  detail_handle.allocateAndSet(packed_detail);
3468  exint source_point_count = source_pointgroup ? source_pointgroup->entries() : source->getNumPoints();
3469  exint source_prim_count = source_primgroup ? source_primgroup->entries() : source->getNumPrimitives();
3470 
3471  GUcreateVertexListAndGeometryFromSource(
3472  packed_detail,
3473  source,
3474  source_point_count,
3475  cache->mySourceVertexCount,
3476  source_prim_count,
3480  source_pointgroup,
3481  source_primgroup,
3482  1);
3483 
3484  exint num_source_attribs[3];
3486  "Array above depends on owners other than detail being less than 3");
3488  packed_detail,
3489  source,
3490  num_source_attribs);
3491 
3492  GA_SplittableRange output_splittable_ranges[3] =
3493  {
3494  GA_SplittableRange(packed_detail->getVertexRange()),
3495  GA_SplittableRange(packed_detail->getPointRange()),
3496  GA_SplittableRange(packed_detail->getPrimitiveRange())
3497  };
3499  "Array above is assuming the order of GA_AttributeOwner enum");
3500 
3501  GUcopyAttributesFromSource(
3502  packed_detail,
3503  output_splittable_ranges,
3504  source,
3505  1,
3506  cache,
3507  cache->mySourceOffsetLists,
3508  num_source_attribs,
3509  true,
3510  false,
3511  false,
3512  true,
3513  true);
3514 
3515  packed_geo_nc->setDetailPtr(detail_handle);
3516  }
3517 
3518  packed_geo = packed_geo_nc;
3519 
3520  // Add all of the reference counter refs at once
3521  intrusive_ptr_add_ref(packed_geo, num_packed_prims);
3522  }
3523  else
3524  {
3525  // source_intrinsic_changed is true, but source_changed is false and topology_changed is false.
3526  GA_Primitive *prim = output_geo->getPrimitive(start_primoff);
3527  GU_PrimPacked *packed_prim = UTverify_cast<GU_PrimPacked *>(prim);
3528  packed_geo = UTverify_cast<const GU_PackedGeometry*>(packed_prim->sharedImplementation());
3529  }
3530 
3531  // Cache the bounds in advance, even if we don't need the box center for the pivot,
3532  // to avoid thread contention downstream when anything requests it.
3533  UT_BoundingBox bbox;
3534  packed_geo->getBoundsCached(bbox);
3535  if (centroid_pivot)
3536  {
3537  if (bbox.isValid())
3538  pivot = bbox.center();
3539  }
3540 
3541  // Set the implementations of all of the primitives at once.
3542  auto &&functor = [output_geo,packed_geo_nc,
3543  &pivot,lod,impl_changed](const UT_BlockedRange<GA_Offset> &r)
3544  {
3545  for (GA_Offset primoff = r.begin(), end = r.end(); primoff < end; ++primoff)
3546  {
3547  GA_Primitive *prim = output_geo->getPrimitive(primoff);
3548  GU_PrimPacked *packed_prim = UTverify_cast<GU_PrimPacked *>(prim);
3549  if (impl_changed)
3550  {
3551  // It's okay to set the implementation if it hasn't changed,
3552  // since setImplementation checks if the pointer is equal.
3553  // It doesn't incur the previous impl's decref in that case.
3554  packed_prim->setImplementation(packed_geo_nc, false);
3555  }
3556 
3557  // NOTE: Applying pivot to position is handled below.
3558  packed_prim->setPivot(pivot);
3559 
3560  packed_prim->setViewportLOD(lod);
3561  }
3562  };
3563 
3564  const UT_BlockedRange<GA_Offset> prim_range(start_primoff, start_primoff+num_packed_prims);
3565  constexpr exint PARALLEL_THRESHOLD = 2048;
3566  if (num_packed_prims >= PARALLEL_THRESHOLD)
3567  UTparallelForLightItems(prim_range, functor);
3568  else
3569  functor(prim_range);
3570  }
3571  else if ((num_packed_prims > 0) && centroid_pivot)
3572  {
3573  GA_Primitive *prim = output_geo->getPrimitive(start_primoff);
3574  GU_PrimPacked *packed_prim = UTverify_cast<GU_PrimPacked *>(prim);
3575  const GU_PackedImpl *packed_impl = packed_prim->sharedImplementation();
3576  UT_BoundingBox bbox;
3577  packed_impl->getBoundsCached(bbox);
3578  if (bbox.isValid())
3579  pivot = bbox.center();
3580  }
3581 
3582  const bool pivots_changed =
3583  pivot_type != cache->myPrevPivotEnum ||
3584  (centroid_pivot && source_changed);
3585 
3586  if (num_packed_prims > 0)
3587  {
3588  if (target != nullptr)
3589  {
3590  UT_ASSERT(target_point_list != nullptr && num_packed_prims == target_point_list->size());
3591  GUhandleTargetAttribsForPackedPrims(
3592  output_geo,
3593  cache,
3594  topology_changed,
3595  had_transform_matrices,
3596  target,
3597  *target_point_list,
3598  *target_attrib_info,
3599  *target_group_info,
3600  &pivot);
3601  }
3602  else
3603  {
3604  // No target, so only update the primitive transforms.
3605  GUupdatePackedPrimTransforms(
3606  output_geo,
3607  cache,
3608  had_transform_matrices,
3609  num_packed_prims,
3610  &pivot);
3611  }
3612  }
3613 
3614  if (topology_changed)
3615  {
3616  output_geo->bumpDataIdsForAddOrRemove(true, true, true);
3617  }
3618  if (source_changed && num_packed_prims > 0)
3619  {
3620  output_geo->getPrimitiveList().bumpDataId();
3621  }
3622  if (transforms_changed || pivots_changed)
3623  {
3624  output_geo->getP()->bumpDataId();
3625  // If there are no transform matrices, the primitives weren't transformed.
3626  // If source_changed, we already bumped the primitive list data ID.
3627  bool has_transform_matrices = (cache->myTransformMatrices3D.get() != nullptr);
3628  if ((has_transform_matrices || had_transform_matrices) && !source_changed)
3629  output_geo->getPrimitiveList().bumpDataId();
3630  }
3631  if (lod_changed)
3632  {
3633  output_geo->getPrimitiveList().bumpDataId();
3634  }
3635 
3636  cache->myPrevPack = true;
3637  cache->myPrevTargetPtCount = num_packed_prims;
3638  cache->myPrevSourceGroupDataID = source->getUniqueId();
3639  cache->myPrevSourceMetaCacheCount = source->getMetaCacheCount();
3640  cache->myPrevPivotEnum = pivot_type;
3641  cache->myPrevViewportLOD = lod;
3642  cache->myPrevOutputDetailID = output_geo->getUniqueId();
3643  cache->myPrevSourceUniqueID = source->getUniqueId();
3644  cache->myPrevSourceMetaCacheCount = source->getMetaCacheCount();
3645 }
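// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original GU_Copy2.C): a minimal example
// of how a Copy-to-Points style cook might drive GUcopyPackAllSame when
// packing is enabled. The calling SOP, its myCopyCache member, and the change
// flags are assumptions for illustration; the parameter order follows the
// declaration referenced in the index below.
//
//   GU_Copy::GUcopyPackAllSame(
//       output_geo,
//       GEO_VIEWPORT_FULL,                              // viewport LOD for the packed prims
//       GU_CopyToPointsCache::PackedPivot::CENTROID,
//       &myCopyCache,                                   // hypothetical cache kept on the node
//       source_handle, source,
//       source_pointgroup, source_primgroup,
//       source_topology_changed,
//       had_transform_matrices, transforms_changed,
//       target_point_list.size(),                       // one packed prim per target point
//       target, &target_point_list,
//       &target_attrib_info, &target_group_info);
// ---------------------------------------------------------------------------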
3646 
3647 } // namespace GU_Copy
3648 
3649 } // End of HDK_Sample namespace
void GUcopyPackAllSame(GU_Detail *output_geo, const GEO_ViewportLOD lod, const GU_CopyToPointsCache::PackedPivot pivot_type, GU_CopyToPointsCache *cache, const GU_ConstDetailHandle source_handle, const GU_Detail *source, const GA_PointGroup *source_pointgroup, const GA_PrimitiveGroup *source_primgroup, bool source_topology_changed, bool had_transform_matrices, bool transforms_changed, const exint num_packed_prims, const GU_Detail *target, const GA_OffsetListRef *target_point_list, GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info, GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info)
Definition: GU_Copy2.C:3399
SYS_FORCE_INLINE void clear()
clear removes all of the entries
SYS_FORCE_INLINE const GU_PackedImpl * sharedImplementation() const
A class to manage an ordered array which has fixed offset handles.
Definition: GA_IndexMap.h:63
constexpr SYS_FORCE_INLINE T length2() const noexcept
Definition: UT_Vector3.h:346
void GUcreatePointOrPrimList(GA_OffsetList &offset_list, const GU_Detail *const detail, const GA_ElementGroup *const group, const GA_AttributeOwner owner)
Definition: GU_Copy2.C:1895
T & last()
Definition: UT_Array.h:759
void GUcopyAttributesFromSource(GU_Detail *const output_geo, const GA_SplittableRange *const output_splittable_ranges, const GU_Detail *const source, const exint num_target_points, GU_CopyToPointsCache *const cache, const GA_OffsetList *const source_offset_lists, const exint *const num_source_attribs, const bool no_transforms, const bool had_transform_matrices, const bool has_transform_matrices, const bool topology_changed, const bool transforms_changed, const GU_Detail *const target, const GU_CopyToPointsCache::TargetAttribInfoMap *const target_attrib_info, const GU_CopyToPointsCache::TargetAttribInfoMap *const target_group_info, const exint *const target_to_piecei, const UT_Array< exint > *const owner_piece_offset_starts, const GU_CopyToPointsCache::PieceData *const piece_data)
Definition: GU_Copy2.C:2010
SYS_FORCE_INLINE void bumpDataId()
Definition: GA_Attribute.h:305
SYS_FORCE_INLINE void forEachAttribute(FUNCTOR &&functor) const
static SYS_FORCE_INLINE bool isType(const GA_Attribute *attrib)
UT_UniquePtr< UT_Matrix3F[]> myTransformMatrices3F
Definition: GU_Copy2.h:73
Definition of a geometry attribute.
Definition: GA_Attribute.h:197
UT_Matrix4T< fpreal64 > UT_Matrix4D
Data has no numeric representation.
Definition: GA_Types.h:102
UT_UniquePtr< UT_Vector3D[]> myTransformTranslates3D
Definition: GU_Copy2.h:95
void GUupdatePackedPrimTransforms(GU_Detail *output_geo, GU_CopyToPointsCache *cache, const bool had_transform_matrices, const exint num_packed_prims, const UT_Vector3 *const constant_pivot)
Definition: GU_Copy2.C:3201
void clear()
Definition: UT_ArraySet.h:366
UT_ArrayStringMap< GA_DataId > mySourceGroupDataIDs[3]
Definition: GU_Copy2.h:155
SYS_FORCE_INLINE GA_Primitive * getPrimitive(GA_Offset prim_off)
Definition: GA_Detail.h:428
SYS_FORCE_INLINE const GA_AttributeDict & pointAttribs() const
Definition: GEO_Detail.h:1917
UT_UniquePtr< UT_Vector3F[]> myTransformTranslates3F
Definition: GU_Copy2.h:75
FromType append(ToType value)
Add a single entry (may grow array)
GA_Range getVertexRange(const GA_VertexGroup *group=0) const
Get a range of all vertices in the detail.
Definition: GA_Detail.h:1752
const GA_IndexMap & getPrimitiveMap() const
Definition: GA_Detail.h:742
iterator begin(GA_AttributeScope scope=GA_SCOPE_INVALID) const
GA_DataId getDataId() const
Iteration over a range of elements.
Definition: GA_Iterator.h:28
FromType find(ToType value, FromType s=FromType(0)) const
void scale(T sx, T sy, T sz)
Definition: UT_Matrix3.h:849
void
Definition: png.h:1083
#define SYS_STATIC_ASSERT_MSG(expr, msg)
OIIO_UTIL_API bool copy(string_view from, string_view to, std::string &err)
const DataType & getData() const
int64 GA_DataId
Definition: GA_Types.h:686
SYS_FORCE_INLINE void colVecMult(const UT_Matrix3F &m)
Definition: UT_Matrix3.h:1541
virtual void copySubclassData(const GA_Primitive *source)
Definition: GA_Primitive.h:499
GLuint start
Definition: glcorearb.h:475
bool GAisValid(GA_Size v)
Definition: GA_Types.h:648
GA_Size entries() const override final
Will return the number of primary elements.
void homogenize()
Express the point in homogeneous coordinates or vice-versa.
Definition: UT_Vector4.h:541
void clearAndDestroy()
Clear all the points/primitives out of this detail.
Definition: GEO_Detail.h:254
int getTupleSize() const
GA_ElementGroup * findElementGroup(GA_AttributeOwner owner, const UT_StringRef &name)
UT_UniquePtr< UT_Matrix3D[]> myTransformInverse3D
Definition: GU_Copy2.h:78
SYS_FORCE_INLINE bool isValid() const
Definition: GA_Handle.h:1084
void GUcreateVertexListAndGeometryFromSource(GU_Detail *output_geo, const GU_Detail *const source, const exint source_point_count, const exint source_vertex_count, const exint source_prim_count, const GA_OffsetList &source_point_list_cache, GA_OffsetList &source_vertex_list_cache, const GA_OffsetList &source_prim_list_cache, const GA_PointGroup *const source_pointgroup, const GA_PrimitiveGroup *const source_primgroup, const exint ncopies)
Definition: GU_Copy2.C:1919
SYS_FORCE_INLINE bool getExtraFlag() const
Synonym for isClosed()
GA_DataId getDataId() const
Return the data ID for the topology attributes.
int64 exint
Definition: SYS_Types.h:125
exint entries() const
Definition: GA_GroupTable.h:66
SYS_FORCE_INLINE DEST_DATA_T get(GA_Offset i, exint component=0) const
Definition: UT_PageArray.h:493
void GUcomputeTransformTypeCaches(GU_PointTransformCache *cache, exint num_target_points, bool transforms_changed, const bool needed_transforms[NeededTransforms::num_needed_transforms])
Definition: GU_Copy2.C:736
GA_Attribute * getP()
Convenience method to access the P attribute.
Definition: GA_Detail.h:163
void setTrivialRange(FromType startindex, ToType startvalue, GA_Size nelements)
void GUcreateGeometryFromSource(GU_Detail *output_geo, const GU_Detail *const source, const GA_OffsetList &source_point_list_cache, const GA_OffsetList &source_vertex_list_cache, const GA_OffsetList &source_prim_list_cache, const exint ncopies)
NOTE: This does not clear output_geo.
Definition: GU_Copy2.C:1687
void clearOrdered()
Clear all order information, including any mixed entries.
void GEO_API GEOcomputeNormals(const GEO_Detail &detail, const GA_RWHandleV3 &normalattrib, const GA_Group *group=NULL, const float cuspangledegrees=GEO_DEFAULT_ADJUSTED_CUSP_ANGLE, const GEO_NormalMethod method=GEO_NormalMethod::ANGLE_WEIGHTED, const bool copy_orig_if_zero=false)
bool hasTransformingPrimitives() const
GA_EdgeGroupTable & edgeGroups()
Definition: GA_Detail.h:1187
SYS_FORCE_INLINE TO_T UTverify_cast(FROM_T from)
Definition: UT_Assert.h:226
void GUcopyAttributesFromTarget(GU_Detail *const output_geo, const GA_SplittableRange *const output_splittable_ranges, const exint ncopies, GU_CopyToPointsCache *const cache, const exint source_point_count, const exint source_vertex_count, const exint source_prim_count, const exint *const num_target_attribs, const GA_OffsetListRef &target_point_list, const GU_Detail *const target, GU_CopyToPointsCache::TargetAttribInfoMap &target_attrib_info, GU_CopyToPointsCache::TargetAttribInfoMap &target_group_info, const bool topology_changed, const exint *const target_to_piecei, const UT_Array< exint > *const owner_piece_offset_starts, const GU_CopyToPointsCache::PieceData *const piece_data)
Definition: GU_Copy2.C:2697
GLuint const GLchar * name
Definition: glcorearb.h:786
Standard user attribute level.
Definition: GA_Types.h:147
SYS_FORCE_INLINE UT_FixedVector< DEST_DATA_T, DEST_TSIZE > getVector(GA_Offset i) const
Definition: UT_PageArray.h:503
void UTparallelFor(const Range &range, const Body &body, const int subscribe_ratio=2, const int min_grain_size=1, const bool force_use_task_scope=false)
#define GA_INVALID_DATAID
Definition: GA_Types.h:687
virtual bool matchesStorage(const GA_Attribute *that) const
Definition: GA_Attribute.h:750
bool getBoundsCached(UT_BoundingBox &box) const
const UT_StringHolder & getName() const
Definition: GA_Attribute.h:282
GA_DataId myTargetTransformDataIDs[GA_AttributeInstanceMatrix::theNumAttribs]
This is for keeping track of whether transforms have changed since the last cook. ...
Definition: GU_Copy2.h:86
SYS_FORCE_INLINE void set(GA_Offset i, SRC_DATA_T v)
component == 0 in this version
Definition: UT_PageArray.h:616
UT_UniquePtr< UT_QuaternionF[]> myTransformQuaternionsF
Definition: GU_Copy2.h:79
float fpreal32
Definition: SYS_Types.h:200
exint size() const
Definition: UT_Array.h:609
GA_OffsetList mySourceOffsetLists[3]
Definition: GU_Copy2.h:173
exint GA_Size
Defines the bit width for index and offset types in GA.
Definition: GA_Types.h:234
UT_Matrix4T< float > UT_Matrix4
UT_UniquePtr< UT_Matrix3D[]> myTransformMatrices3D
Definition: GU_Copy2.h:74
#define GA_INVALID_OFFSET
Definition: GA_Types.h:677
iterator find(const Key &key)
Definition: UT_ArrayMap.h:158
GA_Size countPrimitiveType(const GA_PrimitiveTypeId &type) const
Definition: GA_Detail.h:2283
bool hasAnyAttribs() const
Returns true if there are any attributes bound.
GA_GroupTable::iterator< GA_EdgeGroup > beginTraverse() const
Geometry Embedded procedural.
GLenum target
Definition: glcorearb.h:1667
A range of elements in an index-map.
Definition: GA_Range.h:42
std::unique_ptr< T, Deleter > UT_UniquePtr
A smart pointer for unique ownership of dynamically allocated objects.
Definition: UT_UniquePtr.h:39
SYS_FORCE_INLINE GA_ElementGroup * find(const UT_StringRef &name) const
void setViewportLOD(GEO_ViewportLOD vlod)
double fpreal64
Definition: SYS_Types.h:201
#define UT_ASSERT_MSG(ZZ,...)
Definition: UT_Assert.h:156
UT_UniquePtr< UT_Matrix3D[]> myTransformMatrices3D
Definition: GU_Copy2.h:93
GLuint GLenum GLenum transform
Definition: glew.h:15055
void allocateAndSet(GU_Detail *gdp, bool own=true)
GA_Size GA_Offset
Definition: GA_Types.h:640
GLdouble GLdouble GLdouble GLdouble q
Definition: glew.h:1419
GA_Offset GEObuildPrimitives(GEO_Detail *detail, const std::pair< int, exint > *primtype_count_pairs, const GA_Offset init_startpt, const GA_Size npoints_per_copy, const GA_PolyCounts &vertexlistsizelist, const INT_T *vertexpointnumbers, const bool hassharedpoints, const exint *closed_span_lengths, const exint ncopies)
tbb::task_group_status wait()
Definition: UT_TaskGroup.h:131
void updateFromArbitraryMatrix(const UT_Matrix3 &)
GLenum array
Definition: glew.h:9108
int invert()
Invert this matrix and return 0 if OK, 1 if singular.
GA_AttributeScope
Definition: GA_Types.h:141
SYS_FORCE_INLINE bool isTrivial() const
const GA_ElementGroupTable & getElementGroupTable(GA_AttributeOwner owner) const
const GA_EdgeGroup * findEdgeGroup(const UT_StringRef &name) const
UT_UniquePtr< UT_Vector3F[]> myTransformTranslates3F
Definition: GU_Copy2.h:94
const GA_ROHandleV3D & getN() const
GA_Range getPointRange(const GA_PointGroup *group=0) const
Get a range of all points in the detail.
Definition: GA_Detail.h:1722
const GA_IndexMap & getPointMap() const
Definition: GA_Detail.h:740
GA_Attribute * cloneAttribute(GA_AttributeOwner owner, const UT_StringHolder &name, namevalidcertificate, const GA_Attribute &src, bool clone_options, GA_DataIdStrategy data_id_strategy=GA_DATA_ID_BUMP, const GA_ReuseStrategy &reuse=GA_ReuseStrategy())
UT_QuaternionT< fpreal32 > UT_QuaternionF
void identity()
Set the matrix to identity.
Definition: UT_Matrix3.h:1125
UT_Vector3T< T > center() const
void UTparallelForLightItems(const Range &range, const Body &body)
SYS_FORCE_INLINE void setElement(GA_Offset ai, bool v)
virtual void replace(const GA_Attribute &src)=0
SYS_FORCE_INLINE GA_Offset appendPointBlock(GA_Size npoints)
Append new points, returning the first offset of the contiguous block.
Definition: GA_Detail.h:329
SYS_FORCE_INLINE bool destroyElementGroup(GA_AttributeOwner owner, const UT_StringRef &name)
Definition: GA_Detail.h:1234
void bumpDataId()
Use this to mark primitives or their intrinsic data as dirty.
UT_UniquePtr< UT_QuaternionD[]> myTransformQuaternionsD
Definition: GU_Copy2.h:80
GEO_ViewportLOD myPrevViewportLOD
Definition: GU_Copy2.h:146
void GUcreateEmptyPackedGeometryPrims(GU_Detail *const output_geo, const exint num_packed_prims)
Definition: GU_Copy2.C:1976
void UTparallelForRunInTaskGroup(UT_TaskGroup &task_group, RANGE &&range, BODY &&body)
Definition: UT_TaskGroup.h:139
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:152
SYS_FORCE_INLINE const GA_PointGroup * findPointGroup(const UT_StringRef &name) const
Definition: GA_Detail.h:1246
static SYS_FORCE_INLINE GA_ATINumeric * cast(GA_Attribute *attrib)
Definition: GA_ATINumeric.h:65
GLsizei GLsizei GLchar * source
Definition: glcorearb.h:803
SYS_FORCE_INLINE GA_OffsetListRef getPrimitiveVertexList(GA_Offset primoff) const
Definition: GA_Primitive.h:877
void getMatrix(UT_Matrix4 &xform, const UT_Vector3 &P, GA_Offset offset, float default_pscale=1) const
UT_UniquePtr< UT_Vector3D[]> myTransformTranslates3D
Definition: GU_Copy2.h:76
GLuint GLuint end
Definition: glcorearb.h:475
void setLocalTransform(const UT_Matrix3D &matrix) override
iterator begin()
Returns a non-const iterator for the beginning of the set.
Definition: UT_ArraySet.h:658
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
void GUsetupPointTransforms(GU_PointTransformCache *cache, const GA_OffsetListRef &target_point_list, const GU_Detail *target, const bool transform_using_more_than_P, const bool allow_implicit_N, bool &transforms_changed)
Definition: GU_Copy2.C:302
GA_AttributeSet & getAttributes()
Definition: GA_Detail.h:795
const GA_IndexMap & getIndexMap(GA_AttributeOwner owner) const
SYS_FORCE_INLINE void rowVecMult(const UT_Matrix3F &m)
Definition: UT_Matrix3.h:1529
SYS_FORCE_INLINE GA_ATINumericUPtr createDetachedTupleAttribute(GA_AttributeOwner owner, GA_Storage storage, int tuple_size, const GA_Defaults &defaults=GA_Defaults(0.0f), const GA_AttributeOptions *attribute_options=nullptr) const
Definition: GA_Detail.h:891
UT_ArrayStringMap< GA_DataId > mySourceAttribDataIDs[3]
Definition: GU_Copy2.h:154
SYS_FORCE_INLINE GA_Offset getNumPointOffsets() const
Definition: GA_Detail.h:340
static SYS_FORCE_INLINE GA_ATIString * cast(GA_Attribute *attrib)
Definition: GA_ATIString.h:57
virtual void setPivot(const UT_Vector3 &pos)
UT_Matrix3T< fpreal32 > UT_Matrix3F
const GA_IndexMap & getVertexMap() const
Definition: GA_Detail.h:741
T p1() const
Definition: GA_Edge.h:33
bool isInternal() const
Definition: GA_Group.h:45
GA_GroupTable::iterator< GA_ElementGroup > beginTraverse() const
SYS_FORCE_INLINE GA_Offset vertexPoint(GA_Offset vertex) const
Given a vertex, return the point it references.
Definition: GA_Detail.h:528
void identity()
Set the matrix to identity.
Definition: UT_Matrix4.h:1090
bool isOrdered() const override final
Returns true if the group is currently ordered.
GA_Group * newGroup(const UT_StringHolder &name)
GA_OffsetList mySourceOffsetLists[3]
Definition: GU_Copy2.h:149
SYS_FORCE_INLINE GA_DataId getDataId() const
Definition: GA_Attribute.h:298
SYS_FORCE_INLINE const GA_Attribute * findAttribute(GA_AttributeScope scope, const UT_StringRef &name, const GA_AttributeOwner search_order[], int search_size) const
Definition: GA_Detail.h:1006
const UT_StringHolder & getName() const
Definition: GA_Group.h:42
SYS_FORCE_INLINE bool destroyEdgeGroup(const UT_StringRef &name)
Definition: GA_Detail.h:1274
GA_Size GA_Index
Define the strictness of GA_Offset/GA_Index.
Definition: GA_Types.h:634
GA_AttributeScope getScope() const
Definition: GA_Attribute.h:211
virtual void copyNonStorageMetadata(const GA_Attribute *that)
Definition: GA_Attribute.h:764
void getPrimitiveTypeCounts(UT_Array< std::pair< int, exint >> &type_count_pairs, const GA_Range *range=nullptr) const
exint append()
Definition: UT_Array.h:137
GA_TypeInfo
Definition: GA_Types.h:99
UT_UniquePtr< GA_Attribute > GA_AttributeUPtr
Definition: GA_Attribute.h:929
exint getUniqueId() const
Definition: GA_Detail.h:116
void setTranslates(const UT_Vector3T< S > &translates)
Definition: UT_Matrix4.h:1400
GA_Topology & getTopology()
Definition: GA_Detail.h:797
virtual void getPivot(UT_Vector3 &pos) const
void GUhandleTargetAttribsForPackedPrims(GU_Detail *output_geo, GU_CopyToPointsCache *cache, const bool topology_changed, const bool had_transform_matrices, const GU_Detail *const target, const GA_OffsetListRef &target_point_list, GU_CopyToPointsCache::TargetAttribInfoMap &target_attrib_info, GU_CopyToPointsCache::TargetAttribInfoMap &target_group_info, const UT_Vector3 *const constant_pivot)
Definition: GU_Copy2.C:3329
GLdouble n
Definition: glcorearb.h:2008
SYS_FORCE_INLINE bool isValid() const
Definition: GA_Handle.h:186
UT_Vector3T< fpreal32 > UT_Vector3F
Define a range based on a specific offset list.
bool isSame(const GA_ListTypeRef &that) const
SYS_FORCE_INLINE GA_Index pointIndex(GA_Offset offset) const
Given a point's data offset, return its index.
Definition: GA_Detail.h:348
void moveRange(GA_Offset srcstart, GA_Offset deststart, GA_Offset nelements)
GA_AttributeOwner getOwner() const
virtual void transform(const UT_Matrix4 &)
SYS_FORCE_INLINE GA_Size getNumVertices() const
Return the number verticies in the entire detail.
Definition: GA_Detail.h:503
Data represents a quaternion. Token "quaternion".
Definition: GA_Types.h:118
GA_AttributeOwner
Definition: GA_Types.h:33
void GUaddAttributesFromSourceOrTarget(GU_Detail *output_geo, const GU_Detail *source, exint *num_source_attribs, bool has_transform_matrices, bool *needed_transforms, const GU_Detail *target, GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info, GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info, exint *num_target_attribs)
Definition: GU_Copy2.C:455
UT_UniquePtr< UT_Matrix3F[]> myTransformInverse3F
Definition: GU_Copy2.h:96
UT_UniquePtr< UT_QuaternionD[]> myTransformQuaternionsD
Definition: GU_Copy2.h:99
void GUremoveUnnecessaryAttribs(GU_Detail *output_geo, const GU_Detail *source, const GU_Detail *target, GU_CopyToPointsCache *cache, const GU_CopyToPointsCache::TargetAttribInfoMap *target_attrib_info, const GU_CopyToPointsCache::TargetAttribInfoMap *target_group_info)
Definition: GU_Copy2.C:135
void intrusive_ptr_add_ref(T *x)
Definition: refcnt.h:208
void translate(T dx, T dy, T dz=0)
Definition: UT_Matrix4.h:748
GEO_ViewportLOD
SYS_FORCE_INLINE GA_TypeInfo getTypeInfo() const
Definition: GA_Attribute.h:251
SYS_FORCE_INLINE bool isValid() const
Check whether the bounding box contains at least one point.
SYS_FORCE_INLINE GA_Index indexSize() const
Definition: GA_IndexMap.h:103
const GA_IndexMap & getIndexMap() const
Definition: GA_Attribute.h:206
SYS_FORCE_INLINE GA_Offset primitiveOffset(GA_Index index) const
Given a primitive's index (in append order), return its data offset.
Definition: GA_Detail.h:418
virtual bool copy(GA_Offset desti, GA_Offset srci)
Definition: GA_Attribute.h:799
Data represents a normal vector. Token "normal".
Definition: GA_Types.h:112
void append(GA_Size size, GA_Size count=1)
SYS_FORCE_INLINE const GA_ATITopology * getVertexRef() const
Definition: GA_Topology.h:225
constexpr SYS_FORCE_INLINE bool isZero() const noexcept
Definition: UT_Vector3.h:383
SYS_FORCE_INLINE GA_Size getNumPrimitives() const
Return the number of primitives.
Definition: GA_Detail.h:407
void getDataIds(GA_DataId dataIds[theNumAttribs]) const
iterator erase(iterator pos)
Compute an instance transform given a set of attributes.
Data represents a direction vector. Token "vector".
Definition: GA_Types.h:110
int64 getMetaCacheCount() const
Definition: GA_Detail.h:2343
UT_ArrayStringMap< GA_DataId > mySourceEdgeGroupDataIDs
Definition: GU_Copy2.h:156
void swap(set_type &that)
Swaps another set with this one.
Definition: UT_ArraySet.h:330
GA_API const UT_StringHolder pivot
GLboolean GLuint group
Definition: glew.h:2750
const GA_PrimitiveList & getPrimitiveList() const
Definition: GA_Detail.h:791
void setDetailPtr(const GU_DetailHandle &d)
GLenum GLint * range
Definition: glcorearb.h:1925
bool add(const GA_Edge &edge, GA_Offset primoff=GA_INVALID_OFFSET)
Data represents a position in space. Token "point".
Definition: GA_Types.h:104
void setN(const GA_Attribute *N)
Overrides the N attribute with the specified attribute.
UT_UniquePtr< UT_Matrix3F[]> myTransformInverse3F
Definition: GU_Copy2.h:77
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:153
SYS_FORCE_INLINE const GA_PageArray< DEST_DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED > & castType() const
Definition: GA_PageArray.h:732
T p0() const
Definition: GA_Edge.h:31
SYS_FORCE_INLINE void set(GA_Offset off, const HOLDER &str)
Store the str at the given offset.
Definition: GA_Handle.h:1106
void dehomogenize()
Express the point in homogeneous coordinates or vice-versa.
Definition: UT_Vector4.h:547
const GA_AttributeDict & getAttributeDict(GA_AttributeOwner owner) const
Definition: GA_Detail.h:831
TargetAttribInfoMap myTargetGroupInfo
Definition: GU_Copy2.h:184
void clear()
Resets list to an empty list.
Definition: UT_Array.h:679
SYS_FORCE_INLINE void setImplementation(const GU_PackedImpl *impl, bool add_ref=true, bool remove_ref=true)
SYS_FORCE_INLINE const GA_Attribute * findPointAttribute(GA_AttributeScope s, const UT_StringRef &name) const
Definition: GA_Detail.h:1033
GLboolean r
Definition: glcorearb.h:1222
GA_Range getPrimitiveRange(const GA_PrimitiveGroup *group=0) const
Get a range of all primitives in the detail.
Definition: GA_Detail.h:1725
#define SYSmin(a, b)
Definition: SYS_Math.h:1514
bool OIIO_UTIL_API contains(string_view a, string_view b)
Does 'a' contain the string 'b' within it?
exint myPrevSourceUniqueID
These are only used when myPrevPack is true.
Definition: GU_Copy2.h:143
void destroyAttribute(GA_AttributeOwner owner, GA_AttributeScope scope, const UT_StringRef &name, const GA_AttributeFilter *filter=0)
void bumpDataIdsForAddOrRemove(bool added_or_removed_points, bool added_or_removed_vertices, bool added_or_removed_primitives)
SYS_FORCE_INLINE GA_Offset pointOffset(GA_Index index) const
Given a point's index (in append order), return its data offset.
Definition: GA_Detail.h:344
Declare prior to use.
SYS_FORCE_INLINE const GA_ATITopology * getPointRef() const
Definition: GA_Topology.h:221
Data represents a transform matrix. Token "matrix".
Definition: GA_Types.h:116
UT_UniquePtr< UT_QuaternionF[]> myTransformQuaternionsF
Definition: GU_Copy2.h:98
T * getArray() const
Definition: UT_Array.h:779
GLint lod
Definition: glcorearb.h:2765
void initialize(const GA_AttributeDict &dict, const UT_StringRef &N_name=GA_Names::N, const UT_StringRef &v_name=GA_Names::v)
void invalidateGroupEntries()
UT_UniquePtr< UT_Matrix3D[]> myTransformInverse3D
Definition: GU_Copy2.h:97
GA_OffsetList getOffsetFromIndexList() const
Definition: GA_IndexMap.h:244
bool fullBlockAdvance(GA_Offset &start, GA_Offset &end)
UT_UniquePtr< UT_Matrix3F[]> myTransformMatrices3F
Definition: GU_Copy2.h:92
TargetAttribInfoMap myTargetAttribInfo
Definition: GU_Copy2.h:183
SYS_FORCE_INLINE FromType size() const
Returns the number of used elements in the list (always <= capacity())
void bumpDataId()
void getTranslates(UT_Vector3T< S > &translates) const
Definition: UT_Matrix4.h:1390
static GA_PrimitiveTypeId typeId()
Get the type ID for the GU_PackedGeometry primitive type.
SYS_FORCE_INLINE bool contains(GA_Offset offset) const
UT_Matrix3T< fpreal64 > UT_Matrix3D
SYS_FORCE_INLINE GA_Size getNumPoints() const
Return the number of points.
Definition: GA_Detail.h:333
GA_Storage getStorage() const
GA_Offset appendPrimitivesAndVertices(const GA_PrimitiveTypeId &type, GA_Size nprimitives, GA_Size nvertices_each, GA_Offset &vertex_block_start, bool closed_flag=false)