HDK: UT_PageArrayImpl.h
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_PageArrayImpl.h (UT Library, C++)
7  *
8  * COMMENTS: Implementations of functions of UT_PageArray that
9  * aren't needed in most places that use it.
10  */
11 
12 #pragma once
13 
14 #ifndef __UT_PageArrayImpl__
15 #define __UT_PageArrayImpl__
16 
17 #include "UT_PageArray.h"
18 
19 #include "UT_MemoryCounter.h"
20 #include "UT_StackBuffer.h"
21 #include "UT_Storage.h"
22 #include "UT_Swap.h"
23 
24 #include <SYS/SYS_Types.h>
25 
26 
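// Usage sketch (not part of the original header): how the out-of-line
// functions implemented below are typically called. The ExampleArray alias
// and the template arguments chosen for it are assumptions for illustration.
//
//     typedef UT_PageArray<fpreal32,3> ExampleArray;
//     ExampleArray positions;
//     positions.setSize(ExampleArray::IndexType(1024), 0.0f);    // fill new elements with 0
//     positions.setConstant(ExampleArray::IndexType(0),
//                           ExampleArray::IndexType(512), 1.0f); // fill a range with one value
//     positions.moveRange(ExampleArray::IndexType(0),            // copy within the array;
//                         ExampleArray::IndexType(512),          // overlap is handled correctly
//                         ExampleArray::IndexType(512));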
27 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
28 void
29 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setSize(IDX_T newsize, NotVoidType initval)
30 {
31  UT_ASSERT_P(newsize >= IDX_T(0));
33 
34  setCapacityIfNeeded(newsize);
35  hardenTable();
36  PageTable *pages = myImpl.getPages();
37  UT_ASSERT_P(pages || newsize == IDX_T(0));
38  if (pages)
39  {
40  IDX_T oldsize = pages->size();
41 
42  if (!PAGESHARDENED || TSIZE >= 0)
43  pages->setSize(newsize);
44  else
45  pages->setSize(newsize, myImpl.getTupleSize());
46 
47  if (newsize > oldsize)
48  {
49  if (TSIZE >= 1)
50  pages->fill(oldsize, newsize, initval);
51  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
52  pages->fill(oldsize, newsize, initval, myImpl.getTupleSize());
53  }
54  }
55 }
56 
57 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
58 void
59 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setSize(IDX_T newsize, const UT_FixedVector<NotVoidType,theSafeTupleSize> &initval)
60 {
61  UT_ASSERT_P(newsize >= IDX_T(0));
63  UT_ASSERT_P(getStorage() != UT_Storage::INVALID);
64  UT_ASSERT_P(TSIZE >= 1);
65 
66  setCapacityIfNeeded(newsize);
67  hardenTable();
68  PageTable *pages = myImpl.getPages();
69  UT_ASSERT_P(pages || newsize == IDX_T(0));
70  if (pages)
71  {
72  IDX_T oldsize = pages->size();
73 
74  // No need to destruct if smaller, since it's a POD type.
75 
76  pages->setSize(newsize);
77 
78  if (newsize > oldsize)
79  pages->fill(oldsize, newsize, initval);
80  }
81 }
82 
83 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
84 void
85 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setSize(IDX_T newsize, const UT_Defaults &initval)
86 {
87  UT_ASSERT_P(newsize >= IDX_T(0));
88 
89  setCapacityIfNeeded(newsize);
90  hardenTable();
91  PageTable *pages = myImpl.getPages();
92  UT_ASSERT_P(pages || newsize == IDX_T(0));
93  if (pages)
94  {
95  IDX_T oldsize = pages->size();
96 
97  // No need to destruct if smaller, since it's a POD type.
98 
99  if (!PAGESHARDENED || TSIZE >= 0)
100  pages->setSize(newsize);
101  else
102  pages->setSize(newsize, myImpl.getTupleSize());
103 
104  if (newsize > oldsize)
105  setConstant(oldsize, newsize, initval);
106  }
107 }
108 
109 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
110 void
111 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setConstant(IDX_T start, IDX_T end, NotVoidType v)
112 {
113  UT_ASSERT_P(end >= start);
114  UT_ASSERT_P(start >= IDX_T(0));
115  UT_ASSERT_P(end <= capacity());
117 
118  if (end <= start)
119  return;
120 
121  hardenTable();
122  PageTable *pages = myImpl.getPages();
123  if (!pages)
124  return;
125 
126  if (TSIZE >= 1)
127  pages->fill(start, end, v);
128  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
129  pages->fill(start, end, v, myImpl.getTupleSize());
130 }
131 
132 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
133 void
134 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setConstant(IDX_T start, IDX_T end, const UT_FixedVector<NotVoidType,theSafeTupleSize> &v)
135 {
136  UT_ASSERT_P(end >= start);
137  UT_ASSERT_P(start >= IDX_T(0));
138  UT_ASSERT_P(end <= capacity());
140  UT_ASSERT_P(TSIZE >= 1);
141 
142  if (end <= start)
143  return;
144 
145  hardenTable();
146  PageTable *pages = myImpl.getPages();
147  if (!pages)
148  return;
149  pages->fill(start, end, v);
150 }
151 
152 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
153 void
154 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setConstant(IDX_T start, IDX_T end, const UT_Defaults &v)
155 {
156  UT_ASSERT_P(end >= start);
157  UT_ASSERT_P(start >= IDX_T(0));
158  UT_ASSERT_P(end <= capacity());
159 
160  auto &hard = hardenTable();
161 
162  UT_Storage storage = getStorage();
163 
164  // If the storage type is not known at compile time,
165  // switch, cast, and call again.
166  if (SYSisSame<DATA_T,void>())
167  {
168  switch (storage)
169  {
170  case UT_Storage::INT8:
171  hard.template castType<int8>().setConstant(start, end, v); return;
172  case UT_Storage::INT16:
173  hard.template castType<int16>().setConstant(start, end, v); return;
174  case UT_Storage::INT32:
175  hard.template castType<int32>().setConstant(start, end, v); return;
176  case UT_Storage::INT64:
177  hard.template castType<int64>().setConstant(start, end, v); return;
178  case UT_Storage::REAL16:
179  hard.template castType<fpreal16>().setConstant(start, end, v); return;
180  case UT_Storage::REAL32:
181  hard.template castType<fpreal32>().setConstant(start, end, v); return;
182  case UT_Storage::REAL64:
183  hard.template castType<fpreal64>().setConstant(start, end, v); return;
184  case UT_Storage::INVALID:
185  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
186  break;
187  }
188  return;
189  }
190 
191  if (end <= start)
192  return;
193 
194  PageTable *pages = myImpl.getPages();
195  if (!pages)
196  return;
197 
198  const exint tuplesize = getTupleSize();
199  if (tuplesize == 0)
200  return;
201 
202  // UT_Defaults is almost always tuple size 1, so have a special case for it.
203  if (v.getTupleSize() == 1 || tuplesize == 1)
204  {
205  if (TSIZE >= 1)
206  {
207  if (UTisIntStorage(storage))
208  pages->fill(start, end, NotVoidType(v.getI(0)));
209  else
210  pages->fill(start, end, NotVoidType(v.getF(0)));
211  }
212  else
213  {
214  if (UTisIntStorage(storage))
215  pages->fill(start, end, NotVoidType(v.getI(0)), tuplesize);
216  else
217  pages->fill(start, end, NotVoidType(v.getF(0)), tuplesize);
218  }
219  }
220  else
221  {
222  UT_StackBuffer<NotVoidType> buf(tuplesize);
223  if (UTisIntStorage(storage))
224  {
225  for (exint i = 0; i < tuplesize; ++i)
226  buf[i] = NotVoidType(v.getI(i));
227  }
228  else
229  {
230  for (exint i = 0; i < tuplesize; ++i)
231  buf[i] = NotVoidType(v.getF(i));
232  }
233  pages->fill(start, end, buf, tuplesize);
234  }
235 }
236 
237 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
238 void
239 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setStorage(const UT_Storage newstorage)
240 {
241  const UT_Storage oldstorage = getStorage();
242  UT_ASSERT_P(newstorage != UT_Storage::INVALID);
243  UT_ASSERT_MSG_P((SYS_IsSame<DATA_T,void>::value) || (newstorage == oldstorage), "Can't change the storage of an array whose type is fixed.");
244 
245  // Nothing to do if same type, or bad type
246  if (newstorage == oldstorage || newstorage == UT_Storage::INVALID)
247  return;
248 
249  PageTable *const oldpages = myImpl.getPages();
250 
251  // If there's no data, we only need to set the storage.
252  exint tuplesize = getTupleSize();
253  if (tuplesize == 0 || !oldpages)
254  {
255  myImpl.setStorage(newstorage);
256  return;
257  }
258 
259  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
260 
261  // Copy the data into a new array with the new storage type
262  ThisType newarray(getTupleSize(), newstorage);
263  newarray.setCapacity(capacity());
264  IDX_T n = size();
265  newarray.setSize(n);
266  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
267 
268  // decRef depends on knowing the type
269  switch (oldstorage)
270  {
271  case UT_Storage::INT8:
272  castType<int8>().myImpl.getPages()->decRef(tuplesize); break;
273  case UT_Storage::INT16:
274  castType<int16>().myImpl.getPages()->decRef(tuplesize); break;
275  case UT_Storage::INT32:
276  castType<int32>().myImpl.getPages()->decRef(tuplesize); break;
277  case UT_Storage::INT64:
278  castType<int64>().myImpl.getPages()->decRef(tuplesize); break;
279  case UT_Storage::REAL16:
280  castType<fpreal16>().myImpl.getPages()->decRef(tuplesize); break;
281  case UT_Storage::REAL32:
282  castType<fpreal32>().myImpl.getPages()->decRef(tuplesize); break;
283  case UT_Storage::REAL64:
284  castType<fpreal64>().myImpl.getPages()->decRef(tuplesize); break;
285  case UT_Storage::INVALID:
286  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
287  myImpl.getPages()->decRef(tuplesize); break;
288  }
289 
290  // Take ownership of the page table.
291  PageTable *newpages = newarray.myImpl.getPages();
292  UT_ASSERT_P(newpages);
293  newpages->incRef();
294 
295  myImpl.setStorage(newstorage);
296  myImpl.getPages() = newpages;
297 }
298 
299 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
300 void
301 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::setTupleSize(exint newtuplesize, const UT_Defaults &v)
302 {
303  exint oldtuplesize = getTupleSize();
304  UT_ASSERT_P(newtuplesize >= 0);
305  UT_ASSERT_MSG_P((TSIZE == -1) || (newtuplesize == oldtuplesize), "Can't change the tuple size of an array whose tuple size is fixed.");
306 
307  // Nothing to do if same size, or bad size
308  if (newtuplesize == oldtuplesize || newtuplesize < 0)
309  return;
310 
311  PageTable *const oldpages = myImpl.getPages();
312 
313  // If there's no data, we only need to set the tuple size.
314  if (!oldpages)
315  {
316  myImpl.setTupleSize(newtuplesize);
317  return;
318  }
319 
320  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
321 
322  // Copy the data into a new array with the new storage type
323  ThisType newarray(newtuplesize, getStorage());
324  newarray.setCapacity(capacity());
325  IDX_T n = size();
326  newarray.setSize(n, v);
327  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
328 
329  // decRef depends on knowing the type
330  switch (getStorage())
331  {
332  case UT_Storage::INT8:
333  castType<int8>().myImpl.getPages()->decRef(oldtuplesize); break;
334  case UT_Storage::INT16:
335  castType<int16>().myImpl.getPages()->decRef(oldtuplesize); break;
336  case UT_Storage::INT32:
337  castType<int32>().myImpl.getPages()->decRef(oldtuplesize); break;
338  case UT_Storage::INT64:
339  castType<int64>().myImpl.getPages()->decRef(oldtuplesize); break;
340  case UT_Storage::REAL16:
341  castType<fpreal16>().myImpl.getPages()->decRef(oldtuplesize); break;
342  case UT_Storage::REAL32:
343  castType<fpreal32>().myImpl.getPages()->decRef(oldtuplesize); break;
344  case UT_Storage::REAL64:
345  castType<fpreal64>().myImpl.getPages()->decRef(oldtuplesize); break;
346  case UT_Storage::INVALID:
347  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
348  myImpl.getPages()->decRef(oldtuplesize); break;
349  }
350 
351  // Take ownership of the page table.
352  PageTable *newpages = newarray.myImpl.getPages();
353  UT_ASSERT_P(newpages);
354  newpages->incRef();
355 
356  myImpl.setTupleSize(newtuplesize);
357  myImpl.getPages() = newpages;
358 }
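// Sketch of the runtime storage/tuple-size changes above (the AttribArray
// alias and its template arguments are assumptions; per the asserts in
// setStorage() and setTupleSize(), only arrays with DATA_T void and TSIZE -1
// may change these at runtime):
//
//     typedef UT_PageArray<void,-1> AttribArray;
//     AttribArray attrib(3, UT_Storage::REAL32);  // 3-tuples of fpreal32
//     ...
//     attrib.setStorage(UT_Storage::REAL64);      // copies/converts existing data
//     attrib.setTupleSize(4, UT_Defaults(0.0));   // added component takes the default value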
359 
360 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
361 int64
362 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::getMemoryUsage(bool inclusive) const
363 {
364  int64 mem = inclusive ? sizeof(*this) : 0;
365 
366  const PageTable *pages = myImpl.getPages();
367  if (!pages)
368  return mem;
369 
370  UT_PageNum npages = numPages(pages->capacity());
371  mem += exint(npages) * sizeof(PageTableEntry);
372 
373  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
374 
375  // Case for a single, possibly small page
376  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
377  {
378  mem += sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
379  return mem;
380  }
381 
382  for (UT_PageNum i(0); i < npages; ++i)
383  {
384  const PageTableEntry *const page = pages->getPPage(i);
385  mem += page->getMemoryUsage(tuplebytes);
386  }
387 
388  return mem;
389 }
390 
391 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
392 void
393 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::countMemory(UT_MemoryCounter &counter, bool inclusive) const
394 {
395  if (counter.mustCountUnshared() && inclusive)
396  {
397  UT_MEMORY_DEBUG_LOG("UT_PageArray",int64(sizeof(*this)));
398  counter.countUnshared(sizeof(*this));
399  }
400 
401  const PageTable *pages = myImpl.getPages();
402  if (!pages)
403  return;
404 
405  UT_PageNum npages = numPages(pages->capacity());
406  int64 tablemem = exint(npages) * sizeof(PageTableEntry);
407  if (!pages->isShared())
408  {
409  if (counter.mustCountUnshared())
410  {
411  UT_MEMORY_DEBUG_LOG("UT_PageArray::PageTable",int64(tablemem));
412  counter.countUnshared(tablemem);
413  }
414  }
415  else
416  {
417  UT_ASSERT_MSG_P(pages->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
418  if (counter.mustCountShared())
419  {
420  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::PageTable",int64(tablemem),pages,pages->getRefCount());
421  bool already_counted = counter.countShared(tablemem, pages->getRefCount(), pages);
422 
423  // If this counter has already counted a reference to this page
424  // table, it's also already counted its pages, below, and since
425  // this is the *same* set of references it's already counted,
426  // not different references to the same pages, we'd get incorrect
427  // reference count tracking if we counted the pages again, so we
428  // just return.
429  if (already_counted)
430  return;
431  }
432  }
433 
434  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
435 
436  // Case for a single, possibly small page
437  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
438  {
439  const PageTableEntry *const page = pages->getFirstPage();
440  int64 pagemem = sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
441  if (!page->isShared())
442  {
443  if (counter.mustCountUnshared())
444  {
445  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page0",int64(pagemem));
446  counter.countUnshared(pagemem);
447  }
448  }
449  else
450  {
451  UT_ASSERT_MSG_P(page->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
452  if (counter.mustCountShared())
453  {
454  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
455  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page0",int64(pagemem),masked,page->getRefCount());
456  counter.countShared(pagemem, page->getRefCount(), masked);
457  }
458  }
459  return;
460  }
461 
462  for (UT_PageNum i(0); i < npages; ++i)
463  {
464  const PageTableEntry *const page = pages->getPPage(i);
465  int64 pagemem = page->getMemoryUsage(tuplebytes);
466  if (!pagemem)
467  continue;
468 
469  if (!page->isShared())
470  {
471  if (counter.mustCountUnshared())
472  {
473  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page",int64(pagemem));
474  counter.countUnshared(pagemem);
475  }
476  }
477  else
478  {
479  UT_ASSERT_P(page->getRefCount() > 1);
480  if (counter.mustCountShared())
481  {
482  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
483  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page",int64(pagemem),masked,page->getRefCount());
484  counter.countShared(pagemem, page->getRefCount(), masked);
485  }
486  }
487  }
488 }
489 
490 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
491 void
492 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::moveRange(IDX_T srcstart, IDX_T deststart, IDX_T nelements)
493 {
494  // Just defer to the general function.
495  // It handles overlapping ranges and constant pages correctly.
496  moveRange(*this, srcstart, deststart, nelements);
497 }
498 
499 // This is a very big function, but don't let it scare you.
500 // Much of the code is only applicable to particular template types.
501 // If it weren't for constant pages, this would be *much* simpler.
502 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
503 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
504 void
505 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::moveRange(
506  const UT_PageArray<SRC_DATA_T,SRC_TSIZE,SRC_TABLEHARDENED,SRC_PAGESHARDENED,THEPAGEBITS,IDX_T> &src,
507  IDX_T srcstart, IDX_T deststart, IDX_T nelements)
508 {
509  typedef UT_PageArray<SRC_DATA_T,SRC_TSIZE,SRC_TABLEHARDENED,SRC_PAGESHARDENED,THEPAGEBITS,IDX_T> SrcType;
510  typedef typename SrcType::PageTable SrcPageTable;
511  typedef typename SrcType::PageTableEntry SrcPageTableEntry;
512  typedef typename SrcType::NotVoidType SrcNotVoidType;
513 
514  UT_ASSERT_P(nelements >= IDX_T(0));
515  UT_ASSERT_P(srcstart >= IDX_T(0) && deststart >= IDX_T(0));
516  UT_ASSERT_P(srcstart+nelements <= src.size() && deststart+nelements <= size());
517 
518  UT_ASSERT_P((SYSisSame<DATA_T,SRC_DATA_T>()) || (getStorage() != UT_Storage::INVALID && src.getStorage() != UT_Storage::INVALID));
519 
520  // If there's even a chance we might write values, we should harden the
521  // table and record it in the templates so that we don't harden again.
522  auto &hard = hardenTable();
523 
524  // If the destination storage type is not known at compile time,
525  // switch, cast, and call again.
526  if (SYSisSame<DATA_T,void>())
527  {
528  UT_Storage storage = getStorage();
529  switch (storage)
530  {
531  case UT_Storage::INT8:
532  hard.template castType<int8>().moveRange(src, srcstart, deststart, nelements); return;
533  case UT_Storage::INT16:
534  hard.template castType<int16>().moveRange(src, srcstart, deststart, nelements); return;
535  case UT_Storage::INT32:
536  hard.template castType<int32>().moveRange(src, srcstart, deststart, nelements); return;
537  case UT_Storage::INT64:
538  hard.template castType<int64>().moveRange(src, srcstart, deststart, nelements); return;
539  case UT_Storage::REAL16:
540  hard.template castType<fpreal16>().moveRange(src, srcstart, deststart, nelements); return;
541  case UT_Storage::REAL32:
542  hard.template castType<fpreal32>().moveRange(src, srcstart, deststart, nelements); return;
543  case UT_Storage::REAL64:
544  hard.template castType<fpreal64>().moveRange(src, srcstart, deststart, nelements); return;
545  case UT_Storage::INVALID:
546  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
547  break;
548  }
549  return;
550  }
551 
552  // If the source storage type is not known at compile time,
553  // switch, cast, and call again.
554  if (SYSisSame<SRC_DATA_T,void>())
555  {
556  // Avoid switch on storage type if src is dest.
557  if ((const void*)&src==(void*)this)
558  {
559  hard.moveRange(src.template castType<DATA_T>(), srcstart, deststart, nelements);
560  return;
561  }
562 
563  UT_Storage src_storage = src.getStorage();
564  switch (src_storage)
565  {
566  case UT_Storage::INT8:
567  hard.moveRange(src.template castType<int8>(), srcstart, deststart, nelements); return;
568  case UT_Storage::INT16:
569  hard.moveRange(src.template castType<int16>(), srcstart, deststart, nelements); return;
570  case UT_Storage::INT32:
571  hard.moveRange(src.template castType<int32>(), srcstart, deststart, nelements); return;
572  case UT_Storage::INT64:
573  hard.moveRange(src.template castType<int64>(), srcstart, deststart, nelements); return;
574  case UT_Storage::REAL16:
575  hard.moveRange(src.template castType<fpreal16>(), srcstart, deststart, nelements); return;
576  case UT_Storage::REAL32:
577  hard.moveRange(src.template castType<fpreal32>(), srcstart, deststart, nelements); return;
578  case UT_Storage::REAL64:
579  hard.moveRange(src.template castType<fpreal64>(), srcstart, deststart, nelements); return;
580  case UT_Storage::INVALID:
581  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
582  }
583  return;
584  }
585 
586  // We now have both the source type and the destination type known at compile time.
587  UT_ASSERT_P((!SYSisSame<DATA_T,void>()) && (!SYSisSame<SRC_DATA_T,void>()));
588 
589  // Check if zero elements or moving data to location it's already in.
590  if (nelements <= IDX_T(0) || (SYSisSame<DATA_T,SRC_DATA_T>() && (const void*)&src==(void*)this && srcstart == deststart))
591  return;
592 
593  UT_PageOff srcoff = pageOff(srcstart);
594  UT_PageOff destoff = pageOff(deststart);
595 
596  // Just copy the minimum of the tuple sizes.
597  // Hopefully the compiler optimizes appropriately if the values are
598  // known at compile time.
599  const exint srctuplesize = src.getTupleSize();
600  const exint desttuplesize = getTupleSize();
601 
602  // Nothing to do if either tuple size is zero.
603  if (srctuplesize == 0 || desttuplesize == 0)
604  return;
605 
606  const SrcPageTable *srcpagetable = src.myImpl.getPages();
607  PageTable *destpagetable = myImpl.getPages();
608 
609  // Since nelements is > 0, srcpagetable and destpagetable should be non-NULL.
610  UT_ASSERT_P(srcpagetable && destpagetable);
611 
612  UT_PageNum srcpagenum = pageNum(srcstart);
613  UT_PageNum destpagenum = pageNum(deststart);
614 
615  // NOTE: Shouldn't need to check for smaller first page here
616  // (until below), since that page boundary isn't allowed
617  // to be crossed by the ranges.
618  if (srcoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize) && destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
619  {
620  // *************************************************************
621  // * CASE 1: Source and dest each confined to 1 page *
622  // *************************************************************
623 
624  // NOTE: We can dereference here because we don't pass any address into srcpage
625  // outside of this scope.
626  const SrcPageTableEntry *const srcpage = srcpagetable->getPPage(srcpagenum);
627  PageTableEntry *destpage = destpagetable->getPPage(destpagenum);
628 
629  // This is the only case that can have both srcpage and destpage be small pages.
630  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
631  UT_PageOff destpagecapacity(thePageSize);
632  if (issmalldestpage)
633  destpagecapacity = destpagetable->capacity();
634 
635  // If dest is a full page and src is also a full page or constant, just use replacePage.
636  bool isfullpage = (nelements == IDX_T(thePageSize));
637  if (!isfullpage && destoff == UT_PageOff(0) && deststart+nelements == size())
638  {
639  // If srcpage and destpage aren't the same capacity, destpage can't reference srcpage,
640  // even if size() is much less than the capacity of either.
641  bool issmallsrcpage = srcpagetable->capacity() < IDX_T(thePageSize);
642  bool samecapacity = (!issmalldestpage && !issmallsrcpage) ||
643  (issmalldestpage && issmallsrcpage && destpagetable->capacity() == srcpagetable->capacity());
644 
645  // destpage is a full destination page, but may not be replaceable by srcpage.
646  // srcpage lines up if srcoff == 0, and always implicitly lines up if constant.
647  // If either src or dest is small page and can't reference due to different capacity,
648  // fall through to copyPartialPage, which won't reference.
649 
650  isfullpage = ((srcoff == UT_PageOff(0) && samecapacity) || srcpage->isConstant());
651  }
652  if (isfullpage)
653  {
654  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nelements, destpagecapacity);
655  return;
656  }
657 
658  // If it's a partial page, just copy that part
659  // NOTE: This handles overlapping ranges correctly.
660  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, nelements, destpagecapacity);
661  return;
662  }
663 
664  // If overlapping and moving data to later addresses,
665  // we need to do a backward loop, which is a big pain.
666  bool isoverlappingmovelater = (
667  SYSisSame<DATA_T,SRC_DATA_T>() &&
668  (void*)this == (const void *)&src &&
669  (deststart > srcstart && deststart < srcstart+nelements)
670  );
671 
672  if (srcoff == destoff)
673  {
674  // *************************************************************
675  // * CASE 2: Source and dest pages line up and at least one *
676  // * page boundary is crossed. *
677  // *************************************************************
678  // Example for following along:
679  // THEPAGEBITS is 3, so 8-tuple pages.
680  // src and dest offsets are both 5.
681  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
682  // dest [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
683 
684  // In this case, src and dest don't have any small pages,
685  // because both have page boundary crossings.
686 
687  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
688  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
689 
690  // If overlapping and moving data to later addresses,
691  // we need to do a backward loop, which is a big pain.
692  // It's not a very common case, so it doesn't have to be as optimized.
693  if (isoverlappingmovelater)
694  {
695  UT_ASSERT_P(desttuplesize == srctuplesize);
696 
697  UT_PageOff ntuplesfirstpage(0);
698  if (destoff != UT_PageOff(0))
699  {
700  ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
701  nelements -= IDX_T(exint(ntuplesfirstpage));
702  }
703 
704  // (nelements is now 3 less)
705  // NOTE: Not numPages, since that'd include any partial page at end
706  UT_PageNum nfullpages = pageNum(nelements);
707  ++psrcpagep;
708  ++pdestpagep;
709  PageTableEntry *pdestend = pdestpagep + nfullpages;
710  const SrcPageTableEntry *psrcend = psrcpagep + nfullpages;
711 
712  // Since backward, first, copy any incomplete last page
713  // src [ | | |# # # # # #]
714  // dest [ | | |# # # # # #]
715  UT_PageOff nleftover = pageOff(nelements);
716  if (nleftover != UT_PageOff(0))
717  {
718  const SrcPageTableEntry *srcpage = psrcend;
719  PageTableEntry *destpage = pdestend;
720 
721  // Remember that it may be effectively complete, if
722  // it's the last page within the size of the array and nleftover is
723  // the number of elements less than size() in that page.
724  // If it's really a full page, just use replacePage.
725  bool isfullpage = deststart+nelements == size();
726  if (isfullpage)
727  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
728  else
729  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
730  }
731 
732  // Still backward, copy/reference whole pages next:
733  // src [ |# # # # # # # #|# # # # # # # #| ]
734  // dest [ |# # # # # # # #|# # # # # # # #| ]
735  while (pdestpagep != pdestend)
736  {
737  --psrcend;
738  --pdestend;
739  const SrcPageTableEntry *srcpage = psrcend;
740  PageTableEntry *destpage = pdestend;
741 
742  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
743  }
744 
745  // Still backward, lastly, copy any incomplete first page:
746  // src [# # #| | | ]
747  // dest [# # #| | | ]
748  if (destoff != UT_PageOff(0))
749  {
750  --psrcpagep;
751  --pdestpagep;
752  const SrcPageTableEntry *srcpage = psrcpagep;
753  PageTableEntry *destpage = pdestpagep;
754 
755  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
756  }
757 
758  return;
759  }
760 
761  // In the common case of case 2, src and dest aren't overlapping,
762  // or src is later than dest, so we can go forward.
763 
764  // First, copy any incomplete first page:
765  // src [# # #| | | ]
766  // dest [# # #| | | ]
767  if (destoff != UT_PageOff(0))
768  {
769  const SrcPageTableEntry *srcpage = psrcpagep;
770  PageTableEntry *destpage = pdestpagep;
771 
772  UT_PageOff ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
773  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
774 
775  nelements -= IDX_T(exint(ntuplesfirstpage));
776  ++psrcpagep;
777  ++pdestpagep;
778  }
779 
780  // Copy/reference whole pages next:
781  // src [ |# # # # # # # #|# # # # # # # #| ]
782  // dest [ |# # # # # # # #|# # # # # # # #| ]
783  // (nelements is now 3 less)
784  // NOTE: Not numPages, since that'd include any partial page at end
785  UT_PageNum nfullpages = pageNum(nelements);
786  PageTableEntry *pdestend = pdestpagep + nfullpages;
787  for (; pdestpagep != pdestend; ++psrcpagep, ++pdestpagep)
788  {
789  const SrcPageTableEntry *srcpage = psrcpagep;
790  PageTableEntry *destpage = pdestpagep;
791 
792  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
793  }
794 
795  // Lastly, copy any incomplete last page
796  // src [ | | |# # # # # #]
797  // dest [ | | |# # # # # #]
798  UT_PageOff nleftover = pageOff(nelements);
799  if (nleftover != UT_PageOff(0))
800  {
801  const SrcPageTableEntry *srcpage = psrcpagep;
802  PageTableEntry *destpage = pdestpagep;
803 
804  // Remember that it may be effectively complete, if
805  // it's the last page within the size of the array and nleftover is
806  // the number of elements less than size() in that page.
807  // If it's really a full page, just use replacePage.
808  bool isfullpage = deststart+nelements == size();
809  if (isfullpage)
810  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
811  else
812  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
813  }
814  return;
815  }
816  else
817  {
818  // *************************************************************
819  // * CASE 3: Source and dest pages don't line up and at least *
820  // * one page boundary is crossed. *
821  // *************************************************************
822  // Example for following along:
823  // THEPAGEBITS is 3, so 8-tuple pages.
824  // src is 5; dest is 3.
825  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
826  // dest [# # # # #|# # # # # # # #|# # # # # # # #|# # # #]
827  // |<----6---->|<2>|
828  // spagestartind dpagestartins
829  UT_PageOff spagestartind = pageOff(deststart-srcstart);
830  UT_PageOff dpagestartins = pageOff(srcstart-deststart);
831 
832  // Because of the misalignment, we don't have to worry about
833  // referencing pages, though we do have to worry about constant
834  // pages. If both src pages covering a full dest page are constant
835  // and the same value, we can use makeConstantFrom using either
836  // of the source pages.
837 
838  // REMEMBER: This case could have a small first page in either
839  // src or dest, but not both.
840  // REMEMBER: Must handle overlapping ranges!
841 
842  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
843  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
844 
845  // Case 3.0:
846  // Overlapping range
847 
848  const SrcPageTableEntry *srcpage0 = psrcpagep;
849 
850  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
851 
852  // Case 3.1:
853  // src [# # #|# #]
854  // dest [# # # # #]
855  // dest is in a single page; it could be a small page.
856  // src is across two pages; they can't be small-capacity pages.
857  if (destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
858  {
859  PageTableEntry *destpage = pdestpagep;
860 
861  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
862  UT_PageOff destpagecapacity(thePageSize);
863  if (issmalldestpage)
864  destpagecapacity = destpagetable->capacity();
865 
866  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
867 
868  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
869  {
870  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
871  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
872  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
873  {
874  // If dest page is already constant and equal to both src pages, nothing to do.
875  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
876  return;
877 
878  // If both src pages are constant and equal, and dest is a full
879  // page, make dest constant.
880  bool isfullpage = (nelements == IDX_T(thePageSize)) || (destoff == UT_PageOff(0) && deststart+nelements == size());
881  if (isfullpage)
882  {
883  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
884  return;
885  }
886  }
887  }
888 
889  if (!PAGESHARDENED && destpage->isConstant())
890  hardenConstantPage(destpage, destpagecapacity, desttuplesize);
891  else if (!PAGESHARDENED && destpage->isShared())
892  hardenSharedPage(destpage, destpagecapacity, desttuplesize);
893 
894  UT_PageOff n0 = UT_PageOff(thePageSize)-srcoff;
895  if (isoverlappingmovelater)
896  {
897  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
898  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
899  }
900  else
901  {
902  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
903  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
904  }
905 
906  return;
907  }
908 
909  // There is at least one dest page boundary, so dest has full-capacity pages.
910 
911  if (isoverlappingmovelater)
912  {
913  // FIXME: Implement this!!!
914  UT_ASSERT_MSG(0, "Implement this!!! It should be like the code below, only copying chunks in reverse order.");
915 
916  return;
917  }
918 
919  // Deal with tuples before the first full destination page.
920  if (destoff > UT_PageOff(0))
921  {
922  PageTableEntry *destpage = pdestpagep;
923 
924  if (destoff < spagestartind)
925  {
926  // srcpage0 srcpage1
927  // src [# # #|# # ...
928  // dest [# # # # #|...
929  // |<--->|<->|
930  // spagestartind-destoff dpagestartins
931 
932  UT_PageOff n0 = spagestartind - destoff;
933  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
934 
935  srcoff = UT_PageOff(0);
936  destoff += n0;
937  ++psrcpagep;
938  srcpage0 = psrcpagep;
939  nelements -= IDX_T(exint(n0));
940  }
941 
942  // srcpage0
943  // src [# # # #...
944  // dest [# #|# #...
945  // |<->|
946  // thePageSize-destoff
947  UT_PageOff n0 = UT_PageOff(thePageSize) - destoff;
948  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
949  ++pdestpagep;
950  nelements -= IDX_T(exint(n0));
951  }
952 
953  // Middle full destination pages
954  for (; nelements >= IDX_T(thePageSize); nelements -= IDX_T(thePageSize), ++pdestpagep, ++psrcpagep, (srcpage0 = psrcpagep))
955  {
956  PageTableEntry *destpage = pdestpagep;
957 
958  // srcpage0 srcpage1
959  // src [ # # # # # #|# # ]
960  // dest [ |# # # # # # # #| ]
961  // |<--------->|<->|
962  // spagestartind dpagestartins
963 
964  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
965 
966  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
967  {
968  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
969  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
970  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
971  {
972  // If dest page is already constant and equal to both src pages, nothing to do.
973  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
974  continue;
975 
976  // If both src pages are constant and equal, and dest is a full
977  // page, make dest constant.
978  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
979  continue;
980  }
981  }
982 
983  if (!PAGESHARDENED && destpage->isConstant())
984  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
985  else if (!PAGESHARDENED && destpage->isShared())
986  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
987 
988  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
989  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), dpagestartins, UT_PageOff(thePageSize));
990  }
991 
992  // Final partial page, though may reach size()
993  if (nelements > IDX_T(0))
994  {
995  PageTableEntry *destpage = pdestpagep;
996 
997  const bool isfullmaybeconstpage = !PAGESHARDENED && (deststart+nelements == size());
998 
999  if (nelements > IDX_T(exint(spagestartind)))
1000  {
1001  // srcpage0 srcpage1
1002  // src [ # # # # # #|#]
1003  // dest [ |# # # # # # #]
1004  // |<--------->|-|
1005  // spagestartind nelements-spagestartind
1006 
1007  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
1008 
1009  if (isfullmaybeconstpage && srcpage0->isConstant() && srcpage1->isConstant())
1010  {
1011  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1012  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
1013  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
1014  {
1015  // If dest page is already constant and equal to both src pages, nothing to do.
1016  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1017  return;
1018 
1019  // If both src pages are constant and equal, and dest is a full
1020  // page, make dest constant.
1021  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1022  return;
1023  }
1024  }
1025 
1026  if (!PAGESHARDENED && destpage->isConstant())
1027  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1028  else if (!PAGESHARDENED && destpage->isShared())
1029  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1030 
1031  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
1032  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), UT_PageOff(exint(nelements))-spagestartind, UT_PageOff(thePageSize));
1033  }
1034  else
1035  {
1036  // srcpage0
1037  // src [ # # # # #]
1038  // dest [ |# # # # #]
1039  // |<------->|
1040  // nelements
1041 
1042  if (isfullmaybeconstpage && srcpage0->isConstant())
1043  {
1044  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1045  // If dest page is already constant and equal to both src pages, nothing to do.
1046  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1047  return;
1048 
1049  // If both src pages are constant and equal, and dest is a full
1050  // page, make dest constant.
1051  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1052  return;
1053  }
1054 
1055  if (!PAGESHARDENED && destpage->isConstant())
1056  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1057  else if (!PAGESHARDENED && destpage->isShared())
1058  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1059 
1060  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, UT_PageOff(exint(nelements)), UT_PageOff(thePageSize));
1061  }
1062  }
1063  }
1064 }
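// Sketch of a cross-array copy handled by the function above (alias and
// template arguments are assumptions): the storage is converted and only the
// smaller of the two tuple sizes is copied, as noted in the code above.
//
//     UT_PageArray<void,-1> src(3, UT_Storage::REAL32);
//     UT_PageArray<void,-1> dest(4, UT_Storage::REAL64);
//     ... size both arrays so the ranges below are valid ...
//     dest.moveRange(src, srcstart, deststart, nelements);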
1065 
1066 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1067 void
1068 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::swapRange(IDX_T astart, IDX_T bstart, IDX_T nelements)
1069 {
1070  UT_ASSERT_P(nelements >= IDX_T(0));
1071  UT_ASSERT_P(astart >= IDX_T(0) && bstart >= IDX_T(0));
1072  UT_ASSERT_P(astart+nelements <= size() && bstart+nelements <= size());
1073  UT_ASSERT_MSG_P(astart >= bstart+nelements || bstart >= astart+nelements, "Ranges can't overlap when swapping!");
1074  if (nelements <= IDX_T(0))
1075  return;
1076  auto &hard = hardenTable();
1077  if (!SYSisSame<DATA_T,void>())
1078  {
1079  // Easy case, where the storage type is known at compile time.
1080  exint tuplesize = getTupleSize();
1081  for (IDX_T i(0); i < nelements; ++i)
1082  {
1083  for (exint component = 0; component < tuplesize; ++component)
1084  {
1085  UTswap(hard(astart+i, component), hard(bstart+i, component));
1086  }
1087  }
1088  return;
1089  }
1090 
1091  // Hard case, where the storage type is not known at compile time.
1092  UT_Storage storage = getStorage();
1093  switch (storage)
1094  {
1095  case UT_Storage::INT8:
1096  hard.template castType<int8>().swapRange(astart, bstart, nelements); return;
1097  case UT_Storage::INT16:
1098  hard.template castType<int16>().swapRange(astart, bstart, nelements); return;
1099  case UT_Storage::INT32:
1100  hard.template castType<int32>().swapRange(astart, bstart, nelements); return;
1101  case UT_Storage::INT64:
1102  hard.template castType<int64>().swapRange(astart, bstart, nelements); return;
1103  case UT_Storage::REAL16:
1104  hard.template castType<fpreal16>().swapRange(astart, bstart, nelements); return;
1105  case UT_Storage::REAL32:
1106  hard.template castType<fpreal32>().swapRange(astart, bstart, nelements); return;
1107  case UT_Storage::REAL64:
1108  hard.template castType<fpreal64>().swapRange(astart, bstart, nelements); return;
1109  case UT_Storage::INVALID:
1110  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
1111  break;
1112  }
1113 }
1114 
1115 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1116 template<typename SrcType>
1117 void
1118 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::replacePage(
1119  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destpagesize, UT_PageOff destpagecapacity)
1120 {
1121  typedef typename SrcType::DataType SRC_DATA_T;
1122  typedef typename SrcType::NotVoidType SrcNotVoidType;
1123  UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
1124  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1125  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1127  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1128  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1129  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1130  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1131 
1132  // If the source page is constant,
1133  if (src->isConstant())
1134  {
1135  if (!PAGESHARDENED && (dest->isConstant() || desttuplesize <= srctuplesize))
1136  makeConstantFrom<SrcType>(dest, src, desttuplesize, srctuplesize);
1137  else
1138  {
1139  // This codepath is primarily for the awkward case where we can't
1140  // easily make the destination page constant, because
1141  // it's not currently constant and the tuple size is larger.
1142  // However, it's also used for filling a page that isn't allowed to be
1143  // constant-compressed with the tuple from a constant-compressed source.
1144  UT_ASSERT_P(PAGESHARDENED || (!dest->isConstant() && desttuplesize > srctuplesize));
1145 
1146  if (!PAGESHARDENED && dest->isShared())
1147  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1148 
1149  // Fill range in dest with value from src.
1150  NotVoidType *destpagedata = dest->getFirstPtr();
1151  // NOTE: This is destpagesize instead of capacity, because it's just used for filling in data.
1152  NotVoidType *destpageend = destpagedata + (desttuplesize*destpagesize);
1153 
1154  const SrcNotVoidType *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1155 
1156  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
1157  const exint desttupleextra = desttuplesize-mintuplesize;
1158 
1159  fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);
1160  }
1161  }
1162  else if (!PAGESHARDENED && SYSisSame<DATA_T,SRC_DATA_T>() && desttuplesize == srctuplesize)
1163  {
1164  // Nothing to do if already referencing the same data.
1165  // This pointer comparison works because we know that
1166  // the types and tuple sizes are the same,
1167  // and the src is non-constant, (so if dest is constant,
1168  // it won't be equal).
1169  if (src->getFirstPtrVoid() == dest->getFirstPtrVoidUnsafe())
1170  return;
1171 
1172  exint bytesize = desttuplesize*sizeof(NotVoidType);
1173  if (dest->isRefd(bytesize))
1174  dest->decRef();
1175 
1176  // Reference the source page
1177  SYSconst_cast(src)->incRef();
1178 
1179  // Still need to cast to PageTableEntry*, because the compiler needs to
1180  // compile this line when the condition is false.
1181  *dest = *(const PageTableEntry *)src;
1182  }
1183  else
1184  {
1185  if (!PAGESHARDENED && dest->isConstant())
1186  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1187  else if (!PAGESHARDENED && dest->isShared())
1188  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1189 
1190  // Copy data from src to dest
1191  NotVoidType *destpagedata = dest->getFirstPtr();
1192  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1193  // NOTE: This must be destpagesize instead of capacity, else it might access the source out of bounds.
1194  copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, destpagesize);
1195  }
1196 }
1197 
1198 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1199 template<typename SrcType>
1200 void
1201 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::copyPartialPage(
1202  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize,
1203  UT_PageOff destoff, UT_PageOff srcoff, UT_PageOff ntuples, UT_PageOff destpagecapacity)
1204 {
1205  typedef typename SrcType::DataType SRC_DATA_T;
1206  typedef typename SrcType::NotVoidType SrcNotVoidType;
1207  UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
1208  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1209  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1211  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1212  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1213  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1214  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1215  UT_ASSERT_P(ntuples > UT_PageOff(0));
1216 
1217  // NOTE: Don't have to check for full page. The caller is responsible
1218  // for that if they want referencing or constant overwriting.
1219 
1220  // If the source page is constant,
1221  if (src->isConstant())
1222  {
1223  const SrcNotVoidType *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1224 
1225  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
1226 
1227  // If the destination page is constant,
1228  if (dest->isConstant())
1229  {
1230  const NotVoidType *tuple = getConstantPtr(dest, 0, desttuplesize);
1231 
1232  // If the const pages are equal, there's nothing to do.
1233  if (isEqualConst(tuple, stuple, mintuplesize))
1234  return;
1235 
1236  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1237  }
1238 
1239  if (!PAGESHARDENED && dest->isShared())
1240  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1241 
1242  // Fill range in dest with value from src.
1243  NotVoidType *destpagedata = dest->getFirstPtr() + (desttuplesize*destoff);
1244  NotVoidType *destpageend = destpagedata + (desttuplesize*ntuples);
1245 
1246  const exint desttupleextra = desttuplesize-mintuplesize;
1247 
1248  fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);
1249 
1250  return;
1251  }
1252 
1253  if (!PAGESHARDENED && dest->isConstant())
1254  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1255  else if (!PAGESHARDENED && dest->isShared())
1256  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1257 
1258  // Remember that the ranges could overlap if same page
1259  // NOTE: Since dest was hardened if shared, dest can only equal src if same table.
1260  if (SYSisSame<DATA_T,SRC_DATA_T>() && dest->getFirstPtrVoidUnsafe() == src->getFirstPtrVoid() && (srcoff < destoff+UT_PageOff(exint(ntuples)) && destoff < srcoff+UT_PageOff(exint(ntuples))))
1261  {
1262  // Overlapping, so be careful!
1263 
1264  UT_ASSERT_P(desttuplesize == srctuplesize);
1265 
1266  // Nothing to do if exactly same range.
1267  // This could happen even if caller checked the global offsets,
1268  // and even if they're separate arrays, because the same page
1269  // can appear in multiple locations.
1270  if (srcoff == destoff)
1271  return;
1272 
1273  NotVoidType *destpagedata = dest->getFirstPtr();
1274  destpagedata += desttuplesize*destoff;
1275  NotVoidType *destend = destpagedata + desttuplesize*ntuples;
1276 
1277  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1278  srcpagedata += srctuplesize*srcoff;
1279 
1280  // If moving to earlier addresses, can copy in forward loop
1281  if (destoff < srcoff)
1282  {
1283  do
1284  {
1285  *destpagedata = *srcpagedata;
1286  ++srcpagedata;
1287  ++destpagedata;
1288  } while (destpagedata != destend);
1289  }
1290  // If moving to later addresses, must copy in backward loop
1291  else
1292  {
1293  const SrcNotVoidType *srcend = srcpagedata + srctuplesize*ntuples;
1294  do
1295  {
1296  --srcend;
1297  --destend;
1298  *destend = *srcend;
1299  } while (destpagedata != destend);
1300  }
1301  }
1302  else
1303  {
1304  // The two ranges don't overlap, so just copy
1305  NotVoidType *destpagedata = dest->getFirstPtr();
1306  destpagedata += desttuplesize*destoff;
1307 
1308  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1309  srcpagedata += srctuplesize*srcoff;
1310 
1311  copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, UT_PageOff(ntuples));
1312  }
1313 }
1314 
1315 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1316 template<typename SrcType>
1317 void
1318 UT_PageArray<DATA_T,TSIZE,TABLEHARDENED,PAGESHARDENED,THEPAGEBITS,IDX_T>::makeConstantFrom(
1319  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize)
1320 {
1321  typedef typename SrcType::DataType SRC_DATA_T;
1322  //typedef UT_PageArray<SRC_DATA_T,SRC_TSIZE,SRC_TABLEHARDENED,SRC_PAGESHARDENED,THEPAGEBITS,IDX_T> SrcType;
1323  typedef typename SrcType::PageTableEntry SrcPageTableEntry;
1324  typedef typename SrcType::NotVoidType SrcNotVoidType;
1325 
1326  const exint SRC_TSIZE = SrcType::theTupleSize;
1327  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1328  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1329  UT_ASSERT_P(src->isConstant());
1331  UT_ASSERT_P(!PAGESHARDENED);
1332  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1333  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1334  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1335  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1336  UT_ASSERT_MSG_P(desttuplesize <= srctuplesize || dest->isConstant(), "The higher component values may not have been constant!");
1337 
1338  if (PAGESHARDENED)
1339  return;
1340 
1341  // Do nothing in very common case of copying zero to zero.
1342  // NOTE: It may seem like this could be generalized to
1343  // if (*src == *dest)
1344  // but, they could be different types or tuple sizes,
1345  // so, for example, one could be an inline constant page
1346  // that just happens to equal a pointer for a non-inline
1347  // constant page, or two inline tuples may be different
1348  // but produce equal pointers, e.g. (-2,-2) in int16
1349  // would match (-65538) in int32.
1350  const bool issrczero = src->isConstantAndZeroSafe();
1351  if (dest->isConstantAndZeroSafe() && issrczero)
1352  return;
1353 
1354  bool wasconst = dest->isConstant();
1355  if (!wasconst)
1356  dest->decRef();
1357 
1358  // Common case of exact match can just reference the same constant page
1359  if (((TSIZE == SRC_TSIZE && TSIZE != -1) || (desttuplesize == srctuplesize)) && SYSisSame<DATA_T,SRC_DATA_T>())
1360  {
1361  // Now that we know that the types and tuple sizes are the same,
1362  // we can just check the pointers to see if they're the same (constant) page.
1363  // The typecast on src is just so that it will compile when the types don't match.
1364  if (*dest == *(const PageTableEntry*)src)
1365  return;
1366 
1367  const bool typefitsinline = PageTableEntry::typeFitsInline(desttuplesize);
1368  if (!typefitsinline && wasconst && !dest->isConstantZero())
1369  {
1370  dest->decRef();
1371  }
1372 
1373  // Still need to cast to PageTableEntry*, because the compiler needs to
1374  // compile this line when the condition is false.
1375  *dest = *(const PageTableEntry*)src;
1376 
1377  if (!typefitsinline && !dest->isConstantZero())
1378  {
1379  dest->incRef();
1380  }
1381 
1382  return;
1383  }
1384 
1385  // Either the type doesn't match or the tuple size doesn't match.
1386 
1387  const SrcNotVoidType *const srctuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1388 
1389  const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);
1390 
1391  // Easy for inline case, checked at compile-time.
1392  if (PageTableEntry::typeFitsInline(desttuplesize))
1393  {
1394  // If dest was already constant, we don't need to write
1395  // theConstantPageBit, and we don't want to blow away any
1396  // components that are between srctuplesize and desttuplesize, in the
1397  // unlikely event that desttuplesize > srctuplesize.
1398  if (!wasconst)
1399  {
1400  // This sets the constant bit and makes sure that the
1401  // space between that bit and tuple component 0 is zeroed.
1402  dest->initZero();
1403 
1404  // Since initZero sets everything to zero, if src
1405  // is all zero, we can early exit.
1406  if (issrczero)
1407  return;
1408  }
1409  NotVoidType *tuple = dest->getInlinePtr(desttuplesize);
1410  if (issrczero)
1411  {
1412  memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
1413  }
1414  else
1415  {
1416  for (exint i = 0; i < mintuplesize; ++i)
1417  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1418  }
1419  return;
1420  }
1421 
1422  // In other cases, we may or may not have to unref constant page
1423  if (wasconst)
1424  {
1425  if (dest->isConstantZero())
1426  {
1427  // Fairly common case: already zero, making zero.
1428  if (issrczero)
1429  return;
1430  }
1431  else
1432  {
1433  if (desttuplesize <= srctuplesize && issrczero)
1434  {
1435  // No longer need this old constant page
1436  dest->decRef();
1437  }
1438  else if (dest->isShared())
1439  {
1440  NotVoidType *tuple = dest->getMaskedPtr();
1441  bool equal = true;
1442  if (desttuplesize > srctuplesize && issrczero)
1443  {
1444  equal = isZero(tuple, mintuplesize);
1445  }
1446  else
1447  {
1448  for (exint i = 0; i < mintuplesize; ++i)
1449  {
1450  if (tuple[i] != UTconvertStorage<NotVoidType>(srctuple[i]))
1451  {
1452  equal = false;
1453  break;
1454  }
1455  }
1456  }
1457 
1458  if (equal)
1459  {
1460  // Already equal; nothing to do
1461  return;
1462  }
1463 
1464  // Need to allocate new constant page before ditching the old one
1465  // if desttuplesize is larger, because some elements need to be kept.
1466  if (desttuplesize > srctuplesize)
1467  {
1468  // Need to save the pointers so that we can decRef below
1469  // after calling alloc.
1470  PageTableEntry orig(*dest);
1471 
1472  dest->alloc(UT_PageOff(1), desttuplesize);
1473  NotVoidType *newtuple = dest->getFirstPtr();
1474  // Copy lower components from src
1475  if (issrczero)
1476  {
1477  memset(newtuple, 0, srctuplesize*sizeof(NotVoidType));
1478  }
1479  else
1480  {
1481  for (exint i = 0; i < srctuplesize; ++i)
1482  newtuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1483  }
1484  // Copy higher components from dest
1485  for (exint i = srctuplesize; i < desttuplesize; ++i)
1486  newtuple[i] = tuple[i];
1487 
1488  orig.decRef();
1489  dest->setConstantBit();
1490  return;
1491  }
1492 
1493  // No longer need this old constant page
1494  dest->decRef();
1495  }
1496  else
1497  {
1498  // Reuse the unshared constant page
1499  NotVoidType *tuple = dest->getMaskedPtr();
1500  if (issrczero)
1501  {
1502  memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
1503  }
1504  else
1505  {
1506  for (exint i = 0; i < mintuplesize; ++i)
1507  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1508  }
1509  return;
1510  }
1511  }
1512  }
1513 
1514  if (desttuplesize <= srctuplesize && issrczero)
1515  {
1516  dest->initZero();
1517  return;
1518  }
1519 
1520  // Need to allocate new constant page
1521  dest->alloc(UT_PageOff(1), desttuplesize);
1522  NotVoidType *tuple = dest->getFirstPtr();
1523  if (issrczero)
1524  {
1525  memset(tuple, 0, desttuplesize*sizeof(NotVoidType));
1526  }
1527  else
1528  {
1529  for (exint i = 0; i < mintuplesize; ++i)
1530  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1531 
1532  if (desttuplesize > srctuplesize)
1533  {
1534  // dest was already zero when here, or !wasconst, so zero out the extra components not copied from src.
1535  memset(tuple+srctuplesize, 0, (desttuplesize-srctuplesize)*sizeof(NotVoidType));
1536  }
1537  }
1538  dest->setConstantBit();
1539 }
1540 
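// For illustration of the constant-page handling above: suppose
// desttuplesize == 4 and srctuplesize == 3, so mintuplesize == 3.  If dest
// held the constant tuple {7,7,7,7} and src holds the constant tuple {1,2,3},
// the branch that allocates a new constant page produces {1,2,3,7}: the lower
// components come from src (converted with UTconvertStorage when the storage
// types differ) and the remaining component keeps dest's old value.
// Conversely, when desttuplesize <= srctuplesize and src is all zero, dest is
// simply reset with initZero().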
1541 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1542 template<typename SrcNotVoidType>
1543 void
1545  NotVoidType *__restrict destpagedata,
1546  NotVoidType *destpageend,
1547  const SrcNotVoidType *__restrict stuple,
1548  exint mintuplesize,
1549  exint desttupleextra)
1550 {
1551  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1552 
1553  // Fill range in dest with value from stuple.
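 // For illustration: with desttuplesize == 4 and srctuplesize == 3, the
 // caller passes mintuplesize == 3 and desttupleextra == 1, so each pass of
 // the loops below writes components 0..2 of a destination tuple from stuple
 // and then skips the 4th component, leaving it untouched.  A NULL stuple
 // writes default (zero) values into those same components instead.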
1554  if (stuple)
1555  {
1556  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1557  {
1558  do
1559  {
1560  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1561  *destpagedata = UTconvertStorage<NotVoidType>(stuple[i]);
1562  destpagedata += desttupleextra;
1563  } while(destpagedata < destpageend);
1564  }
1565  else
1566  {
1567  do
1568  {
1569  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1570  *destpagedata = stuple[i];
1571  destpagedata += desttupleextra;
1572  } while(destpagedata < destpageend);
1573  }
1574  }
1575  else
1576  {
1577  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1578  {
1579  do
1580  {
1581  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1582  *destpagedata = NotVoidType();
1583  destpagedata += desttupleextra;
1584  } while(destpagedata < destpageend);
1585  }
1586  else
1587  {
1588  do
1589  {
1590  if (SYSisPOD<NotVoidType>())
1591  {
1592  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1593  ::memset(destpagedata, 0, sizeof(NotVoidType));
1594  }
1595  else
1596  {
1598  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1599  *destpagedata = v;
1600  }
1601  destpagedata += desttupleextra;
1602  } while(destpagedata < destpageend);
1603  }
1604  }
1605 }
1606 
1607 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1608 template<typename SrcNotVoidType>
1609 void
1611  NotVoidType *__restrict destpagedata,
1612  const SrcNotVoidType *__restrict srcpagedata,
1613  const exint desttuplesize,
1614  const exint srctuplesize,
1615  UT_PageOff ntuples)
1616 {
1617  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1618 
1619  NotVoidType *destpageend = destpagedata + ntuples*desttuplesize;
1620 
1621  if (desttuplesize == srctuplesize)
1622  {
1623  // Copy values from srcpagedata to destpagedata.
1624  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1625  {
1626  do
1627  {
1628  *destpagedata = *srcpagedata;
1629  ++srcpagedata;
1630  ++destpagedata;
1631  } while(destpagedata < destpageend);
1632  }
1633  else
1634  {
1635  do
1636  {
1637  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1638  ++srcpagedata;
1639  ++destpagedata;
1640  } while(destpagedata < destpageend);
1641  }
1642  }
1643  else
1644  {
1645  const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);
1646  const exint srctupleextra = srctuplesize - mintuplesize;
1647  const exint desttupleextra = desttuplesize - mintuplesize;
1648 
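 // For illustration: copying 3-component source tuples into 2-component
 // destination tuples gives mintuplesize == 2, srctupleextra == 1, and
 // desttupleextra == 0, so each pass below copies two components and skips
 // the source's third.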
1649  // Copy values from srcpagedata to destpagedata.
1650  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1651  {
1652  do
1653  {
1654  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1655  *destpagedata = *srcpagedata;
1656  destpagedata += desttupleextra;
1657  srcpagedata += srctupleextra;
1658  } while(destpagedata < destpageend);
1659  }
1660  else
1661  {
1662  do
1663  {
1664  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1665  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1666  destpagedata += desttupleextra;
1667  srcpagedata += srctupleextra;
1668  } while(destpagedata < destpageend);
1669  }
1670  }
1671 }
1672 
1673 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1674 template<typename SrcNotVoidType>
1675 bool
1677 {
1678  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1679 
1680  // If both point to the same constant data, or both are NULL
1681  // (constant zero), the values are equal and there's nothing to do.
1682  if ((const void*)stuple == (void*)tuple)
1683  return true;
1684 
1685  if (!stuple || !tuple)
1686  return false;
1687 
1688  UT_ASSERT_P(mintuplesize > 0);
1689 
1690  bool isequal;
1691  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1692  {
1693  // Cast to the destination type, since the result is
1694  // supposed to indicate whether the destination would
1695  // be left unchanged by the assignment.
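 // For example, an fpreal64 source value of 0.1 is first converted to
 // fpreal32 when the destination stores fpreal32; it then compares equal to
 // a destination component holding fpreal32(0.1), even though the original
 // double differs from that float.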
1696  isequal = (tuple[0] == UTconvertStorage<NotVoidType>(stuple[0]));
1697  for (exint i = 1; i < mintuplesize; ++i)
1698  isequal &= (tuple[i] == UTconvertStorage<NotVoidType>(stuple[i]));
1699  }
1700  else
1701  {
1702  // NOTE: Don't want to copy-construct non-POD types
1703  // unnecessarily by casting to NotVoidType.
1704  isequal = (tuple[0] == stuple[0]);
1705  for (exint i = 1; i < mintuplesize; ++i)
1706  isequal &= (tuple[i] == stuple[i]);
1707  }
1708  // If they're equal, nothing to do
1709  return isequal;
1710 }
1711 
1712 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1713 void
1715 {
1717  UT_ASSERT_P(start < end);
1718  UT_ASSERT_P(start >= IDX_T(0));
1719  UT_ASSERT_P(end <= myCapacity);
1720  UT_ASSERT_P(TSIZE >= 1);
1721  UT_ASSERT_MSG_P(myRefCount == 1, "The table must already be hardened before we modify it!");
1722 
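 // For illustration (the page size depends on THEPAGEBITS; 1024 is only an
 // example): fill(1000, 3000, val) with 1024-element pages handles three
 // ranges below -- a partial first page [1000,1024) that is hardened and
 // written in place, a complete middle page [1024,2048) that simply becomes
 // a constant page when !PAGESHARDENED, and a last page [2048,3000) that is
 // written in place unless the range reaches the end of the data, in which
 // case it too can become a constant page.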
1723  UT_PageNum startpage = pageNum(start);
1724  UT_PageOff startoff = pageOff(start);
1725  UT_PageNum endpage = pageNum(end);
1726  UT_PageOff endoff = pageOff(end);
1727 
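 // val only needs to be compared against zero when a partial first or last
 // page might be a constant zero page below; that check is skipped entirely
 // when PAGESHARDENED, hence the conditions on valiszero here.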
1728  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1729  if (valiszero)
1730  {
1731  valiszero = isZero(val);
1732  }
1733 
1734  UT_PageOff pagecapacity(thePageSize);
1735  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1736  pagecapacity = capacity();
1737 
1738  // Handle incomplete first page
1739  if (startoff != UT_PageOff(0))
1740  {
1741  PageTableEntry *page = getPPage(startpage);
1742  bool equal = false;
1743  if (!PAGESHARDENED)
1744  {
1745  if (page->isConstant())
1746  {
1747  const NotVoidType *tuple = getConstantPtr(page);
1748  // Nothing to do if equal already.
1749  if (tuple)
1750  {
1751  equal = true;
1752  for (exint i = 0; i < TSIZE; ++i)
1753  equal &= (tuple[i] == val);
1754  }
1755  else
1756  {
1757  equal = valiszero;
1758  }
1759  if (!equal)
1760  hardenConstantPage(page, pagecapacity);
1761  }
1762  else if (page->isShared())
1763  hardenSharedPage(page, pagecapacity);
1764  }
1765  if (!equal)
1766  {
1767  UT_ASSERT_P(!page->isConstant());
1768  UT_ASSERT_P(!page->isShared());
1769  NotVoidType *data = page->getFirstPtr();
1770  NotVoidType *end = data + TSIZE*((endpage != startpage) ? pagecapacity : endoff);
1771  data += TSIZE*startoff;
1772  for (; data != end; ++data)
1773  *data = val;
1774  }
1775  if (endpage == startpage)
1776  return;
1777 
1778  ++startpage;
1779  }
1780 
1781  // Handle complete middle pages
1782  for (; startpage < endpage; ++startpage)
1783  {
1784  PageTableEntry *page = getPPage(startpage);
1785  // FIXME: Need a makeConstant that takes a single value for non-POD types
1786  if (!PAGESHARDENED)
1787  makeConstant(page, val);
1788  else
1789  {
1790  NotVoidType *data = page->getFirstPtr();
1791  // NOTE: This isn't a small page, so we can use thePageSize
1792  NotVoidType *end = data + TSIZE*thePageSize;
1793  for (; data != end; ++data)
1794  *data = val;
1795  }
1796  }
1797 
1798  // Handle incomplete last page
1799  if (endoff != UT_PageOff(0))
1800  {
1801  PageTableEntry *page = getPPage(startpage);
1802  // If this is the last page and the range reaches the end of the data, we can still make the page constant.
1803  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
1804  {
1805  makeConstant(page, val);
1806  return;
1807  }
1808  bool equal = false;
1809  if (!PAGESHARDENED)
1810  {
1811  if (page->isConstant())
1812  {
1813  const NotVoidType *tuple = getConstantPtr(page);
1814  // Nothing to do if equal already.
1815  if (tuple)
1816  {
1817  equal = true;
1818  for (exint i = 0; i < TSIZE; ++i)
1819  equal &= (tuple[i] == val);
1820  }
1821  else
1822  {
1823  equal = valiszero;
1824  }
1825  if (!equal)
1826  hardenConstantPage(page, pagecapacity);
1827  }
1828  else if (page->isShared())
1829  hardenSharedPage(page, pagecapacity);
1830  }
1831  if (!equal)
1832  {
1833  UT_ASSERT_P(!page->isConstant());
1834  UT_ASSERT_P(!page->isShared());
1835  NotVoidType *data = page->getFirstPtr();
1836  NotVoidType *end = data + TSIZE*endoff;
1837  for (; data != end; ++data)
1838  *data = val;
1839  }
1840  }
1841 }
1842 
1843 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1844 void
1846 {
1848  UT_ASSERT_P(start < end);
1849  UT_ASSERT_P(start >= IDX_T(0));
1850  UT_ASSERT_P(end <= myCapacity);
1851  UT_ASSERT_P(TSIZE == -1);
1852  UT_ASSERT_P(tuplesize >= 1);
1853  UT_ASSERT_MSG_P(myRefCount == 1, "The table must already be hardened before we modify it!");
1854 
1855  // Fast paths for small sizes.
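 // Each branch below casts this PageTable to a fixed tuple-size
 // instantiation (TSIZE of 1, 2, 3, or 4) so that the TSIZE >= 1 fill()
 // overload above runs with the tuple size known at compile time.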
1856  if (tuplesize <= 4)
1857  {
1858  if (tuplesize == 1)
1859  {
1861  this)->fill(start, end, val);
1862  }
1863  else if (tuplesize == 2)
1864  {
1866  this)->fill(start, end, val);
1867  }
1868  else if (tuplesize == 3)
1869  {
1871  this)->fill(start, end, val);
1872  }
1873  else //if (tuplesize == 4)
1874  {
1876  this)->fill(start, end, val);
1877  }
1878  return;
1879  }
1880 
1881  UT_PageNum startpage = pageNum(start);
1882  UT_PageOff startoff = pageOff(start);
1883  UT_PageNum endpage = pageNum(end);
1884  UT_PageOff endoff = pageOff(end);
1885 
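 // As in the TSIZE >= 1 fill() above, val only needs to be compared against
 // zero when a partial first or last page might be a constant zero page.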
1886  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1887  if (valiszero)
1888  {
1889  valiszero = isZero(val);
1890  }
1891 
1892  UT_PageOff pagecapacity(thePageSize);
1893  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1894  pagecapacity = capacity();
1895 
1896  // Handle incomplete first page
1897  if (startoff != UT_PageOff(0))
1898  {
1899  PageTableEntry *page = getPPage(startpage);
1900  bool equal = false;
1901  if (!PAGESHARDENED)
1902  {
1903  if (page->isConstant())
1904  {
1905  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
1906  // Nothing to do if equal already.
1907  if (tuple)
1908  {
1909  equal = true;
1910  for (exint i = 0; i < tuplesize; ++i)
1911  equal &= (tuple[i] == val);
1912  }
1913  else
1914  {
1915  equal = valiszero;
1916  }
1917  if (!equal)
1918  hardenConstantPage(page, pagecapacity, tuplesize);
1919  }
1920  else if (page->isShared())
1921  hardenSharedPage(page, pagecapacity, tuplesize);
1922  }
1923  if (!equal)
1924  {
1925  UT_ASSERT_P(!page->isConstant());
1926  UT_ASSERT_P(!page->isShared());
1927  NotVoidType *data = page->getFirstPtr();
1928  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
1929  data += tuplesize*startoff;
1930  for (; data != end; ++data)
1931  *data = val;
1932  }
1933  if (endpage == startpage)
1934  return;
1935  ++startpage;
1936  }
1937 
1938  // Handle complete middle pages
1939  for (; startpage < endpage; ++startpage)
1940  {
1941  PageTableEntry *page = getPPage(startpage);
1942  if (!PAGESHARDENED)
1943  makeConstant(page, val, tuplesize);
1944  else
1945  {
1946  NotVoidType *data = page->getFirstPtr();
1947  // NOTE: This isn't a small page, so we can use thePageSize
1948  NotVoidType *end = data + tuplesize*thePageSize;
1949  for (; data != end; ++data)
1950  *data = val;
1951  }
1952  }
1953 
1954  // Handle incomplete last page
1955  if (endoff != UT_PageOff(0))
1956  {
1957  PageTableEntry *page = getPPage(startpage);
1958  // If this is the last page and the range reaches the end of the data, we can still make the page constant.
1959  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
1960  {
1961  makeConstant(page, val, tuplesize);
1962  return;
1963  }
1964  bool equal = false;
1965  if (!PAGESHARDENED)
1966  {
1967  if (page->isConstant())
1968  {
1969  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
1970  // Nothing to do if equal already.
1971  if (tuple)
1972  {
1973  equal = true;
1974  for (exint i = 0; i < tuplesize; ++i)
1975  equal &= (tuple[i] == val);
1976  }
1977  else
1978  {
1979  equal = valiszero;
1980  }
1981  if (!equal)
1982  hardenConstantPage(page, pagecapacity, tuplesize);
1983  }
1984  else if (page->isShared())
1985  hardenSharedPage(page, pagecapacity, tuplesize);
1986  }
1987  if (!equal)
1988  {
1989  UT_ASSERT_P(!page->isConstant());
1990  UT_ASSERT_P(!page->isShared());
1991  NotVoidType *data = page->getFirstPtr();
1992  NotVoidType *end = data + tuplesize*endoff;
1993  for (; data != end; ++data)
1994  *data = val;
1995  }
1996  }
1997 }
1998 
1999 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2000 void
2002 {
2004  UT_ASSERT_P(start < end);
2005  UT_ASSERT_P(start >= IDX_T(0));
2006  UT_ASSERT_P(end <= myCapacity);
2007  UT_ASSERT_P(TSIZE == -1 || TSIZE==tuplesize);
2008  UT_ASSERT_P(tuplesize >= 1);
2009  UT_ASSERT_MSG_P(myRefCount == 1, "The table must already be hardened before we modify it!");
2010 
2011  UT_PageNum startpage = pageNum(start);
2012  UT_PageOff startoff = pageOff(start);
2013  UT_PageNum endpage = pageNum(end);
2014  UT_PageOff endoff = pageOff(end);
2015 
2016  UT_PageOff pagecapacity(thePageSize);
2017  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2018  pagecapacity = capacity();
2019 
2020  // Handle incomplete first page
2021  if (startoff != UT_PageOff(0))
2022  {
2023  PageTableEntry *page = getPPage(startpage);
2024  bool equal = false;
2025  if (!PAGESHARDENED)
2026  {
2027  if (page->isConstant())
2028  {
2029  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2030  // Nothing to do if equal already.
2031  equal = true;
2032  if (tuple)
2033  {
2034  for (exint i = 0; i < tuplesize; ++i)
2035  equal &= (tuple[i] == values[i]);
2036  }
2037  else
2038  {
2039  for (exint i = 0; i < tuplesize; ++i)
2040  equal &= (NotVoidType(0) == values[i]);
2041  }
2042  if (!equal)
2043  hardenConstantPage(page, pagecapacity, tuplesize);
2044  }
2045  else if (page->isShared())
2046  hardenSharedPage(page, pagecapacity, tuplesize);
2047  }
2048  if (!equal)
2049  {
2050  UT_ASSERT_P(!page->isConstant());
2051  UT_ASSERT_P(!page->isShared());
2052  NotVoidType *data = page->getFirstPtr();
2053  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
2054  data += tuplesize*startoff;
2055  while (data != end)
2056  {
2057  for (exint i = 0; i < tuplesize; ++i, ++data)
2058  *data = values[i];
2059  }
2060  }
2061  if (endpage == startpage)
2062  return;
2063  ++startpage;
2064  }
2065 
2066  // Handle complete middle pages
2067  for (; startpage < endpage; ++startpage)
2068  {
2069  PageTableEntry *page = getPPage(startpage);
2070  if (!PAGESHARDENED)
2071  makeConstant(page, values, tuplesize);
2072  else
2073  {
2074  NotVoidType *data = page->getFirstPtr();
2075  // NOTE: This isn't a small page, so we can use thePageSize
2076  NotVoidType *end = data + tuplesize*thePageSize;
2077  while (data != end)
2078  {
2079  for (exint i = 0; i < tuplesize; ++i, ++data)
2080  *data = values[i];
2081  }
2082  }
2083  }
2084 
2085  // Handle incomplete last page
2086  if (endoff != UT_PageOff(0))
2087  {
2088  PageTableEntry *page = getPPage(startpage);
2089  // If this is the last page and the range reaches the end of the data, we can still make the page constant.
2090  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2091  {
2092  makeConstant(page, values, tuplesize);
2093  return;
2094  }
2095  bool equal = false;
2096  if (!PAGESHARDENED)
2097  {
2098  if (page->isConstant())
2099  {
2100  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2101  // Nothing to do if equal already.
2102  equal = true;
2103  if (tuple)
2104  {
2105  for (exint i = 0; i < tuplesize; ++i)
2106  equal &= (tuple[i] == values[i]);
2107  }
2108  else
2109  {
2110  for (exint i = 0; i < tuplesize; ++i)
2111  equal &= (NotVoidType(0) == values[i]);
2112  }
2113  if (!equal)
2114  hardenConstantPage(page, pagecapacity, tuplesize);
2115  }
2116  else if (page->isShared())
2117  hardenSharedPage(page, pagecapacity, tuplesize);
2118  }
2119  if (!equal)
2120  {
2121  UT_ASSERT_P(!page->isConstant());
2122  UT_ASSERT_P(!page->isShared());
2123  NotVoidType *data = page->getFirstPtr();
2124  NotVoidType *end = data + tuplesize*endoff;
2125  while (data != end)
2126  {
2127  for (exint i = 0; i < tuplesize; ++i, ++data)
2128  *data = values[i];
2129  }
2130  }
2131  }
2132 }
2133 
2134 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2135 void
2137 {
2139  UT_ASSERT_P(start < end);
2140  UT_ASSERT_P(start >= IDX_T(0));
2141  UT_ASSERT_P(end <= myCapacity);
2142  UT_ASSERT_P(TSIZE >= 1);
2143  UT_ASSERT_MSG_P(myRefCount == 1, "The table must already be hardened before we modify it!");
2144 
2145  UT_PageNum startpage = pageNum(start);
2146  UT_PageOff startoff = pageOff(start);
2147  UT_PageNum endpage = pageNum(end);
2148  UT_PageOff endoff = pageOff(end);
2149 
2150  UT_PageOff pagecapacity(thePageSize);
2151  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2152  pagecapacity = capacity();
2153 
2154  // Handle incomplete first page
2155  if (startoff != UT_PageOff(0))
2156  {
2157  PageTableEntry *page = getPPage(startpage);
2158  bool equal = false;
2159  if (!PAGESHARDENED)
2160  {
2161  if (page->isConstant())
2162  {
2164  // Nothing to do if equal already.
2165  equal = tuple ? (*tuple == val) : val.isZero();
2166  if (!equal)
2167  hardenConstantPage(page, pagecapacity);
2168  }
2169  else if (page->isShared())
2170  hardenSharedPage(page, pagecapacity);
2171  }
2172  if (!equal)
2173  {
2174  UT_ASSERT_P(!page->isConstant());
2175  UT_ASSERT_P(!page->isShared());
2177  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + ((endpage != startpage) ? pagecapacity : endoff);
2178  data += startoff;
2179  for (; data != end; ++data)
2180  *data = val;
2181  }
2182  if (endpage == startpage)
2183  return;
2184 
2185  ++startpage;
2186  }
2187 
2188  // Handle complete middle pages
2189  for (; startpage < endpage; ++startpage)
2190  {
2191  PageTableEntry *page = getPPage(startpage);
2192  if (!PAGESHARDENED)
2193  makeConstant(page, val);
2194  else
2195  {
2197  // NOTE: This isn't a small page, so we can use thePageSize
2199  for (; data != end; ++data)
2200  *data = val;
2201  }
2202  }
2203 
2204  // Handle incomplete last page
2205  if (endoff != UT_PageOff(0))
2206  {
2207  PageTableEntry *page = getPPage(startpage);
2208  // If this is the last page and the range reaches the end of the data, we can still make the page constant.
2209  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2210  {
2211  makeConstant(page, val);
2212  return;
2213  }
2214  bool equal = false;
2215  if (!PAGESHARDENED)
2216  {
2217  if (page->isConstant())
2218  {
2220  // Nothing to do if equal already.
2221  equal = tuple ? (*tuple == val) : val.isZero();
2222  if (!equal)
2223  hardenConstantPage(page, pagecapacity);
2224  }
2225  else if (page->isShared())
2226  hardenSharedPage(page, pagecapacity);
2227  }
2228  if (!equal)
2229  {
2230  UT_ASSERT_P(!page->isConstant());
2231  UT_ASSERT_P(!page->isShared());
2233  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + endoff;
2234  for (; data != end; ++data)
2235  *data = val;
2236  }
2237  }
2238 }
2239 
2240 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2241 template<typename DEST_DATA_T,exint DEST_TSIZE,bool DEST_INSTANTIATED>
2242 void
2244 {
2245  // If the source storage type is not known at compile time,
2246  // switch, cast, and call again.
2247  if (SYSisSame<DATA_T,void>())
2248  {
2250 
2251  // Probably matches destination type
2253  {
2254  castType<DEST_DATA_T>().getVectorRange(srcstart, nelements, dest);
2255  return;
2256  }
2257 
2258  switch (storage)
2259  {
2260  case UT_Storage::INT8:
2261  castType<int8>().getVectorRange(srcstart, nelements, dest); return;
2262  case UT_Storage::INT16:
2263  castType<int16>().getVectorRange(srcstart, nelements, dest); return;
2264  case UT_Storage::INT32:
2265  castType<int32>().getVectorRange(srcstart, nelements, dest); return;
2266  case UT_Storage::INT64:
2267  castType<int64>().getVectorRange(srcstart, nelements, dest); return;
2268  case UT_Storage::REAL16:
2269  castType<fpreal16>().getVectorRange(srcstart, nelements, dest); return;
2270  case UT_Storage::REAL32:
2271  castType<fpreal32>().getVectorRange(srcstart, nelements, dest); return;
2272  case UT_Storage::REAL64:
2273  castType<fpreal64>().getVectorRange(srcstart, nelements, dest); return;
2274  case UT_Storage::INVALID:
2275  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2276  break;
2277  }
2278  return;
2279  }
2280 
2281  // We now have both the source type and the destination type known at compile time.
2282  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2283 
2284  // Tuple size probably matches
2285  if (TSIZE == -1 && myImpl.getTupleSize() == DEST_TSIZE)
2286  {
2287  castTupleSize<DEST_TSIZE>().getVectorRange(srcstart, nelements, dest);
2288  return;
2289  }
2290 
2291  auto vdest = reinterpret_cast<UT_FixedVector<DEST_DATA_T,DEST_TSIZE> *>(dest);
2292 
2293  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
2294  for (IDX_T srcend(srcstart+nelements); srcstart < srcend; ++srcstart, ++vdest)
2295  *vdest = getVector<DEST_DATA_T,DEST_TSIZE>(srcstart);
2296 }
2297 
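// Usage sketch (illustrative; the instantiation below, including
// THEPAGEBITS == 10 and IDX_T == exint, is an assumption rather than
// something specified in this header):
//
//     UT_PageArray<fpreal32, 3, false, false, 10, exint> positions;
//     positions.setSize(exint(100), 0.0f);
//     UT_FixedVector<fpreal32, 3> buf[16];
//     // Gather elements [32, 48) into buf, converting per element if the
//     // storage or tuple size differed.
//     positions.getVectorRange(exint(32), exint(16), buf);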
2298 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2299 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_INSTANTIATED>
2300 void
2302 {
2303  if (nelements <= IDX_T(0))
2304  return;
2305 
2306  auto &hard = hardenTable();
2307 
2308  UT_ASSERT_MSG_P((!SYSisSame<SRC_DATA_T,void>()), "Source type must be known.");
2309 
2310  // If the destination storage type is not known at compile time,
2311  // switch, cast, and call again.
2312  if (SYSisSame<DATA_T,void>())
2313  {
2315 
2316  // Probably matches source type
2318  {
2319  hard.template castType<SRC_DATA_T>().setVectorRange(deststart, nelements, src);
2320  return;
2321  }
2322 
2323  switch (storage)
2324  {
2325  case UT_Storage::INT8:
2326  hard.template castType<int8>().setVectorRange(deststart, nelements, src); return;
2327  case UT_Storage::INT16:
2328  hard.template castType<int16>().setVectorRange(deststart, nelements, src); return;
2329  case UT_Storage::INT32:
2330  hard.template castType<int32>().setVectorRange(deststart, nelements, src); return;
2331  case UT_Storage::INT64:
2332  hard.template castType<int64>().setVectorRange(deststart, nelements, src); return;
2333  case UT_Storage::REAL16:
2334  hard.template castType<fpreal16>().setVectorRange(deststart, nelements, src); return;
2335  case UT_Storage::REAL32:
2336  hard.template castType<fpreal32>().setVectorRange(deststart, nelements, src); return;
2337  case UT_Storage::REAL64:
2338  hard.template castType<fpreal64>().setVectorRange(deststart, nelements, src); return;
2339  case UT_Storage::INVALID:
2340  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2341  break;
2342  }
2343  return;
2344  }
2345 
2346  // We now have both the source type and the destination type known at compile time.
2347  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2348 
2349  // Tuple size probably matches
2350  if (TSIZE == -1 && myImpl.getTupleSize() == SRC_TSIZE)
2351  {
2352  hard.template castTupleSize<SRC_TSIZE>().setVectorRange(deststart, nelements, src);
2353  return;
2354  }
2355 
2356  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
2357  for (IDX_T destend(deststart+nelements); deststart < destend; ++deststart, ++src)
2358  setVector(deststart, *src);
2359 }
2360 
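// The mirror image of getVectorRange(): setVectorRange() hardens the table,
// dispatches on the runtime storage and tuple size when they aren't known at
// compile time, and otherwise falls back to a per-element setVector() loop
// over [deststart, deststart + nelements).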
2361 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2362 bool
2364 {
2365  if (!UTisFloatStorage(getStorage()))
2366  return false;
2367 
2368  // If the storage type is not known at compile time,
2369  // switch, cast, and call again.
2370  if (SYSisSame<DATA_T,void>())
2371  {
2373  switch (storage)
2374  {
2375  case UT_Storage::REAL16:
2376  return castType<fpreal16>().hasNanInRange(start, end);
2377  case UT_Storage::REAL32:
2378  return castType<fpreal32>().hasNanInRange(start, end);
2379  case UT_Storage::REAL64:
2380  return castType<fpreal64>().hasNanInRange(start, end);
2381  default:
2382  UT_ASSERT_MSG(0, "Only 16-bit, 32-bit, and 64-bit floats should be considered float types!");
2383  break;
2384  }
2385  return false;
2386  }
2387 
2388  UT_ASSERT_P(start >= IDX_T(0) && start <= size());
2389  UT_ASSERT_P(end >= IDX_T(0) && end <= size());
2390  UT_ASSERT_P(start <= end);
2391 
2392  if (start >= end)
2393  return false;
2394 
2395  const PageTable *pages = myImpl.getPages();
2396  UT_ASSERT_P(pages);
2397 
2398  UT_PageNum pagenum = pageNum(start);
2399  UT_PageOff pageoff = pageOff(start);
2400  UT_PageNum endpagenum = pageNum(end);
2401  UT_PageOff endpageoff = pageOff(end);
2402 
2403  exint tuplesize = getTupleSize();
2404 
2405  if (endpageoff == UT_PageOff(0))
2406  {
2407  --endpagenum;
2408  endpageoff = UT_PageOff(thePageSize);
2409  }
2410 
2411  for (; pagenum <= endpagenum; ++pagenum)
2412  {
2413  const PageTableEntry *const page = pages->getPPage(pagenum);
2414  if (page->isConstant())
2415  {
2416  const NotVoidType *data = getConstantPtr(page, 0, tuplesize);
2417  // Special case: a constant zero page never contains NaNs.
2418  if (!data)
2419  continue;
2420  for (exint i = 0; i < tuplesize; ++i)
2421  {
2422  if (SYSisNan(data[i]))
2423  return true;
2424  }
2425  }
2426  else
2427  {
2428  const NotVoidType *data = page->getFirstPtr();
2429  const NotVoidType *end = data + ((pagenum == endpagenum) ? endpageoff : thePageSize*tuplesize);
2430  data += pageoff;
2431  for (; data != end; ++data)
2432  {
2433  if (SYSisNan(*data))
2434  return true;
2435  }
2436  }
2437  pageoff = UT_PageOff(0);
2438  }
2439 
2440  return false;
2441 }
2442 
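// Note: hasNanInRange() only applies to floating-point storage (it returns
// false immediately for integer storage), and constant zero pages are
// skipped without inspection since they cannot contain NaNs.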
2443 
2444 #endif