HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_PageArrayImpl.h (UT Library, C++)
7  *
8  * COMMENTS: Implementations of functions of UT_PageArray that
9  * aren't needed in most places that use it.
10  */
11 
12 #pragma once
13 
14 #ifndef __UT_PageArrayImpl__
15 #define __UT_PageArrayImpl__
16 
17 #include "UT_PageArray.h"
18 
19 #include "UT_Defaults.h"
20 #include "UT_MemoryCounter.h"
21 #include "UT_StackBuffer.h"
22 #include "UT_Storage.h"
23 #include "UT_Swap.h"
24 
25 #include <SYS/SYS_Types.h>
26 
27 
// NOTE(review): the extraction dropped listing line 30 (the signature); from the
// body this appears to be UT_PageArray<...>::setSize(IDX_T newsize, NotVoidType initval)
// -- confirm against the original header.
// Resizes the array to newsize, growing capacity if needed, and fills any
// newly-added elements in [oldsize, newsize) with initval.
28 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
29 void
31 {
32  UT_ASSERT_P(newsize >= IDX_T(0));
34 
35  setCapacityIfNeeded(newsize);
36  hardenTable();
37  PageTable *pages = myImpl.getPages();
// pages can only be null when the array is empty (capacity 0).
38  UT_ASSERT_P(pages || newsize == IDX_T(0));
39  if (pages)
40  {
41  IDX_T oldsize = pages->size();
42 
// Only the hardened-pages, runtime-tuple-size case (PAGESHARDENED && TSIZE < 0)
// needs the tuple size passed through to PageTable::setSize.
43  if (!PAGESHARDENED || TSIZE >= 0)
44  pages->setSize(newsize);
45  else
46  pages->setSize(newsize, myImpl.getTupleSize());
47 
48  if (newsize > oldsize)
49  {
// Fill the newly-exposed tail with initval; the TSIZE == -1 branch
// supplies the runtime tuple size (skipped when tuple size is 0).
50  if (TSIZE >= 1)
51  pages->fill(oldsize, newsize, initval);
52  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
53  pages->fill(oldsize, newsize, initval, myImpl.getTupleSize());
54  }
55  }
56 }
57 
// NOTE(review): listing line 60 (the signature) is missing from this extraction;
// the asserts indicate this is a setSize overload restricted to a compile-time
// tuple size (TSIZE >= 1) and a known-valid storage type -- confirm against the
// original header.
// Resizes to newsize and fills any newly-added elements with initval.
58 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
59 void
61 {
62  UT_ASSERT_P(newsize >= IDX_T(0));
// This overload is only valid when the storage and tuple size are fixed.
64  UT_ASSERT_P(getStorage() != UT_Storage::INVALID);
65  UT_ASSERT_P(TSIZE >= 1);
66 
67  setCapacityIfNeeded(newsize);
68  hardenTable();
69  PageTable *pages = myImpl.getPages();
// pages can only be null when the array is empty (capacity 0).
70  UT_ASSERT_P(pages || newsize == IDX_T(0));
71  if (pages)
72  {
73  IDX_T oldsize = pages->size();
74 
75  // No need to destruct if smaller, since it's a POD type.
76 
77  pages->setSize(newsize);
78 
// Fill only the newly-exposed tail; existing elements keep their values.
79  if (newsize > oldsize)
80  pages->fill(oldsize, newsize, initval);
81  }
82 }
83 
// NOTE(review): listing line 86 (the signature) is missing from this extraction;
// since the tail is initialized via setConstant(oldsize, newsize, initval), this
// appears to be setSize(IDX_T newsize, const UT_Defaults &initval) -- confirm
// against the original header.
// Resizes to newsize and fills any newly-added elements from initval.
84 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
85 void
87 {
88  UT_ASSERT_P(newsize >= IDX_T(0));
89 
90  setCapacityIfNeeded(newsize);
91  hardenTable();
92  PageTable *pages = myImpl.getPages();
// pages can only be null when the array is empty (capacity 0).
93  UT_ASSERT_P(pages || newsize == IDX_T(0));
94  if (pages)
95  {
96  IDX_T oldsize = pages->size();
97 
98  // No need to destruct if smaller, since it's a POD type.
99 
// Only the hardened-pages, runtime-tuple-size case needs the tuple size
// passed through to PageTable::setSize.
100  if (!PAGESHARDENED || TSIZE >= 0)
101  pages->setSize(newsize);
102  else
103  pages->setSize(newsize, myImpl.getTupleSize());
104 
// Delegate tail initialization to setConstant, which handles the
// runtime-storage switch and UT_Defaults tuple expansion.
105  if (newsize > oldsize)
106  setConstant(oldsize, newsize, initval);
107  }
108 }
109 
// NOTE(review): listing line 112 (the signature) is missing from this extraction;
// from the body this appears to be setConstant(IDX_T start, IDX_T end, NotVoidType v)
// -- confirm against the original header.
// Sets every component of every element in [start, end) to v.
110 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
111 void
113 {
114  UT_ASSERT_P(end >= start);
115  UT_ASSERT_P(start >= IDX_T(0));
116  UT_ASSERT_P(end <= capacity());
118 
// Empty range: nothing to write, so don't touch (or harden) the table.
119  if (end <= start)
120  return;
121 
122  hardenTable();
123  PageTable *pages = myImpl.getPages();
124  if (!pages)
125  return;
126 
// The TSIZE == -1 branch supplies the runtime tuple size; a tuple size of 0
// means there is no data to fill.
127  if (TSIZE >= 1)
128  pages->fill(start, end, v);
129  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
130  pages->fill(start, end, v, myImpl.getTupleSize());
131 }
132 
// NOTE(review): listing line 135 (the signature) is missing from this extraction;
// the TSIZE >= 1 assert indicates this is a setConstant overload restricted to a
// compile-time tuple size -- confirm against the original header.
// Sets every component of every element in [start, end) to v.
133 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
134 void
136 {
137  UT_ASSERT_P(end >= start);
138  UT_ASSERT_P(start >= IDX_T(0));
139  UT_ASSERT_P(end <= capacity());
// This overload is only valid when the tuple size is fixed at compile time.
141  UT_ASSERT_P(TSIZE >= 1);
142 
// Empty range: nothing to write, so don't touch (or harden) the table.
143  if (end <= start)
144  return;
145 
146  hardenTable();
147  PageTable *pages = myImpl.getPages();
148  if (!pages)
149  return;
150  pages->fill(start, end, v);
151 }
152 
// NOTE(review): listing line 155 (the signature) is missing from this extraction;
// from the body this appears to be setConstant(IDX_T start, IDX_T end,
// const UT_Defaults &v) -- confirm against the original header.
// Sets every element in [start, end) from the UT_Defaults value v, resolving a
// runtime storage type (DATA_T == void) by switching and recursing on the
// concretely-typed view.
153 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
154 void
156 {
157  UT_ASSERT_P(end >= start);
158  UT_ASSERT_P(start >= IDX_T(0));
159  UT_ASSERT_P(end <= capacity());
160 
161  auto &hard = hardenTable();
162 
163  UT_Storage storage = getStorage();
164 
165  // If the storage type is not known at compile time,
166  // switch, cast, and call again.
167  if (SYSisSame<DATA_T,void>())
168  {
169  switch (storage)
170  {
171  case UT_Storage::INT8:
172  hard.template castType<int8>().setConstant(start, end, v); return;
173  case UT_Storage::INT16:
174  hard.template castType<int16>().setConstant(start, end, v); return;
175  case UT_Storage::INT32:
176  hard.template castType<int32>().setConstant(start, end, v); return;
177  case UT_Storage::INT64:
178  hard.template castType<int64>().setConstant(start, end, v); return;
179  case UT_Storage::REAL16:
180  hard.template castType<fpreal16>().setConstant(start, end, v); return;
181  case UT_Storage::REAL32:
182  hard.template castType<fpreal32>().setConstant(start, end, v); return;
183  case UT_Storage::REAL64:
184  hard.template castType<fpreal64>().setConstant(start, end, v); return;
185  case UT_Storage::INVALID:
186  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
187  break;
188  }
189  return;
190  }
191 
// From here on DATA_T is concrete.  Empty ranges and empty tuples are no-ops.
192  if (end <= start)
193  return;
194 
195  PageTable *pages = myImpl.getPages();
196  if (!pages)
197  return;
198 
199  const exint tuplesize = getTupleSize();
200  if (tuplesize == 0)
201  return;
202 
203  // UT_Defaults is almost always tuple size 1, so have a special case for it.
// When either side is scalar, component 0 of v is broadcast to all components.
204  if (v.getTupleSize() == 1 || tuplesize == 1)
205  {
206  if (TSIZE >= 1)
207  {
// getI/getF pick the integer or float representation to match the storage.
208  if (UTisIntStorage(storage))
209  pages->fill(start, end, NotVoidType(v.getI(0)));
210  else
211  pages->fill(start, end, NotVoidType(v.getF(0)));
212  }
213  else
214  {
215  if (UTisIntStorage(storage))
216  pages->fill(start, end, NotVoidType(v.getI(0)), tuplesize);
217  else
218  pages->fill(start, end, NotVoidType(v.getF(0)), tuplesize);
219  }
220  }
221  else
222  {
// NOTE(review): listing line 223 was dropped by the extraction.  Given the
// UT_StackBuffer.h include and the uses of buf below, it presumably declared
// something like: UT_StackBuffer<NotVoidType> buf(tuplesize); -- confirm
// against the original header.
// Expand v into a per-component temporary, then fill the range with the tuple.
224  if (UTisIntStorage(storage))
225  {
226  for (exint i = 0; i < tuplesize; ++i)
227  buf[i] = NotVoidType(v.getI(i));
228  }
229  else
230  {
231  for (exint i = 0; i < tuplesize; ++i)
232  buf[i] = NotVoidType(v.getF(i));
233  }
234  pages->fill(start, end, buf, tuplesize);
235  }
236 }
237 
// NOTE(review): listing line 240 (the signature) is missing from this extraction;
// from the body this appears to be setStorage(const UT_Storage newstorage) --
// confirm against the original header.
// Changes the runtime storage type (only legal when DATA_T is void), converting
// existing data by copying it into a new array of the new storage type and then
// adopting that array's page table.
238 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
239 void
241 {
242  const UT_Storage oldstorage = getStorage();
243  UT_ASSERT_P(newstorage != UT_Storage::INVALID);
244  UT_ASSERT_MSG_P((SYS_IsSame<DATA_T,void>::value) || (newstorage == oldstorage), "Can't change the storage of an array whose type is fixed.");
245 
246  // Nothing to do if same type, or bad type
247  if (newstorage == oldstorage || newstorage == UT_Storage::INVALID)
248  return;
249 
250  PageTable *const oldpages = myImpl.getPages();
251 
252  // If there's no data, we only need to set the storage.
253  exint tuplesize = getTupleSize();
254  if (tuplesize == 0 || !oldpages)
255  {
256  myImpl.setStorage(newstorage);
257  return;
258  }
259 
260  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
261 
262  // Copy the data into a new array with the new storage type
// moveRange performs the per-element conversion between storage types.
263  ThisType newarray(getTupleSize(), newstorage);
264  newarray.setCapacity(capacity());
265  IDX_T n = size();
266  newarray.setSize(n);
267  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
268 
269  // decRef depends on knowing the type
// Release this array's reference to the old page table, casting to the
// concrete storage type so decRef can account for element sizes.
270  switch (oldstorage)
271  {
272  case UT_Storage::INT8:
273  castType<int8>().myImpl.getPages()->decRef(tuplesize); break;
274  case UT_Storage::INT16:
275  castType<int16>().myImpl.getPages()->decRef(tuplesize); break;
276  case UT_Storage::INT32:
277  castType<int32>().myImpl.getPages()->decRef(tuplesize); break;
278  case UT_Storage::INT64:
279  castType<int64>().myImpl.getPages()->decRef(tuplesize); break;
280  case UT_Storage::REAL16:
281  castType<fpreal16>().myImpl.getPages()->decRef(tuplesize); break;
282  case UT_Storage::REAL32:
283  castType<fpreal32>().myImpl.getPages()->decRef(tuplesize); break;
284  case UT_Storage::REAL64:
285  castType<fpreal64>().myImpl.getPages()->decRef(tuplesize); break;
286  case UT_Storage::INVALID:
287  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
288  myImpl.getPages()->decRef(tuplesize); break;
289  }
290 
291  // Take ownership of the page table.
// incRef keeps the table alive past newarray's destruction at end of scope.
292  PageTable *newpages = newarray.myImpl.getPages();
293  UT_ASSERT_P(newpages);
294  newpages->incRef();
295 
296  myImpl.setStorage(newstorage);
297  myImpl.getPages() = newpages;
298 }
299 
// NOTE(review): listing line 302 (the signature) is missing from this extraction;
// from the body this appears to be setTupleSize(exint newtuplesize,
// const UT_Defaults &v) -- confirm against the original header.
// Changes the runtime tuple size (only legal when TSIZE == -1), copying
// existing data into a new array of the new tuple size (extra components are
// initialized from v via setSize) and adopting that array's page table.
300 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
301 void
303 {
304  exint oldtuplesize = getTupleSize();
305  UT_ASSERT_P(newtuplesize >= 0);
306  UT_ASSERT_MSG_P((TSIZE == -1) || (newtuplesize == oldtuplesize), "Can't change the tuple size of an array whose tuple size is fixed.");
307 
308  // Nothing to do if same size, or bad size
309  if (newtuplesize == oldtuplesize || newtuplesize < 0)
310  return;
311 
312  PageTable *const oldpages = myImpl.getPages();
313 
314  // If there's no data, we only need to set the tuple size.
315  if (!oldpages)
316  {
317  myImpl.setTupleSize(newtuplesize);
318  return;
319  }
320 
321  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
322 
323  // Copy the data into a new array with the new storage type
// setSize(n, v) pre-fills with the defaults; moveRange then overwrites the
// components shared between the old and new tuple sizes.
324  ThisType newarray(newtuplesize, getStorage());
325  newarray.setCapacity(capacity());
326  IDX_T n = size();
327  newarray.setSize(n, v);
328  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
329 
330  // decRef depends on knowing the type
// Release this array's reference to the old page table, casting to the
// concrete storage type so decRef can account for element sizes.
331  switch (getStorage())
332  {
333  case UT_Storage::INT8:
334  castType<int8>().myImpl.getPages()->decRef(oldtuplesize); break;
335  case UT_Storage::INT16:
336  castType<int16>().myImpl.getPages()->decRef(oldtuplesize); break;
337  case UT_Storage::INT32:
338  castType<int32>().myImpl.getPages()->decRef(oldtuplesize); break;
339  case UT_Storage::INT64:
340  castType<int64>().myImpl.getPages()->decRef(oldtuplesize); break;
341  case UT_Storage::REAL16:
342  castType<fpreal16>().myImpl.getPages()->decRef(oldtuplesize); break;
343  case UT_Storage::REAL32:
344  castType<fpreal32>().myImpl.getPages()->decRef(oldtuplesize); break;
345  case UT_Storage::REAL64:
346  castType<fpreal64>().myImpl.getPages()->decRef(oldtuplesize); break;
347  case UT_Storage::INVALID:
348  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
349  myImpl.getPages()->decRef(oldtuplesize); break;
350  }
351 
352  // Take ownership of the page table.
// incRef keeps the table alive past newarray's destruction at end of scope.
353  PageTable *newpages = newarray.myImpl.getPages();
354  UT_ASSERT_P(newpages);
355  newpages->incRef();
356 
357  myImpl.setTupleSize(newtuplesize);
358  myImpl.getPages() = newpages;
359 }
360 
// NOTE(review): listing line 363 (the signature) is missing from this extraction;
// from the body this appears to be getMemoryUsage(bool inclusive) const --
// confirm against the original header.
// Returns the memory used by this array in bytes: optionally sizeof(*this),
// plus the page-table entries, plus per-page data (constant pages are cheaper,
// accounted for by PageTableEntry::getMemoryUsage).
361 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
362 int64
364 {
365  int64 mem = inclusive ? sizeof(*this) : 0;
366 
367  const PageTable *pages = myImpl.getPages();
368  if (!pages)
369  return mem;
370 
371  UT_PageNum npages = numPages(pages->capacity());
372  mem += exint(npages) * sizeof(PageTableEntry);
373 
// Bytes per element: storage size when the runtime storage is known,
// otherwise the compile-time element size, times the tuple size.
374  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
375 
376  // Case for a single, possibly small page
// A lone non-constant page may have a reduced capacity, so size it from
// the table's capacity rather than the full page size.
377  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
378  {
379  mem += sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
380  return mem;
381  }
382 
383  for (UT_PageNum i(0); i < npages; ++i)
384  {
385  const PageTableEntry *const page = pages->getPPage(i);
386  mem += page->getMemoryUsage(tuplebytes);
387  }
388 
389  return mem;
390 }
391 
// NOTE(review): listing line 394 (the signature) is missing from this extraction;
// from the body this appears to be countMemory(UT_MemoryCounter &counter,
// bool inclusive) const -- confirm against the original header.
// Reports this array's memory to counter, distinguishing unshared allocations
// from shared (reference-counted) ones so shared tables/pages are attributed
// proportionally across their owners.
392 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
393 void
395 {
396  if (counter.mustCountUnshared() && inclusive)
397  {
398  UT_MEMORY_DEBUG_LOG("UT_PageArray",int64(sizeof(*this)));
399  counter.countUnshared(sizeof(*this));
400  }
401 
402  const PageTable *pages = myImpl.getPages();
403  if (!pages)
404  return;
405 
406  UT_PageNum npages = numPages(pages->capacity());
407  int64 tablemem = exint(npages) * sizeof(PageTableEntry);
408  if (!pages->isShared())
409  {
410  if (counter.mustCountUnshared())
411  {
412  UT_MEMORY_DEBUG_LOG("UT_PageArray::PageTable",int64(tablemem));
413  counter.countUnshared(tablemem);
414  }
415  }
416  else
417  {
418  UT_ASSERT_MSG_P(pages->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
419  if (counter.mustCountShared())
420  {
421  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::PageTable",int64(tablemem),pages,pages->getRefCount());
422  bool already_counted = counter.countShared(tablemem, pages->getRefCount(), pages);
423 
424  // If this counter has already counted a reference to this page
425  // table, it's also already counted its pages, below, and since
426  // this is the *same* set of references it's already counted,
427  // not different references to the same pages we'd get incorrect
428  // reference count tracking if we counted the pages again, so we
429  // just return.
430  if (already_counted)
431  return;
432  }
433  }
434 
// Bytes per element: storage size when the runtime storage is known,
// otherwise the compile-time element size, times the tuple size.
435  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
436 
437  // Case for a single, possibly small page
// A lone non-constant page may have a reduced capacity, so size it from
// the table's capacity rather than the full page size.
438  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
439  {
440  const PageTableEntry *const page = pages->getFirstPage();
441  int64 pagemem = sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
442  if (!page->isShared())
443  {
444  if (counter.mustCountUnshared())
445  {
446  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page0",int64(pagemem));
447  counter.countUnshared(pagemem);
448  }
449  }
450  else
451  {
452  UT_ASSERT_MSG_P(page->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
453  if (counter.mustCountShared())
454  {
// The masked pointer is the page's identity key for shared-count dedup.
455  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
456  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page0",int64(pagemem),masked,page->getRefCount());
457  counter.countShared(pagemem, page->getRefCount(), masked);
458  }
459  }
460  return;
461  }
462 
463  for (UT_PageNum i(0); i < npages; ++i)
464  {
465  const PageTableEntry *const page = pages->getPPage(i);
466  int64 pagemem = page->getMemoryUsage(tuplebytes);
// Pages with no allocation (e.g. inline-constant pages) contribute nothing.
467  if (!pagemem)
468  continue;
469 
470  if (!page->isShared())
471  {
472  if (counter.mustCountUnshared())
473  {
474  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page",int64(pagemem));
475  counter.countUnshared(pagemem);
476  }
477  }
478  else
479  {
480  UT_ASSERT_P(page->getRefCount() > 1);
481  if (counter.mustCountShared())
482  {
// The masked pointer is the page's identity key for shared-count dedup.
483  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
484  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page",int64(pagemem),masked,page->getRefCount());
485  counter.countShared(pagemem, page->getRefCount(), masked);
486  }
487  }
488  }
489 }
490 
// NOTE(review): listing line 493 (the signature) is missing from this extraction;
// from the body this appears to be the same-array overload
// moveRange(IDX_T srcstart, IDX_T deststart, IDX_T nelements) -- confirm against
// the original header.
// Moves nelements elements from srcstart to deststart within this array.
491 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
492 void
494 {
495  // Just defer to the general function.
496  // It handles overlapping ranges and constant pages correctly.
497  moveRange(*this, srcstart, deststart, nelements);
498 }
499 
500 // This is a very big function, but don't let it scare you.
501 // Much of the code is only applicable to particular template types.
502 // If it weren't for constant pages, this would be *much* simpler.
503 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
504 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
505 void
508  IDX_T srcstart, IDX_T deststart, IDX_T nelements)
509 {
511  typedef typename SrcType::PageTable SrcPageTable;
512  typedef typename SrcType::PageTableEntry SrcPageTableEntry;
513  typedef typename SrcType::NotVoidType SrcNotVoidType;
514 
515  UT_ASSERT_P(nelements >= IDX_T(0));
516  UT_ASSERT_P(srcstart >= IDX_T(0) && deststart >= IDX_T(0));
517  UT_ASSERT_P(srcstart+nelements <= src.size() && deststart+nelements <= size());
518 
519  UT_ASSERT_P((SYSisSame<DATA_T,SRC_DATA_T>()) || (getStorage() != UT_Storage::INVALID && src.getStorage() != UT_Storage::INVALID));
520 
521  // If there's even a chance we might write values, we should harden the
522  // table and record it in the templates so that we don't harden again.
523  auto &hard = hardenTable();
524 
525  // If the destination storage type is not known at compile time,
526  // switch, cast, and call again.
527  if (SYSisSame<DATA_T,void>())
528  {
529  UT_Storage storage = getStorage();
530  switch (storage)
531  {
532  case UT_Storage::INT8:
533  hard.template castType<int8>().moveRange(src, srcstart, deststart, nelements); return;
534  case UT_Storage::INT16:
535  hard.template castType<int16>().moveRange(src, srcstart, deststart, nelements); return;
536  case UT_Storage::INT32:
537  hard.template castType<int32>().moveRange(src, srcstart, deststart, nelements); return;
538  case UT_Storage::INT64:
539  hard.template castType<int64>().moveRange(src, srcstart, deststart, nelements); return;
540  case UT_Storage::REAL16:
541  hard.template castType<fpreal16>().moveRange(src, srcstart, deststart, nelements); return;
542  case UT_Storage::REAL32:
543  hard.template castType<fpreal32>().moveRange(src, srcstart, deststart, nelements); return;
544  case UT_Storage::REAL64:
545  hard.template castType<fpreal64>().moveRange(src, srcstart, deststart, nelements); return;
546  case UT_Storage::INVALID:
547  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
548  break;
549  }
550  return;
551  }
552 
553  // If the source storage type is not known at compile time,
554  // switch, cast, and call again.
555  if (SYSisSame<SRC_DATA_T,void>())
556  {
557  // Avoid switch on storage type if src is dest.
558  if ((const void*)&src==(void*)this)
559  {
560  hard.moveRange(src.template castType<DATA_T>(), srcstart, deststart, nelements);
561  return;
562  }
563 
564  UT_Storage src_storage = src.getStorage();
565  switch (src_storage)
566  {
567  case UT_Storage::INT8:
568  hard.moveRange(src.template castType<int8>(), srcstart, deststart, nelements); return;
569  case UT_Storage::INT16:
570  hard.moveRange(src.template castType<int16>(), srcstart, deststart, nelements); return;
571  case UT_Storage::INT32:
572  hard.moveRange(src.template castType<int32>(), srcstart, deststart, nelements); return;
573  case UT_Storage::INT64:
574  hard.moveRange(src.template castType<int64>(), srcstart, deststart, nelements); return;
575  case UT_Storage::REAL16:
576  hard.moveRange(src.template castType<fpreal16>(), srcstart, deststart, nelements); return;
577  case UT_Storage::REAL32:
578  hard.moveRange(src.template castType<fpreal32>(), srcstart, deststart, nelements); return;
579  case UT_Storage::REAL64:
580  hard.moveRange(src.template castType<fpreal64>(), srcstart, deststart, nelements); return;
581  case UT_Storage::INVALID:
582  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
583  }
584  return;
585  }
586 
587  // We now have both the source type and the destination type known at compile time.
588  UT_ASSERT_P((!SYSisSame<DATA_T,void>()) && (!SYSisSame<SRC_DATA_T,void>()));
589 
590  // Check if zero elements or moving data to location it's already in.
591  if (nelements <= IDX_T(0) || (SYSisSame<DATA_T,SRC_DATA_T>() && (const void*)&src==(void*)this && srcstart == deststart))
592  return;
593 
594  UT_PageOff srcoff = pageOff(srcstart);
595  UT_PageOff destoff = pageOff(deststart);
596 
597  // Just copy the minimum of the tuple sizes.
598  // Hopefully the compiler optimizes approriately if the values are
599  // known at compile time.
600  const exint srctuplesize = src.getTupleSize();
601  const exint desttuplesize = getTupleSize();
602 
603  // Nothing to do if either tuple size is zero.
604  if (srctuplesize == 0 || desttuplesize == 0)
605  return;
606 
607  const SrcPageTable *srcpagetable = src.myImpl.getPages();
608  PageTable *destpagetable = myImpl.getPages();
609 
610  // Since nelements is > 0, srcpagetable and destpagetable should be non-NULL.
611  UT_ASSERT_P(srcpagetable && destpagetable);
612 
613  UT_PageNum srcpagenum = pageNum(srcstart);
614  UT_PageNum destpagenum = pageNum(deststart);
615 
616  // NOTE: Shouldn't need to check for smaller first page here
617  // (until below), since that page boundary isn't allowed
618  // to be crossed by the ranges.
619  if (srcoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize) && destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
620  {
621  // *************************************************************
622  // * CASE 1: Source and dest each confined to 1 page *
623  // *************************************************************
624 
625  // NOTE: We can dereference here because we don't pass any address into srcpage
626  // outside of this scope.
627  const SrcPageTableEntry *const srcpage = srcpagetable->getPPage(srcpagenum);
628  PageTableEntry *destpage = destpagetable->getPPage(destpagenum);
629 
630  // This is the only case that can have both srcpage and destpage be small pages.
631  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
632  UT_PageOff destpagecapacity(thePageSize);
633  if (issmalldestpage)
634  destpagecapacity = destpagetable->capacity();
635 
636  // If dest is a full page and src is also a full page or constant, just use replacePage.
637  bool isfullpage = (nelements == IDX_T(thePageSize));
638  if (!isfullpage && destoff == UT_PageOff(0) && deststart+nelements == size())
639  {
640  // If srcpage and destpage aren't the same capacity, destpage can't reference srcpage,
641  // even if size() is much less than the capacity of either.
642  bool issmallsrcpage = srcpagetable->capacity() < IDX_T(thePageSize);
643  bool samecapacity = (!issmalldestpage && !issmallsrcpage) ||
644  (issmalldestpage && issmallsrcpage && destpagetable->capacity() == srcpagetable->capacity());
645 
646  // destpage is a full destination page, but may not be replaceable by srcpage.
647  // srcpage lines up if srcoff == 0, and always implicitly lines up if constant.
648  // If either src or dest is small page and can't reference due to different capacity,
649  // fall through to copyPartialPage, which won't reference.
650 
651  isfullpage = ((srcoff == UT_PageOff(0) && samecapacity) || srcpage->isConstant());
652  }
653  if (isfullpage)
654  {
655  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nelements, destpagecapacity);
656  return;
657  }
658 
659  // If it's a partial page, just copy that part
660  // NOTE: This handles overlapping ranges correctly.
661  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, nelements, destpagecapacity);
662  return;
663  }
664 
665  // If overlapping and moving data to later addresses,
666  // we need to do a backward loop, which is a big pain.
667  bool isoverlappingmovelater = (
668  SYSisSame<DATA_T,SRC_DATA_T>() &&
669  (void*)this == (const void *)&src &&
670  (deststart > srcstart && deststart < srcstart+nelements)
671  );
672 
673  if (srcoff == destoff)
674  {
675  // *************************************************************
676  // * CASE 2: Source and dest pages line up and at least one *
677  // * page boundary is crossed. *
678  // *************************************************************
679  // Example for following along:
680  // THEPAGEBITS is 3, so 8-tuple pages.
681  // src and to are dest 5.
682  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
683  // dest [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
684 
685  // In this case, src and dest don't have any small pages,
686  // because both have page boundary crossings.
687 
688  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
689  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
690 
691  // If overlapping and moving data to later addresses,
692  // we need to do a backward loop, which is a big pain.
693  // It's not a very common case, so it doesn't have to be as optimized.
694  if (isoverlappingmovelater)
695  {
696  UT_ASSERT_P(desttuplesize == srctuplesize);
697 
698  UT_PageOff ntuplesfirstpage(0);
699  if (destoff != UT_PageOff(0))
700  {
701  ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
702  nelements -= IDX_T(exint(ntuplesfirstpage));
703  }
704 
705  // (nelements is now 3 less)
706  // NOTE: Not numPages, since that'd include any partial page at end
707  UT_PageNum nfullpages = pageNum(nelements);
708  ++psrcpagep;
709  ++pdestpagep;
710  PageTableEntry *pdestend = pdestpagep + nfullpages;
711  const SrcPageTableEntry *psrcend = psrcpagep + nfullpages;
712 
713  // Since backward, first, copy any incomplete last page
714  // src [ | | |# # # # # #]
715  // dest [ | | |# # # # # #]
716  UT_PageOff nleftover = pageOff(nelements);
717  if (nleftover != UT_PageOff(0))
718  {
719  const SrcPageTableEntry *srcpage = psrcend;
720  PageTableEntry *destpage = pdestend;
721 
722  // Remember that it may be effectively complete, if
723  // the last page within the size of the array and nleftover is
724  // the number of elements less than size() in that page.
725  // If it's really a full page, just use replacePage.
726  bool isfullpage = deststart+nelements == size();
727  if (isfullpage)
728  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
729  else
730  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
731  }
732 
733  // Still backward, copy/reference whole pages next:
734  // src [ |# # # # # # # #|# # # # # # # #| ]
735  // dest [ |# # # # # # # #|# # # # # # # #| ]
736  while (pdestpagep != pdestend)
737  {
738  --psrcend;
739  --pdestend;
740  const SrcPageTableEntry *srcpage = psrcend;
741  PageTableEntry *destpage = pdestend;
742 
743  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
744  }
745 
746  // Still backward, lastly, copy any incomplete first page:
747  // src [# # #| | | ]
748  // dest [# # #| | | ]
749  if (destoff != UT_PageOff(0))
750  {
751  --psrcpagep;
752  --pdestpagep;
753  const SrcPageTableEntry *srcpage = psrcpagep;
754  PageTableEntry *destpage = pdestpagep;
755 
756  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
757  }
758 
759  return;
760  }
761 
762  // In the common case of case 2, src and dest aren't overlapping,
763  // or src is later than dest, so we can go forward.
764 
765  // First, copy any incomplete first page:
766  // src [# # #| | | ]
767  // dest [# # #| | | ]
768  if (destoff != UT_PageOff(0))
769  {
770  const SrcPageTableEntry *srcpage = psrcpagep;
771  PageTableEntry *destpage = pdestpagep;
772 
773  UT_PageOff ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
774  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
775 
776  nelements -= IDX_T(exint(ntuplesfirstpage));
777  ++psrcpagep;
778  ++pdestpagep;
779  }
780 
781  // Copy/reference whole pages next:
782  // src [ |# # # # # # # #|# # # # # # # #| ]
783  // dest [ |# # # # # # # #|# # # # # # # #| ]
784  // (nelements is now 3 less)
785  // NOTE: Not numPages, since that'd include any partial page at end
786  UT_PageNum nfullpages = pageNum(nelements);
787  PageTableEntry *pdestend = pdestpagep + nfullpages;
788  for (; pdestpagep != pdestend; ++psrcpagep, ++pdestpagep)
789  {
790  const SrcPageTableEntry *srcpage = psrcpagep;
791  PageTableEntry *destpage = pdestpagep;
792 
793  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
794  }
795 
796  // Lastly, copy any incomplete last page
797  // src [ | | |# # # # # #]
798  // dest [ | | |# # # # # #]
799  UT_PageOff nleftover = pageOff(nelements);
800  if (nleftover != UT_PageOff(0))
801  {
802  const SrcPageTableEntry *srcpage = psrcpagep;
803  PageTableEntry *destpage = pdestpagep;
804 
805  // Remember that it may be effectively complete, if
806  // the last page within the size of the array and nleftover is
807  // the number of elements less than size() in that page.
808  // If it's really a full page, just use replacePage.
809  bool isfullpage = deststart+nelements == size();
810  if (isfullpage)
811  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
812  else
813  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
814  }
815  return;
816  }
817  else
818  {
819  // *************************************************************
820  // * CASE 3: Source and dest pages don't line up and at least *
821  // * one page boundary is crossed. *
822  // *************************************************************
823  // Example for following along:
824  // THEPAGEBITS is 3, so 8-tuple pages.
825  // src is 5; dest is 3.
826  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
827  // dest [# # # # #|# # # # # # # #|# # # # # # # #|# # # #]
828  // |<----6---->|<2>|
829  // spagestartind dpagestartins
830  UT_PageOff spagestartind = pageOff(deststart-srcstart);
831  UT_PageOff dpagestartins = pageOff(srcstart-deststart);
832 
833  // Because of the misalignment, we don't have to worry about
834  // referencing pages, though we do have to worry about constant
835  // pages. If both src pages covering a full dest page are constant
836  // and the same value, we can use makeConstantFrom using either
837  // of the source pages.
838 
839  // REMEMBER: This case could have a small first page in either
840  // src or dest, but not both.
841  // REMEMBER: Must handle overlapping ranges!
842 
843  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
844  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
845 
846  // Case 3.0:
847  // Overlapping range
848 
849  const SrcPageTableEntry *srcpage0 = psrcpagep;
850 
851  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
852 
853  // Case 3.1:
854  // src [# # #|# #]
855  // dest [# # # # #]
856  // dest is in a single page; it could be a small page.
857  // src is across two pages; they can't be small-capacity pages.
858  if (destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
859  {
860  PageTableEntry *destpage = pdestpagep;
861 
862  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
863  UT_PageOff destpagecapacity(thePageSize);
864  if (issmalldestpage)
865  destpagecapacity = destpagetable->capacity();
866 
867  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
868 
869  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
870  {
871  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
872  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
873  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
874  {
875  // If dest page is already constant and equal to both src pages, nothing to do.
876  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
877  return;
878 
879  // If both src pages are constant and equal, and dest is a full
880  // page, make dest constant.
881  bool isfullpage = (nelements == IDX_T(thePageSize)) || (destoff == UT_PageOff(0) && deststart+nelements == size());
882  if (isfullpage)
883  {
884  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
885  return;
886  }
887  }
888  }
889 
890  if (!PAGESHARDENED && destpage->isConstant())
891  hardenConstantPage(destpage, destpagecapacity, desttuplesize);
892  else if (!PAGESHARDENED && destpage->isShared())
893  hardenSharedPage(destpage, destpagecapacity, desttuplesize);
894 
895  UT_PageOff n0 = UT_PageOff(thePageSize)-srcoff;
896  if (isoverlappingmovelater)
897  {
898  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
899  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
900  }
901  else
902  {
903  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
904  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
905  }
906 
907  return;
908  }
909 
910  // There is at least one dest page boundary, so dest has full-capacity pages.
911 
912  if (isoverlappingmovelater)
913  {
914  // FIXME: Implement this!!!
915  UT_ASSERT_MSG(0, "Implement this!!! It should be like the code below, only copying chunks in reverse order.");
916 
917  return;
918  }
919 
920  // Deal with tuples before the first full destination page.
921  if (destoff > UT_PageOff(0))
922  {
923  PageTableEntry *destpage = pdestpagep;
924 
925  if (destoff < spagestartind)
926  {
927  // srcpage0 srcpage1
928  // src [# # #|# # ...
929  // dest [# # # # #|...
930  // |<--->|<->|
931  // spagestartind-destoff dpagestartins
932 
933  UT_PageOff n0 = spagestartind - destoff;
934  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
935 
936  srcoff = UT_PageOff(0);
937  destoff += n0;
938  ++psrcpagep;
939  srcpage0 = psrcpagep;
940  nelements -= IDX_T(exint(n0));
941  }
942 
943  // srcpage0
944  // src [# # # #...
945  // dest [# #|# #...
946  // |<->|
947  // thePageSize-destoff
948  UT_PageOff n0 = UT_PageOff(thePageSize) - destoff;
949  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
950  ++pdestpagep;
951  nelements -= IDX_T(exint(n0));
952  }
953 
954  // Middle full destination pages
955  for (; nelements >= IDX_T(thePageSize); nelements -= IDX_T(thePageSize), ++pdestpagep, ++psrcpagep, (srcpage0 = psrcpagep))
956  {
957  PageTableEntry *destpage = pdestpagep;
958 
959  // srcpage0 srcpage1
960  // src [ # # # # # #|# # ]
961  // dest [ |# # # # # # # #| ]
962  // |<--------->|<->|
963  // spagestartind dpagestartins
964 
965  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
966 
967  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
968  {
969  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
970  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
971  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
972  {
973  // If dest page is already constant and equal to both src pages, nothing to do.
974  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
975  continue;
976 
977  // If both src pages are constant and equal, and dest is a full
978  // page, make dest constant.
979  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
980  continue;
981  }
982  }
983 
984  if (!PAGESHARDENED && destpage->isConstant())
985  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
986  else if (!PAGESHARDENED && destpage->isShared())
987  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
988 
989  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
990  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), dpagestartins, UT_PageOff(thePageSize));
991  }
992 
993  // Final partial page, though may reach size()
994  if (nelements > IDX_T(0))
995  {
996  PageTableEntry *destpage = pdestpagep;
997 
998  const bool isfullmaybeconstpage = !PAGESHARDENED && (deststart+nelements == size());
999 
1000  if (nelements > IDX_T(exint(spagestartind)))
1001  {
1002  // srcpage0 srcpage1
1003  // src [ # # # # # #|#]
1004  // dest [ |# # # # # # #]
1005  // |<--------->|-|
1006  // spagestartind nelements-spagestartind
1007 
1008  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
1009 
1010  if (isfullmaybeconstpage && srcpage0->isConstant() && srcpage1->isConstant())
1011  {
1012  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1013  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
1014  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
1015  {
1016  // If dest page is already constant and equal to both src pages, nothing to do.
1017  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1018  return;
1019 
1020  // If both src pages are constant and equal, and dest is a full
1021  // page, make dest constant.
1022  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1023  return;
1024  }
1025  }
1026 
1027  if (!PAGESHARDENED && destpage->isConstant())
1028  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1029  else if (!PAGESHARDENED && destpage->isShared())
1030  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1031 
1032  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
1033  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), UT_PageOff(exint(nelements))-spagestartind, UT_PageOff(thePageSize));
1034  }
1035  else
1036  {
1037  // srcpage0
1038  // src [ # # # # #]
1039  // dest [ |# # # # #]
1040  // |<------->|
1041  // nelements
1042 
1043  if (isfullmaybeconstpage && srcpage0->isConstant())
1044  {
1045  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1046  // If dest page is already constant and equal to both src pages, nothing to do.
1047  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1048  return;
1049 
1050  // If both src pages are constant and equal, and dest is a full
1051  // page, make dest constant.
1052  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1053  return;
1054  }
1055 
1056  if (!PAGESHARDENED && destpage->isConstant())
1057  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1058  else if (!PAGESHARDENED && destpage->isShared())
1059  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1060 
1061  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, UT_PageOff(exint(nelements)), UT_PageOff(thePageSize));
1062  }
1063  }
1064  }
1065 }
1066 
 // swapRange: element-wise swap of two equal-length ranges of this array.
 // The ranges must not overlap (asserted below). The table is hardened
 // first so writes don't affect other sharers of the pages.
 // When DATA_T is void (storage only known at runtime), dispatches on
 // getStorage() to a concrete-typed swapRange via castType<>().
 // NOTE(review): the qualified signature line is absent from this listing;
 // the recursive calls below show the parameters are
 // (IDX_T astart, IDX_T bstart, IDX_T nelements) -- confirm against the
 // original header.
1067 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1068 void
1070 {
1071  UT_ASSERT_P(nelements >= IDX_T(0));
1072  UT_ASSERT_P(astart >= IDX_T(0) && bstart >= IDX_T(0));
1073  UT_ASSERT_P(astart+nelements <= size() && bstart+nelements <= size());
1074  UT_ASSERT_MSG_P(astart >= bstart+nelements || bstart >= astart+nelements, "Ranges can't overlap when swapping!");
 // Nothing to swap for an empty range.
1075  if (nelements <= IDX_T(0))
1076  return;
 // hardenTable() yields a reference whose operator() gives writable access.
1077  auto &hard = hardenTable();
1078  if (!SYSisSame<DATA_T,void>())
1079  {
1080  // Easy case, where the storage type is known at compile time.
1081  exint tuplesize = getTupleSize();
1082  for (IDX_T i(0); i < nelements; ++i)
1083  {
1084  for (exint component = 0; component < tuplesize; ++component)
1085  {
1086  UTswap(hard(astart+i, component), hard(bstart+i, component));
1087  }
1088  }
1089  return;
1090  }
1091 
1092  // Hard case, where the storage type is not known at compile time.
 // Each case re-enters swapRange with DATA_T fixed to the runtime storage
 // type, so the loop above runs with compile-time-typed element access.
1093  UT_Storage storage = getStorage();
1094  switch (storage)
1095  {
1096  case UT_Storage::INT8:
1097  hard.template castType<int8>().swapRange(astart, bstart, nelements); return;
1098  case UT_Storage::INT16:
1099  hard.template castType<int16>().swapRange(astart, bstart, nelements); return;
1100  case UT_Storage::INT32:
1101  hard.template castType<int32>().swapRange(astart, bstart, nelements); return;
1102  case UT_Storage::INT64:
1103  hard.template castType<int64>().swapRange(astart, bstart, nelements); return;
1104  case UT_Storage::REAL16:
1105  hard.template castType<fpreal16>().swapRange(astart, bstart, nelements); return;
1106  case UT_Storage::REAL32:
1107  hard.template castType<fpreal32>().swapRange(astart, bstart, nelements); return;
1108  case UT_Storage::REAL64:
1109  hard.template castType<fpreal64>().swapRange(astart, bstart, nelements); return;
1110  case UT_Storage::INVALID:
1111  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
1112  break;
1113  }
1114 }
1115 
 // Copies/assigns an entire source page into dest:
 // - constant src: make dest constant too (makeConstantFrom), or fill
 //   dest's data with the constant tuple when dest can't easily be made
 //   constant (non-constant dest with larger tuple size, or PAGESHARDENED);
 // - same type and tuple size, sharing allowed: reference the src page
 //   (incRef) instead of copying;
 // - otherwise: harden dest as needed and copy/convert the element data.
 // destpagesize is the number of tuples to fill/copy; destpagecapacity is
 // the allocation size used when hardening dest.
 // NOTE(review): the signature line is absent from this listing; presumably
 // this is copyWholePage -- confirm against the original header.
1116 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1117 template<typename SrcType>
1118 void
1120  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destpagesize, UT_PageOff destpagecapacity)
1121 {
1122  typedef typename SrcType::DataType SRC_DATA_T;
1123  typedef typename SrcType::NotVoidType SrcNotVoidType;
1124  UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
1125  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1126  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1128  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1129  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1130  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1131  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1132 
1133  // If the source page is constant,
1134  if (src->isConstant())
1135  {
1136  if (!PAGESHARDENED && (dest->isConstant() || desttuplesize <= srctuplesize))
1137  makeConstantFrom<SrcType>(dest, src, desttuplesize, srctuplesize);
1138  else
1139  {
1140  // This codepath is primarily for the awkward case where we can't
1141  // easily make the destination page constant, because
1142  // it's not currently constant and the tuple size is larger.
1143  // However, it's also used for filling a page that isn't allowed to be
1144  // constant-compressed with the tuple from a constant-compressed source.
1145  UT_ASSERT_P(PAGESHARDENED || (!dest->isConstant() && desttuplesize > srctuplesize));
1146 
1147  if (!PAGESHARDENED && dest->isShared())
1148  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1149 
1150  // Fill range in dest with value from src.
1151  NotVoidType *destpagedata = dest->getFirstPtr();
1152  // NOTE: This is destpagesize instead of capacity, because it's just used for filling in data.
1153  NotVoidType *destpageend = destpagedata + (desttuplesize*destpagesize);
1154 
1155  const SrcNotVoidType *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1156 
1157  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
1158  const exint desttupleextra = desttuplesize-mintuplesize;
1159 
1160  fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);
1161  }
1162  }
1163  else if (!PAGESHARDENED && SYSisSame<DATA_T,SRC_DATA_T>() && desttuplesize == srctuplesize)
1164  {
1165  // Nothing to do if already referencing the same data.
1166  // This pointer comparison works because we know that
1167  // the types and tuple sizes are the same,
1168  // and the src is non-constant, (so if dest is constant,
1169  // it won't be equal).
1170  if (src->getFirstPtrVoid() == dest->getFirstPtrVoidUnsafe())
1171  return;
1172 
 // Drop dest's current buffer (if refcounted) before aliasing src's.
1173  exint bytesize = desttuplesize*sizeof(NotVoidType);
1174  if (dest->isRefd(bytesize))
1175  dest->decRef();
1176 
1177  // Reference the source page
1178  SYSconst_cast(src)->incRef();
1179 
1180  // Still need to cast to PageTableEntry*, because the compiler needs to
1181  // compile this line when the condition is false.
1182  *dest = *(const PageTableEntry *)src;
1183  }
1184  else
1185  {
1186  if (!PAGESHARDENED && dest->isConstant())
1187  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1188  else if (!PAGESHARDENED && dest->isShared())
1189  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1190 
1191  // Copy data from src to dest
1192  NotVoidType *destpagedata = dest->getFirstPtr();
1193  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1194  // NOTE: This must be destpagesize instead of capacity, else it might access the source out of bounds.
1195  copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, destpagesize);
1196  }
1197 }
1198 
 // copyPartialPage: copy ntuples tuples from src (starting at srcoff) into
 // dest (starting at destoff), within single pages. A constant src is
 // expanded with fillNonConstWithConst; a constant/shared dest is hardened
 // first (unless the constant tuples already compare equal, in which case
 // nothing needs to be done). If dest and src alias the same buffer and the
 // offset ranges overlap, the copy direction is chosen so that data is not
 // clobbered before it is read.
 // NOTE(review): the signature line is absent from this listing; the call
 // sites earlier in this file show the name copyPartialPage.
1199 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1200 template<typename SrcType>
1201 void
1203  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize,
1204  UT_PageOff destoff, UT_PageOff srcoff, UT_PageOff ntuples, UT_PageOff destpagecapacity)
1205 {
1206  typedef typename SrcType::DataType SRC_DATA_T;
1207  typedef typename SrcType::NotVoidType SrcNotVoidType;
1208  UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
1209  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1210  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1212  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1213  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1214  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1215  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1216  UT_ASSERT_P(ntuples > UT_PageOff(0));
1217 
1218  // NOTE: Don't have to check for full page. The caller is responsible
1219  // for that if they want referencing or constant overwriting.
1220 
1221  // If the source page is constant,
1222  if (src->isConstant())
1223  {
1224  const SrcPageTableEntry *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1225 
1226  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
1227 
1228  // If the destination page is constant,
1229  if (dest->isConstant())
1230  {
1231  const NotVoidType *tuple = getConstantPtr(dest, 0, desttuplesize);
1232 
1233  // If the const pages are equal, there's nothing to do.
1234  if (isEqualConst(tuple, stuple, mintuplesize))
1235  return;
1236 
1237  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1238  }
1239 
1240  if (!PAGESHARDENED && dest->isShared())
1241  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1242 
1243  // Fill range in dest with value from src.
1244  NotVoidType *destpagedata = dest->getFirstPtr() + (desttuplesize*destoff);
1245  NotVoidType *destpageend = destpagedata + (desttuplesize*ntuples);
1246 
1247  const exint desttupleextra = desttuplesize-mintuplesize;
1248 
1249  fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);
1250 
1251  return;
1252  }
1253 
1254  if (!PAGESHARDENED && dest->isConstant())
1255  hardenConstantPage(dest, destpagecapacity, desttuplesize);
1256  else if (!PAGESHARDENED && dest->isShared())
1257  hardenSharedPage(dest, destpagecapacity, desttuplesize);
1258 
1259  // Remember that the ranges could overlap if same page
1260  // NOTE: Since dest was hardened if shared, dest can only equal src if same table.
1261  if (SYSisSame<DATA_T,SRC_DATA_T>() && dest->getFirstPtrVoidUnsafe() == src->getFirstPtrVoid() && (srcoff < destoff+UT_PageOff(exint(ntuples)) && destoff < srcoff+UT_PageOff(exint(ntuples))))
1262  {
1263  // Overlapping, so be careful!
1264 
1265  UT_ASSERT_P(desttuplesize == srctuplesize);
1266 
1267  // Nothing to do if exactly same range.
1268  // This could happen even if caller checked the global offsets,
1269  // and even if they're separate arrays, because the same page
1270  // can appear in multiple locations.
1271  if (srcoff == destoff)
1272  return;
1273 
1274  NotVoidType *destpagedata = dest->getFirstPtr();
1275  destpagedata += desttuplesize*destoff;
1276  NotVoidType *destend = destpagedata + desttuplesize*ntuples;
1277 
1278  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1279  srcpagedata += srctuplesize*srcoff;
1280 
1281  // If moving to earlier addresses, can copy in forward loop
1282  if (destoff < srcoff)
1283  {
1284  do
1285  {
1286  *destpagedata = *srcpagedata;
1287  ++srcpagedata;
1288  ++destpagedata;
1289  } while (destpagedata != destend);
1290  }
1291  // If moving to later addresses, must copy in backward loop
1292  else
1293  {
1294  const SrcNotVoidType *srcend = srcpagedata + srctuplesize*ntuples;
1295  do
1296  {
1297  --srcend;
1298  --destend;
1299  *destend = *srcend;
1300  } while (destpagedata != destend);
1301  }
1302  }
1303  else
1304  {
1305  // The two ranges don't overlap, so just copy
1306  NotVoidType *destpagedata = dest->getFirstPtr();
1307  destpagedata += desttuplesize*destoff;
1308 
1309  const SrcNotVoidType *srcpagedata = src->getFirstPtr();
1310  srcpagedata += srctuplesize*srcoff;
1311 
1312  copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, UT_PageOff(ntuples));
1313  }
1314 }
1315 
 // makeConstantFrom: make dest a constant-compressed page whose tuple equals
 // src's constant tuple (converted to this array's storage type via
 // UTconvertStorage when the types differ). Handles:
 //  - exact type/tuple-size match: alias src's constant page (with refcount
 //    maintenance for non-inline constant pages);
 //  - inline constant tuples (typeFitsInline);
 //  - shared and unshared out-of-line constant pages;
 //  - desttuplesize > srctuplesize, where dest's higher components must be
 //    preserved (requires allocating a new constant page before releasing
 //    the old one).
 // NOTE(review): the signature line is absent from this listing; call sites
 // show makeConstantFrom<SrcType>(dest, src, desttuplesize, srctuplesize).
1316 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1317 template<typename SrcType>
1318 void
1320  PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize)
1321 {
1322  typedef typename SrcType::DataType SRC_DATA_T;
1323  //typedef UT_PageArray<SRC_DATA_T,SRC_TSIZE,SRC_TABLEHARDENED,SRC_PAGESHARDENED,THEPAGEBITS,IDX_T> SrcType;
1324  typedef typename SrcType::PageTableEntry SrcPageTableEntry;
1325  typedef typename SrcType::NotVoidType SrcNotVoidType;
1326 
1327  const exint SRC_TSIZE = SrcType::theTupleSize;
1328  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1329  UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
1330  UT_ASSERT_P(src->isConstant());
1332  UT_ASSERT_P(!PAGESHARDENED);
1333  UT_ASSERT_P(thePageBits == SrcType::thePageBits);
1334  UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
1335  UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
1336  UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
1337  UT_ASSERT_MSG_P(desttuplesize <= srctuplesize || dest->isConstant(), "The higher component values may not have been constant!");
1338 
1339  if (PAGESHARDENED)
1340  return;
1341 
1342  // Do nothing in very common case of copying zero to zero.
1343  // NOTE: It may seem like this could be generalized to
1344  // if (*src == *dest)
1345  // but, they could be different types or tuple sizes,
1346  // so, for example, one could be an inline constant page
1347  // that just happens to equal a pointer for a non-inline
1348  // constant page, or two inline tuples may be different
1349  // but produce equal pointers, e.g. (-2,-2) in int16
1350  // would match (-65538) in int32.
1351  const bool issrczero = src->isConstantAndZeroSafe();
1352  if (dest->isConstantAndZeroSafe() && issrczero)
1353  return;
1354 
1355  bool wasconst = dest->isConstant();
1356  if (!wasconst)
1357  dest->decRef();
1358 
1359  // Common case of exact match can just reference the same constant page
1360  if (((TSIZE == SRC_TSIZE && TSIZE != -1) || (desttuplesize == srctuplesize)) && SYSisSame<DATA_T,SRC_DATA_T>())
1361  {
1362  // Now that we know that the types and tuple sizes are the same,
1363  // we can just check the pointers to see if they're the same (constant) page.
1364  // The typecast on src is just so that it will compile when the types don't match.
1365  if (*dest == *(const PageTableEntry*)src)
1366  return;
1367 
 // Non-inline, refcounted constant pages need their references managed;
 // inline and constant-zero pages carry no refcount.
1368  const bool typefitsinline = PageTableEntry::typeFitsInline(desttuplesize);
1369  if (!typefitsinline && wasconst && !dest->isConstantZero())
1370  {
1371  dest->decRef();
1372  }
1373 
1374  // Still need to cast to PageTableEntry*, because the compiler needs to
1375  // compile this line when the condition is false.
1376  *dest = *(const PageTableEntry*)src;
1377 
1378  if (!typefitsinline && !dest->isConstantZero())
1379  {
1380  dest->incRef();
1381  }
1382 
1383  return;
1384  }
1385 
1386  // Either the type doesn't match or the tuple size doesn't match.
1387 
1388  const SrcNotVoidType *const srctuple = SrcType::getConstantPtr(src, 0, srctuplesize);
1389 
1390  const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);
1391 
1392  // Easy for inline case, checked at compile-time.
1393  if (PageTableEntry::typeFitsInline(desttuplesize))
1394  {
1395  // If dest was already constant, we don't need to write
1396  // theConstantPageBit, and we don't want to blow away any
1397  // components that are between srctuplesize and desttuplesize, in the
1398  // unlikely event that desttuplesize > srctuplesize.
1399  if (!wasconst)
1400  {
1401  // This sets the constant bit and makes sure that the
1402  // space between that bit and tuple component 0 is zeroed.
1403  dest->initZero();
1404 
1405  // Since initZero sets everything to zero, if src
1406  // is all zero, we can early exit.
1407  if (issrczero)
1408  return;
1409  }
1410  NotVoidType *tuple = dest->getInlinePtr(desttuplesize);
1411  if (issrczero)
1412  {
1413  memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
1414  }
1415  else
1416  {
1417  for (exint i = 0; i < mintuplesize; ++i)
1418  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1419  }
1420  return;
1421  }
1422 
1423  // In other cases, we may or may not have to unref constant page
1424  if (wasconst)
1425  {
1426  if (dest->isConstantZero())
1427  {
1428  // Fairly common case: already zero, making zero.
1429  if (issrczero)
1430  return;
1431  }
1432  else
1433  {
1434  if (desttuplesize <= srctuplesize && issrczero)
1435  {
1436  // No longer need this old constant page
1437  dest->decRef();
1438  }
1439  else if (dest->isShared())
1440  {
 // Shared constant page: check for equality first so we avoid
 // allocating/rewriting when the stored tuple already matches.
1441  NotVoidType *tuple = dest->getMaskedPtr();
1442  bool equal = true;
1443  if (desttuplesize > srctuplesize && issrczero)
1444  {
1445  equal = isZero(tuple, mintuplesize);
1446  }
1447  else
1448  {
1449  for (exint i = 0; i < mintuplesize; ++i)
1450  {
1451  if (tuple[i] != UTconvertStorage<NotVoidType>(srctuple[i]))
1452  {
1453  equal = false;
1454  break;
1455  }
1456  }
1457  }
1458 
1459  if (equal)
1460  {
1461  // Already equal; nothing to do
1462  return;
1463  }
1464 
1465  // Need to allocate new constant page before ditching the old one
1466  // if desttuplesize is larger, because some elements need to be kept.
1467  if (desttuplesize > srctuplesize)
1468  {
1469  // Need to save the pointers so that we can decRef below
1470  // after calling alloc.
1471  PageTableEntry orig(*dest);
1472 
1473  dest->alloc(UT_PageOff(1), desttuplesize);
1474  NotVoidType *newtuple = dest->getFirstPtr();
1475  // Copy lower components from src
1476  if (issrczero)
1477  {
1478  memset(newtuple, 0, srctuplesize*sizeof(NotVoidType));
1479  }
1480  else
1481  {
1482  for (exint i = 0; i < srctuplesize; ++i)
1483  newtuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1484  }
1485  // Copy higher components from dest
1486  for (exint i = srctuplesize; i < desttuplesize; ++i)
1487  newtuple[i] = tuple[i];
1488 
1489  orig.decRef();
1490  dest->setConstantBit();
1491  return;
1492  }
1493 
1494  // No longer need this old constant page
1495  dest->decRef();
1496  }
1497  else
1498  {
1499  // Reuse the unshared constant page
1500  NotVoidType *tuple = dest->getMaskedPtr();
1501  if (issrczero)
1502  {
1503  memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
1504  }
1505  else
1506  {
1507  for (exint i = 0; i < mintuplesize; ++i)
1508  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1509  }
1510  return;
1511  }
1512  }
1513  }
1514 
 // Reached when dest has no reusable constant page: either it wasn't
 // constant, or its old constant page was released above.
1515  if (desttuplesize <= srctuplesize && issrczero)
1516  {
1517  dest->initZero();
1518  return;
1519  }
1520 
1521  // Need to allocate new constant page
1522  dest->alloc(UT_PageOff(1), desttuplesize);
1523  NotVoidType *tuple = dest->getFirstPtr();
1524  if (issrczero)
1525  {
1526  memset(tuple, 0, desttuplesize*sizeof(NotVoidType));
1527  }
1528  else
1529  {
1530  for (exint i = 0; i < mintuplesize; ++i)
1531  tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
1532 
1533  if (desttuplesize > srctuplesize)
1534  {
1535  // dest was already zero when here, or !wasconst, so zero out the extra components not copied from src.
1536  memset(tuple+srctuplesize, 0, (desttuplesize-srctuplesize)*sizeof(NotVoidType));
1537  }
1538  }
1539  dest->setConstantBit();
1540 }
1541 
 // fillNonConstWithConst: fill the non-constant destination range
 // [destpagedata, destpageend) with the constant source tuple stuple,
 // converting storage types when NotVoidType and SrcNotVoidType differ.
 // A null stuple means "all default/zero values". After each tuple's first
 // mintuplesize components, desttupleextra components are skipped (left
 // untouched), handling desttuplesize > srctuplesize.
1542 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1543 template<typename SrcNotVoidType>
1544 void
1546  NotVoidType *__restrict destpagedata,
1547  NotVoidType *destpageend,
1548  const SrcNotVoidType *__restrict stuple,
1549  exint mintuplesize,
1550  exint desttupleextra)
1551 {
1552  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1553 
1554  // Fill range in dest with value from stuple.
1555  if (stuple)
1556  {
1557  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1558  {
1559  do
1560  {
1561  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1562  *destpagedata = UTconvertStorage<NotVoidType>(stuple[i]);
1563  destpagedata += desttupleextra;
1564  } while(destpagedata < destpageend);
1565  }
1566  else
1567  {
1568  do
1569  {
1570  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1571  *destpagedata = stuple[i];
1572  destpagedata += desttupleextra;
1573  } while(destpagedata < destpageend);
1574  }
1575  }
1576  else
1577  {
 // Null stuple: write default values into dest.
1578  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1579  {
1580  do
1581  {
1582  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1583  *destpagedata = NotVoidType();
1584  destpagedata += desttupleextra;
1585  } while(destpagedata < destpageend);
1586  }
1587  else
1588  {
1589  do
1590  {
1591  if (SYSisPOD<NotVoidType>())
1592  {
 // POD: zero-filling bytes is equivalent to value-initialization.
1593  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1594  ::memset(destpagedata, 0, sizeof(NotVoidType));
1595  }
1596  else
1597  {
 // NOTE(review): the declaration of 'v' (presumably a
 // default-constructed NotVoidType, on the line lost from this
 // listing) belongs here -- confirm against the original header.
1599  for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
1600  *destpagedata = v;
1601  }
1602  destpagedata += desttupleextra;
1603  } while(destpagedata < destpageend);
1604  }
1605  }
1606 }
1607 
 // copyNonConst: copy ntuples tuples from srcpagedata to destpagedata
 // (both non-constant and, per __restrict, non-overlapping), converting
 // storage types when NotVoidType and SrcNotVoidType differ. When the
 // tuple sizes differ, only the overlapping (minimum) components are
 // copied per tuple; the extra components on either side are skipped.
1608 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1609 template<typename SrcNotVoidType>
1610 void
1612  NotVoidType *__restrict destpagedata,
1613  const SrcNotVoidType *__restrict srcpagedata,
1614  const exint desttuplesize,
1615  const exint srctuplesize,
1616  UT_PageOff ntuples)
1617 {
1618  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1619 
1620  NotVoidType *destpageend = destpagedata + ntuples*desttuplesize;
1621 
1622  if (desttuplesize == srctuplesize)
1623  {
 // Same tuple size: the data is one contiguous run of components.
1624  // Copy values from srcpagedata to destpagedata.
1625  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1626  {
1627  do
1628  {
1629  *destpagedata = *srcpagedata;
1630  ++srcpagedata;
1631  ++destpagedata;
1632  } while(destpagedata < destpageend);
1633  }
1634  else
1635  {
1636  do
1637  {
1638  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1639  ++srcpagedata;
1640  ++destpagedata;
1641  } while(destpagedata < destpageend);
1642  }
1643  }
1644  else
1645  {
1646  const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);
1647  const exint srctupleextra = srctuplesize - mintuplesize;
1648  const exint desttupleextra = desttuplesize - mintuplesize;
1649 
1650  // Copy values from srcpagedata to destpagedata.
1651  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1652  {
1653  do
1654  {
1655  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1656  *destpagedata = *srcpagedata;
1657  destpagedata += desttupleextra;
1658  srcpagedata += srctupleextra;
1659  } while(destpagedata < destpageend);
1660  }
1661  else
1662  {
1663  do
1664  {
1665  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1666  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1667  destpagedata += desttupleextra;
1668  srcpagedata += srctupleextra;
1669  } while(destpagedata < destpageend);
1670  }
1671  }
1672 }
1673 
 // isEqualConst: returns true if assigning the (possibly differently-typed)
 // constant source tuple stuple to the destination constant tuple would
 // leave the destination unchanged over the first mintuplesize components.
 // A null pointer represents an all-zero constant tuple: two nulls (or the
 // same pointer) compare equal; null vs non-null compares unequal.
 // Comparison is done in the destination's type, since the result is meant
 // to answer "would the destination change if assigned?".
 // NOTE(review): the signature line is absent from this listing; call sites
 // show isEqualConst(tuple, stuple, mintuplesize).
1674 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1675 template<typename SrcNotVoidType>
1676 bool
1678 {
1679  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1680 
1681  // If they're the same page, or they're both NULL,
1682  // there's nothing to do.
1683  if ((const void*)stuple == (void*)tuple)
1684  return true;
1685 
1686  if (!stuple || !tuple)
1687  return false;
1688 
1689  UT_ASSERT_P(mintuplesize > 0);
1690 
1691  bool isequal;
1692  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1693  {
1694  // Cast to the destination type, since it's
1695  // supposed to represent whether the destination
1696  // wouldn't change if assigned.
1697  isequal = (tuple[0] == UTconvertStorage<NotVoidType>(stuple[0]));
1698  for (exint i = 1; i < mintuplesize; ++i)
1699  isequal &= (tuple[i] == UTconvertStorage<NotVoidType>(stuple[i]));
1700  }
1701  else
1702  {
1703  // NOTE: Don't want to copy-construct non-POD types
1704  // unnecessarily by casting to NotVoidType.
1705  isequal = (tuple[0] == stuple[0]);
1706  for (exint i = 1; i < mintuplesize; ++i)
1707  isequal &= (tuple[i] == stuple[i]);
1708  }
1709  // If they're equal, nothing to do
1710  return isequal;
1711 }
1712 
1713 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1714 void
1716 {
1718  UT_ASSERT_P(start < end);
1719  UT_ASSERT_P(start >= IDX_T(0));
1720  UT_ASSERT_P(end <= myCapacity);
1721  UT_ASSERT_P(TSIZE >= 1);
1722  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
1723 
1724  UT_PageNum startpage = pageNum(start);
1725  UT_PageOff startoff = pageOff(start);
1726  UT_PageNum endpage = pageNum(end);
1727  UT_PageOff endoff = pageOff(end);
1728 
1729  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1730  if (valiszero)
1731  {
1732  valiszero = isZero(val);
1733  }
1734 
1735  UT_PageOff pagecapacity(thePageSize);
1736  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1737  pagecapacity = capacity();
1738 
1739  // Handle incomplete first page
1740  if (startoff != UT_PageOff(0))
1741  {
1742  PageTableEntry *page = getPPage(startpage);
1743  bool equal = false;
1744  if (!PAGESHARDENED)
1745  {
1746  if (page->isConstant())
1747  {
1748  const NotVoidType *tuple = getConstantPtr(page);
1749  // Nothing to do if equal already.
1750  if (tuple)
1751  {
1752  equal = true;
1753  for (exint i = 0; i < TSIZE; ++i)
1754  equal &= (tuple[i] == val);
1755  }
1756  else
1757  {
1758  equal = valiszero;
1759  }
1760  if (!equal)
1761  hardenConstantPage(page, pagecapacity);
1762  }
1763  else if (page->isShared())
1764  hardenSharedPage(page, pagecapacity);
1765  }
1766  if (!equal)
1767  {
1768  UT_ASSERT_P(!page->isConstant());
1769  UT_ASSERT_P(!page->isShared());
1770  NotVoidType *data = page->getFirstPtr();
1771  NotVoidType *end = data + TSIZE*((endpage != startpage) ? pagecapacity : endoff);
1772  data += TSIZE*startoff;
1773  for (; data != end; ++data)
1774  *data = val;
1775  }
1776  if (endpage == startpage)
1777  return;
1778 
1779  ++startpage;
1780  }
1781 
1782  // Handle complete middle pages
1783  for (; startpage < endpage; ++startpage)
1784  {
1785  PageTableEntry *page = getPPage(startpage);
1786  // FIXME: Need a makeConstant that takes a single value for non-POD types
1787  if (!PAGESHARDENED)
1788  makeConstant(page, val);
1789  else
1790  {
1791  NotVoidType *data = page->getFirstPtr();
1792  // NOTE: This isn't a small page, so we can use thePageSize
1793  NotVoidType *end = data + TSIZE*thePageSize;
1794  for (; data != end; ++data)
1795  *data = val;
1796  }
1797  }
1798 
1799  // Handle incomplete last page
1800  if (endoff != UT_PageOff(0))
1801  {
1802  PageTableEntry *page = getPPage(startpage);
1803  // If end page, and goes to end, can still make constant.
1804  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
1805  {
1806  makeConstant(page, val);
1807  return;
1808  }
1809  bool equal = false;
1810  if (!PAGESHARDENED)
1811  {
1812  if (page->isConstant())
1813  {
1814  const NotVoidType *tuple = getConstantPtr(page);
1815  // Nothing to do if equal already.
1816  if (tuple)
1817  {
1818  equal = true;
1819  for (exint i = 0; i < TSIZE; ++i)
1820  equal &= (tuple[i] == val);
1821  }
1822  else
1823  {
1824  equal = valiszero;
1825  }
1826  if (!equal)
1827  hardenConstantPage(page, pagecapacity);
1828  }
1829  else if (page->isShared())
1830  hardenSharedPage(page, pagecapacity);
1831  }
1832  if (!equal)
1833  {
1834  UT_ASSERT_P(!page->isConstant());
1835  UT_ASSERT_P(!page->isShared());
1836  NotVoidType *data = page->getFirstPtr();
1837  NotVoidType *end = data + TSIZE*endoff;
1838  for (; data != end; ++data)
1839  *data = val;
1840  }
1841  }
1842 }
1843 
// PageTable::fill (runtime-tuple-size variant): sets every component of every
// element in [start, end) to the single scalar 'val'.  Only valid for the
// TSIZE == -1 instantiation (asserted below); the table must already be
// hardened (sole reference) before calling.
// NOTE(review): this is a doxygen-extracted listing; the signature line and
// the cast targets of the tuplesize <= 4 fast paths were dropped by the
// extractor.  The casts presumably reinterpret 'this' as the corresponding
// fixed-tuple-size (1..4) PageTable specialization -- confirm against the
// original UT_PageArrayImpl.h.
1844 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1845 void
1847 {
1849  UT_ASSERT_P(start < end);
1850  UT_ASSERT_P(start >= IDX_T(0));
1851  UT_ASSERT_P(end <= myCapacity);
1852  UT_ASSERT_P(TSIZE == -1);
1853  UT_ASSERT_P(tuplesize >= 1);
1854  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
1855 
1856  // Fast paths for small sizes.
1857  if (tuplesize <= 4)
1858  {
1859  if (tuplesize == 1)
1860  {
1862  this)->fill(start, end, val);
1863  }
1864  else if (tuplesize == 2)
1865  {
1867  this)->fill(start, end, val);
1868  }
1869  else if (tuplesize == 3)
1870  {
1872  this)->fill(start, end, val);
1873  }
1874  else //if (tuplesize == 4)
1875  {
1877  this)->fill(start, end, val);
1878  }
1879  return;
1880  }
1881 
1882  UT_PageNum startpage = pageNum(start);
1883  UT_PageOff startoff = pageOff(start);
1884  UT_PageNum endpage = pageNum(end);
1885  UT_PageOff endoff = pageOff(end);
1886 
// 'valiszero' is only needed when a partial first/last page may be compared
// against an implicitly-zero constant page (getConstantPtr returning null),
// so the isZero test is skipped otherwise.
1887  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1888  if (valiszero)
1889  {
1890  valiszero = isZero(val);
1891  }
1892 
// Page 0 may be allocated smaller than a full page ("small page").
1893  UT_PageOff pagecapacity(thePageSize);
1894  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1895  pagecapacity = capacity();
1896 
1897  // Handle incomplete first page
1898  if (startoff != UT_PageOff(0))
1899  {
1900  PageTableEntry *page = getPPage(startpage);
1901  bool equal = false;
1902  if (!PAGESHARDENED)
1903  {
1904  if (page->isConstant())
1905  {
1906  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
1907  // Nothing to do if equal already.
1908  if (tuple)
1909  {
1910  equal = true;
1911  for (exint i = 0; i < tuplesize; ++i)
1912  equal &= (tuple[i] == val);
1913  }
1914  else
1915  {
1916  equal = valiszero;
1917  }
1918  if (!equal)
1919  hardenConstantPage(page, pagecapacity, tuplesize);
1920  }
1921  else if (page->isShared())
1922  hardenSharedPage(page, pagecapacity, tuplesize);
1923  }
1924  if (!equal)
1925  {
// Offsets are in tuples; pointers step per component, hence tuplesize scaling.
1926  UT_ASSERT_P(!page->isConstant());
1927  UT_ASSERT_P(!page->isShared());
1928  NotVoidType *data = page->getFirstPtr();
1929  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
1930  data += tuplesize*startoff;
1931  for (; data != end; ++data)
1932  *data = val;
1933  }
1934  if (endpage == startpage)
1935  return;
1936  ++startpage;
1937  }
1938 
// Whole pages covered by the fill are collapsed to constant-compressed pages
// when pages aren't hardened; otherwise they're written in place.
1939  // Handle complete middle pages
1940  for (; startpage < endpage; ++startpage)
1941  {
1942  PageTableEntry *page = getPPage(startpage);
1943  if (!PAGESHARDENED)
1944  makeConstant(page, val, tuplesize);
1945  else
1946  {
1947  NotVoidType *data = page->getFirstPtr();
1948  // NOTE: This isn't a small page, so we can use thePageSize
1949  NotVoidType *end = data + tuplesize*thePageSize;
1950  for (; data != end; ++data)
1951  *data = val;
1952  }
1953  }
1954 
1955  // Handle incomplete last page
1956  if (endoff != UT_PageOff(0))
1957  {
1958  PageTableEntry *page = getPPage(startpage);
1959  // If end page, and goes to end, can still make constant.
1960  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
1961  {
1962  makeConstant(page, val, tuplesize);
1963  return;
1964  }
1965  bool equal = false;
1966  if (!PAGESHARDENED)
1967  {
1968  if (page->isConstant())
1969  {
1970  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
1971  // Nothing to do if equal already.
1972  if (tuple)
1973  {
1974  equal = true;
1975  for (exint i = 0; i < tuplesize; ++i)
1976  equal &= (tuple[i] == val);
1977  }
1978  else
1979  {
1980  equal = valiszero;
1981  }
1982  if (!equal)
1983  hardenConstantPage(page, pagecapacity, tuplesize);
1984  }
1985  else if (page->isShared())
1986  hardenSharedPage(page, pagecapacity, tuplesize);
1987  }
1988  if (!equal)
1989  {
1990  UT_ASSERT_P(!page->isConstant());
1991  UT_ASSERT_P(!page->isShared());
1992  NotVoidType *data = page->getFirstPtr();
1993  NotVoidType *end = data + tuplesize*endoff;
1994  for (; data != end; ++data)
1995  *data = val;
1996  }
1997  }
1998 }
1999 
// PageTable::fill (per-component tuple variant): sets every element in
// [start, end) to the tuple pointed to by 'values' (tuplesize components).
// The table must already be hardened (sole reference) before calling.
// NOTE(review): doxygen-extracted listing; the function signature line was
// dropped by the extractor -- confirm against the original UT_PageArrayImpl.h.
2000 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2001 void
2003 {
2005  UT_ASSERT_P(start < end);
2006  UT_ASSERT_P(start >= IDX_T(0));
2007  UT_ASSERT_P(end <= myCapacity);
2008  UT_ASSERT_P(TSIZE == -1 || TSIZE==tuplesize);
2009  UT_ASSERT_P(tuplesize >= 1);
2010  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
2011 
2012  UT_PageNum startpage = pageNum(start);
2013  UT_PageOff startoff = pageOff(start);
2014  UT_PageNum endpage = pageNum(end);
2015  UT_PageOff endoff = pageOff(end);
2016 
// Page 0 may be allocated smaller than a full page ("small page").
2017  UT_PageOff pagecapacity(thePageSize);
2018  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2019  pagecapacity = capacity();
2020 
2021  // Handle incomplete first page
2022  if (startoff != UT_PageOff(0))
2023  {
2024  PageTableEntry *page = getPPage(startpage);
2025  bool equal = false;
2026  if (!PAGESHARDENED)
2027  {
2028  if (page->isConstant())
2029  {
// A null constant pointer means an implicit all-zero constant page, so the
// comparison is against NotVoidType(0) per component.
2030  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2031  // Nothing to do if equal already.
2032  equal = true;
2033  if (tuple)
2034  {
2035  for (exint i = 0; i < tuplesize; ++i)
2036  equal &= (tuple[i] == values[i]);
2037  }
2038  else
2039  {
2040  for (exint i = 0; i < tuplesize; ++i)
2041  equal &= (NotVoidType(0) == values[i]);
2042  }
2043  if (!equal)
2044  hardenConstantPage(page, pagecapacity, tuplesize);
2045  }
2046  else if (page->isShared())
2047  hardenSharedPage(page, pagecapacity, tuplesize);
2048  }
2049  if (!equal)
2050  {
// Offsets are in tuples; pointers step per component, hence tuplesize scaling.
2051  UT_ASSERT_P(!page->isConstant());
2052  UT_ASSERT_P(!page->isShared());
2053  NotVoidType *data = page->getFirstPtr();
2054  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
2055  data += tuplesize*startoff;
2056  while (data != end)
2057  {
2058  for (exint i = 0; i < tuplesize; ++i, ++data)
2059  *data = values[i];
2060  }
2061  }
2062  if (endpage == startpage)
2063  return;
2064  ++startpage;
2065  }
2066 
2067  // Handle complete middle pages
2068  for (; startpage < endpage; ++startpage)
2069  {
2070  PageTableEntry *page = getPPage(startpage);
2071  if (!PAGESHARDENED)
2072  makeConstant(page, values, tuplesize);
2073  else
2074  {
2075  NotVoidType *data = page->getFirstPtr();
2076  // NOTE: This isn't a small page, so we can use thePageSize
2077  NotVoidType *end = data + tuplesize*thePageSize;
2078  while (data != end)
2079  {
2080  for (exint i = 0; i < tuplesize; ++i, ++data)
2081  *data = values[i];
2082  }
2083  }
2084  }
2085 
2086  // Handle incomplete last page
2087  if (endoff != UT_PageOff(0))
2088  {
2089  PageTableEntry *page = getPPage(startpage);
2090  // If end page, and goes to end, can still make constant.
2091  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2092  {
2093  makeConstant(page, values, tuplesize);
2094  return;
2095  }
2096  bool equal = false;
2097  if (!PAGESHARDENED)
2098  {
2099  if (page->isConstant())
2100  {
2101  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2102  // Nothing to do if equal already.
2103  equal = true;
2104  if (tuple)
2105  {
2106  for (exint i = 0; i < tuplesize; ++i)
2107  equal &= (tuple[i] == values[i]);
2108  }
2109  else
2110  {
2111  for (exint i = 0; i < tuplesize; ++i)
2112  equal &= (NotVoidType(0) == values[i]);
2113  }
2114  if (!equal)
2115  hardenConstantPage(page, pagecapacity, tuplesize);
2116  }
2117  else if (page->isShared())
2118  hardenSharedPage(page, pagecapacity, tuplesize);
2119  }
2120  if (!equal)
2121  {
2122  UT_ASSERT_P(!page->isConstant());
2123  UT_ASSERT_P(!page->isShared());
2124  NotVoidType *data = page->getFirstPtr();
2125  NotVoidType *end = data + tuplesize*endoff;
2126  while (data != end)
2127  {
2128  for (exint i = 0; i < tuplesize; ++i, ++data)
2129  *data = values[i];
2130  }
2131  }
2132  }
2133 }
2134 
// PageTable::fill (compile-time tuple-size variant): sets every element in
// [start, end) to the fixed-size vector 'val'.  Only valid when TSIZE >= 1
// (asserted below); the table must already be hardened before calling.
// NOTE(review): doxygen-extracted listing; the signature line, the 'tuple'
// constant-pointer declarations, and the 'data' pointer declarations in the
// write loops were dropped by the extractor.  From the surviving uses, 'data'
// is presumably a UT_FixedVector<NotVoidType,theSafeTupleSize>* over the
// page's storage -- confirm against the original UT_PageArrayImpl.h.
2135 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2136 void
2138 {
2140  UT_ASSERT_P(start < end);
2141  UT_ASSERT_P(start >= IDX_T(0));
2142  UT_ASSERT_P(end <= myCapacity);
2143  UT_ASSERT_P(TSIZE >= 1);
2144  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
2145 
2146  UT_PageNum startpage = pageNum(start);
2147  UT_PageOff startoff = pageOff(start);
2148  UT_PageNum endpage = pageNum(end);
2149  UT_PageOff endoff = pageOff(end);
2150 
// Page 0 may be allocated smaller than a full page ("small page").
2151  UT_PageOff pagecapacity(thePageSize);
2152  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2153  pagecapacity = capacity();
2154 
2155  // Handle incomplete first page
2156  if (startoff != UT_PageOff(0))
2157  {
2158  PageTableEntry *page = getPPage(startpage);
2159  bool equal = false;
2160  if (!PAGESHARDENED)
2161  {
2162  if (page->isConstant())
2163  {
// A null constant pointer is an implicit all-zero page, hence val.isZero().
2165  // Nothing to do if equal already.
2166  equal = tuple ? (*tuple == val) : val.isZero();
2167  if (!equal)
2168  hardenConstantPage(page, pagecapacity);
2169  }
2170  else if (page->isShared())
2171  hardenSharedPage(page, pagecapacity);
2172  }
2173  if (!equal)
2174  {
// Pointer arithmetic here is in whole tuples (UT_FixedVector stride), so the
// tuple offsets need no component scaling.
2175  UT_ASSERT_P(!page->isConstant());
2176  UT_ASSERT_P(!page->isShared());
2178  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + ((endpage != startpage) ? pagecapacity : endoff);
2179  data += startoff;
2180  for (; data != end; ++data)
2181  *data = val;
2182  }
2183  if (endpage == startpage)
2184  return;
2185 
2186  ++startpage;
2187  }
2188 
2189  // Handle complete middle pages
2190  for (; startpage < endpage; ++startpage)
2191  {
2192  PageTableEntry *page = getPPage(startpage);
2193  if (!PAGESHARDENED)
2194  makeConstant(page, val);
2195  else
2196  {
2198  // NOTE: This isn't a small page, so we can use thePageSize
2200  for (; data != end; ++data)
2201  *data = val;
2202  }
2203  }
2204 
2205  // Handle incomplete last page
2206  if (endoff != UT_PageOff(0))
2207  {
2208  PageTableEntry *page = getPPage(startpage);
2209  // If end page, and goes to end, can still make constant.
2210  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2211  {
2212  makeConstant(page, val);
2213  return;
2214  }
2215  bool equal = false;
2216  if (!PAGESHARDENED)
2217  {
2218  if (page->isConstant())
2219  {
2221  // Nothing to do if equal already.
2222  equal = tuple ? (*tuple == val) : val.isZero();
2223  if (!equal)
2224  hardenConstantPage(page, pagecapacity);
2225  }
2226  else if (page->isShared())
2227  hardenSharedPage(page, pagecapacity);
2228  }
2229  if (!equal)
2230  {
2231  UT_ASSERT_P(!page->isConstant());
2232  UT_ASSERT_P(!page->isShared());
2234  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + endoff;
2235  for (; data != end; ++data)
2236  *data = val;
2237  }
2238  }
2239 }
2240 
// getVectorRange: copies 'nelements' fixed-size vectors starting at element
// 'srcstart' into the caller's 'dest' array, converting storage type and
// tuple size as needed.  Dispatch order: resolve the runtime storage type
// (DATA_T == void) first, then the runtime tuple size, then fall back to a
// per-element getVector loop.
// NOTE(review): doxygen-extracted listing; the signature line, the local
// 'storage' declaration, and the 'if' comparing 'storage' against the
// destination type's storage were dropped by the extractor -- confirm against
// the original UT_PageArrayImpl.h.
2241 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2242 template<typename DEST_DATA_T,exint DEST_TSIZE,bool DEST_INSTANTIATED>
2243 void
2245 {
2246  // If the source storage type is not known at compile time,
2247  // switch, cast, and call again.
2248  if (SYSisSame<DATA_T,void>())
2249  {
2251 
2252  // Probably matches destination type
2254  {
2255  castType<DEST_DATA_T>().getVectorRange(srcstart, nelements, dest);
2256  return;
2257  }
2258 
2259  switch (storage)
2260  {
2261  case UT_Storage::INT8:
2262  castType<int8>().getVectorRange(srcstart, nelements, dest); return;
2263  case UT_Storage::INT16:
2264  castType<int16>().getVectorRange(srcstart, nelements, dest); return;
2265  case UT_Storage::INT32:
2266  castType<int32>().getVectorRange(srcstart, nelements, dest); return;
2267  case UT_Storage::INT64:
2268  castType<int64>().getVectorRange(srcstart, nelements, dest); return;
2269  case UT_Storage::REAL16:
2270  castType<fpreal16>().getVectorRange(srcstart, nelements, dest); return;
2271  case UT_Storage::REAL32:
2272  castType<fpreal32>().getVectorRange(srcstart, nelements, dest); return;
2273  case UT_Storage::REAL64:
2274  castType<fpreal64>().getVectorRange(srcstart, nelements, dest); return;
2275  case UT_Storage::INVALID:
2276  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2277  break;
2278  }
2279  return;
2280  }
2281 
2282  // We now have both the source type and the destination type known at compile time.
2283  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2284 
2285  // Tuple size probably matches
2286  if (TSIZE == -1 && myImpl.getTupleSize() == DEST_TSIZE)
2287  {
2288  castTupleSize<DEST_TSIZE>().getVectorRange(srcstart, nelements, dest);
2289  return;
2290  }
2291 
2292  auto vdest = reinterpret_cast<UT_FixedVector<DEST_DATA_T,DEST_TSIZE> *>(dest);
2293 
2294  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
2295  for (IDX_T srcend(srcstart+nelements); srcstart < srcend; ++srcstart, ++vdest)
2296  *vdest = getVector<DEST_DATA_T,DEST_TSIZE>(srcstart);
2297 }
2298 
// setVectorRange: writes 'nelements' fixed-size vectors from 'src' into this
// array starting at element 'deststart', hardening the table first (copy-on-
// write).  Dispatch order mirrors getVectorRange: resolve runtime storage
// type (DATA_T == void), then runtime tuple size, then fall back to a
// per-element setVector loop.  No-op for nelements <= 0.
// NOTE(review): doxygen-extracted listing; the signature line, the local
// 'storage' declaration, and the 'if' comparing 'storage' against the source
// type's storage were dropped by the extractor -- confirm against the
// original UT_PageArrayImpl.h.
2299 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2300 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_INSTANTIATED>
2301 void
2303 {
2304  if (nelements <= IDX_T(0))
2305  return;
2306 
// Harden up front; all the casted recursive calls go through 'hard' so the
// copy-on-write only happens once.
2307  auto &hard = hardenTable();
2308 
2309  UT_ASSERT_MSG_P((!SYSisSame<SRC_DATA_T,void>()), "Source type must be known.");
2310 
2311  // If the destination storage type is not known at compile time,
2312  // switch, cast, and call again.
2313  if (SYSisSame<DATA_T,void>())
2314  {
2316 
2317  // Probably matches source type
2319  {
2320  hard.template castType<SRC_DATA_T>().setVectorRange(deststart, nelements, src);
2321  return;
2322  }
2323 
2324  switch (storage)
2325  {
2326  case UT_Storage::INT8:
2327  hard.template castType<int8>().setVectorRange(deststart, nelements, src); return;
2328  case UT_Storage::INT16:
2329  hard.template castType<int16>().setVectorRange(deststart, nelements, src); return;
2330  case UT_Storage::INT32:
2331  hard.template castType<int32>().setVectorRange(deststart, nelements, src); return;
2332  case UT_Storage::INT64:
2333  hard.template castType<int64>().setVectorRange(deststart, nelements, src); return;
2334  case UT_Storage::REAL16:
2335  hard.template castType<fpreal16>().setVectorRange(deststart, nelements, src); return;
2336  case UT_Storage::REAL32:
2337  hard.template castType<fpreal32>().setVectorRange(deststart, nelements, src); return;
2338  case UT_Storage::REAL64:
2339  hard.template castType<fpreal64>().setVectorRange(deststart, nelements, src); return;
2340  case UT_Storage::INVALID:
2341  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2342  break;
2343  }
2344  return;
2345  }
2346 
2347  // We now have both the source type and the destination type known at compile time.
2348  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2349 
2350  // Tuple size probably matches
2351  if (TSIZE == -1 && myImpl.getTupleSize() == SRC_TSIZE)
2352  {
2353  hard.template castTupleSize<SRC_TSIZE>().setVectorRange(deststart, nelements, src);
2354  return;
2355  }
2356 
2357  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
2358  for (IDX_T destend(deststart+nelements); deststart < destend; ++deststart, ++src)
2359  setVector(deststart, *src);
2360 }
2361 
2362 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2363 bool
2365 {
2366  if (!UTisFloatStorage(getStorage()))
2367  return false;
2368 
2369  // If the storage type is not known at compile time,
2370  // switch, cast, and call again.
2371  if (SYSisSame<DATA_T,void>())
2372  {
2374  switch (storage)
2375  {
2376  case UT_Storage::REAL16:
2377  return castType<fpreal16>().hasNanInRange(start, end);
2378  case UT_Storage::REAL32:
2379  return castType<fpreal32>().hasNanInRange(start, end);
2380  case UT_Storage::REAL64:
2381  return castType<fpreal64>().hasNanInRange(start, end);
2382  default:
2383  UT_ASSERT_MSG(0, "Only 16-bit, 32-bit, and 64-bit floats should be considered float types!");
2384  break;
2385  }
2386  return false;
2387  }
2388 
2389  UT_ASSERT_P(start >= IDX_T(0) && start <= size());
2390  UT_ASSERT_P(end >= IDX_T(0) && end <= size());
2391  UT_ASSERT_P(start <= end);
2392 
2393  if (start >= end)
2394  return false;
2395 
2396  const PageTable *pages = myImpl.getPages();
2397  UT_ASSERT_P(pages);
2398 
2399  UT_PageNum pagenum = pageNum(start);
2400  UT_PageOff pageoff = pageOff(start);
2401  UT_PageNum endpagenum = pageNum(end);
2402  UT_PageOff endpageoff = pageOff(end);
2403 
2404  exint tuplesize = getTupleSize();
2405 
2406  if (endpageoff == UT_PageOff(0))
2407  {
2408  --endpagenum;
2409  endpageoff = UT_PageOff(thePageSize);
2410  }
2411 
2412  for (; pagenum <= endpagenum; ++pagenum)
2413  {
2414  const PageTableEntry *const page = pages->getPPage(pagenum);
2415  if (page->isConstant())
2416  {
2417  const NotVoidType *data = getConstantPtr(page, 0, tuplesize);
2418  // Special case for zero page is always a number.
2419  if (!data)
2420  continue;
2421  for (exint i = 0; i < tuplesize; ++i)
2422  {
2423  if (SYSisNan(data[i]))
2424  return true;
2425  }
2426  }
2427  else
2428  {
2429  const NotVoidType *data = page->getFirstPtr();
2430  const NotVoidType *end = data + ((pagenum == endpagenum) ? endpageoff : thePageSize*tuplesize);
2431  data += pageoff;
2432  for (; data != end; ++data)
2433  {
2434  if (SYSisNan(*data))
2435  return true;
2436  }
2437  }
2438  pageoff = UT_PageOff(0);
2439  }
2440 
2441  return false;
2442 }
2443 
2444 
2445 #endif
static void copyNonConst(NotVoidType *destpagedata, const SrcNotVoidType *srcpagedata, exint desttuplesize, exint srctuplesize, UT_PageOff ntuples)
static SYS_FORCE_INLINE const NotVoidType * getConstantPtr(const PageTableEntry *page, exint component=0, exint tuplesize=TSIZE)
SYS_FORCE_INLINE void setSize(IDX_T newsize)
SYS_FORCE_INLINE void * getFirstPtrVoid()
Returns the data pointer, if not a constant page.
UT_Storage
Definition: UT_Storage.h:26
NotVoid< DATA_T >::type NotVoidType
Definition: UT_PageArray.h:244
void UTswap(T &a, T &b)
Definition: UT_Swap.h:35
void setTupleSize(exint newtuplesize, const UT_Defaults &v)
const GLdouble * v
Definition: glcorearb.h:836
SYS_FORCE_INLINE bool isZero() const noexcept
GLuint start
Definition: glcorearb.h:474
int64 getI(exint i=0) const
Definition: UT_Defaults.h:268
SYS_FORCE_INLINE IDX_T size() const
Definition: UT_PageArray.h:866
SYS_FORCE_INLINE T * SYSconst_cast(const T *foo)
Definition: SYS_Types.h:127
SYS_FORCE_INLINE bool isShared() const
void countMemory(UT_MemoryCounter &counter, bool inclusive) const
SYS_FORCE_INLINE PageTableEntry * getFirstPage()
bool SYSisNan(const F f)
Definition: SYS_Math.h:173
void setConstant(IDX_T start, IDX_T end, NotVoidType v)
exint UT_PageNum
Definition: UT_PageArray.h:40
SYS_FORCE_INLINE const PageTableEntry * getPPage(UT_PageNum i) const
SYS_FORCE_INLINE exint getTupleSize() const
virtual bool countShared(size_t size, exint refcount, const void *p)
#define UT_MEMORY_DEBUG_LOG_SHARED(m, s, p, r)
SYS_FORCE_INLINE constexpr bool UTisFloatStorage(UT_Storage storage)
Returns true iff the given storage type represents a floating-point number.
Definition: UT_Storage.h:47
SYS_FORCE_INLINE IDX_T size() const
NOTE: This is the size of the full array, not the number of pages.
#define UT_IF_ASSERT_P(ZZ)
Definition: UT_Assert.h:152
SYS_FORCE_INLINE UT_Storage getStorage() const
SYS_FORCE_INLINE exint getRefCount() const
png_uint_32 i
Definition: png.h:2877
bool hasNanInRange(IDX_T start, IDX_T end) const
GLsizeiptr size
Definition: glcorearb.h:663
SYS_FORCE_INLINE bool isShared() const
void fill(IDX_T start, IDX_T end, const NotVoidType &val)
static bool isEqualConst(const NotVoidType *tuple, const SrcNotVoidType *stuple, exint mintuplesize)
SYS_FORCE_INLINE void setConstantBit()
long long int64
Definition: SYS_Types.h:107
SYS_FORCE_INLINE exint getRefCount() const
SYS_FORCE_INLINE bool isConstantZero() const
This is only valid to call if the type doesn't fit inline.
GLdouble n
Definition: glcorearb.h:2007
static SYS_FORCE_INLINE UT_PageOff pageOff(IDX_T i)
static SYS_FORCE_INLINE UT_PageNum numPages(IDX_T nelements)
int64 exint
Definition: SYS_Types.h:116
SYS_FORCE_INLINE bool isRefd(exint tuplebytes) const
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:125
SYS_FORCE_INLINE IDX_T capacity() const
NOTE: This is the capacity of the full array, not the capacity of pages.
static SYS_FORCE_INLINE bool isZero(const T &val)
SYS_FORCE_INLINE IDX_T capacity() const
Definition: UT_PageArray.h:874
GLuint GLuint end
Definition: glcorearb.h:474
bool mustCountUnshared() const
#define UT_MEMORY_DEBUG_LOG(m, s)
SYS_FORCE_INLINE NotVoidType * getInlinePtr(exint tuplesize)
Returns the data pointer, if an inline constant page.
void setCapacity(IDX_T newcapacity)
Definition: UT_PageArray.h:902
SYS_FORCE_INLINE void setVector(IDX_T i, const UT_FixedVector< SRC_DATA_T, SRC_TSIZE, SRC_INSTANTIATED > &v)
Definition: UT_PageArray.h:719
GLenum GLsizei GLsizei GLint * values
Definition: glcorearb.h:1601
SYS_FORCE_INLINE void * getMaskedPtrVoid()
GLboolean * data
Definition: glcorearb.h:130
static void copyPartialPage(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destoff, UT_PageOff srcoff, UT_PageOff ntuples, UT_PageOff destpagecapacity)
SYS_FORCE_INLINE bool isConstantAndZeroSafe() const
exint UT_PageOff
Definition: UT_PageArray.h:41
SYS_FORCE_INLINE void incRef()
static void makeConstantFrom(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize)
GLenum GLuint GLenum GLsizei const GLchar * buf
Definition: glcorearb.h:2539
void moveRange(IDX_T srcstart, IDX_T deststart, IDX_T nelements)
void getVectorRange(IDX_T srcstart, IDX_T nelements, UT_FixedVector< DEST_DATA_T, DEST_TSIZE, DEST_INSTANTIATED > *dest) const
void countUnshared(size_t size)
SYS_FORCE_INLINE int64 getMemoryUsage(exint tuplebytes) const
SYS_AtomicInt< int32 > SYS_AtomicCounter
Definition: SYS_AtomicInt.h:85
void setSize(IDX_T newsize)
Definition: UT_PageArray.h:926
fpreal64 getF(exint i=0) const
Definition: UT_Defaults.h:244
#define UT_ASSERT_MSG(ZZ, MM)
Definition: UT_Assert.h:129
void swapRange(IDX_T astart, IDX_T bstart, IDX_T nelements)
SYS_FORCE_INLINE void * getFirstPtrVoidUnsafe()
void setStorage(const UT_Storage newstorage)
static void fillNonConstWithConst(NotVoidType *destpagedata, NotVoidType *destpageend, const SrcNotVoidType *stuple, exint mintuplesize, exint desttupleextra)
SYS_FORCE_INLINE NotVoidType * getMaskedPtr()
SYS_FORCE_INLINE bool isConstant() const
This is always valid to call.
static void hardenSharedPage(PageTableEntry *page, UT_PageOff pagecapacity, exint tuplesize=TSIZE)
bool mustCountShared() const
GLuint GLfloat * val
Definition: glcorearb.h:1607
static SYS_FORCE_INLINE void makeConstant(PageTableEntry *page, const UT_FixedVector< NotVoidType, theSafeTupleSize > &val)
bool equal(T1 a, T2 b, T3 t)
Definition: ImathFun.h:143
SYS_FORCE_INLINE UT_PageArray< DATA_T, TSIZE, true, PAGESHARDENED, THEPAGEBITS, IDX_T > & hardenTable()
static const exint thePageSize
Definition: UT_PageArray.h:256
SYS_FORCE_INLINE void alloc(UT_PageOff nelements, exint tuplesize=TSIZE)
static void hardenConstantPage(PageTableEntry *page, UT_PageOff pagecapacity, exint tuplesize=TSIZE)
getOption("OpenEXR.storage") storage
Definition: HDK_Image.dox:276
Container class for all geometry.
Definition: GA_Detail.h:95
int64 getMemoryUsage(bool inclusive) const
static SYS_FORCE_INLINE UT_PageNum pageNum(IDX_T i)
SYS_FORCE_INLINE constexpr bool UTisIntStorage(UT_Storage storage)
Returns true iff the given storage type represents an integer.
Definition: UT_Storage.h:40
#define SYSmin(a, b)
Definition: SYS_Math.h:1368
SYS_FORCE_INLINE constexpr int UTstorageSize(UT_Storage storage)
Returns the number of bytes in the given storage type.
Definition: UT_Storage.h:54
SYS_FORCE_INLINE void initZero()
SYS_FORCE_INLINE exint getTupleSize() const
Definition: UT_Defaults.h:239
SYS_FORCE_INLINE NotVoidType * getFirstPtr()
Returns the data pointer, if not a constant page.
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:308
SYS_FORCE_INLINE T relaxedLoad() const
Definition: SYS_AtomicInt.h:60
void setVectorRange(IDX_T deststart, IDX_T nelements, const UT_FixedVector< SRC_DATA_T, SRC_TSIZE, SRC_INSTANTIATED > *src)
SYS_FORCE_INLINE void decRef()
#define UT_ASSERT_MSG_P(ZZ, MM)
Definition: UT_Assert.h:128
static void replacePage(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destpagesize, UT_PageOff destpagecapacity)
GLenum src
Definition: glcorearb.h:1792