HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
GA_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: GA_PageArrayImpl.h (GA Library, C++)
7  *
8  * COMMENTS: An array class with special handling of constant pages and
9  * shared page data, specialized for GA_Offset.
10  */
11 
12 #pragma once
13 
14 #ifndef __GA_PageArrayImpl__
15 #define __GA_PageArrayImpl__
16 
17 #include "GA_PageArray.h"
18 
19 #include "GA_API.h"
20 #include "GA_Defaults.h"
21 #include "GA_Defragment.h"
22 #include "GA_Iterator.h"
23 #include "GA_LoadMap.h"
24 #include "GA_MergeMap.h"
25 #include "GA_Range.h"
26 #include "GA_SaveOptions.h"
27 #include "GA_Types.h"
28 
29 #include <UT/UT_Array.h>
30 #include <UT/UT_Assert.h>
31 #include <UT/UT_BitArray.h>
32 #include <UT/UT_FixedVector.h>
33 #include <UT/UT_JSONDefines.h>
34 #include <UT/UT_JSONParser.h>
35 #include <UT/UT_JSONWriter.h>
36 #include <UT/UT_StackBuffer.h>
37 #include <UT/UT_Storage.h>
38 #include <UT/UT_UniquePtr.h>
39 #include <UT/UT_VectorTypes.h>
40 #include <UT/UT_WorkBuffer.h>
41 #include <SYS/SYS_CallIf.h>
42 #include <SYS/SYS_Inline.h>
43 #include <SYS/SYS_Math.h>
44 #include <SYS/SYS_Types.h>
45 #include <SYS/SYS_TypeTraits.h>
46 
47 #include <string.h>
48 
49 
50 // Separate namespace for these, because they shouldn't be duplicated per
51 // template instantiation.
// Namespace for the JSON I/O token helpers shared by all GA_PageArray
// template instantiations, kept out of the class template so the symbols
// are not duplicated per instantiation (see comment above in the file).
// NOTE(review): the enumerator list of the JDTupleToken enum (original
// lines 55-64) was elided by the documentation extractor; only the
// surrounding braces survive below. Consult the shipped header for the
// actual token IDs.
52 namespace GA_PageArrayIO
53 {
54  // JSON tokens
56  {
65  };
66  GA_API const char *getJSONToken(JDTupleToken tokenID);
67  GA_API JDTupleToken getJSONTokenID(const char *token);
68 }
69 
// Applies a sequence of defragmentation operations (swaps/moves of offset
// ranges) to this page array, as recorded in a GA_Defragment object.
// NOTE(review): the function-signature line (original line 72) and the two
// case labels of the switch (original lines 82 and 85) were elided by the
// extractor; from the calls below they presumably dispatch swap vs. move
// operations of GA_Defragment — confirm against the shipped header.
70 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
71 void
73 {
// Harden the page table first so ranges can be mutated in place.
74  auto &hard = hardenTable();
75  for (GA_Defragment::const_iterator it=defrag.begin(); !it.atEnd(); ++it)
76  {
77  GA_Offset a = it.getA();
78  GA_Offset b = it.getB();
79  GA_Size n = it.getN();
80  switch (it.getOp())
81  {
83  hard.swapRange(a, b, GA_Offset(n));
84  break;
86  hard.moveRange(a, b, GA_Offset(n));
87  break;
88  }
89  }
90 }
91 
// Grows this array to the destination capacity recorded in the merge map
// and copies the source array's data into the destination range.
// NOTE(review): the signature line (original line 95) and the `src`
// parameter line (original line 98) were elided by the extractor; `src` is
// presumably a GA_PageArray of the SRC_* template parameters — confirm
// against the shipped header.
92 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
93 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
94 void
96  const GA_MergeMap &map,
97  GA_AttributeOwner owner,
99  const GA_Defaults &defaults)
100 {
// When DATA_T is void the storage type is only known at runtime:
// dispatch to the statically-typed instantiation matching getStorage().
101  if (SYSisSame<DATA_T,void>())
102  {
103  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&map,owner,&src,&defaults](SYS_CALLIF_AUTO){
104  // Hard case, where the storage type is not known at compile time.
105  UT_Storage storage = this->Base::getStorage();
106  switch (storage)
107  {
108  case UT_Storage::INT8:
109  this->castType<int8>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
110  case UT_Storage::INT16:
111  this->castType<int16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
112  case UT_Storage::INT32:
113  this->castType<int32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
114  case UT_Storage::INT64:
115  this->castType<int64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
116  case UT_Storage::REAL16:
117  this->castType<fpreal16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
118  case UT_Storage::REAL32:
119  this->castType<fpreal32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
120  case UT_Storage::REAL64:
121  this->castType<fpreal64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
122  case UT_Storage::INVALID:
123  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
124  return;
125  }
126  });
127  return;
128  }
129 
// osize is only needed for the sanity asserts below; nsize is the
// required destination capacity after the merge.
130  UT_IF_ASSERT( GA_Offset osize = map.getDestInitCapacity(owner); )
131  GA_Offset nsize = map.getDestCapacity(owner);
132 
133  // Ideally we could assert that capacity() == ocapacity, but this method is
134  // sometimes called by implementations of GA_AIFMerge::copyArray(),
135  // after GA_AIFMerge::growArray() has already been called.
136  UT_ASSERT(osize <= size());
137  UT_ASSERT(osize <= nsize || (osize == GA_Offset(0) && nsize <= GA_Offset(0)));
138 
// Nothing to merge into an empty destination.
139  if (nsize <= GA_Offset(0))
140  return;
141 
// Destination range is inclusive of getDestEnd(), hence the +1.
142  GA_Offset dststart = map.getDestStart(owner);
143  GA_Offset dstend = map.getDestEnd(owner)+1;
144 
145  UT_ASSERT(dstend - dststart <= src.size());
146  UT_ASSERT(GAisValid(dststart) && dststart < nsize);
147  UT_ASSERT(GAisValid(dstend) && dstend <= nsize);
148  UT_ASSERT(dststart < dstend);
149 
150  UT_ASSERT_MSG(GAgetPageOff(dststart) == 0, "mergeGrowArrayAndCopy should only be used when dststart is at a page boundary");
// Grow (filling new elements with defaults) before copying.
151  if (nsize > size())
152  {
153  setSize(nsize, defaults);
154  }
155 
156  // As odd as it may seem, apparently mergeGrowArrayAndCopy has only ever
157  // supported copying from source offset 0 onward, regardless of
158  // map.getSourceRange(owner). For example, GA_DataArray::
159  // mergeGrowArrayAndCopy and GA_DataBitArray::mergeGrowArrayAndCopy
160  // both assume this too.
161  moveRange(src, GA_Offset(0), dststart, dstend - dststart);
162 }
163 
// Saves the elements in `range` to the JSON writer `w`, as either paged
// binary data, a flat scalar array, or an array of tuples, depending on
// the writer mode and tuple size. `map`/`defvalue` optionally remap
// integer values (e.g. string indices) during the save.
// Returns true on success.
// NOTE(review): the signature line (original 166), several
// w.jsonKeyToken(...) lines (originals 233, 236, 245, 266, 287, 314, 319,
// 327, 338) and the template-argument lines of the two
// jsonSaveConstantOutputPageFlags calls (originals 307-308) were elided
// by the extractor — confirm against the shipped header.
164 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
165 bool
167  UT_JSONWriter &w, const GA_Range &range,
168  const GA_SaveOptions *options,
169  const UT_IntArray *map, int defvalue) const
170 {
// Runtime-storage dispatch: re-enter with the statically-typed cast.
171  if (SYSisSame<DATA_T,void>())
172  {
173  bool success;
174  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&w,&range,options,map,defvalue,&success](SYS_CALLIF_AUTO){
175  // Hard case, where the storage type is not known at compile time.
176  UT_Storage storage = this->Base::getStorage();
177  switch (storage)
178  {
179  case UT_Storage::INT8:
180  success = this->castType<int8>().jsonSave(w, range, options, map, defvalue); return;
181  case UT_Storage::INT16:
182  success = this->castType<int16>().jsonSave(w, range, options, map, defvalue); return;
183  case UT_Storage::INT32:
184  success = this->castType<int32>().jsonSave(w, range, options, map, defvalue); return;
185  case UT_Storage::INT64:
186  success = this->castType<int64>().jsonSave(w, range, options, map, defvalue); return;
187  case UT_Storage::REAL16:
188  success = this->castType<fpreal16>().jsonSave(w, range, options, map, defvalue); return;
189  case UT_Storage::REAL32:
190  success = this->castType<fpreal32>().jsonSave(w, range, options, map, defvalue); return;
191  case UT_Storage::REAL64:
192  success = this->castType<fpreal64>().jsonSave(w, range, options, map, defvalue); return;
193  case UT_Storage::INVALID:
194  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
195  success = false;
196  return;
197  }
198  success = false;
199  });
200  return success;
201  }
202 
203  int tuplesize = getTupleSize();
204 
205  // Cast to optimize for small tuple sizes
206  if (TSIZE == -1 && tuplesize <= 3 && tuplesize >= 1)
207  {
208  bool success;
209  SYS_CallIf<TSIZE == -1>::call([this,&w,&range,options,map,defvalue,tuplesize,&success](SYS_CALLIF_AUTO){
210  if (tuplesize == 3)
211  success = this->castTupleSize<3>().jsonSave(w, range, options, map, defvalue);
212  else if (tuplesize == 1)
213  success = this->castTupleSize<1>().jsonSave(w, range, options, map, defvalue);
214  else
215  {
216  UT_ASSERT_P(tuplesize == 2);
217  success = this->castTupleSize<2>().jsonSave(w, range, options, map, defvalue);
218  }
219  });
220  return success;
221  }
222 
// The remap table only makes sense for integer storage; drop it otherwise.
223  GA_Storage ga_storage = getStorage();
224  if (map && !GAisIntStorage(ga_storage))
225  map = nullptr;
226 
227  UT_JID jid = GAStorageToJID(ga_storage);
228 
229  bool ok = true;
230 
231  ok = ok && w.jsonBeginArray();
232 
234  ok = ok && w.jsonInt(tuplesize);
235 
237  ok = ok && w.jsonStringToken(GAstorage(ga_storage));
238 
// Paged layout is used for binary output unless options override it.
239  bool savepaged = w.getBinary();
240  if (options)
241  options->importSavePaged(savepaged);
242 
243  if (savepaged)
244  {
246  UT_ASSERT_COMPILETIME(thePageSize == GA_PAGE_SIZE);
247  ok = ok && w.jsonInt(thePageSize);
248 
249 #if 0
250  // For max compatibility with GA_DataArrayTuple, we try to match the old
251  // packing behaviour: 1; 2 -> 1,1; 3; 4 -> 3,1; 5 -> 3,1,1; 6 -> 3,1,1,1
252  // though only for fpreal32 and fpreal64 types. Every other type
253  // had each component stored separately.
254  //
255  // TODO: Check if older versions will load data that is saved
256  // with everything as array-of-structs, avoiding the
257  // need for this.
258  bool hasfirst3packed = (tuplesize >= 3) &&
259  (ga_storage == GA_STORE_REAL32 || ga_storage == GA_STORE_REAL64);
260 
261  // The GA_JDTUPLE_PACKING field is optional and only needed if we
262  // need a data layout other than the default array-of-structs.
263  int n_packing_entries = tuplesize - (hasfirst3packed ? 2 : 0);
264  if (n_packing_entries > 1)
265  {
267  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_UINT8);
268 
269  // First is 3 or 1; every other one is 1.
270  ok = ok && w.uniformWrite(uint8(hasfirst3packed ? 3 : 1));
271  for (int i = 1; i < n_packing_entries; i++)
272  {
273  ok = ok && w.uniformWrite(uint8(1));
274  }
275 
276  ok = ok && w.endUniformArray();
277  }
278 #else
279  // I think GA_DataArrayTuple::jsonLoad supports loading
280  // array-of-structs, regardless of the tuplesize, so let's try it
281  // for now, and we can always fall back later.
282 
283  // I don't think the packing entry array is needed if there's only one entry.
284 #if 0
285  int n_packing_entries = 1;
286 
288  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_INT32);
289  ok = ok && w.uniformWrite(int32(tuplesize));
290  ok = ok && w.endUniformArray();
291 #endif
292 #endif
293 
294  // constpagecheck:
295  // 0 - none
296  // 1 - use page state
297  // 2 - full data scan
298  exint const_page_check = 2;
299  if (options)
300  const_page_check = options->constPageCheck();
301 
// Optionally detect output pages that are constant, so the raw page
// save can collapse each such page to a single tuple.
302  UT_UniquePtr<UT_BitArray> const_page_flags(nullptr);
303  if (tuplesize > 0)
304  {
305  if (const_page_check >= 2)
306  {
307  ok = ok && jsonSaveConstantOutputPageFlags<
309  w, range, const_page_flags);
310  }
311  else if (const_page_check == 1)
312  {
313  ok = ok && jsonSaveConstantOutputPageFlags<
315  w, range, const_page_flags);
316  }
317  }
318 
320 
321  ok = ok && jsonSaveRawPageData(w, range,
322  const_page_flags.get(), jid, map, defvalue);
323  }
324  else if (tuplesize <= 1)
325  {
326  // No reason to save an array of tuples if it's a scalar
328  ok = ok && w.jsonBeginArray();
329 
330  if (tuplesize != 0)
331  ok = ok && jsonSaveAsArray<false>(w, range, jid, map, defvalue);
332 
333  ok = ok && w.jsonEndArray();
334  }
335  else
336  {
337  // Store as an array of structs
339  ok = ok && w.jsonBeginArray();
340 
341  ok = ok && jsonSaveAsArray<true>(w, range, jid, map, defvalue);
342 
343  ok = ok && w.jsonEndArray();
344  }
345  return ok && w.jsonEndArray();
346 }
347 
// Detects which *output* pages of `range` are constant-valued, writes the
// resulting bit array to the JSON stream, and hands ownership of the flags
// back to the caller through `output_page_flags` (left null when no page
// is constant). MAP_ARRAY_CLASS selects the page-map representation and
// thereby the detection strategy (page-state only vs. full data scan).
// NOTE(review): the signature line (original 351) and a
// w.jsonKeyToken(...) line (original 367) were elided by the extractor.
348 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
349 template<typename MAP_ARRAY_CLASS>
350 bool
352  UT_JSONWriter &w, const GA_Range &range, UT_UniquePtr<UT_BitArray> &output_page_flags) const
353 {
354  bool ok = true;
355 
// Map output pages back to the internal pages that feed them.
356  MAP_ARRAY_CLASS output_to_internal_page_map;
357  buildOutputToInternalPageMap(range, output_to_internal_page_map);
358 
// Ceiling division: number of pages needed to hold all entries.
359  int64 n_output_pages = ((range.getEntries() + thePageSize-1) / thePageSize);
360  UT_BitArray constant_flags(n_output_pages);
361 
362  GA_Size n_constant_pages = marshallConstantFlagsForOutputPages(
363  output_to_internal_page_map, constant_flags);
// Nothing constant: skip writing the flags field entirely.
364  if (n_constant_pages == 0)
365  return ok;
366 
368  ok = ok && w.jsonBeginArray();
369 
370  ok = ok && w.jsonUniformArray(constant_flags.size(), constant_flags);
// Transfer the computed flags to the caller without copying.
371  output_page_flags.reset(new UT_BitArray);
372  constant_flags.swap(*output_page_flags);
373 
374  ok = ok && w.jsonEndArray();
375 
376  return ok;
377 }
378 
// Writes one span of tuple data to the uniform array currently open on
// `w`. `const_input` indicates page_data holds a single tuple to be
// repeated; `const_output` indicates only one tuple should be emitted.
// `map`/`defvalue` optionally remap integer values, using `buffer`
// (caller-provided, at least `tuplesize` elements) as scratch space.
// Returns false as soon as any write fails.
// NOTE(review): the signature line (original 381) was elided by the
// extractor; this is presumably a static helper — confirm against the
// shipped header.
379 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
380 bool
382  UT_JSONWriter &w,
383  const NotVoidType *page_data,
384  exint length, exint tuplesize,
385  bool const_output, bool const_input,
386  const UT_IntArray *map, int defvalue,
387  NotVoidType *buffer)
388 {
389  // NOTE: nullptr page_data should be dealt with by caller, using buffer.
390  UT_ASSERT_P(page_data);
391 
// Full varying span: every tuple is distinct.
392  if (!const_output && !const_input)
393  {
394  if (!map)
395  {
396  // Simple case
397  return w.uniformBlockWrite(page_data, length * tuplesize);
398  }
399  else
400  {
// Remap each component through `map`; out-of-range values fall
// back to defvalue.
401  for (exint i = 0; i < length; ++i)
402  {
403  for (exint component = 0; component < tuplesize; ++component, ++page_data)
404  {
405  NotVoidType val = *page_data;
406  buffer[component] = (val < 0 || val >= map->size())
407  ? defvalue
408  : (*map)(val);
409  }
410  if (!w.uniformBlockWrite(buffer, tuplesize))
411  return false;
412  }
413  return true;
414  }
415  }
416 
417  // Every case left has a single input value to read
418  const NotVoidType *data = page_data;
419  if (map)
420  {
421  for (exint component = 0; component < tuplesize; ++component, ++page_data)
422  {
423  NotVoidType val = *page_data;
424  buffer[component] = (val < 0 || val >= map->size())
425  ? defvalue
426  : (*map)(val);
427  }
428  data = buffer;
429  }
430 
431  if (const_output)
432  {
433  return w.uniformBlockWrite(data, tuplesize);
434  }
435  else
436  {
437  // const_input and !const_output, so repeat same tuple, length times
438  for (exint i = 0; i < length; ++i)
439  {
440  if (!w.uniformBlockWrite(data, tuplesize))
441  return false;
442  }
443  return true;
444  }
445 }
446 
// Writes the raw page data for `range` as one uniform JSON array.
// Pages flagged constant in `const_page_flags` are collapsed to a single
// tuple; everything else is written in full, walking the range block by
// block so output pages can be assembled from multiple internal pages.
// NOTE(review): the signature line (original 449) and the declaration of
// the scratch `buffer` used below (original 496, presumably a
// UT_StackBuffer<NotVoidType> of tuplesize elements) were elided by the
// extractor — confirm against the shipped header.
447 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
448 bool
450  UT_JSONWriter &w, const GA_Range &range,
451  const UT_BitArray *const_page_flags,
452  UT_JID jid_storage,
453  const UT_IntArray *map, int defvalue) const
454 {
455  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
456 
457  exint ntotal = range.getEntries();
458 
// Total number of elements actually written once each constant page is
// collapsed to one tuple.
459  exint collapsedsize = ntotal;
460  if (const_page_flags && ntotal > 0)
461  {
462  exint n_const_pages = const_page_flags->numBitsSet();
463 
464  // Special handling for last page, since it's not always the same size
465  if (const_page_flags->getBitFast(const_page_flags->size()-1))
466  {
467  collapsedsize = (const_page_flags->size()-n_const_pages)*thePageSize
468  + n_const_pages;
469  }
470  else
471  {
472  // NOTE: ((ntotal-1) & thePageMask) + 1 ensures that we get
473  // thePageSize if ntotal is a multiple of thePageSize.
474  collapsedsize = (const_page_flags->size()-n_const_pages-1)*thePageSize
475  + n_const_pages
476  + ((ntotal-1) & thePageMask) + 1;
477  }
478  }
479  const exint tuplesize = getTupleSize();
480  collapsedsize *= tuplesize;
481 
482  bool ok = true;
483  ok = ok && w.beginUniformArray(collapsedsize, jid_storage);
484 
485  // Don't even try to go through the pages if tuplesize is 0.
486  // Only bugs will ensue. Might as well check this implicitly
487  // by checking collapsedsize, since it's multiplied by tuplesize.
488  if (collapsedsize == 0)
489  {
490  ok = ok && w.endUniformArray();
491  return ok;
492  }
493 
494  bool const_page_data;
495  const NotVoidType *page_data;
497  const GA_Size n_output_pages = (ntotal+thePageSize-1) / thePageSize;
498 
499  GA_Iterator it(range);
500  GA_PageNum last_page_num(-1);
501  GA_Offset block_start = GA_INVALID_OFFSET;
502  GA_PageOff block_start_pageoff;
503  GA_Offset block_end = GA_INVALID_OFFSET;
// Outer loop: one iteration per output page; inner do/while consumes
// iterator blocks until the output page is filled.
504  for (GA_Size output_page_num = 0; ok && output_page_num < n_output_pages; ++output_page_num)
505  {
506  const bool output_page_const = const_page_flags && const_page_flags->getBitFast(output_page_num);
507 
508  GA_Size output_page_offset = 0;
509  do
510  {
511  if (block_start == block_end)
512  {
513  bool more_data = it.blockAdvance(block_start, block_end);
514  if (!more_data)
515  {
// Exhausting the range mid-page is legal only on the final page.
516  UT_ASSERT_P(output_page_num == n_output_pages-1);
517  UT_ASSERT_P(GA_Size(GAgetPageOff(GA_Offset(ntotal))) == output_page_offset);
518  break;
519  }
520 
521  GA_PageNum page_num = GAgetPageNum(block_start);
522  block_start_pageoff = GAgetPageOff(block_start);
523 
524  // Fetch the page data if we don't already have it.
525  if (page_num != last_page_num)
526  {
527  const_page_data = isPageConstant(page_num);
528  page_data = getPageData(page_num);
529 
530  // Deal with nullptr here, to avoid having to deal with it in
531  // multiple codepaths.
532  if (!page_data)
533  {
// nullptr page data means implicit zero for a constant page.
534  UT_ASSERT_P(const_page_data);
535  memset((NotVoidType*)buffer,0,sizeof(NotVoidType)*tuplesize);
536  page_data = (NotVoidType*)buffer;
537  }
538  last_page_num = page_num;
539  }
540  }
541 
542  const GA_Size copy_size = SYSmin(GA_Size(block_end-block_start), thePageSize-GA_Size(output_page_offset));
543 
// Varying output page: write this sub-span now. Constant output
// pages are written once, after the loop.
544  if (!output_page_const)
545  {
546  const NotVoidType *copy_data = page_data;
547  if (!const_page_data)
548  copy_data += GA_Size(block_start_pageoff)*tuplesize;
549  ok = ok && jsonWriteDataSpan(
550  w, copy_data, copy_size, tuplesize,
551  false, const_page_data, map, defvalue, (NotVoidType*)buffer);
552  }
553 
554  output_page_offset += copy_size;
555  block_start += copy_size;
556  block_start_pageoff += copy_size;
557  } while (ok && output_page_offset != thePageSize);
558 
559  if (output_page_const)
560  {
561  const NotVoidType *copy_data = page_data;
562  if (!const_page_data)
563  {
564  // The -1 is because we added copy_size, which is at least 1, to block_start_pageoff,
565  // and block_start_pageoff may now be at a page offset that is not the same value,
566  // or may even be at the page offset of block_end.
567  copy_data += GA_Size(block_start_pageoff-1)*tuplesize;
568  }
569  ok = ok && jsonWriteDataSpan(
570  w, copy_data, 1, tuplesize,
571  true, const_page_data, map, defvalue, (NotVoidType*)buffer);
572  }
573  }
574 
575  ok = ok && w.endUniformArray();
576  return ok;
577 }
578 
// Small POD-style record describing a contiguous sub-range of one internal
// page: the page number plus start/end offsets within it. Used by the
// sub-page variants of buildOutputToInternalPageMap /
// marshallConstantFlagsForOutputPages below.
// NOTE(review): the constructor signature (original lines 583-584) and the
// member declarations myPage/myStartOffset/myEndOffset (originals 587-589)
// were elided by the extractor — confirm against the shipped header.
579 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
580 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::ga_SubPageBlock
581 {
582 public:
585  : myPage(page), myStartOffset(start), myEndOffset(end) {}
586 
590 };
591 
592 // --------------------------------------------------------------------------
593 // Compute a mapping to keep track of which internal pages affect which output
594 // pages. We store this mapping as an ordered list of the input pages as they
595 // are traversed in building the output pages, with the start of each output
596 // page indicated by a negative value (-(input_page + 1)).
597 //
598 // NB: We don't keep track of the page offsets in the mapping so this is
599 // really only useful for internal pages that are flagged as constant.
// Builds the output-page -> internal-page mapping described in the comment
// block above: an ordered list of internal page numbers as they are
// visited while assembling output pages, with the first entry of each
// output page encoded as -(page_num+1).
// NOTE(review): the signature line (original 602) and the `map` output
// parameter line (original 604, presumably UT_Array<GA_PageNum>&) were
// elided by the extractor — confirm against the shipped header.
600 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
601 void
603  const GA_Range &range,
605 {
606  GA_Iterator it(range);
607  GA_Size output_page_offset = 0;
608  GA_Size block_size = 0;
609  GA_PageNum page_num;
610  GA_PageOff page_offset;
611  GA_PageNum last_page_num(-1);
612 
613  while (true)
614  {
// Completed an output page; next append starts a new one.
615  if (output_page_offset == thePageSize)
616  {
617  output_page_offset = 0;
618  }
619 
620  if (block_size == 0) // need new block
621  {
622  GA_Offset block_start, block_end;
623  if (!it.blockAdvance(block_start, block_end))
624  break;
625 
626  page_num = GAgetPageNum(block_start);
627  page_offset = GAgetPageOff(block_start);
628  block_size = block_end - block_start;
629  }
630 
// How much of the current block fits in the current output page.
631  GA_Size output_size = SYSmin(block_size,
632  thePageSize-output_page_offset);
633 
634  if (output_page_offset == 0)
635  {
// Start-of-output-page marker: negative encoding of the page.
636  map.append(-(page_num+1));
637  last_page_num = page_num;
638  }
639  else if (page_num != last_page_num)
640  {
641  map.append(page_num);
642  last_page_num = page_num;
643  }
644 
645  page_offset += output_size;
646  block_size -= output_size;
647  output_page_offset += output_size;
648  }
649 }
650 
651 // Compute a mapping to keep track of which internal page data blocks affect
652 // which output pages. We store this mapping as an ordered list of the sub
653 // page blocks as they are traversed in building the output pages, with the
654 // start of each output page indicated by a negative page number
655 // (-(input_page + 1)).
656 //
657 // TODO: We could keep track of block start/end, recomputing the internal
658 // page number at need?
// Sub-page variant of buildOutputToInternalPageMap (see the comment block
// above): records a ga_SubPageBlock (page, start, end) per visited span,
// with the first block of each output page flagged by a negative page
// number, -(page_num+1).
// NOTE(review): the signature line (original 661) and the `map` output
// parameter line (original 663, presumably UT_Array<ga_SubPageBlock>&)
// were elided by the extractor — confirm against the shipped header.
659 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
660 void
662  const GA_Range &range,
664 {
665  GA_Iterator it(range);
666  GA_Size output_page_offset = 0;
667  GA_Size block_size = 0;
668  GA_PageNum page_num;
669  GA_PageOff page_offset;
670 
671  while (true)
672  {
// Completed an output page; next append starts a new one.
673  if (output_page_offset == thePageSize)
674  {
675  output_page_offset = 0;
676  }
677 
678  if (block_size == 0) // need new block
679  {
680  GA_Offset block_start, block_end;
681  if (!it.blockAdvance(block_start, block_end))
682  break;
683 
684  page_num = GAgetPageNum(block_start);
685  page_offset = GAgetPageOff(block_start);
686  block_size = block_end - block_start;
687  }
688 
// How much of the current block fits in the current output page.
689  GA_Size output_size = SYSmin(block_size,
690  thePageSize-output_page_offset);
691 
692  if (output_page_offset == 0)
693  {
// Start-of-output-page marker: negative encoding of the page.
694  map.append(ga_SubPageBlock(
695  GA_PageNum(-(page_num+1)), page_offset,
696  page_offset + output_size));
697  }
698  else
699  {
700  map.append(ga_SubPageBlock(
701  page_num, page_offset,
702  page_offset + output_size));
703  }
704 
705  page_offset += output_size;
706  block_size -= output_size;
707  output_page_offset += output_size;
708  }
709 }
710 
// Walks the page-number map produced by buildOutputToInternalPageMap and
// sets a bit in `constant_flags` for each output page whose contributing
// internal pages are all constant with equal values. Returns the number
// of bits set. This variant only consults page state (isPageConstant),
// not the element data itself, so it may under-report constant pages.
// NOTE(review): the signature line (original 713) was elided by the
// extractor — confirm against the shipped header.
711 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
712 GA_Size
714  const UT_Array<GA_PageNum> &internal_page_map,
715  UT_BitArray &constant_flags) const
716 {
717  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
718 
719  GA_Size count = 0;
720  GA_Size output_page = -1;
721  bool output_page_flag = false;
// Constant value of the current output page's first internal page;
// only read while output_page_flag is true.
722  const NotVoidType *constant_value;
723  const exint tuplesize = getTupleSize();
724 
725  constant_flags.setAllBits(false);
726  for (GA_Size i = 0; i < internal_page_map.size(); i++)
727  {
728  GA_PageNum internal_page = internal_page_map(i);
729  // A negative internal page is used to mark the start of a new
730  // output page.
731  if (internal_page < 0)
732  {
// Commit the verdict for the output page just finished.
733  if (output_page >= 0 && output_page_flag)
734  {
735  constant_flags.setBit(output_page, output_page_flag);
736  ++count;
737  }
738 
739  ++output_page;
740  UT_ASSERT_P(output_page <= constant_flags.size());
741  internal_page = -(internal_page + 1);
742  output_page_flag = isPageConstant(internal_page);
743  if (output_page_flag)
744  {
745  constant_value = getPageData(internal_page);
746  }
747  }
748  else if (output_page_flag)
749  {
// Subsequent internal page: must also be constant and match.
750  if (!isPageConstant(internal_page))
751  output_page_flag = false;
752  else
753  {
754  const NotVoidType *new_constant_value = getPageData(internal_page);
// nullptr means implicit zero, so nullptr vs non-null must agree.
755  if ((new_constant_value==nullptr) != (constant_value==nullptr))
756  output_page_flag = false;
757  else if (constant_value != new_constant_value)
758  output_page_flag = isEqual(constant_value, new_constant_value, tuplesize);
759  }
760  }
761  }
// Commit the verdict for the final output page.
762  if (output_page >= 0 && output_page_flag)
763  {
764  constant_flags.setBit(output_page, output_page_flag);
765  ++count;
766  }
767  return count;
768 }
769 
// Sub-page variant of marshallConstantFlagsForOutputPages: in addition to
// page-state checks, it scans the actual element data of non-constant
// internal pages (via isSubPageConstant) so constant output pages are
// detected even when the underlying pages are not flagged constant.
// Returns the number of bits set in `constant_flags`.
// NOTE(review): the signature line (original 772) was elided by the
// extractor — confirm against the shipped header.
770 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
771 GA_Size
773  const UT_Array<ga_SubPageBlock> &internal_page_map,
774  UT_BitArray &constant_flags) const
775 {
776  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
777 
778  GA_Size count = 0;
779  GA_Size output_page = -1;
780  bool output_page_flag = false;
// Candidate constant tuple for the current output page; only read
// while output_page_flag is true.
781  const NotVoidType *constant_value;
782  const exint tuplesize = getTupleSize();
783 
784  constant_flags.setAllBits(false);
785  for (GA_Size i = 0; i < internal_page_map.entries(); i++)
786  {
787  GA_PageNum internal_page = internal_page_map(i).myPage;
788  // A negative internal page is used to mark the start of a new
789  // output page.
790  if (internal_page < 0)
791  {
// Commit the verdict for the output page just finished.
792  if (output_page >= 0 && output_page_flag)
793  {
794  constant_flags.setBit(output_page, output_page_flag);
795  ++count;
796  }
797 
798  ++output_page;
799  UT_ASSERT_P(output_page <= constant_flags.size());
800  internal_page = -(internal_page + 1);
801  output_page_flag = isPageConstant(internal_page);
802  constant_value = getPageData(internal_page);
803  if (!output_page_flag)
804  {
// Page not flagged constant: scan its sub-range, using the
// first tuple as the candidate constant value.
805  GA_PageOff start = internal_page_map(i).myStartOffset;
806  GA_PageOff end = internal_page_map(i).myEndOffset;
807  const NotVoidType *page = constant_value;
808  constant_value += start;
809  output_page_flag = isSubPageConstant(page, start+1, end,
810  tuplesize, constant_value);
811  }
812  }
813  else if (output_page_flag)
814  {
815  const bool page_constant = isPageConstant(internal_page);
816  const NotVoidType *page = getPageData(internal_page);
817  if (page_constant)
818  {
// nullptr means implicit zero, so nullptr vs non-null must agree.
819  if ((page==nullptr) != (constant_value==nullptr))
820  output_page_flag = false;
821  else if (constant_value != page)
822  output_page_flag = isEqual(constant_value, page, tuplesize);
823  }
824  else
825  {
826  if (!isSubPageConstant(page,
827  internal_page_map(i).myStartOffset,
828  internal_page_map(i).myEndOffset,
829  tuplesize,
830  constant_value))
831  output_page_flag = false;
832  }
833  }
834  }
// Commit the verdict for the final output page.
835  if (output_page >= 0 && output_page_flag)
836  {
837  constant_flags.setBit(output_page, output_page_flag);
838  ++count;
839  }
840  return count;
841 }
842 
// Returns true if every tuple in `page` over the given offset range equals
// `value` (or is all-zero when `value` is nullptr, matching the implicit
// zero of nullptr constant-page data).
// NOTE(review): the signature line (original 845) and the start/end
// parameter line (original 847, presumably GA_PageOff start/end) were
// elided by the extractor — confirm against the shipped header.
843 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
844 bool
846  const NotVoidType *page,
848  const exint tuplesize,
849  const NotVoidType *value)
850 {
851  if (value == nullptr)
852  {
853  for (GA_PageOff cur = start; cur < end; cur++)
854  {
855  if (!isZero(page+cur*tuplesize, tuplesize))
856  return false;
857  }
858  }
859  else
860  {
861  for (GA_PageOff cur = start; cur < end; cur++)
862  {
863  if (!isEqual(page+cur*tuplesize, value, tuplesize))
864  return false;
865  }
866  }
867  return true;
868 }
869 
// Saves `range` element by element: as one flat uniform array when
// ARRAY_OF_ARRAYS is false, or as a JSON array per tuple when true.
// `map`/`defvalue` optionally remap integer values during the save.
// Returns false as soon as any write fails.
// NOTE(review): the signature line (original 873) and the declaration of
// `start` (original 889, presumably `GA_Offset start;`) were elided by
// the extractor — confirm against the shipped header.
870 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
871 template<bool ARRAY_OF_ARRAYS>
872 bool
874  UT_JSONWriter &w, const GA_Range &range, UT_JID jid_storage,
875  const UT_IntArray *map, int defvalue) const
876 {
877  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
878 
879  int tuplesize = getTupleSize();
880 
// Flat mode wraps everything in a single uniform array up front.
881  if (!ARRAY_OF_ARRAYS)
882  {
883  if (!w.beginUniformArray(tuplesize*range.getEntries(), jid_storage))
884  return false;
885  }
886 
// Scratch tuple, only needed when emitting one sub-array per tuple.
887  UT_StackBuffer<NotVoidType> buffer(ARRAY_OF_ARRAYS ? tuplesize : 0);
888 
890  GA_Offset end;
891  for (GA_Iterator it(range); it.blockAdvance(start, end); )
892  {
893  if (map)
894  {
895  for (GA_Offset ai = start; ai < end; ++ai)
896  {
897  if (ARRAY_OF_ARRAYS)
898  {
899  for (int component = 0; component < tuplesize; ++component)
900  {
901  NotVoidType v = this->template get<NotVoidType>(ai, component);
// Out-of-range indices remap to defvalue.
902  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
903  buffer[component] = v;
904  }
905  if (!w.jsonUniformArray(tuplesize, buffer))
906  return false;
907  }
908  else
909  {
910  for (int component = 0; component < tuplesize; ++component)
911  {
912  NotVoidType v = this->template get<NotVoidType>(ai, component);
913  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
914  if (!w.uniformWrite(v))
915  return false;
916  }
917  }
918  }
919  }
920  else
921  {
922  // No map
923  for (GA_Offset ai = start; ai < end; ++ai)
924  {
925  if (ARRAY_OF_ARRAYS)
926  {
927  for (int component = 0; component < tuplesize; ++component)
928  buffer[component] = this->template get<NotVoidType>(ai, component);
929 
930  if (!w.jsonUniformArray(tuplesize, buffer))
931  return false;
932  }
933  else
934  {
935  for (int component = 0; component < tuplesize; ++component)
936  {
937  NotVoidType v = this->template get<NotVoidType>(ai, component);
938 
939  if (!w.uniformWrite(v))
940  return false;
941  }
942  }
943  }
944  }
945  }
946 
// Per-tuple mode closed each sub-array as it went; nothing to end here.
947  if (ARRAY_OF_ARRAYS)
948  return true;
949 
950  return w.endUniformArray();
951 }
952 
// Maps a GA_Storage enum value to the corresponding UT_JID type tag used
// for uniform JSON arrays. Unhandled/invalid storages yield UT_JID_NULL
// (with an assertion in debug builds for genuinely unknown values).
// NOTE(review): the signature line (original 955, taking a GA_Storage
// `storage` parameter, presumably static) was elided by the extractor —
// confirm against the shipped header.
953 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
954 UT_JID
956 {
957  switch (storage)
958  {
959  case GA_STORE_BOOL:
960  return UT_JID_BOOL;
961  case GA_STORE_INVALID:
962  return UT_JID_NULL;
963  case GA_STORE_STRING:
964  return UT_JID_STRING;
965  case GA_STORE_INT8:
966  return UT_JID_INT8;
967  case GA_STORE_UINT8:
968  return UT_JID_UINT8;
969  case GA_STORE_INT16:
970  return UT_JID_INT16;
971  case GA_STORE_INT32:
972  return UT_JID_INT32;
973  case GA_STORE_INT64:
974  return UT_JID_INT64;
975  case GA_STORE_REAL16:
976  return UT_JID_REAL16;
977  case GA_STORE_REAL32:
978  return UT_JID_REAL32;
979  case GA_STORE_REAL64:
980  return UT_JID_REAL64;
981  }
// Reached only for values outside the enum (e.g. corrupted input).
982  UT_ASSERT_MSG_P(0, "Unhandled GA_Storage value!");
983  return UT_JID_NULL;
984 }
985 
986 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
987 bool
989  UT_JSONParser &p,
990  const GA_LoadMap &map,
991  GA_AttributeOwner owner)
992 {
993  if (SYSisSame<DATA_T,void>())
994  {
995  bool success;
996  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&p,&map,owner,&success](SYS_CALLIF_AUTO){
997  // Hard case, where the storage type is not known at compile time.
998  UT_Storage storage = this->Base::getStorage();
999  switch (storage)
1000  {
1001  case UT_Storage::INT8:
1002  success = this->castType<int8>().jsonLoad(p, map, owner); return;
1003  case UT_Storage::INT16:
1004  success = this->castType<int16>().jsonLoad(p, map, owner); return;
1005  case UT_Storage::INT32:
1006  success = this->castType<int32>().jsonLoad(p, map, owner); return;
1007  case UT_Storage::INT64:
1008  success = this->castType<int64>().jsonLoad(p, map, owner); return;
1009  case UT_Storage::REAL16:
1010  success = this->castType<fpreal16>().jsonLoad(p, map, owner); return;
1011  case UT_Storage::REAL32:
1012  success = this->castType<fpreal32>().jsonLoad(p, map, owner); return;
1013  case UT_Storage::REAL64:
1014  success = this->castType<fpreal64>().jsonLoad(p, map, owner); return;
1015  case UT_Storage::INVALID:
1016  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
1017  success = false;
1018  return;
1019  }
1020  success = false;
1021  });
1022  return success;
1023  }
1024 
1025  int64 tuple_size = getTupleSize();
1026 
1027  // Cast to optimize for small tuple sizes
1028  if (TSIZE == -1 && tuple_size <= 3 && tuple_size >= 1)
1029  {
1030  bool success;
1031  SYS_CallIf<TSIZE == -1>::call([this,&p,&map,owner,tuple_size,&success](SYS_CALLIF_AUTO){
1032  if (tuple_size == 3)
1033  success = this->castTupleSize<3>().jsonLoad(p, map, owner);
1034  else if (tuple_size == 1)
1035  success = this->castTupleSize<1>().jsonLoad(p, map, owner);
1036  else
1037  {
1038  UT_ASSERT_P(tuple_size == 2);
1039  success = this->castTupleSize<2>().jsonLoad(p, map, owner);
1040  }
1041  });
1042  return success;
1043  }
1044 
1045  UT_WorkBuffer key;
1046  int64 page_size = -1;
1047  GA_Storage ga_storage = GA_STORE_INVALID;
1048  UT_StackBuffer<int> packing(tuple_size);
1049  int n_packing_entries = 0;
1050  UT_StackBuffer<UT_UniquePtr<UT_BitArray> > constant_page_flags(tuple_size);
1051  bool constant_page_flags_init = false;
1052 
1053  bool ok = true;
1054  bool done = false;
1055  for (UT_JSONParser::traverser mi = p.beginArray(); ok && !mi.atEnd(); ++mi)
1056  {
1057  if (!mi.getLowerKey(key))
1058  {
1059  ok = false;
1060  break;
1061  }
1062  switch (GA_PageArrayIO::getJSONTokenID(key.buffer()))
1063  {
1065  {
1066  int64 local_tuple_size = -1;
1067  ok = p.parseInteger(local_tuple_size);
1068  if (ok && local_tuple_size != tuple_size)
1069  {
1070  p.addWarning("Inconsistent tuple size specification");
1071  ok = false;
1072  }
1073  break;
1074  }
1076  ok = p.parseString(key);
1077  if (ok)
1078  ga_storage = GAstorage(key.buffer());
1079  break;
1081  // Tuple size and storage type are supposed to have already been set.
1082  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1083  {
1084  p.addWarning("Bad data type/size specification");
1085  ok = p.skipNextObject();
1086  }
1087  else
1088  {
1089  // Load as a struct of arrays
1091  for (exint component = 0; ok && !ai.atEnd(); ++component, ++ai)
1092  {
1093  if (component < tuple_size)
1094  {
1095  GA_Offset startoff = map.getLoadOffset(owner);
1096  LoadComponentArrayFunctor op(*this, startoff, component);
1097  if (GAisIntStorage(ga_storage))
1099  else
1101  }
1102  else
1103  {
1104  if (component == tuple_size)
1105  p.addWarning("Too many tuple items in data array");
1106  ok = p.skipNextObject();
1107  }
1108  }
1109  done = true;
1110  }
1111  break;
1113  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1114  {
1115  p.addWarning("Bad data type/size specification");
1116  ok = p.skipNextObject();
1117  }
1118  else
1119  {
1120  // Load as an array of structs
1122  GA_Offset offset = map.getLoadOffset(owner);
1123  GA_Size ppage = GAgetPageNum(offset);
1124  for ( ; ok && !ai.atEnd(); ++offset, ++ai)
1125  {
1126  GA_PageNum newpagenum = GAgetPageNum(offset);
1127  if (newpagenum != ppage)
1128  {
1129  // We compress previous page
1130  tryCompressPage(ppage);
1131  ppage = newpagenum;
1132  }
1133 
1134  UT_StackBuffer<NotVoidType> buffer(tuple_size);
1135  exint nread = p.parseUniformArray<NotVoidType>(buffer, tuple_size);
1136  if (nread < tuple_size)
1137  {
1138  ok = false;
1139  break;
1140  }
1141  if (nread > tuple_size)
1142  p.addWarning("Extra data found in array tuple");
1143 
1144  if (TSIZE >= 1)
1145  {
1146  setVector(offset, *(const UT_FixedVector<NotVoidType,theSafeTupleSize>*)buffer.array());
1147  }
1148  else
1149  {
1150  for (int component = 0; component < tuple_size; ++component)
1151  set(offset, component, buffer[component]);
1152  }
1153  }
1154  tryCompressPage(ppage);
1155  done = true;
1156  }
1157  break;
1159  ok = p.parseInteger(page_size);
1160  break;
1162  if (tuple_size != getTupleSize())
1163  {
1164  p.addWarning("Packing requires valid size specification");
1165  ok = p.skipNextObject();
1166  }
1167  else
1168  {
1169  // NB: p.parseUniformArray() might return a greater value
1170  // than expected, but it won't write the extra values
1171  // to packing.array().
1172  n_packing_entries = p.parseUniformArray(packing.array(), tuple_size);
1173 
1174  if (constant_page_flags_init && n_packing_entries != (tuple_size > 0 ? 1 : 0))
1175  {
1176  p.addWarning("Non-trivial packing specification must come before constant page flags");
1177  n_packing_entries = 0;
1178  ok = false;
1179  }
1180  else if (n_packing_entries >= 0)
1181  {
1182  int total_packed_size = 0;
1183  for (int i = 0; i < n_packing_entries; ++i)
1184  {
1185  total_packed_size += packing[i];
1186  }
1187  if (total_packed_size != tuple_size ||
1188  n_packing_entries > tuple_size)
1189  {
1190  p.addWarning("Invalid packing specification");
1191  n_packing_entries = -1;
1192  ok = false;
1193  }
1194  }
1195  }
1196  break;
1198  if (tuple_size != getTupleSize() ||
1199  page_size <= 0 || n_packing_entries < 0)
1200  {
1201  p.addWarning("Bad data type/size specification");
1202  ok = p.skipNextObject();
1203  }
1204  else
1205  {
1206  int i = 0;
1207  UT_BitArray scratch_array;
1208 
1209  int n_arrays = n_packing_entries ? n_packing_entries
1210  : (tuple_size > 0 ? 1 : 0);
1211  int64 n_input_pages = (map.getLoadCount(owner)+page_size-1) / page_size;
1212 
1213  for (UT_JSONParser::traverser it = p.beginArray(); !it.atEnd(); ++it, ++i)
1214  {
1215  if (i < n_arrays)
1216  {
1217  int64 n_loaded = p.parseUniformBoolArray(scratch_array, n_input_pages);
1218 
1219  // We allow an empty array when no pages are constant.
1220  if (n_loaded == 0)
1221  {
1222  constant_page_flags[i].reset(nullptr);
1223  }
1224  else
1225  {
1226  constant_page_flags[i].reset(new UT_BitArray());
1227  scratch_array.swap(*constant_page_flags[i]);
1228  }
1229  }
1230  else
1231  {
1232  p.skipNextObject();
1233  UT_ASSERT(0);
1234  }
1235  }
1236  ok = (i == n_arrays);
1237  constant_page_flags_init = true;
1238  }
1239  break;
1241  // Load as an array of structs with tuples whose pages may be compressed
1242  if (done || tuple_size != getTupleSize() || ga_storage != getStorage() ||
1243  page_size <= 0 || n_packing_entries < 0)
1244  {
1245  p.addWarning("Bad data type/size specification");
1246  ok = p.skipNextObject();
1247  }
1248  else
1249  {
1250  // We default to a full vector when a GA_JDTUPLE_PACKING
1251  // field is missing.
1252  if (n_packing_entries == 0 && tuple_size > 0)
1253  {
1254  packing[0] = tuple_size;
1255  n_packing_entries = 1;
1256  }
1257  done = true;
1258  ok = jsonLoadRawPageData(p, map, owner,
1259  GA_Size(page_size),
1260  packing.array(), n_packing_entries,
1261  constant_page_flags.array());
1262  }
1263  break;
1264  default:
1265  p.addWarning("Data Array Tuple unknown key '%s'", key.buffer());
1266  break;
1267  }
1268  }
1269  if (!done)
1270  p.addWarning("Missing data for data array");
1271  return ok;
1272 }
1273 
1274 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1275 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::LoadComponentArrayFunctor
1276 {
1277 public:
1279 
1281  : myDest(dest)
1282  , myStartOffset(startoff)
1283  , myComponent(component)
1284  , myDestSize(dest.size()-startoff)
1285  {}
1286 
1287  template <typename T> SYS_FORCE_INLINE bool
1288  set(int64 i, T val) const
1289  {
1290  if (GA_Offset(i) >= myDestSize)
1291  return false;
1292  myDest.set(myStartOffset+(GA_Size)i, myComponent, val);
1293  return true;
1294  }
1295 
1296  template <typename T> SYS_FORCE_INLINE bool
1297  setArray(const T *data, int64 size) const
1298  {
1299  bool outofbounds = false;
1300  if (GA_Offset(size) > myDestSize)
1301  {
1302  size = int64(myDestSize);
1303  outofbounds = true;
1304  }
1305 
1306  // Fast path for single component
1307  if (TSIZE == 1)
1308  {
1309  myDest.setRange(myStartOffset, GA_Offset(size), data);
1310  return !outofbounds;
1311  }
1312 
1313  GA_Offset end = myStartOffset + GA_Size(size);
1314 
1315  for (GA_Offset off = myStartOffset; off < end; ++off, ++data)
1316  {
1317  myDest.set(off, myComponent, *data);
1318  }
1319 
1320  return !outofbounds;
1321  }
1322 
1327 };
1328 
1329 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1330 bool
1332  UT_JSONParser &p,
1333  const GA_LoadMap &map,
1334  GA_AttributeOwner owner,
1335  GA_Size page_size,
1336  const int *packing,
1337  int n_packing_entries,
1338  const UT_UniquePtr<UT_BitArray> *const constant_page_flags)
1339 {
1340  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1341 
1343  if (it.getErrorState())
1344  return false;
1345 
1346  UT_JID jid = p.getUniformArrayType();
1347  bool istypematch = (jid == GAStorageToJID(getStorage()));
1348 
1349  GA_Size num_input_elements = map.getLoadCount(owner);
1350  GA_Offset load_offset = map.getLoadOffset(owner);
1351  const GA_PageNum start_page_num = GAgetPageNum(load_offset);
1352  GA_PageOff page_offset = GAgetPageOff(load_offset);
1353  const exint tuple_size = getTupleSize();
1354  const exint num_page_values = tuple_size*thePageSize;
1355 
1356  UT_StackBuffer<NotVoidType> single_tuple(tuple_size);
1357 
1358  if (n_packing_entries == 1 && page_size == thePageSize)
1359  {
1360  UT_ASSERT(packing[0] == tuple_size);
1361 
1362  const UT_BitArray *constpagebits = constant_page_flags[0].get();
1363 
1364  if (page_offset == GA_PageOff(0))
1365  {
1366  // Loading at the beginning of a page, making things much simpler
1367  GA_Size num_full_new_pages = (num_input_elements >> GA_PAGE_BITS);
1368  GA_PageOff end_page_offset = GAgetPageOff(GA_Offset(num_input_elements));
1369 
1370  // First, fill in all complete, full-size pages
1371  GA_PageNum pagenum = start_page_num;
1372  for (GA_Size input_pagei = 0; input_pagei < num_full_new_pages; ++input_pagei, ++pagenum)
1373  {
1374  if (constpagebits && constpagebits->getBitFast(input_pagei))
1375  {
1376  if (istypematch)
1377  {
1378  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1379  return false;
1380  }
1381  else
1382  {
1383  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1384  return false;
1385  }
1386  setPageConstant(pagenum, single_tuple.array());
1387  }
1388  else
1389  {
1390  NotVoidType *data = hardenPageNoInit(pagenum);
1391  if (istypematch)
1392  {
1393  if (!it.readUniformArray(data, num_page_values))
1394  return false;
1395  }
1396  else
1397  {
1398  if (p.parseArrayValues(it, data, num_page_values) != num_page_values)
1399  return false;
1400  }
1401  }
1402  }
1403 
1404  // Handle any final incomplete or not-full-size page
1405  if (end_page_offset != GA_PageOff(0))
1406  {
1407  if (constpagebits && constpagebits->getBitFast(num_full_new_pages))
1408  {
1409  if (istypematch)
1410  {
1411  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1412  return false;
1413  }
1414  else
1415  {
1416  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1417  return false;
1418  }
1419  if (load_offset+num_input_elements == size())
1420  setPageConstant(pagenum, single_tuple.array());
1421  else
1422  {
1423  // I don't know if this path will ever be taken; I'm guessing not.
1424 
1425  bool equal = false;
1426  if (isPageConstant(pagenum))
1427  {
1428  const NotVoidType *current_tuple = getPageData(pagenum);
1429  if (current_tuple)
1430  {
1431  if (isEqual(single_tuple.array(),current_tuple,tuple_size))
1432  equal = true;
1433  }
1434  else
1435  {
1436  if (isZero(single_tuple.array(),tuple_size))
1437  equal = true;
1438  }
1439  }
1440  if (!equal)
1441  {
1442  NotVoidType *data = hardenPage(pagenum);
1443  for (GA_PageOff pageoff(0); pageoff < end_page_offset; ++pageoff)
1444  {
1445  for (exint component = 0; component < tuple_size; ++component, ++data)
1446  {
1447  *data = single_tuple[component];
1448  }
1449  }
1450  }
1451  }
1452  }
1453  else
1454  {
1455  // This could be optimized to avoid a bit of redundant initialization,
1456  // but hopefully it's not too much of an issue.
1457  NotVoidType *data = hardenPage(pagenum);
1458  const exint num_left_values = tuple_size*end_page_offset;
1459  if (istypematch)
1460  {
1461  if (!it.readUniformArray(data, num_left_values))
1462  return false;
1463  }
1464  else
1465  {
1466  if (p.parseArrayValues(it, data, num_left_values) != num_left_values)
1467  return false;
1468  }
1469  }
1470  }
1471  }
1472  else
1473  {
1474  // Loading with matching packing (only 1 tuple) and matching page size,
1475  // but not loading at a page boundary.
1476 
1477  // TODO: Optimize this case for that we know that pages are the same size,
1478  // e.g. to try to preserve constant pages or load directly into destination.
1479 
1480  UT_StackBuffer<NotVoidType> buffer(thePageSize*tuple_size);
1481  const exint num_input_pages = (num_input_elements + thePageSize-1) / thePageSize;
1482  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1483  {
1484  exint inputi = thePageSize*input_pagei;
1485  // NB: Base::thePageSize is needed (as opposed to thePageSize) to avoid
1486  // MSVC 19.14.26428.1 from crashing with /permissive-
1487  const exint num_page_elements = SYSmin(Base::thePageSize, num_input_elements-inputi);
1488  const exint num_page_values = tuple_size*num_page_elements;
1489 
1490  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1491  if (constant_page)
1492  {
1493  if (istypematch)
1494  {
1495  if (!it.readUniformArray(buffer.array(), tuple_size))
1496  return false;
1497  }
1498  else
1499  {
1500  if (p.parseArrayValues(it, buffer.array(), tuple_size) != tuple_size)
1501  return false;
1502  }
1503 
1504  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1505  {
1506  for (exint component = 0; component < tuple_size; ++component)
1507  {
1508  set(load_offset+inputi, component, buffer[component]);
1509  }
1510  }
1511  }
1512  else
1513  {
1514  if (istypematch)
1515  {
1516  if (!it.readUniformArray(buffer.array(), num_page_values))
1517  return false;
1518  }
1519  else
1520  {
1521  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1522  return false;
1523  }
1524 
1525  exint i = 0;
1526  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1527  {
1528  for (exint component = 0; component < tuple_size; ++component, ++i)
1529  {
1530  set(load_offset+inputi, component, buffer[i]);
1531  }
1532  }
1533  }
1534  }
1535  }
1536  }
1537  else
1538  {
1539  UT_StackBuffer<NotVoidType> buffer(page_size*tuple_size);
1540  const exint num_input_pages = (num_input_elements + page_size-1) / page_size;
1541  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1542  {
1543  exint start_component = 0;
1544  for (exint packingi = 0; packingi < n_packing_entries; ++packingi)
1545  {
1546  exint inputi = page_size*input_pagei;
1547  const exint num_page_elements = SYSmin(page_size, num_input_elements-inputi);
1548  const exint input_tuple_size = packing[packingi];
1549  const exint num_page_values = input_tuple_size*num_page_elements;
1550 
1551  const UT_BitArray *constpagebits = constant_page_flags[packingi].get();
1552 
1553  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1554  if (constant_page)
1555  {
1556  if (istypematch)
1557  {
1558  if (!it.readUniformArray(buffer.array(), input_tuple_size))
1559  return false;
1560  }
1561  else
1562  {
1563  if (p.parseArrayValues(it, buffer.array(), input_tuple_size) != input_tuple_size)
1564  return false;
1565  }
1566 
1567  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1568  {
1569  for (exint component = 0; component < input_tuple_size; ++component)
1570  {
1571  set(load_offset+inputi, start_component+component, buffer[component]);
1572  }
1573  }
1574  }
1575  else
1576  {
1577  if (istypematch)
1578  {
1579  if (!it.readUniformArray(buffer.array(), num_page_values))
1580  return false;
1581  }
1582  else
1583  {
1584  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1585  return false;
1586  }
1587 
1588  exint i = 0;
1589  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1590  {
1591  for (exint component = 0; component < input_tuple_size; ++component, ++i)
1592  {
1593  set(load_offset+inputi, start_component+component, buffer[i]);
1594  }
1595  }
1596  }
1597 
1598  start_component += input_tuple_size;
1599  }
1600  }
1601  }
1602 
1603  return it.atEnd();
1604 }
1605 
1606 #endif
bool uniformWrite(bool value)
The following byte represents an 8 bit integer.
bool beginUniformArray(int64 length, UT_JID id)
const_iterator begin() const
bool parseString(UT_WorkBuffer &v)
No data follows the NULL token.
GLenum GLint * range
Definition: glcorearb.h:1924
UT_Storage
Definition: UT_Storage.h:26
The following 4 bytes represent an 32 bit real (float)
SYS_FORCE_INLINE bool setArray(const T *data, int64 size) const
GA_API JDTupleToken getJSONTokenID(const char *token)
GA_Size GA_PageOff
Definition: GA_Types.h:621
bool getBitFast(exint index) const
Definition: UT_BitArray.h:317
Iteration over a range of elements.
Definition: GA_Iterator.h:28
bool jsonKeyToken(const UT_StringRef &value)
UT_JID
The UT_JID enums are used in byte-stream encoding of binary JSON.
bool getBinary() const
Return whether writing binary or ASCII JSON.
Definition: UT_JSONWriter.h:87
Class which stores the default values for a GA_Attribute.
Definition: GA_Defaults.h:35
bool jsonSave(UT_JSONWriter &w, const GA_Range &range, const GA_SaveOptions *options=nullptr, const UT_IntArray *map=nullptr, int defvalue=-1) const
const GLdouble * v
Definition: glcorearb.h:836
int64 parseUniformBoolArray(UT_BitArray &data, int64 len)
void setAllBits(bool value)
bool blockAdvance(GA_Offset &start, GA_Offset &end)
GLuint start
Definition: glcorearb.h:474
bool GAisValid(GA_Size v)
Definition: GA_Types.h:625
0x23 and 0x24 are reserved for future use (32/64 bit unsigned)
int64 parseArrayValues(iterator &it, T *data, int64 len)
GA_API const char * getJSONToken(JDTupleToken tokenID)
The merge map keeps track of information when merging details.
Definition: GA_MergeMap.h:53
SYS_FORCE_INLINE const char * buffer() const
bool parseInteger(int64 &v)
bool jsonStringToken(const UT_StringRef &value)
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1221
iterator beginArray()
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:75
The following byte represents an unsigned 8 bit integer.
void defragment(const GA_Defragment &defrag)
Include GA_PageArrayImpl.h to call this.
#define GA_API
Definition: GA_API.h:12
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:32
#define UT_IF_ASSERT(ZZ)
Definition: UT_Assert.h:144
bool jsonLoad(UT_JSONParser &p, const GA_LoadMap &map, GA_AttributeOwner owner)
GA_Offset getDestEnd(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:130
GLuint buffer
Definition: glcorearb.h:659
png_uint_32 i
Definition: png.h:2877
exint size() const
Definition: UT_Array.h:451
GA_Size getLoadCount(GA_AttributeOwner owner) const
This method returns the number of elements being loaded of each type.
exint GA_Size
Defines the bit width for index and offset types in GA.
Definition: GA_Types.h:211
GA_PageOff GAgetPageOff(GA_Offset v)
Definition: GA_Types.h:636
exint numBitsSet() const
GLsizeiptr size
Definition: glcorearb.h:663
#define GA_INVALID_OFFSET
Definition: GA_Types.h:654
A range of elements in an index-map.
Definition: GA_Range.h:42
GA_Size GA_Offset
Definition: GA_Types.h:617
The following 8 bytes represent an 64 bit real (float)
static SYS_FORCE_INLINE void call(FUNCTOR functor)
Definition: SYS_CallIf.h:37
The following 8 bytes represent an 64 bit integer.
long long int64
Definition: SYS_Types.h:107
GA_API const char * GAstorage(GA_Storage store)
Lookup the storage name from the storage type.
GLdouble n
Definition: glcorearb.h:2007
bool uniformBlockWrite(const int8 *value, int64 count)
Write a block of 8 bit integer values to the uniform array.
The following 2 bytes represent an 16 bit integer.
GA_PageArray< DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED > PageArray
int64 exint
Definition: SYS_Types.h:116
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:125
double fpreal64
Definition: SYS_Types.h:192
GLuint GLuint end
Definition: glcorearb.h:474
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
Traverse an array object in the parser.
GLintptr offset
Definition: glcorearb.h:664
bool skipNextObject()
Simple convenience method to skip the next object in the stream.
exint size() const
Definition: UT_BitArray.h:46
Options during loading.
Definition: GA_LoadMap.h:42
bool getErrorState() const
void void addWarning(const char *fmt,...) SYS_PRINTF_CHECK_ATTRIBUTE(2
Defragmentation of IndexMaps.
Definition: GA_Defragment.h:45
GLboolean * data
Definition: glcorearb.h:130
int int32
Definition: SYS_Types.h:35
bool jsonEndArray(bool newline=true)
The following 4 bytes represent an 32 bit integer.
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1221
void mergeGrowArrayAndCopy(const GA_MergeMap &map, GA_AttributeOwner owner, const GA_PageArray< SRC_DATA_T, SRC_TSIZE, SRC_TABLEHARDENED, SRC_PAGESHARDENED > &src, const GA_Defaults &defaults)
Include GA_PageArrayImpl.h to call this.
#define GA_PAGE_SIZE
Definition: GA_Types.h:200
GLint GLsizei count
Definition: glcorearb.h:404
bool setBit(exint index, bool value)
Definition: UT_BitArray.h:272
ga_SubPageBlock(GA_PageNum page, GA_PageOff start, GA_PageOff end)
exint entries() const
Alias of size(). size() is preferred.
Definition: UT_Array.h:453
int64 parseUniformArray(T *data, int64 len)
GLsizei const GLfloat * value
Definition: glcorearb.h:823
GA_AttributeOwner
Definition: GA_Types.h:33
#define UT_ASSERT_MSG(ZZ, MM)
Definition: UT_Assert.h:129
unsigned char uint8
Definition: SYS_Types.h:32
GA_Offset getLoadOffset(GA_AttributeOwner owner) const
Definition: GA_LoadMap.h:154
GA_Size getEntries() const
Get an accurate count of the entries in the range.
Definition: GA_Range.h:242
A smart pointer for unique ownership of dynamically allocated objects.
Definition: UT_UniquePtr.h:47
GA_Size GA_PageNum
Definition: GA_Types.h:620
#define GA_PAGE_BITS
Attributes may partition their data in pages of GA_PAGE_SIZE offsets.
Definition: GA_Types.h:199
bool loadPODArray(OP_TYPE &op)
GLuint GLfloat * val
Definition: glcorearb.h:1607
bool readUniformArray(T *buffer, int64 size)
bool jsonBeginArray()
Begin a generic array object.
exint append(void)
Definition: UT_Array.h:95
bool equal(T1 a, T2 b, T3 t)
Definition: ImathFun.h:143
LoadComponentArrayFunctor(PageArray &dest, GA_Offset startoff, exint component)
getOption("OpenEXR.storage") storage
Definition: HDK_Image.dox:276
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:856
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:126
bool jsonUniformArray(int64 length, const int8 *value)
Efficient method of writing a uniform array of int8 values.
bool endUniformArray(int64 *nwritten=0)
SYS_FORCE_INLINE bool set(int64 i, T val) const
GA_Offset getDestCapacity(GA_AttributeOwner owner) const
Convenience method to get new destination size.
Definition: GA_MergeMap.h:121
UT_ASSERT_COMPILETIME(BRAY_EVENT_MAXFLAGS<=32)
#define SYSmin(a, b)
Definition: SYS_Math.h:1368
GA_Storage
Definition: GA_Types.h:48
GA_Offset getDestStart(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:128
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:308
#define SYS_CALLIF_AUTO
Definition: SYS_CallIf.h:22
GA_PageNum GAgetPageNum(GA_Offset v)
Definition: GA_Types.h:632
bool jsonInt(int32 value)
Write an integer value.
#define UT_ASSERT_MSG_P(ZZ, MM)
Definition: UT_Assert.h:128
void swap(UT_BitArray &other)
GLuint GLsizei GLsizei * length
Definition: glcorearb.h:794
GLenum src
Definition: glcorearb.h:1792
GA_Offset getDestInitCapacity(GA_AttributeOwner owner) const
Convenience method to get old destination size.
Definition: GA_MergeMap.h:117