HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
GA_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: GA_PageArrayImpl.h (GA Library, C++)
7  *
8  * COMMENTS: An array class with special handling of constant pages and
9  * shared page data, specialized for GA_Offset.
10  */
11 
12 #pragma once
13 
14 #ifndef __GA_PageArrayImpl__
15 #define __GA_PageArrayImpl__
16 
17 #include "GA_PageArray.h"
18 
19 #include "GA_API.h"
20 #include "GA_Defaults.h"
21 #include "GA_Defragment.h"
22 #include "GA_Iterator.h"
23 #include "GA_LoadMap.h"
24 #include "GA_MergeMap.h"
25 #include "GA_Range.h"
26 #include "GA_SaveOptions.h"
27 #include "GA_Types.h"
28 
29 #include <UT/UT_Array.h>
30 #include <UT/UT_Assert.h>
31 #include <UT/UT_BitArray.h>
32 #include <UT/UT_FixedVector.h>
33 #include <UT/UT_JSONDefines.h>
34 #include <UT/UT_JSONParser.h>
35 #include <UT/UT_JSONWriter.h>
36 #include <UT/UT_StackBuffer.h>
37 #include <UT/UT_Storage.h>
38 #include <UT/UT_UniquePtr.h>
39 #include <UT/UT_VectorTypes.h>
40 #include <UT/UT_WorkBuffer.h>
41 #include <SYS/SYS_CallIf.h>
42 #include <SYS/SYS_Inline.h>
43 #include <SYS/SYS_Math.h>
44 #include <SYS/SYS_Types.h>
45 #include <SYS/SYS_TypeTraits.h>
46 
47 #include <string.h>
48 
49 
50 // Separate namespace for these, because they shouldn't be duplicated per
51 // template instantiation.
// Non-template namespace so these JSON-token helpers are not duplicated
// once per GA_PageArray template instantiation (see comment above).
52 namespace GA_PageArrayIO
53 {
54  // JSON tokens
// NOTE(review): the JDTupleToken enum declaration and its enumerator list
// (original lines 55, 57-64) were dropped by the HTML export — confirm the
// enumerators against the real GA_PageArrayImpl.h.
56  {
65  };
 // Returns the JSON token string for the given JDTupleToken ID.
66  GA_API const char *getJSONToken(JDTupleToken tokenID);
 // Inverse lookup: parses a JSON token string back to its JDTupleToken ID.
67  GA_API JDTupleToken getJSONTokenID(const char *token);
68 }
69 
// Applies a defragmentation pass to this array: replays each swap/move
// operation recorded in the GA_Defragment object, after hardening the
// page table so it can be mutated in place.
70 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
71 void
// NOTE(review): the signature line (original line 72, presumably
// GA_PageArray<...>::defragment(const GA_Defragment &defrag)) was dropped
// by the HTML export — confirm against the real header.
73 {
74  auto &hard = hardenTable();
75  for (GA_Defragment::const_iterator it=defrag.begin(); !it.atEnd(); ++it)
76  {
77  GA_Offset a = it.getA();
78  GA_Offset b = it.getB();
79  GA_Size n = it.getN();
80  switch (it.getOp())
81  {
// NOTE(review): the two case labels (original lines 82 and 85, presumably
// GA_Defragment::SWAP and GA_Defragment::MOVE) were dropped by the export.
83  hard.swapRange(a, b, GA_Offset(n));
84  break;
86  hard.moveRange(a, b, GA_Offset(n));
87  break;
88  }
89  }
90 }
91 
// Grows this array to the merge destination capacity (filling new elements
// with `defaults`) and copies `src` data into the destination range given
// by `map` for the specified attribute `owner`. The destination range must
// start on a page boundary (asserted below).
92 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
93 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
94 void
// NOTE(review): the function-name line (original line 95) and the `src`
// parameter line (original line 98) were dropped by the HTML export.
96  const GA_MergeMap &map,
97  GA_AttributeOwner owner,
99  const GA_Defaults &defaults)
100 {
 // When DATA_T is void the storage type is only known at runtime:
 // dispatch to the statically-typed instantiation matching getStorage().
101  if (SYSisSame<DATA_T,void>())
102  {
103  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&map,owner,&src,&defaults](SYS_CALLIF_AUTO){
104  // Hard case, where the storage type is not known at compile time.
105  UT_Storage storage = this->Base::getStorage();
106  switch (storage)
107  {
108  case UT_Storage::INT8:
109  this->castType<int8>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
110  case UT_Storage::UINT8:
111  this->castType<uint8>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
112  case UT_Storage::INT16:
113  this->castType<int16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
114  case UT_Storage::INT32:
115  this->castType<int32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
116  case UT_Storage::INT64:
117  this->castType<int64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
118  case UT_Storage::REAL16:
119  this->castType<fpreal16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
120  case UT_Storage::REAL32:
121  this->castType<fpreal32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
122  case UT_Storage::REAL64:
123  this->castType<fpreal64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
124  case UT_Storage::INVALID:
125  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
126  return;
127  }
128  });
129  return;
130  }
131 
 // osize is only needed for the assertions below, hence UT_IF_ASSERT.
132  UT_IF_ASSERT( GA_Offset osize = map.getDestInitCapacity(owner); )
133  GA_Offset nsize = map.getDestCapacity(owner);
134 
135  // Ideally we could assert that capacity() == ocapacity, but this method is
136  // sometimes called by implementations of GA_AIFMerge::copyArray(),
137  // after GA_AIFMerge::growArray() has already been called.
138  UT_ASSERT(osize <= size());
139  UT_ASSERT(osize <= nsize || (osize == GA_Offset(0) && nsize <= GA_Offset(0)));
140 
141  if (nsize <= GA_Offset(0))
142  return;
143 
144  GA_Offset dststart = map.getDestStart(owner);
 // getDestEnd() is inclusive; +1 makes dstend an exclusive bound.
145  GA_Offset dstend = map.getDestEnd(owner)+1;
146 
147  UT_ASSERT(dstend - dststart <= src.size());
148  UT_ASSERT(GAisValid(dststart) && dststart < nsize);
149  UT_ASSERT(GAisValid(dstend) && dstend <= nsize);
150  UT_ASSERT(dststart < dstend);
151 
152  UT_ASSERT_MSG(GAgetPageOff(dststart) == 0, "mergeGrowArrayAndCopy should only be used when dststart is at a page boundary");
153  if (nsize > size())
154  {
155  setSize(nsize, defaults);
156  }
157 
158  // As odd as it may seem, apparently mergeGrowArrayAndCopy has only ever
159  // supported copying from source offset 0 onward, regardless of
160  // map.getSourceRange(owner). For example, GA_DataArray::
161  // mergeGrowArrayAndCopy and GA_DataBitArray::mergeGrowArrayAndCopy
162  // both assume this too.
163  moveRange(src, GA_Offset(0), dststart, dstend - dststart);
164 }
165 
// Serializes this array's data for `range` to JSON via `w`. Writes a JSON
// array of [tuple-size, storage-name, data...]; binary writers save in a
// paged layout (with optional constant-page compression), ASCII writers
// save as a flat array (scalars) or array-of-structs (tuples). `map`, when
// given and the storage is integral, remaps each value through the lookup
// table, substituting `defvalue` for out-of-range values.
166 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
167 bool
// NOTE(review): the function-name line (original line 168) was dropped by
// the HTML export.
169  UT_JSONWriter &w, const GA_Range &range,
170  const GA_SaveOptions *options,
171  const UT_IntArray *map, int defvalue) const
172 {
 // Runtime-storage dispatch, as in mergeGrowArrayAndCopy above.
173  if (SYSisSame<DATA_T,void>())
174  {
175  bool success;
176  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&w,&range,options,map,defvalue,&success](SYS_CALLIF_AUTO){
177  // Hard case, where the storage type is not known at compile time.
178  UT_Storage storage = this->Base::getStorage();
179  switch (storage)
180  {
181  case UT_Storage::INT8:
182  success = this->castType<int8>().jsonSave(w, range, options, map, defvalue); return;
183  case UT_Storage::UINT8:
184  success = this->castType<uint8>().jsonSave(w, range, options, map, defvalue); return;
185  case UT_Storage::INT16:
186  success = this->castType<int16>().jsonSave(w, range, options, map, defvalue); return;
187  case UT_Storage::INT32:
188  success = this->castType<int32>().jsonSave(w, range, options, map, defvalue); return;
189  case UT_Storage::INT64:
190  success = this->castType<int64>().jsonSave(w, range, options, map, defvalue); return;
191  case UT_Storage::REAL16:
192  success = this->castType<fpreal16>().jsonSave(w, range, options, map, defvalue); return;
193  case UT_Storage::REAL32:
194  success = this->castType<fpreal32>().jsonSave(w, range, options, map, defvalue); return;
195  case UT_Storage::REAL64:
196  success = this->castType<fpreal64>().jsonSave(w, range, options, map, defvalue); return;
197  case UT_Storage::INVALID:
198  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
199  success = false;
200  return;
201  }
202  success = false;
203  });
204  return success;
205  }
206 
207  int tuplesize = getTupleSize();
208 
209  // Cast to optimize for small tuple sizes
210  if (TSIZE == -1 && tuplesize <= 3 && tuplesize >= 1)
211  {
212  bool success;
213  SYS_CallIf<TSIZE == -1>::call([this,&w,&range,options,map,defvalue,tuplesize,&success](SYS_CALLIF_AUTO){
214  if (tuplesize == 3)
215  success = this->castTupleSize<3>().jsonSave(w, range, options, map, defvalue);
216  else if (tuplesize == 1)
217  success = this->castTupleSize<1>().jsonSave(w, range, options, map, defvalue);
218  else
219  {
220  UT_ASSERT_P(tuplesize == 2);
221  success = this->castTupleSize<2>().jsonSave(w, range, options, map, defvalue);
222  }
223  });
224  return success;
225  }
226 
227  GA_Storage ga_storage = getStorage();
 // Value remapping only makes sense for integer storage; drop the map
 // otherwise so the raw values are written as-is.
228  if (map && !GAisIntStorage(ga_storage))
229  map = nullptr;
230 
231  UT_JID jid = GAStorageToJID(ga_storage);
232 
233  bool ok = true;
234 
235  ok = ok && w.jsonBeginArray();
236 
// NOTE(review): the jsonStringToken(...) key lines (original lines 237, 240,
// 249, 270, 291, 312, 318, 323, 331, 342) naming the GA_PageArrayIO tokens
// were dropped by the HTML export throughout this function.
238  ok = ok && w.jsonInt(tuplesize);
239 
241  ok = ok && w.jsonStringToken(GAstorage(ga_storage));
242 
 // Paged layout is only used for binary output unless overridden by options.
243  bool savepaged = w.getBinary();
244  if (options)
245  options->importSavePaged(savepaged);
246 
247  if (savepaged)
248  {
250  UT_ASSERT_COMPILETIME(thePageSize == GA_PAGE_SIZE);
251  ok = ok && w.jsonInt(thePageSize);
252 
253 #if 0
254  // For max compatibility with GA_DataArrayTuple, we try to match the old
255  // packing behaviour: 1; 2 -> 1,1; 3; 4 -> 3,1; 5 -> 3,1,1; 6 -> 3,1,1,1
256  // though only for fpreal32 and fpreal64 types. Every other type
257  // had each component stored separately.
258  //
259  // TODO: Check if older versions will load data that is saved
260  // with everything as array-of-structs, avoiding the
261  // need for this.
262  bool hasfirst3packed = (tuplesize >= 3) &&
263  (ga_storage == GA_STORE_REAL32 || ga_storage == GA_STORE_REAL64);
264 
265  // The GA_JDTUPLE_PACKING field is optional and only needed if we
266  // need a data layout other than the default array-of-structs.
267  int n_packing_entries = tuplesize - (hasfirst3packed ? 2 : 0);
268  if (n_packing_entries > 1)
269  {
271  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_UINT8);
272 
273  // First is 3 or 1; every other one is 1.
274  ok = ok && w.uniformWrite(uint8(hasfirst3packed ? 3 : 1));
275  for (int i = 1; i < n_packing_entries; i++)
276  {
277  ok = ok && w.uniformWrite(uint8(1));
278  }
279 
280  ok = ok && w.endUniformArray();
281  }
282 #else
283  // I think GA_DataArrayTuple::jsonLoad supports loading
284  // array-of-structs, regardless of the tuplesize, so let's try it
285  // for now, and we can always fall back later.
286 
287  // I don't think the packing entry array is needed if there's only one entry.
288 #if 0
289  int n_packing_entries = 1;
290 
292  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_INT32);
293  ok = ok && w.uniformWrite(int32(tuplesize));
294  ok = ok && w.endUniformArray();
295 #endif
296 #endif
297 
298  // constpagecheck:
299  // 0 - none
300  // 1 - use page state
301  // 2 - full data scan
302  exint const_page_check = 2;
303  if (options)
304  const_page_check = options->constPageCheck();
305 
306  UT_UniquePtr<UT_BitArray> const_page_flags(nullptr);
307  if (tuplesize > 0)
308  {
 // Full data scan: detect pages that are constant in content even if
 // not flagged constant (sub-page map variant, per template argument
 // on the dropped lines 312/318).
309  if (const_page_check >= 2)
310  {
311  ok = ok && jsonSaveConstantOutputPageFlags<
313  w, range, const_page_flags);
314  }
315  else if (const_page_check == 1)
316  {
317  ok = ok && jsonSaveConstantOutputPageFlags<
319  w, range, const_page_flags);
320  }
321  }
322 
324 
325  ok = ok && jsonSaveRawPageData(w, range,
326  const_page_flags.get(), jid, map, defvalue);
327  }
328  else if (tuplesize <= 1)
329  {
330  // No reason to save an array of tuples if it's a scalar
332  ok = ok && w.jsonBeginArray();
333 
334  if (tuplesize != 0)
335  ok = ok && jsonSaveAsArray<false>(w, range, jid, map, defvalue);
336 
337  ok = ok && w.jsonEndArray();
338  }
339  else
340  {
341  // Store as an array of structs
343  ok = ok && w.jsonBeginArray();
344 
345  ok = ok && jsonSaveAsArray<true>(w, range, jid, map, defvalue);
346 
347  ok = ok && w.jsonEndArray();
348  }
 // Close the outer array opened at the top of the function.
349  return ok && w.jsonEndArray();
350 }
351 
// Computes which *output* pages of `range` are constant-valued, writes the
// flags to `w` as a uniform bit array, and hands the flag array back to the
// caller through `output_page_flags`. If no output page is constant, nothing
// is written and `output_page_flags` stays null. MAP_ARRAY_CLASS selects the
// internal-page-map representation (page numbers vs. sub-page blocks).
352 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
353 template<typename MAP_ARRAY_CLASS>
354 bool
// NOTE(review): the function-name line (original line 355) was dropped by
// the HTML export.
356  UT_JSONWriter &w, const GA_Range &range, UT_UniquePtr<UT_BitArray> &output_page_flags) const
357 {
358  bool ok = true;
359 
360  MAP_ARRAY_CLASS output_to_internal_page_map;
361  buildOutputToInternalPageMap(range, output_to_internal_page_map);
362 
 // Number of output pages = ceil(entries / page size).
363  int64 n_output_pages = ((range.getEntries() + thePageSize-1) / thePageSize);
364  UT_BitArray constant_flags(n_output_pages);
365 
366  GA_Size n_constant_pages = marshallConstantFlagsForOutputPages(
367  output_to_internal_page_map, constant_flags);
 // Nothing constant: skip writing the optional flags section entirely.
368  if (n_constant_pages == 0)
369  return ok;
370 
// NOTE(review): a jsonStringToken(...) key line (original line 371) was
// dropped by the HTML export here.
372  ok = ok && w.jsonBeginArray();
373 
374  ok = ok && w.jsonUniformArray(constant_flags.size(), constant_flags);
 // Transfer the computed flags to the caller without copying the bits.
375  output_page_flags.reset(new UT_BitArray);
376  constant_flags.swap(*output_page_flags);
377 
378  ok = ok && w.jsonEndArray();
379 
380  return ok;
381 }
382 
// Writes one span of page data to `w` as part of a uniform array.
// `const_output` means only a single tuple should be written for the span;
// `const_input` means page_data holds a single (constant) tuple to be
// repeated. When `map` is given, each value is remapped through it (with
// `defvalue` for out-of-range values) using `buffer` as scratch space for
// one tuple.
383 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
384 bool
// NOTE(review): the function-name line (original line 385) was dropped by
// the HTML export.
386  UT_JSONWriter &w,
387  const NotVoidType *page_data,
388  exint length, exint tuplesize,
389  bool const_output, bool const_input,
390  const UT_IntArray *map, int defvalue,
391  NotVoidType *buffer)
392 {
393  // NOTE: nullptr page_data should be dealt with by caller, using buffer.
394  UT_ASSERT_P(page_data);
395 
 // Varying input, varying output: either bulk-write directly or remap
 // tuple-by-tuple through `map`.
396  if (!const_output && !const_input)
397  {
398  if (!map)
399  {
400  // Simple case
401  return w.uniformBlockWrite(page_data, length * tuplesize);
402  }
403  else
404  {
405  for (exint i = 0; i < length; ++i)
406  {
407  for (exint component = 0; component < tuplesize; ++component, ++page_data)
408  {
409  NotVoidType val = *page_data;
410  buffer[component] = (val < 0 || val >= map->size())
411  ? defvalue
412  : (*map)(val);
413  }
414  if (!w.uniformBlockWrite(buffer, tuplesize))
415  return false;
416  }
417  return true;
418  }
419  }
420 
421  // Every case left has a single input value to read
422  const NotVoidType *data = page_data;
423  if (map)
424  {
 // Remap the single tuple into `buffer` and write from there.
425  for (exint component = 0; component < tuplesize; ++component, ++page_data)
426  {
427  NotVoidType val = *page_data;
428  buffer[component] = (val < 0 || val >= map->size())
429  ? defvalue
430  : (*map)(val);
431  }
432  data = buffer;
433  }
434 
435  if (const_output)
436  {
437  return w.uniformBlockWrite(data, tuplesize);
438  }
439  else
440  {
441  // const_input and !const_output, so repeat same tuple, length times
442  for (exint i = 0; i < length; ++i)
443  {
444  if (!w.uniformBlockWrite(data, tuplesize))
445  return false;
446  }
447  return true;
448  }
449 }
450 
// Writes the raw page data of `range` as one uniform array. Output pages
// flagged constant in `const_page_flags` are collapsed to a single tuple;
// all other data is written in full, remapped through `map`/`defvalue`
// when given. The uniform array length (collapsedsize) is computed up
// front to account for the collapsed pages and a possibly-short last page.
451 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
452 bool
// NOTE(review): the function-name line (original line 453) was dropped by
// the HTML export.
454  UT_JSONWriter &w, const GA_Range &range,
455  const UT_BitArray *const_page_flags,
456  UT_JID jid_storage,
457  const UT_IntArray *map, int defvalue) const
458 {
459  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
460 
461  exint ntotal = range.getEntries();
462 
463  exint collapsedsize = ntotal;
464  if (const_page_flags && ntotal > 0)
465  {
466  exint n_const_pages = const_page_flags->numBitsSet();
467 
468  // Special handling for last page, since it's not always the same size
469  if (const_page_flags->getBitFast(const_page_flags->size()-1))
470  {
471  collapsedsize = (const_page_flags->size()-n_const_pages)*thePageSize
472  + n_const_pages;
473  }
474  else
475  {
476  // NOTE: ((ntotal-1) & thePageMask) + 1 ensures that we get
477  // thePageSize if ntotal is a multiple of thePageSize.
478  collapsedsize = (const_page_flags->size()-n_const_pages-1)*thePageSize
479  + n_const_pages
480  + ((ntotal-1) & thePageMask) + 1;
481  }
482  }
483  const exint tuplesize = getTupleSize();
484  collapsedsize *= tuplesize;
485 
486  bool ok = true;
487  ok = ok && w.beginUniformArray(collapsedsize, jid_storage);
488 
489  // Don't even try to go through the pages if tuplesize is 0.
490  // Only bugs will ensue. Might as well check this implicitly
491  // by checking collapsedsize, since it's multiplied by tuplesize.
492  if (collapsedsize == 0)
493  {
494  ok = ok && w.endUniformArray();
495  return ok;
496  }
497 
498  bool const_page_data;
499  const NotVoidType *page_data;
// NOTE(review): a declaration line (original line 500) was dropped by the
// HTML export here — presumably the scratch `buffer` (one tuple wide) used
// below for zero-filled constant pages and map remapping; confirm against
// the real header.
501  const GA_Size n_output_pages = (ntotal+thePageSize-1) / thePageSize;
502 
503  GA_Iterator it(range);
504  GA_PageNum last_page_num(-1);
505  GA_Offset block_start = GA_INVALID_OFFSET;
506  GA_PageOff block_start_pageoff;
507  GA_Offset block_end = GA_INVALID_OFFSET;
508  for (GA_Size output_page_num = 0; ok && output_page_num < n_output_pages; ++output_page_num)
509  {
510  const bool output_page_const = const_page_flags && const_page_flags->getBitFast(output_page_num);
511 
 // Walk the range in contiguous blocks until one output page is filled.
512  GA_Size output_page_offset = 0;
513  do
514  {
515  if (block_start == block_end)
516  {
517  bool more_data = it.blockAdvance(block_start, block_end);
518  if (!more_data)
519  {
 // Range exhausted: only legal on the (short) last output page.
520  UT_ASSERT_P(output_page_num == n_output_pages-1);
521  UT_ASSERT_P(GA_Size(GAgetPageOff(GA_Offset(ntotal))) == output_page_offset);
522  break;
523  }
524 
525  GA_PageNum page_num = GAgetPageNum(block_start);
526  block_start_pageoff = GAgetPageOff(block_start);
527 
528  // Fetch the page data if we don't already have it.
529  if (page_num != last_page_num)
530  {
531  const_page_data = isPageConstant(page_num);
532  page_data = getPageData(page_num);
533 
534  // Deal with nullptr here, to avoid having to deal with it in
535  // multiple codepaths.
536  if (!page_data)
537  {
 // A null constant page means "all zero": synthesize one tuple.
538  UT_ASSERT_P(const_page_data);
539  memset((NotVoidType*)buffer,0,sizeof(NotVoidType)*tuplesize);
540  page_data = (NotVoidType*)buffer;
541  }
542  last_page_num = page_num;
543  }
544  }
545 
546  const GA_Size copy_size = SYSmin(GA_Size(block_end-block_start), thePageSize-GA_Size(output_page_offset));
547 
548  if (!output_page_const)
549  {
550  const NotVoidType *copy_data = page_data;
 // Constant input pages hold a single tuple; don't offset into them.
551  if (!const_page_data)
552  copy_data += GA_Size(block_start_pageoff)*tuplesize;
553  ok = ok && jsonWriteDataSpan(
554  w, copy_data, copy_size, tuplesize,
555  false, const_page_data, map, defvalue, (NotVoidType*)buffer);
556  }
557 
558  output_page_offset += copy_size;
559  block_start += copy_size;
560  block_start_pageoff += copy_size;
561  } while (ok && output_page_offset != thePageSize);
562 
 // Constant output page: emit a single representative tuple.
563  if (output_page_const)
564  {
565  const NotVoidType *copy_data = page_data;
566  if (!const_page_data)
567  {
568  // The -1 is because we added copy_size, which is at least 1, to block_start_pageoff,
569  // and block_start_pageoff may now be at a page offset that is not the same value,
570  // or may even be at the page offset of block_end.
571  copy_data += GA_Size(block_start_pageoff-1)*tuplesize;
572  }
573  ok = ok && jsonWriteDataSpan(
574  w, copy_data, 1, tuplesize,
575  true, const_page_data, map, defvalue, (NotVoidType*)buffer);
576  }
577  }
578 
579  ok = ok && w.endUniformArray();
580  return ok;
581 }
582 
// Small POD describing a contiguous sub-range [myStartOffset, myEndOffset)
// within an internal page (myPage), used by the sub-page variants of
// buildOutputToInternalPageMap / marshallConstantFlagsForOutputPages below.
583 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
584 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::ga_SubPageBlock
585 {
586 public:
// NOTE(review): the default-constructor/constructor signature lines
// (original lines 587-588) and the member declarations myPage,
// myStartOffset, myEndOffset (original lines 591-593) were dropped by the
// HTML export — confirm against the real header.
589  : myPage(page), myStartOffset(start), myEndOffset(end) {}
590 
594 };
595 
596 // --------------------------------------------------------------------------
597 // Compute a mapping to keep track of which internal pages affect which output
598 // pages. We store this mapping as an ordered list of the input pages as they
599 // are traversed in building the output pages, with the start of each output
600 // page indicated by a negative value (-(input_page + 1)).
601 //
602 // NB: We don't keep track of the page offsets in the mapping so this is
603 // really only useful for internal pages that are flagged as constant.
// Builds the page-number form of the output-to-internal page map described
// in the comment block above: internal pages in traversal order, with each
// output-page start encoded as -(input_page + 1).
604 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
605 void
// NOTE(review): the function-name line (original line 606) and the map
// output-parameter line (original line 608, presumably
// UT_Array<GA_PageNum> &map) were dropped by the HTML export.
607  const GA_Range &range,
609 {
610  GA_Iterator it(range);
611  GA_Size output_page_offset = 0;
612  GA_Size block_size = 0;
613  GA_PageNum page_num;
614  GA_PageNum last_page_num(-1);
615 
616  while (true)
617  {
 // Wrapped past an output page boundary: start a new output page.
618  if (output_page_offset == thePageSize)
619  {
620  output_page_offset = 0;
621  }
622 
623  if (block_size == 0) // need new block
624  {
625  GA_Offset block_start, block_end;
626  if (!it.blockAdvance(block_start, block_end))
627  break;
628 
629  page_num = GAgetPageNum(block_start);
630  block_size = block_end - block_start;
631  }
632 
 // How much of this block fits in the current output page.
633  GA_Size output_size = SYSmin(block_size,
634  thePageSize-output_page_offset);
635 
636  if (output_page_offset == 0)
637  {
 // Negative entry marks the first internal page of a new output page.
638  map.append(-(page_num+1));
639  last_page_num = page_num;
640  }
641  else if (page_num != last_page_num)
642  {
643  map.append(page_num);
644  last_page_num = page_num;
645  }
646 
647  block_size -= output_size;
648  output_page_offset += output_size;
649  }
650 }
651 
652 // Compute a mapping to keep track of which internal page data blocks affect
653 // which output pages. We store this mapping as an ordered list of the sub
654 // page blocks as they are traversed in building the output pages, with the
655 // start of each output page indicated by a negative page number
656 // (-(input_page + 1)).
657 //
658 // TODO: We could keep track of block start/end, recomputing the internal
659 // page number at need?
// Sub-page-block form of the output-to-internal page map (see the comment
// block above): records page number plus [start, end) offsets for each
// traversed block, with output-page starts encoded via -(input_page + 1).
660 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
661 void
// NOTE(review): the function-name line (original line 662) and the map
// output-parameter line (original line 664, presumably
// UT_Array<ga_SubPageBlock> &map) were dropped by the HTML export.
663  const GA_Range &range,
665 {
666  GA_Iterator it(range);
667  GA_Size output_page_offset = 0;
668  GA_Size block_size = 0;
669  GA_PageNum page_num;
670  GA_PageOff page_offset;
671 
672  while (true)
673  {
 // Wrapped past an output page boundary: start a new output page.
674  if (output_page_offset == thePageSize)
675  {
676  output_page_offset = 0;
677  }
678 
679  if (block_size == 0) // need new block
680  {
681  GA_Offset block_start, block_end;
682  if (!it.blockAdvance(block_start, block_end))
683  break;
684 
685  page_num = GAgetPageNum(block_start);
686  page_offset = GAgetPageOff(block_start);
687  block_size = block_end - block_start;
688  }
689 
 // How much of this block fits in the current output page.
690  GA_Size output_size = SYSmin(block_size,
691  thePageSize-output_page_offset);
692 
693  if (output_page_offset == 0)
694  {
 // Negative page number marks the start of a new output page.
695  map.append(ga_SubPageBlock(
696  GA_PageNum(-(page_num+1)), page_offset,
697  page_offset + output_size));
698  }
699  else
700  {
701  map.append(ga_SubPageBlock(
702  page_num, page_offset,
703  page_offset + output_size));
704  }
705 
706  page_offset += output_size;
707  block_size -= output_size;
708  output_page_offset += output_size;
709  }
710 }
711 
// Sets a bit in `constant_flags` for each output page whose contributing
// internal pages are all flagged constant with equal values. Uses the
// page-number map produced by buildOutputToInternalPageMap above (negative
// entries mark output-page starts). Returns the number of constant output
// pages found.
712 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
713 GA_Size
// NOTE(review): the function-name line (original line 714) was dropped by
// the HTML export.
715  const UT_Array<GA_PageNum> &internal_page_map,
716  UT_BitArray &constant_flags) const
717 {
718  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
719 
720  GA_Size count = 0;
721  GA_Size output_page = -1;
722  bool output_page_flag = false;
723  const NotVoidType *constant_value;
724  const exint tuplesize = getTupleSize();
725 
726  constant_flags.setAllBits(false);
727  for (GA_Size i = 0; i < internal_page_map.size(); i++)
728  {
729  GA_PageNum internal_page = internal_page_map(i);
730  // A negative internal page is used to mark the start of a new
731  // output page.
732  if (internal_page < 0)
733  {
 // Commit the verdict for the output page we just finished.
734  if (output_page >= 0 && output_page_flag)
735  {
736  constant_flags.setBit(output_page, output_page_flag);
737  ++count;
738  }
739 
740  ++output_page;
741  UT_ASSERT_P(output_page <= constant_flags.size());
 // Decode -(page + 1) back to the real page number.
742  internal_page = -(internal_page + 1);
743  output_page_flag = isPageConstant(internal_page);
744  if (output_page_flag)
745  {
746  constant_value = getPageData(internal_page);
747  }
748  }
749  else if (output_page_flag)
750  {
 // Still a candidate: every further internal page must be constant
 // with a value equal to the first page's constant value.
751  if (!isPageConstant(internal_page))
752  output_page_flag = false;
753  else
754  {
755  const NotVoidType *new_constant_value = getPageData(internal_page);
 // nullptr encodes an all-zero constant page; mixed null/non-null
 // requires a real comparison we don't attempt here.
756  if ((new_constant_value==nullptr) != (constant_value==nullptr))
757  output_page_flag = false;
758  else if (constant_value != new_constant_value)
759  output_page_flag = isEqual(constant_value, new_constant_value, tuplesize);
760  }
761  }
762  }
 // Commit the verdict for the final output page.
763  if (output_page >= 0 && output_page_flag)
764  {
765  constant_flags.setBit(output_page, output_page_flag);
766  ++count;
767  }
768  return count;
769 }
770 
// Sub-page-block overload: like the page-number version above, but each map
// entry carries a [start, end) sub-range, so non-constant internal pages
// can still yield a constant output page if their sub-range data is uniform
// (checked via isSubPageConstant). Returns the number of constant output
// pages found.
771 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
772 GA_Size
// NOTE(review): the function-name line (original line 773) was dropped by
// the HTML export.
774  const UT_Array<ga_SubPageBlock> &internal_page_map,
775  UT_BitArray &constant_flags) const
776 {
777  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
778 
779  GA_Size count = 0;
780  GA_Size output_page = -1;
781  bool output_page_flag = false;
782  const NotVoidType *constant_value;
783  const exint tuplesize = getTupleSize();
784 
785  constant_flags.setAllBits(false);
786  for (GA_Size i = 0; i < internal_page_map.entries(); i++)
787  {
788  GA_PageNum internal_page = internal_page_map(i).myPage;
789  // A negative internal page is used to mark the start of a new
790  // output page.
791  if (internal_page < 0)
792  {
 // Commit the verdict for the output page we just finished.
793  if (output_page >= 0 && output_page_flag)
794  {
795  constant_flags.setBit(output_page, output_page_flag);
796  ++count;
797  }
798 
799  ++output_page;
800  UT_ASSERT_P(output_page <= constant_flags.size());
 // Decode -(page + 1) back to the real page number.
801  internal_page = -(internal_page + 1);
802  output_page_flag = isPageConstant(internal_page);
803  constant_value = getPageData(internal_page);
804  if (!output_page_flag)
805  {
 // Page not flagged constant: scan the sub-range, using the first
 // tuple as the reference value for the rest.
806  GA_PageOff start = internal_page_map(i).myStartOffset;
807  GA_PageOff end = internal_page_map(i).myEndOffset;
808  const NotVoidType *page = constant_value;
809  constant_value += start;
810  output_page_flag = isSubPageConstant(page, start+1, end,
811  tuplesize, constant_value);
812  }
813  }
814  else if (output_page_flag)
815  {
816  const bool page_constant = isPageConstant(internal_page);
817  const NotVoidType *page = getPageData(internal_page);
818  if (page_constant)
819  {
 // nullptr encodes an all-zero constant page.
820  if ((page==nullptr) != (constant_value==nullptr))
821  output_page_flag = false;
822  else if (constant_value != page)
823  output_page_flag = isEqual(constant_value, page, tuplesize);
824  }
825  else
826  {
 // Non-constant page: its sub-range must all equal constant_value.
827  if (!isSubPageConstant(page,
828  internal_page_map(i).myStartOffset,
829  internal_page_map(i).myEndOffset,
830  tuplesize,
831  constant_value))
832  output_page_flag = false;
833  }
834  }
835  }
 // Commit the verdict for the final output page.
836  if (output_page >= 0 && output_page_flag)
837  {
838  constant_flags.setBit(output_page, output_page_flag);
839  ++count;
840  }
841  return count;
842 }
843 
// Returns true if every tuple in page[start, end) equals `value`
// (or is all-zero when `value` is nullptr, matching the null-constant-page
// convention used by the callers above).
844 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
845 bool
// NOTE(review): the function-name line (original line 846) and the
// start/end parameter line (original line 848, presumably
// GA_PageOff start, GA_PageOff end) were dropped by the HTML export.
847  const NotVoidType *page,
849  const exint tuplesize,
850  const NotVoidType *value)
851 {
852  if (value == nullptr)
853  {
854  for (GA_PageOff cur = start; cur < end; cur++)
855  {
856  if (!isZero(page+cur*tuplesize, tuplesize))
857  return false;
858  }
859  }
860  else
861  {
862  for (GA_PageOff cur = start; cur < end; cur++)
863  {
864  if (!isEqual(page+cur*tuplesize, value, tuplesize))
865  return false;
866  }
867  }
868  return true;
869 }
870 
// Non-paged save path: writes every element of `range` either as one flat
// uniform array (ARRAY_OF_ARRAYS == false) or as a JSON array of per-tuple
// arrays (ARRAY_OF_ARRAYS == true). Integer values are remapped through
// `map`/`defvalue` when a map is provided.
871 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
872 template<bool ARRAY_OF_ARRAYS>
873 bool
// NOTE(review): the function-name line (original line 874) was dropped by
// the HTML export.
875  UT_JSONWriter &w, const GA_Range &range, UT_JID jid_storage,
876  const UT_IntArray *map, int defvalue) const
877 {
878  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
879 
880  int tuplesize = getTupleSize();
881 
882  if (!ARRAY_OF_ARRAYS)
883  {
884  if (!w.beginUniformArray(tuplesize*range.getEntries(), jid_storage))
885  return false;
886  }
887 
 // Scratch tuple, only needed in array-of-arrays mode.
888  UT_StackBuffer<NotVoidType> buffer(ARRAY_OF_ARRAYS ? tuplesize : 0);
889 
// NOTE(review): a declaration line (original line 890, presumably
// GA_Offset start;) was dropped by the HTML export here.
891  GA_Offset end;
892  for (GA_Iterator it(range); it.blockAdvance(start, end); )
893  {
894  if (map)
895  {
896  for (GA_Offset ai = start; ai < end; ++ai)
897  {
898  if (ARRAY_OF_ARRAYS)
899  {
900  for (int component = 0; component < tuplesize; ++component)
901  {
902  NotVoidType v = this->template get<NotVoidType>(ai, component);
 // Out-of-range values fall back to defvalue.
903  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
904  buffer[component] = v;
905  }
906  if (!w.jsonUniformArray(tuplesize, buffer))
907  return false;
908  }
909  else
910  {
911  for (int component = 0; component < tuplesize; ++component)
912  {
913  NotVoidType v = this->template get<NotVoidType>(ai, component);
914  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
915  if (!w.uniformWrite(v))
916  return false;
917  }
918  }
919  }
920  }
921  else
922  {
923  // No map
924  for (GA_Offset ai = start; ai < end; ++ai)
925  {
926  if (ARRAY_OF_ARRAYS)
927  {
928  for (int component = 0; component < tuplesize; ++component)
929  buffer[component] = this->template get<NotVoidType>(ai, component);
930 
931  if (!w.jsonUniformArray(tuplesize, buffer))
932  return false;
933  }
934  else
935  {
936  for (int component = 0; component < tuplesize; ++component)
937  {
938  NotVoidType v = this->template get<NotVoidType>(ai, component);
939 
940  if (!w.uniformWrite(v))
941  return false;
942  }
943  }
944  }
945  }
946  }
947 
 // Array-of-arrays mode never opened a uniform array, so nothing to close.
948  if (ARRAY_OF_ARRAYS)
949  return true;
950 
951  return w.endUniformArray();
952 }
953 
// Maps a GA_Storage enum to the corresponding UT_JID type tag for JSON
// serialization. Storages with no JSON numeric representation (INVALID,
// DICT) map to UT_JID_NULL; an unhandled value asserts and returns
// UT_JID_NULL.
954 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
955 UT_JID
// NOTE(review): the function-name line (original line 956, presumably
// GA_PageArray<...>::GAStorageToJID(GA_Storage storage)) was dropped by
// the HTML export.
957 {
958  switch (storage)
959  {
960  case GA_STORE_BOOL:
961  return UT_JID_BOOL;
962  case GA_STORE_INVALID:
963  return UT_JID_NULL;
964  case GA_STORE_DICT:
965  return UT_JID_NULL;
966  case GA_STORE_STRING:
967  return UT_JID_STRING;
968  case GA_STORE_INT8:
969  return UT_JID_INT8;
970  case GA_STORE_UINT8:
971  return UT_JID_UINT8;
972  case GA_STORE_INT16:
973  return UT_JID_INT16;
974  case GA_STORE_INT32:
975  return UT_JID_INT32;
976  case GA_STORE_INT64:
977  return UT_JID_INT64;
978  case GA_STORE_REAL16:
979  return UT_JID_REAL16;
980  case GA_STORE_REAL32:
981  return UT_JID_REAL32;
982  case GA_STORE_REAL64:
983  return UT_JID_REAL64;
984  }
985  UT_ASSERT_MSG_P(0, "Unhandled GA_Storage value!");
986  return UT_JID_NULL;
987 }
988 
989 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
990 bool
992  UT_JSONParser &p,
993  const GA_LoadMap &map,
994  GA_AttributeOwner owner)
995 {
996  if (SYSisSame<DATA_T,void>())
997  {
998  bool success;
999  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&p,&map,owner,&success](SYS_CALLIF_AUTO){
1000  // Hard case, where the storage type is not known at compile time.
1001  UT_Storage storage = this->Base::getStorage();
1002  switch (storage)
1003  {
1004  case UT_Storage::INT8:
1005  success = this->castType<int8>().jsonLoad(p, map, owner); return;
1006  case UT_Storage::UINT8:
1007  success = this->castType<uint8>().jsonLoad(p, map, owner); return;
1008  case UT_Storage::INT16:
1009  success = this->castType<int16>().jsonLoad(p, map, owner); return;
1010  case UT_Storage::INT32:
1011  success = this->castType<int32>().jsonLoad(p, map, owner); return;
1012  case UT_Storage::INT64:
1013  success = this->castType<int64>().jsonLoad(p, map, owner); return;
1014  case UT_Storage::REAL16:
1015  success = this->castType<fpreal16>().jsonLoad(p, map, owner); return;
1016  case UT_Storage::REAL32:
1017  success = this->castType<fpreal32>().jsonLoad(p, map, owner); return;
1018  case UT_Storage::REAL64:
1019  success = this->castType<fpreal64>().jsonLoad(p, map, owner); return;
1020  case UT_Storage::INVALID:
1021  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
1022  success = false;
1023  return;
1024  }
1025  success = false;
1026  });
1027  return success;
1028  }
1029 
1030  int64 tuple_size = getTupleSize();
1031 
1032  // Cast to optimize for small tuple sizes
1033  if (TSIZE == -1 && tuple_size <= 3 && tuple_size >= 1)
1034  {
1035  bool success;
1036  SYS_CallIf<TSIZE == -1>::call([this,&p,&map,owner,tuple_size,&success](SYS_CALLIF_AUTO){
1037  if (tuple_size == 3)
1038  success = this->castTupleSize<3>().jsonLoad(p, map, owner);
1039  else if (tuple_size == 1)
1040  success = this->castTupleSize<1>().jsonLoad(p, map, owner);
1041  else
1042  {
1043  UT_ASSERT_P(tuple_size == 2);
1044  success = this->castTupleSize<2>().jsonLoad(p, map, owner);
1045  }
1046  });
1047  return success;
1048  }
1049 
1050  UT_WorkBuffer key;
1051  int64 page_size = -1;
1052  GA_Storage ga_storage = GA_STORE_INVALID;
1053  UT_StackBuffer<int> packing(tuple_size);
1054  int n_packing_entries = 0;
1055  UT_StackBuffer<UT_UniquePtr<UT_BitArray> > constant_page_flags(tuple_size);
1056  bool constant_page_flags_init = false;
1057 
1058  bool ok = true;
1059  bool done = false;
1060  for (UT_JSONParser::traverser mi = p.beginArray(); ok && !mi.atEnd(); ++mi)
1061  {
1062  if (!mi.getLowerKey(key))
1063  {
1064  ok = false;
1065  break;
1066  }
1067  switch (GA_PageArrayIO::getJSONTokenID(key.buffer()))
1068  {
1070  {
1071  int64 local_tuple_size = -1;
1072  ok = p.parseInteger(local_tuple_size);
1073  if (ok && local_tuple_size != tuple_size)
1074  {
1075  p.addWarning("Inconsistent tuple size specification");
1076  ok = false;
1077  }
1078  break;
1079  }
1081  ok = p.parseString(key);
1082  if (ok)
1083  ga_storage = GAstorage(key.buffer());
1084  break;
1086  // Tuple size and storage type are supposed to have already been set.
1087  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1088  {
1089  p.addWarning("Bad data type/size specification");
1090  ok = p.skipNextObject();
1091  }
1092  else
1093  {
1094  // Load as a struct of arrays
1096  for (exint component = 0; ok && !ai.atEnd(); ++component, ++ai)
1097  {
1098  if (component < tuple_size)
1099  {
1100  GA_Offset startoff = map.getLoadOffset(owner);
1101  LoadComponentArrayFunctor op(*this, startoff, component);
1102  if (GAisIntStorage(ga_storage))
1104  else
1106  }
1107  else
1108  {
1109  if (component == tuple_size)
1110  p.addWarning("Too many tuple items in data array");
1111  ok = p.skipNextObject();
1112  }
1113  }
1114  done = true;
1115  }
1116  break;
1118  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1119  {
1120  p.addWarning("Bad data type/size specification");
1121  ok = p.skipNextObject();
1122  }
1123  else
1124  {
1125  // Load as an array of structs
1127  GA_Offset offset = map.getLoadOffset(owner);
1128  GA_Size ppage = GAgetPageNum(offset);
1129  for ( ; ok && !ai.atEnd(); ++offset, ++ai)
1130  {
1131  GA_PageNum newpagenum = GAgetPageNum(offset);
1132  if (newpagenum != ppage)
1133  {
1134  // We compress previous page
1135  tryCompressPage(ppage);
1136  ppage = newpagenum;
1137  }
1138 
1139  UT_StackBuffer<NotVoidType> buffer(tuple_size);
1140  exint nread = p.parseUniformArray<NotVoidType>(buffer, tuple_size);
1141  if (nread < tuple_size)
1142  {
1143  ok = false;
1144  break;
1145  }
1146  if (nread > tuple_size)
1147  p.addWarning("Extra data found in array tuple");
1148 
1149  if (TSIZE >= 1)
1150  {
1151  setVector(offset, *(const UT_FixedVector<NotVoidType,theSafeTupleSize>*)buffer.array());
1152  }
1153  else
1154  {
1155  for (int component = 0; component < tuple_size; ++component)
1156  set(offset, component, buffer[component]);
1157  }
1158  }
1159  tryCompressPage(ppage);
1160  done = true;
1161  }
1162  break;
1164  ok = p.parseInteger(page_size);
1165  break;
1167  if (tuple_size != getTupleSize())
1168  {
1169  p.addWarning("Packing requires valid size specification");
1170  ok = p.skipNextObject();
1171  }
1172  else
1173  {
1174  // NB: p.parseUniformArray() might return a greater value
1175  // than expected, but it won't write the extra values
1176  // to packing.array().
1177  n_packing_entries = p.parseUniformArray(packing.array(), tuple_size);
1178 
1179  if (constant_page_flags_init && n_packing_entries != (tuple_size > 0 ? 1 : 0))
1180  {
1181  p.addWarning("Non-trivial packing specification must come before constant page flags");
1182  n_packing_entries = 0;
1183  ok = false;
1184  }
1185  else if (n_packing_entries >= 0)
1186  {
1187  int total_packed_size = 0;
1188  for (int i = 0; i < n_packing_entries; ++i)
1189  {
1190  total_packed_size += packing[i];
1191  }
1192  if (total_packed_size != tuple_size ||
1193  n_packing_entries > tuple_size)
1194  {
1195  p.addWarning("Invalid packing specification");
1196  n_packing_entries = -1;
1197  ok = false;
1198  }
1199  }
1200  }
1201  break;
1203  if (tuple_size != getTupleSize() ||
1204  page_size <= 0 || n_packing_entries < 0)
1205  {
1206  p.addWarning("Bad data type/size specification");
1207  ok = p.skipNextObject();
1208  }
1209  else
1210  {
1211  int i = 0;
1212  UT_BitArray scratch_array;
1213 
1214  int n_arrays = n_packing_entries ? n_packing_entries
1215  : (tuple_size > 0 ? 1 : 0);
1216  int64 n_input_pages = (map.getLoadCount(owner)+page_size-1) / page_size;
1217 
1218  for (UT_JSONParser::traverser it = p.beginArray(); !it.atEnd(); ++it, ++i)
1219  {
1220  if (i < n_arrays)
1221  {
1222  int64 n_loaded = p.parseUniformBoolArray(scratch_array, n_input_pages);
1223 
1224  // We allow an empty array when no pages are constant.
1225  if (n_loaded == 0)
1226  {
1227  constant_page_flags[i].reset(nullptr);
1228  }
1229  else
1230  {
1231  constant_page_flags[i].reset(new UT_BitArray());
1232  scratch_array.swap(*constant_page_flags[i]);
1233  }
1234  }
1235  else
1236  {
1237  p.skipNextObject();
1238  UT_ASSERT(0);
1239  }
1240  }
1241  ok = (i == n_arrays);
1242  constant_page_flags_init = true;
1243  }
1244  break;
1246  // Load as an array of structs with tuples whose pages may be compressed
1247  if (done || tuple_size != getTupleSize() || ga_storage != getStorage() ||
1248  page_size <= 0 || n_packing_entries < 0)
1249  {
1250  p.addWarning("Bad data type/size specification");
1251  ok = p.skipNextObject();
1252  }
1253  else
1254  {
1255  // We default to a full vector when a GA_JDTUPLE_PACKING
1256  // field is missing.
1257  if (n_packing_entries == 0 && tuple_size > 0)
1258  {
1259  packing[0] = tuple_size;
1260  n_packing_entries = 1;
1261  }
1262  done = true;
1263  ok = jsonLoadRawPageData(p, map, owner,
1264  GA_Size(page_size),
1265  packing.array(), n_packing_entries,
1266  constant_page_flags.array());
1267  }
1268  break;
1269  default:
1270  p.addWarning("Data Array Tuple unknown key '%s'", key.buffer());
1271  break;
1272  }
1273  }
1274  if (!done)
1275  p.addWarning("Missing data for data array");
1276  return ok;
1277 }
1278 
1279 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1280 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::LoadComponentArrayFunctor
1281 {
1282 public:
1284 
1286  : myDest(dest)
1287  , myStartOffset(startoff)
1288  , myComponent(component)
1289  , myDestSize(dest.size()-startoff)
1290  {}
1291 
1292  template <typename T> SYS_FORCE_INLINE bool
1293  set(int64 i, T val) const
1294  {
1295  if (GA_Offset(i) >= myDestSize)
1296  return false;
1297  myDest.set(myStartOffset+(GA_Size)i, myComponent, val);
1298  return true;
1299  }
1300 
1301  template <typename T> SYS_FORCE_INLINE bool
1302  setArray(const T *data, int64 size) const
1303  {
1304  bool outofbounds = false;
1305  if (GA_Offset(size) > myDestSize)
1306  {
1307  size = int64(myDestSize);
1308  outofbounds = true;
1309  }
1310 
1311  // Fast path for single component
1312  if (TSIZE == 1)
1313  {
1314  myDest.setRange(myStartOffset, GA_Offset(size), data);
1315  return !outofbounds;
1316  }
1317 
1318  GA_Offset end = myStartOffset + GA_Size(size);
1319 
1320  for (GA_Offset off = myStartOffset; off < end; ++off, ++data)
1321  {
1322  myDest.set(off, myComponent, *data);
1323  }
1324 
1325  return !outofbounds;
1326  }
1327 
1332 };
1333 
1334 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1335 bool
1337  UT_JSONParser &p,
1338  const GA_LoadMap &map,
1339  GA_AttributeOwner owner,
1340  GA_Size page_size,
1341  const int *packing,
1342  int n_packing_entries,
1343  const UT_UniquePtr<UT_BitArray> *const constant_page_flags)
1344 {
1345  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1346 
1348  if (it.getErrorState())
1349  return false;
1350 
1351  UT_JID jid = p.getUniformArrayType();
1352  bool istypematch = (jid == GAStorageToJID(getStorage()));
1353 
1354  GA_Size num_input_elements = map.getLoadCount(owner);
1355  GA_Offset load_offset = map.getLoadOffset(owner);
1356  const GA_PageNum start_page_num = GAgetPageNum(load_offset);
1357  GA_PageOff page_offset = GAgetPageOff(load_offset);
1358  const exint tuple_size = getTupleSize();
1359  const exint num_page_values = tuple_size*thePageSize;
1360 
1361  UT_StackBuffer<NotVoidType> single_tuple(tuple_size);
1362 
1363  if (n_packing_entries == 1 && page_size == thePageSize)
1364  {
1365  UT_ASSERT(packing[0] == tuple_size);
1366 
1367  const UT_BitArray *constpagebits = constant_page_flags[0].get();
1368 
1369  if (page_offset == GA_PageOff(0))
1370  {
1371  // Loading at the beginning of a page, making things much simpler
1372  GA_Size num_full_new_pages = (num_input_elements >> GA_PAGE_BITS);
1373  GA_PageOff end_page_offset = GAgetPageOff(GA_Offset(num_input_elements));
1374 
1375  // First, fill in all complete, full-size pages
1376  GA_PageNum pagenum = start_page_num;
1377  for (GA_Size input_pagei = 0; input_pagei < num_full_new_pages; ++input_pagei, ++pagenum)
1378  {
1379  if (constpagebits && constpagebits->getBitFast(input_pagei))
1380  {
1381  if (istypematch)
1382  {
1383  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1384  return false;
1385  }
1386  else
1387  {
1388  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1389  return false;
1390  }
1391  setPageConstant(pagenum, single_tuple.array());
1392  }
1393  else
1394  {
1395  NotVoidType *data = hardenPageNoInit(pagenum);
1396  if (istypematch)
1397  {
1398  if (!it.readUniformArray(data, num_page_values))
1399  return false;
1400  }
1401  else
1402  {
1403  if (p.parseArrayValues(it, data, num_page_values) != num_page_values)
1404  return false;
1405  }
1406  }
1407  }
1408 
1409  // Handle any final incomplete or not-full-size page
1410  if (end_page_offset != GA_PageOff(0))
1411  {
1412  if (constpagebits && constpagebits->getBitFast(num_full_new_pages))
1413  {
1414  if (istypematch)
1415  {
1416  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1417  return false;
1418  }
1419  else
1420  {
1421  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1422  return false;
1423  }
1424  if (load_offset+num_input_elements == size())
1425  setPageConstant(pagenum, single_tuple.array());
1426  else
1427  {
1428  // I don't know if this path will ever be taken; I'm guessing not.
1429 
1430  bool equal = false;
1431  if (isPageConstant(pagenum))
1432  {
1433  const NotVoidType *current_tuple = getPageData(pagenum);
1434  if (current_tuple)
1435  {
1436  if (isEqual(single_tuple.array(),current_tuple,tuple_size))
1437  equal = true;
1438  }
1439  else
1440  {
1441  if (isZero(single_tuple.array(),tuple_size))
1442  equal = true;
1443  }
1444  }
1445  if (!equal)
1446  {
1447  NotVoidType *data = hardenPage(pagenum);
1448  for (GA_PageOff pageoff(0); pageoff < end_page_offset; ++pageoff)
1449  {
1450  for (exint component = 0; component < tuple_size; ++component, ++data)
1451  {
1452  *data = single_tuple[component];
1453  }
1454  }
1455  }
1456  }
1457  }
1458  else
1459  {
1460  // This could be optimized to avoid a bit of redundant initialization,
1461  // but hopefully it's not too much of an issue.
1462  NotVoidType *data = hardenPage(pagenum);
1463  const exint num_left_values = tuple_size*end_page_offset;
1464  if (istypematch)
1465  {
1466  if (!it.readUniformArray(data, num_left_values))
1467  return false;
1468  }
1469  else
1470  {
1471  if (p.parseArrayValues(it, data, num_left_values) != num_left_values)
1472  return false;
1473  }
1474  }
1475  }
1476  }
1477  else
1478  {
1479  // Loading with matching packing (only 1 tuple) and matching page size,
1480  // but not loading at a page boundary.
1481 
1482  // TODO: Optimize this case for that we know that pages are the same size,
1483  // e.g. to try to preserve constant pages or load directly into destination.
1484 
1485  UT_StackBuffer<NotVoidType> buffer(thePageSize*tuple_size);
1486  const exint num_input_pages = (num_input_elements + thePageSize-1) / thePageSize;
1487  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1488  {
1489  exint inputi = thePageSize*input_pagei;
1490  // NB: Base::thePageSize is needed (as opposed to thePageSize) to avoid
1491  // MSVC 19.14.26428.1 from crashing with /permissive-
1492  const exint num_page_elements = SYSmin(Base::thePageSize, num_input_elements-inputi);
1493  const exint num_page_values = tuple_size*num_page_elements;
1494 
1495  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1496  if (constant_page)
1497  {
1498  if (istypematch)
1499  {
1500  if (!it.readUniformArray(buffer.array(), tuple_size))
1501  return false;
1502  }
1503  else
1504  {
1505  if (p.parseArrayValues(it, buffer.array(), tuple_size) != tuple_size)
1506  return false;
1507  }
1508 
1509  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1510  {
1511  for (exint component = 0; component < tuple_size; ++component)
1512  {
1513  set(load_offset+inputi, component, buffer[component]);
1514  }
1515  }
1516  }
1517  else
1518  {
1519  if (istypematch)
1520  {
1521  if (!it.readUniformArray(buffer.array(), num_page_values))
1522  return false;
1523  }
1524  else
1525  {
1526  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1527  return false;
1528  }
1529 
1530  exint i = 0;
1531  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1532  {
1533  for (exint component = 0; component < tuple_size; ++component, ++i)
1534  {
1535  set(load_offset+inputi, component, buffer[i]);
1536  }
1537  }
1538  }
1539  }
1540  }
1541  }
1542  else
1543  {
1544  UT_StackBuffer<NotVoidType> buffer(page_size*tuple_size);
1545  const exint num_input_pages = (num_input_elements + page_size-1) / page_size;
1546  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1547  {
1548  exint start_component = 0;
1549  for (exint packingi = 0; packingi < n_packing_entries; ++packingi)
1550  {
1551  exint inputi = page_size*input_pagei;
1552  const exint num_page_elements = SYSmin(page_size, num_input_elements-inputi);
1553  const exint input_tuple_size = packing[packingi];
1554  const exint num_page_values = input_tuple_size*num_page_elements;
1555 
1556  const UT_BitArray *constpagebits = constant_page_flags[packingi].get();
1557 
1558  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1559  if (constant_page)
1560  {
1561  if (istypematch)
1562  {
1563  if (!it.readUniformArray(buffer.array(), input_tuple_size))
1564  return false;
1565  }
1566  else
1567  {
1568  if (p.parseArrayValues(it, buffer.array(), input_tuple_size) != input_tuple_size)
1569  return false;
1570  }
1571 
1572  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1573  {
1574  for (exint component = 0; component < input_tuple_size; ++component)
1575  {
1576  set(load_offset+inputi, start_component+component, buffer[component]);
1577  }
1578  }
1579  }
1580  else
1581  {
1582  if (istypematch)
1583  {
1584  if (!it.readUniformArray(buffer.array(), num_page_values))
1585  return false;
1586  }
1587  else
1588  {
1589  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1590  return false;
1591  }
1592 
1593  exint i = 0;
1594  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1595  {
1596  for (exint component = 0; component < input_tuple_size; ++component, ++i)
1597  {
1598  set(load_offset+inputi, start_component+component, buffer[i]);
1599  }
1600  }
1601  }
1602 
1603  start_component += input_tuple_size;
1604  }
1605  }
1606  }
1607 
1608  return it.atEnd();
1609 }
1610 
1611 #endif
bool uniformWrite(bool value)
The following byte represents an 8 bit integer.
bool beginUniformArray(int64 length, UT_JID id)
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1221
const_iterator begin() const
bool parseString(UT_WorkBuffer &v)
UT_ASSERT_COMPILETIME(BRAY_EVENT_MAXFLAGS<=32)
No data follows the NULL token.
int int32
Definition: SYS_Types.h:39
UT_Storage
Definition: UT_Storage.h:28
The following 4 bytes represent an 32 bit real (float)
SYS_FORCE_INLINE bool setArray(const T *data, int64 size) const
GA_API JDTupleToken getJSONTokenID(const char *token)
GA_Size GA_PageOff
Definition: GA_Types.h:644
bool getBitFast(exint index) const
Definition: UT_BitArray.h:317
Iteration over a range of elements.
Definition: GA_Iterator.h:28
bool jsonKeyToken(const UT_StringRef &value)
UT_JID
The UT_JID enums are used in byte-stream encoding of binary JSON.
bool getBinary() const
Return whether writing binary or ASCII JSON.
Class which stores the default values for a GA_Attribute.
Definition: GA_Defaults.h:35
bool jsonSave(UT_JSONWriter &w, const GA_Range &range, const GA_SaveOptions *options=nullptr, const UT_IntArray *map=nullptr, int defvalue=-1) const
int64 parseUniformBoolArray(UT_BitArray &data, int64 len)
void setAllBits(bool value)
bool blockAdvance(GA_Offset &start, GA_Offset &end)
GLuint start
Definition: glcorearb.h:474
bool GAisValid(GA_Size v)
Definition: GA_Types.h:648
0x23 and 0x24 are reserved for future use (32/64 bit unsigned)
int64 parseArrayValues(iterator &it, T *data, int64 len)
And then you can find out if it's done.
Definition: thread.h:628
GA_API const char * getJSONToken(JDTupleToken tokenID)
The merge map keeps track of information when merging details.
Definition: GA_MergeMap.h:53
int64 exint
Definition: SYS_Types.h:125
SYS_FORCE_INLINE const char * buffer() const
bool parseInteger(int64 &v)
bool jsonStringToken(const UT_StringRef &value)
iterator beginArray()
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:88
The following byte represents an unsigned 8 bit integer.
void defragment(const GA_Defragment &defrag)
Include GA_PageArrayImpl.h to call this.
#define GA_API
Definition: GA_API.h:14
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:35
#define UT_ASSERT_MSG_P(ZZ,...)
Definition: UT_Assert.h:173
#define UT_IF_ASSERT(ZZ)
Definition: UT_Assert.h:189
bool jsonLoad(UT_JSONParser &p, const GA_LoadMap &map, GA_AttributeOwner owner)
GLenum src
Definition: glcorearb.h:1792
GA_Offset getDestEnd(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:130
GLuint buffer
Definition: glcorearb.h:659
exint size() const
Definition: UT_Array.h:479
GA_Size getLoadCount(GA_AttributeOwner owner) const
This method returns the number of elements being loaded of each type.
exint GA_Size
Defines the bit width for index and offset types in GA.
Definition: GA_Types.h:234
GA_PageOff GAgetPageOff(GA_Offset v)
Definition: GA_Types.h:659
exint numBitsSet() const
#define GA_INVALID_OFFSET
Definition: GA_Types.h:677
A range of elements in an index-map.
Definition: GA_Range.h:42
std::unique_ptr< T, Deleter > UT_UniquePtr
A smart pointer for unique ownership of dynamically allocated objects.
Definition: UT_UniquePtr.h:33
GLsizeiptr size
Definition: glcorearb.h:663
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:856
double fpreal64
Definition: SYS_Types.h:201
#define UT_ASSERT_MSG(ZZ,...)
Definition: UT_Assert.h:174
unsigned char uint8
Definition: SYS_Types.h:36
GA_Size GA_Offset
Definition: GA_Types.h:640
The following 8 bytes represent an 64 bit real (float)
The following 8 bytes represent an 64 bit integer.
GA_API const char * GAstorage(GA_Storage store)
Lookup the storage name from the storage type.
bool uniformBlockWrite(const int8 *value, int64 count)
Write a block of 8 bit integer values to the uniform array.
The following 2 bytes represent an 16 bit integer.
static SYS_FORCE_INLINE void call(FUNCTOR functor, ARGS &&...args)
Definition: SYS_CallIf.h:39
GA_PageArray< DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED > PageArray
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:170
const GLdouble * v
Definition: glcorearb.h:836
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1221
GLuint GLuint end
Definition: glcorearb.h:474
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
Traverse an array object in the parser.
bool skipNextObject()
Simple convenience method to skip the next object in the stream.
exint size() const
Definition: UT_BitArray.h:46
long long int64
Definition: SYS_Types.h:116
GLfloat GLfloat p
Definition: glew.h:16656
Options during loading.
Definition: GA_LoadMap.h:42
bool getErrorState() const
void void addWarning(const char *fmt,...) SYS_PRINTF_CHECK_ATTRIBUTE(2
Defragmentation of IndexMaps.
Definition: GA_Defragment.h:45
GLint GLsizei count
Definition: glcorearb.h:404
bool jsonEndArray(bool newline=true)
The following 4 bytes represent an 32 bit integer.
void mergeGrowArrayAndCopy(const GA_MergeMap &map, GA_AttributeOwner owner, const GA_PageArray< SRC_DATA_T, SRC_TSIZE, SRC_TABLEHARDENED, SRC_PAGESHARDENED > &src, const GA_Defaults &defaults)
Include GA_PageArrayImpl.h to call this.
#define GA_PAGE_SIZE
Definition: GA_Types.h:223
exint append()
Definition: UT_Array.h:95
bool setBit(exint index, bool value)
Definition: UT_BitArray.h:272
ga_SubPageBlock(GA_PageNum page, GA_PageOff start, GA_PageOff end)
GLdouble n
Definition: glcorearb.h:2007
exint entries() const
Alias of size(). size() is preferred.
Definition: UT_Array.h:481
int64 parseUniformArray(T *data, int64 len)
GLboolean * data
Definition: glcorearb.h:130
GLuint GLsizei GLsizei * length
Definition: glcorearb.h:794
GLuint GLfloat * val
Definition: glcorearb.h:1607
GA_AttributeOwner
Definition: GA_Types.h:33
GA_Offset getLoadOffset(GA_AttributeOwner owner) const
Definition: GA_LoadMap.h:154
GA_Size getEntries() const
Get an accurate count of the entries in the range.
Definition: GA_Range.h:251
GA_Size GA_PageNum
Definition: GA_Types.h:643
#define GA_PAGE_BITS
Attributes may partition their data in pages of GA_PAGE_SIZE offsets.
Definition: GA_Types.h:222
bool loadPODArray(OP_TYPE &op)
bool readUniformArray(T *buffer, int64 size)
bool jsonBeginArray()
Begin a generic array object.
GLsizei const GLfloat * value
Definition: glcorearb.h:823
bool equal(T1 a, T2 b, T3 t)
Definition: ImathFun.h:143
LoadComponentArrayFunctor(PageArray &dest, GA_Offset startoff, exint component)
GLenum GLint * range
Definition: glcorearb.h:1924
getOption("OpenEXR.storage") storage
Definition: HDK_Image.dox:276
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:171
GLintptr offset
Definition: glcorearb.h:664
bool jsonUniformArray(int64 length, const int8 *value)
Efficent method of writing a uniform array of int8 values.
bool endUniformArray(int64 *nwritten=0)
SYS_FORCE_INLINE bool set(int64 i, T val) const
GA_Offset getDestCapacity(GA_AttributeOwner owner) const
Convenience method to get new destination size.
Definition: GA_MergeMap.h:121
#define SYSmin(a, b)
Definition: SYS_Math.h:1536
GA_Storage
Definition: GA_Types.h:49
GA_Offset getDestStart(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:128
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:338
#define SYS_CALLIF_AUTO
Definition: SYS_CallIf.h:24
GA_PageNum GAgetPageNum(GA_Offset v)
Definition: GA_Types.h:655
bool jsonInt(int32 value)
Write an integer value.
void swap(UT_BitArray &other)
GA_Offset getDestInitCapacity(GA_AttributeOwner owner) const
Convenience method to get old destination size.
Definition: GA_MergeMap.h:117