HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
GA_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: GA_PageArrayImpl.h (GA Library, C++)
7  *
8  * COMMENTS: An array class with special handling of constant pages and
9  * shared page data, specialized for GA_Offset.
10  */
11 
12 #pragma once
13 
14 #ifndef __GA_PageArrayImpl__
15 #define __GA_PageArrayImpl__
16 
17 #include <UT/UT_JSONParser.h>
18 #include "GA_PageArray.h"
19 
20 #include "GA_API.h"
21 #include "GA_ATINumeric.h"
22 #include "GA_ATITopology.h"
23 #include "GA_Defragment.h"
24 #include "GA_MergeMap.h"
25 #include "GA_LoadMap.h"
26 #include "GA_SaveOptions.h"
27 
28 // Separate namespace for these, because they shouldn't be duplicated per
29 // template instantiation.
// Namespace for the JSON tokens and token<->ID helpers used by the
// GA_PageArray save/load code below. Kept in a plain namespace (not the
// class template) so one copy is shared by all template instantiations,
// as the comment above in the header explains.
30 namespace GA_PageArrayIO
31 {
32  // JSON tokens
// NOTE(review): the enum declaration line (doxygen line 33) and its
// enumerator lines (35-42) are missing from this scrape; the helper
// declarations below name the type JDTupleToken -- confirm the
// enumerators against the real GA_PageArrayImpl.h.
34  {
43  };
// Returns the JSON keyword string for the given tuple-token ID.
44  GA_API const char *getJSONToken(JDTupleToken tokenID);
// Inverse of getJSONToken(): maps a keyword string back to its token ID.
45  GA_API JDTupleToken getJSONTokenID(const char *token);
46 }
47 
// Applies a defragmentation pass to this page array: replays each
// operation recorded in a GA_Defragment by swapping or moving offset
// ranges, via a hardened (writable, unshared) table.
// NOTE(review): the function-name line (doxygen line 50) is missing from
// this scrape; from the GA_Defragment parameter and the swapRange/
// moveRange body this is presumably GA_PageArray::defragment(const
// GA_Defragment &defrag) -- confirm against the real header.
48 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
49 void
51 {
// Harden first so the range operations below can mutate pages in place.
52  auto &hard = hardenTable();
53  for (GA_Defragment::const_iterator it=defrag.begin(); !it.atEnd(); ++it)
54  {
55  GA_Offset a = it.getA();
56  GA_Offset b = it.getB();
57  GA_Size n = it.getN();
58  switch (it.getOp())
59  {
// NOTE(review): the two case labels (doxygen lines 60 and 63) are missing
// from this scrape; presumably the GA_Defragment swap and move opcodes.
61  hard.swapRange(a, b, GA_Offset(n));
62  break;
64  hard.moveRange(a, b, GA_Offset(n));
65  break;
66  }
67  }
68 }
69 
// Grows this array to the merge destination capacity and copies the
// source array's data into the destination range described by the
// GA_MergeMap. When DATA_T is void (storage unknown at compile time),
// dispatches to the concrete-storage instantiation first.
// NOTE(review): the function-name line (doxygen line 73) and the src
// parameter line (doxygen line 76) are missing from this scrape; the
// recursive calls below show the name is mergeGrowArrayAndCopy and that
// the missing parameter is the source page array -- confirm its exact
// type against the real header.
70 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
71 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
72 void
74  const GA_MergeMap &map,
75  GA_AttributeOwner owner,
77  const GA_Defaults &defaults)
78 {
// Runtime-storage dispatch: re-invoke on the correctly-typed view.
79  if (SYSisSame<DATA_T,void>())
80  {
81  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&map,owner,&src,&defaults](){
82  // Hard case, where the storage type is not known at compile time.
83  UT_Storage storage = Base::getStorage();
84  switch (storage)
85  {
86  case UT_Storage::INT8:
87  this->castType<int8>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
88  case UT_Storage::INT16:
89  this->castType<int16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
90  case UT_Storage::INT32:
91  this->castType<int32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
92  case UT_Storage::INT64:
93  this->castType<int64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
94  case UT_Storage::REAL16:
95  this->castType<fpreal16>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
96  case UT_Storage::REAL32:
97  this->castType<fpreal32>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
98  case UT_Storage::REAL64:
99  this->castType<fpreal64>().mergeGrowArrayAndCopy(map, owner, src, defaults); return;
100  case UT_Storage::INVALID:
101  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
102  return;
103  }
104  });
105  return;
106  }
107 
// osize is only needed for the sanity asserts below, hence UT_IF_ASSERT.
108  UT_IF_ASSERT( GA_Offset osize = map.getDestInitCapacity(owner); )
109  GA_Offset nsize = map.getDestCapacity(owner);
110 
111  // Ideally we could assert that capacity() == ocapacity, but this method is
112  // sometimes called by implementations of GA_AIFMerge::copyArray(),
113  // after GA_AIFMerge::growArray() has already been called.
114  UT_ASSERT(osize <= size());
115  UT_ASSERT(osize <= nsize || (osize == GA_Offset(0) && nsize <= GA_Offset(0)));
116 
// Nothing to merge into.
117  if (nsize <= GA_Offset(0))
118  return;
119 
// Destination range is inclusive of getDestEnd(), hence the +1.
120  GA_Offset dststart = map.getDestStart(owner);
121  GA_Offset dstend = map.getDestEnd(owner)+1;
122 
123  UT_ASSERT(dstend - dststart <= src.size());
124  UT_ASSERT(GAisValid(dststart) && dststart < nsize);
125  UT_ASSERT(GAisValid(dstend) && dstend <= nsize);
126  UT_ASSERT(dststart < dstend);
127 
128  UT_ASSERT_MSG(GAgetPageOff(dststart) == 0, "mergeGrowArrayAndCopy should only be used when dststart is at a page boundary");
// Grow (filling new elements with defaults) before copying source data.
129  if (nsize > size())
130  {
131  setSize(nsize, defaults);
132  }
133 
134  // As odd as it may seem, apparently mergeGrowArrayAndCopy has only ever
135  // supported copying from source offset 0 onward, regardless of
136  // map.getSourceRange(owner). For example, GA_DataArray::
137  // mergeGrowArrayAndCopy and GA_DataBitArray::mergeGrowArrayAndCopy
138  // both assume this too.
139  moveRange(src, GA_Offset(0), dststart, dstend - dststart);
140 }
141 
// Saves the elements of this page array covered by 'range' to JSON.
// Writes a [tuplesize, storage, ...data...] structure; in binary mode it
// saves page-by-page (exploiting constant-page compression), otherwise as
// a flat array (scalars) or array-of-structs (tuples). 'map'/'defvalue'
// optionally remap integer values (e.g. string indices) on the way out.
// Returns false on any writer failure.
// NOTE(review): the function-name line (doxygen line 144) is missing from
// this scrape; the recursive dispatch below shows this is jsonSave.
// Several w.jsonKeyToken(...) lines (doxygen 211, 214, 223, 244, 265,
// 297, 305, 316) and the template arguments at 286/292 also appear to be
// missing -- confirm against the real header.
142 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
143 bool
145  UT_JSONWriter &w, const GA_Range &range,
146  const GA_SaveOptions *options,
147  const UT_IntArray *map, int defvalue) const
148 {
// Runtime-storage dispatch, as in mergeGrowArrayAndCopy above.
149  if (SYSisSame<DATA_T,void>())
150  {
151  bool success;
152  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&w,&range,options,map,defvalue,&success](){
153  // Hard case, where the storage type is not known at compile time.
154  UT_Storage storage = Base::getStorage();
155  switch (storage)
156  {
157  case UT_Storage::INT8:
158  success = this->castType<int8>().jsonSave(w, range, options, map, defvalue); return;
159  case UT_Storage::INT16:
160  success = this->castType<int16>().jsonSave(w, range, options, map, defvalue); return;
161  case UT_Storage::INT32:
162  success = this->castType<int32>().jsonSave(w, range, options, map, defvalue); return;
163  case UT_Storage::INT64:
164  success = this->castType<int64>().jsonSave(w, range, options, map, defvalue); return;
165  case UT_Storage::REAL16:
166  success = this->castType<fpreal16>().jsonSave(w, range, options, map, defvalue); return;
167  case UT_Storage::REAL32:
168  success = this->castType<fpreal32>().jsonSave(w, range, options, map, defvalue); return;
169  case UT_Storage::REAL64:
170  success = this->castType<fpreal64>().jsonSave(w, range, options, map, defvalue); return;
171  case UT_Storage::INVALID:
172  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
173  success = false;
174  return;
175  }
176  success = false;
177  });
178  return success;
179  }
180 
181  int tuplesize = getTupleSize();
182 
183  // Cast to optimize for small tuple sizes
184  if (TSIZE == -1 && tuplesize <= 3 && tuplesize >= 1)
185  {
186  bool success;
187  SYS_CallIf<TSIZE == -1>::call([this,&w,&range,options,map,defvalue,tuplesize,&success](){
188  if (tuplesize == 3)
189  success = this->castTupleSize<3>().jsonSave(w, range, options, map, defvalue);
190  else if (tuplesize == 1)
191  success = this->castTupleSize<1>().jsonSave(w, range, options, map, defvalue);
192  else
193  {
194  UT_ASSERT_P(tuplesize == 2);
195  success = this->castTupleSize<2>().jsonSave(w, range, options, map, defvalue);
196  }
197  });
198  return success;
199  }
200 
// The remap table only applies to integer storage; drop it otherwise.
201  GA_Storage ga_storage = getStorage();
202  if (map && !GAisIntStorage(ga_storage))
203  map = NULL;
204 
205  UT_JID jid = GAStorageToJID(ga_storage);
206 
207  bool ok = true;
208 
209  ok = ok && w.jsonBeginArray();
210 
212  ok = ok && w.jsonInt(tuplesize);
213 
215  ok = ok && w.jsonStringToken(GAstorage(ga_storage));
216 
// Paged (binary) output unless the save options override it.
217  bool savepaged = w.getBinary();
218  if (options)
219  options->importSavePaged(savepaged);
220 
221  if (savepaged)
222  {
224  UT_ASSERT_COMPILETIME(thePageSize == GA_PAGE_SIZE);
225  ok = ok && w.jsonInt(thePageSize);
226 
227 #if 0
228  // For max compatibility with GA_DataArrayTuple, we try to match the old
229  // packing behaviour: 1; 2 -> 1,1; 3; 4 -> 3,1; 5 -> 3,1,1; 6 -> 3,1,1,1
230  // though only for fpreal32 and fpreal64 types. Every other type
231  // had each component stored separately.
232  //
233  // TODO: Check if older versions will load data that is saved
234  // with everything as array-of-structs, avoiding the
235  // need for this.
236  bool hasfirst3packed = (tuplesize >= 3) &&
237  (ga_storage == GA_STORE_REAL32 || ga_storage == GA_STORE_REAL64);
238 
239  // The GA_JDTUPLE_PACKING field is optional and only needed if we
240  // need a data layout other than the default array-of-structs.
241  int n_packing_entries = tuplesize - (hasfirst3packed ? 2 : 0);
242  if (n_packing_entries > 1)
243  {
245  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_UINT8);
246 
247  // First is 3 or 1; every other one is 1.
248  ok = ok && w.uniformWrite(uint8(hasfirst3packed ? 3 : 1));
249  for (int i = 1; i < n_packing_entries; i++)
250  {
251  ok = ok && w.uniformWrite(uint8(1));
252  }
253 
254  ok = ok && w.endUniformArray();
255  }
256 #else
257  // I think GA_DataArrayTuple::jsonLoad supports loading
258  // array-of-structs, regardless of the tuplesize, so let's try it
259  // for now, and we can always fall back later.
260 
261  // I don't think the packing entry array is needed if there's only one entry.
262 #if 0
263  int n_packing_entries = 1;
264 
266  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_INT32);
267  ok = ok && w.uniformWrite(int32(tuplesize));
268  ok = ok && w.endUniformArray();
269 #endif
270 #endif
271 
272  // constpagecheck:
273  // 0 - none
274  // 1 - use page state
275  // 2 - full data scan
276  exint const_page_check = 2;
277  if (options)
278  const_page_check = options->constPageCheck();
279 
// Compute which OUTPUT pages are constant, so jsonSaveRawPageData can
// collapse them to a single tuple each. The two branches differ only in
// how thorough the constancy detection is (full scan vs page flags).
280  UT_UniquePtr<UT_BitArray> const_page_flags(nullptr);
281  if (tuplesize > 0)
282  {
283  if (const_page_check >= 2)
284  {
285  ok = ok && jsonSaveConstantOutputPageFlags<
287  w, range, const_page_flags);
288  }
289  else if (const_page_check == 1)
290  {
291  ok = ok && jsonSaveConstantOutputPageFlags<
293  w, range, const_page_flags);
294  }
295  }
296 
298 
299  ok = ok && jsonSaveRawPageData(w, range,
300  const_page_flags.get(), jid, map, defvalue);
301  }
302  else if (tuplesize <= 1)
303  {
304  // No reason to save an array of tuples if it's a scalar
306  ok = ok && w.jsonBeginArray();
307 
308  if (tuplesize != 0)
309  ok = ok && jsonSaveAsArray<false>(w, range, jid, map, defvalue);
310 
311  ok = ok && w.jsonEndArray();
312  }
313  else
314  {
315  // Store as an array of structs
317  ok = ok && w.jsonBeginArray();
318 
319  ok = ok && jsonSaveAsArray<true>(w, range, jid, map, defvalue);
320 
321  ok = ok && w.jsonEndArray();
322  }
// Close the outer [tuplesize, storage, data] array.
323  return ok && w.jsonEndArray();
324 }
325 
// Detects which output pages of 'range' are constant, writes the flag
// bit-array to the JSON stream, and hands the flags back to the caller
// through 'output_page_flags' (left null when no page is constant).
// MAP_ARRAY_CLASS selects the internal-page mapping flavor, which in turn
// selects which overloads of buildOutputToInternalPageMap /
// marshallConstantFlagsForOutputPages run (page-level vs sub-page scan).
// NOTE(review): the function-name line (doxygen line 329) is missing from
// this scrape; the jsonSave callers above name it
// jsonSaveConstantOutputPageFlags.
326 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
327 template<typename MAP_ARRAY_CLASS>
328 bool
330  UT_JSONWriter &w, const GA_Range &range, UT_UniquePtr<UT_BitArray> &output_page_flags) const
331 {
332  bool ok = true;
333 
334  MAP_ARRAY_CLASS output_to_internal_page_map;
335  buildOutputToInternalPageMap(range, output_to_internal_page_map);
336 
// One output page per thePageSize elements of the range (rounded up).
337  int64 n_output_pages = ((range.getEntries() + thePageSize-1) / thePageSize);
338  UT_BitArray constant_flags(n_output_pages);
339 
340  GA_Size n_constant_pages = marshallConstantFlagsForOutputPages(
341  output_to_internal_page_map, constant_flags);
// No constant pages: write nothing and leave output_page_flags null.
342  if (n_constant_pages == 0)
343  return ok;
344 
// NOTE(review): a key-token line (doxygen line 345) appears to be missing
// from this scrape before the array begins.
346  ok = ok && w.jsonBeginArray();
347 
348  ok = ok && w.jsonUniformArray(constant_flags.size(), constant_flags);
// Transfer the computed flags to the caller (swap avoids a copy).
349  output_page_flags.reset(new UT_BitArray);
350  constant_flags.swap(*output_page_flags);
351 
352  ok = ok && w.jsonEndArray();
353 
354  return ok;
355 }
356 
// Writes one span of page data into an already-open uniform JSON array.
// Handles the four combinations of constant input page / constant output
// page, plus an optional integer remap table ('map' with fallback
// 'defvalue'). 'buffer' is caller-provided scratch of at least tuplesize
// elements, used when remapping. Returns false on writer failure.
// NOTE(review): the function-name line (doxygen line 359) is missing from
// this scrape; the jsonSaveRawPageData callers below name it
// jsonWriteDataSpan.
357 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
358 bool
360  UT_JSONWriter &w,
361  const NotVoidType *page_data,
362  exint length, exint tuplesize,
363  bool const_output, bool const_input,
364  const UT_IntArray *map, int defvalue,
365  NotVoidType *buffer)
366 {
367  // NOTE: NULL page_data should be dealt with by caller, using buffer.
368  UT_ASSERT_P(page_data);
369 
// Case 1: plain varying data -> write all length*tuplesize values.
370  if (!const_output && !const_input)
371  {
372  if (!map)
373  {
374  // Simple case
375  return w.uniformBlockWrite(page_data, length * tuplesize);
376  }
377  else
378  {
// Remap each component; out-of-range values fall back to defvalue.
379  for (exint i = 0; i < length; ++i)
380  {
381  for (exint component = 0; component < tuplesize; ++component, ++page_data)
382  {
383  NotVoidType val = *page_data;
384  buffer[component] = (val < 0 || val >= map->size())
385  ? defvalue
386  : (*map)(val);
387  }
388  if (!w.uniformBlockWrite(buffer, tuplesize))
389  return false;
390  }
391  return true;
392  }
393  }
394 
395  // Every case left has a single input value to read
396  const NotVoidType *data = page_data;
397  if (map)
398  {
399  for (exint component = 0; component < tuplesize; ++component, ++page_data)
400  {
401  NotVoidType val = *page_data;
402  buffer[component] = (val < 0 || val >= map->size())
403  ? defvalue
404  : (*map)(val);
405  }
406  data = buffer;
407  }
408 
// Case 2: constant output -> emit the tuple exactly once.
409  if (const_output)
410  {
411  return w.uniformBlockWrite(data, tuplesize);
412  }
413  else
414  {
415  // const_input and !const_output, so repeat same tuple, length times
416  for (exint i = 0; i < length; ++i)
417  {
418  if (!w.uniformBlockWrite(data, tuplesize))
419  return false;
420  }
421  return true;
422  }
423 }
424 
// Writes the actual page payload for a paged binary save: one uniform
// JSON array containing, for each output page, either the full page data
// or (when const_page_flags marks it constant) a single collapsed tuple.
// Walks the range in contiguous blocks, tracking how blocks straddle
// output-page boundaries. Must only be called on a concrete-storage
// instantiation (DATA_T != void).
// NOTE(review): the function-name line (doxygen line 427) is missing from
// this scrape (jsonSave above calls this as jsonSaveRawPageData), and the
// declaration of the scratch 'buffer' (doxygen line 474, presumably a
// UT_StackBuffer of tuplesize elements, used at line 513 below) is also
// missing -- confirm against the real header.
425 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
426 bool
428  UT_JSONWriter &w, const GA_Range &range,
429  const UT_BitArray *const_page_flags,
430  UT_JID jid_storage,
431  const UT_IntArray *map, int defvalue) const
432 {
433  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
434 
435  exint ntotal = range.getEntries();
436 
// collapsedsize = element count after constant pages shrink to 1 tuple.
437  exint collapsedsize = ntotal;
438  if (const_page_flags && ntotal > 0)
439  {
440  exint n_const_pages = const_page_flags->numBitsSet();
441 
442  // Special handling for last page, since it's not always the same size
443  if (const_page_flags->getBitFast(const_page_flags->size()-1))
444  {
445  collapsedsize = (const_page_flags->size()-n_const_pages)*thePageSize
446  + n_const_pages;
447  }
448  else
449  {
450  // NOTE: ((ntotal-1) & thePageMask) + 1 ensures that we get
451  // thePageSize if ntotal is a multiple of thePageSize.
452  collapsedsize = (const_page_flags->size()-n_const_pages-1)*thePageSize
453  + n_const_pages
454  + ((ntotal-1) & thePageMask) + 1;
455  }
456  }
457  const exint tuplesize = getTupleSize();
458  collapsedsize *= tuplesize;
459 
460  bool ok = true;
461  ok = ok && w.beginUniformArray(collapsedsize, jid_storage);
462 
463  // Don't even try to go through the pages if tuplesize is 0.
464  // Only bugs will ensue. Might as well check this implicitly
465  // by checking collapsedsize, since it's multiplied by tuplesize.
466  if (collapsedsize == 0)
467  {
468  ok = ok && w.endUniformArray();
469  return ok;
470  }
471 
472  bool const_page_data;
473  const NotVoidType *page_data;
475  const GA_Size n_output_pages = (ntotal+thePageSize-1) / thePageSize;
476 
// block_start==block_end signals "need the next block from the iterator".
477  GA_Iterator it(range);
478  GA_PageNum last_page_num(-1);
479  GA_Offset block_start = GA_INVALID_OFFSET;
480  GA_PageOff block_start_pageoff;
481  GA_Offset block_end = GA_INVALID_OFFSET;
482  for (GA_Size output_page_num = 0; ok && output_page_num < n_output_pages; ++output_page_num)
483  {
484  const bool output_page_const = const_page_flags && const_page_flags->getBitFast(output_page_num);
485 
// Inner loop: consume range blocks until this output page is filled.
486  GA_Size output_page_offset = 0;
487  do
488  {
489  if (block_start == block_end)
490  {
491  bool more_data = it.blockAdvance(block_start, block_end);
492  if (!more_data)
493  {
// Range exhausted: only legal on the (partial) final output page.
494  UT_ASSERT_P(output_page_num == n_output_pages-1);
495  UT_ASSERT_P(GA_Size(GAgetPageOff(GA_Offset(ntotal))) == output_page_offset);
496  break;
497  }
498 
499  GA_PageNum page_num = GAgetPageNum(block_start);
500  block_start_pageoff = GAgetPageOff(block_start);
501 
502  // Fetch the page data if we don't already have it.
503  if (page_num != last_page_num)
504  {
505  const_page_data = isPageConstant(page_num);
506  page_data = getPageData(page_num);
507 
508  // Deal with NULL here, to avoid having to deal with it in
509  // multiple codepaths.
510  if (!page_data)
511  {
// NULL page data means constant-zero page: substitute a zeroed tuple.
512  UT_ASSERT_P(const_page_data);
513  memset((NotVoidType*)buffer,0,sizeof(NotVoidType)*tuplesize);
514  page_data = (NotVoidType*)buffer;
515  }
516  last_page_num = page_num;
517  }
518  }
519 
// Copy up to the end of this block or this output page, whichever first.
520  const GA_Size copy_size = SYSmin(GA_Size(block_end-block_start), thePageSize-GA_Size(output_page_offset));
521 
522  if (!output_page_const)
523  {
524  const NotVoidType *copy_data = page_data;
525  if (!const_page_data)
526  copy_data += GA_Size(block_start_pageoff)*tuplesize;
527  ok = ok && jsonWriteDataSpan(
528  w, copy_data, copy_size, tuplesize,
529  false, const_page_data, map, defvalue, (NotVoidType*)buffer);
530  }
531 
532  output_page_offset += copy_size;
533  block_start += copy_size;
534  block_start_pageoff += copy_size;
535  } while (ok && output_page_offset != thePageSize);
536 
// Constant output page: emit just one representative tuple.
537  if (output_page_const)
538  {
539  const NotVoidType *copy_data = page_data;
540  if (!const_page_data)
541  {
542  // The -1 is because we added copy_size, which is at least 1, to block_start_pageoff,
543  // and block_start_pageoff may now be at a page offset that is not the same value,
544  // or may even be at the page offset of block_end.
545  copy_data += GA_Size(block_start_pageoff-1)*tuplesize;
546  }
547  ok = ok && jsonWriteDataSpan(
548  w, copy_data, 1, tuplesize,
549  true, const_page_data, map, defvalue, (NotVoidType*)buffer);
550  }
551  }
552 
553  ok = ok && w.endUniformArray();
554  return ok;
555 }
556 
// Small POD helper describing a contiguous sub-span of one internal page:
// the page number plus the [start, end) offsets within it. Used by the
// sub-page flavors of buildOutputToInternalPageMap and
// marshallConstantFlagsForOutputPages below, which read the members
// myPage, myStartOffset and myEndOffset.
// NOTE(review): the constructor lines (doxygen 561-562) and the member
// declaration lines (doxygen 565-567) are missing from this scrape;
// the initializer list at line 563 shows members myPage, myStartOffset
// and myEndOffset -- confirm their types against the real header.
557 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
558 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::ga_SubPageBlock
559 {
560 public:
563  : myPage(page), myStartOffset(start), myEndOffset(end) {}
564 
568 };
569 
570 // --------------------------------------------------------------------------
571 // Compute a mapping to keep track of which internal pages affect which output
572 // pages. We store this mapping as an ordered list of the input pages as they
573 // are traversed in building the output pages, with the start of each output
574 // page indicated by a negative value (-(input_page + 1)).
575 //
576 // NB: We don't keep track of the page offsets in the mapping so this is
577 // really only useful for internal pages that are flagged as constant.
// Page-number flavor: records, for each output page of 'range', the
// ordered list of internal pages contributing to it. A new output page is
// marked by appending the encoded value -(input_page + 1); subsequent
// distinct pages within the same output page are appended as-is.
// (See the block comment above for why page offsets are not tracked.)
// NOTE(review): the function-name line (doxygen line 580) and the map
// parameter line (doxygen line 582, presumably UT_Array<GA_PageNum> &map)
// are missing from this scrape -- confirm against the real header.
578 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
579 void
581  const GA_Range &range,
583 {
584  GA_Iterator it(range);
585  GA_Size output_page_offset = 0;
586  GA_Size block_size = 0;
587  GA_PageNum page_num;
588  GA_PageOff page_offset;
589  GA_PageNum last_page_num(-1);
590 
591  while (true)
592  {
// Reaching a full output page wraps the offset for the next one.
593  if (output_page_offset == thePageSize)
594  {
595  output_page_offset = 0;
596  }
597 
598  if (block_size == 0) // need new block
599  {
600  GA_Offset block_start, block_end;
601  if (!it.blockAdvance(block_start, block_end))
602  break;
603 
604  page_num = GAgetPageNum(block_start);
605  page_offset = GAgetPageOff(block_start);
606  block_size = block_end - block_start;
607  }
608 
// Consume up to the end of the block or of the output page.
609  GA_Size output_size = SYSmin(block_size,
610  thePageSize-output_page_offset);
611 
612  if (output_page_offset == 0)
613  {
// Start of a new output page: encode the page as a negative marker.
614  map.append(-(page_num+1));
615  last_page_num = page_num;
616  }
617  else if (page_num != last_page_num)
618  {
619  map.append(page_num);
620  last_page_num = page_num;
621  }
622 
623  page_offset += output_size;
624  block_size -= output_size;
625  output_page_offset += output_size;
626  }
627 }
628 
629 // Compute a mapping to keep track of which internal page data blocks affect
630 // which output pages. We store this mapping as an ordered list of the sub
631 // page blocks as they are traversed in building the output pages, with the
632 // start of each output page indicated by a negative page number
633 // (-(input_page + 1)).
634 //
635 // TODO: We could keep track of block start/end, recomputing the internal
636 // page number at need?
// Sub-page-block flavor: like the page-number flavor above, but records
// the exact [start, end) offsets of each contributing span as a
// ga_SubPageBlock, enabling exact constancy scans of partial pages. The
// start of each output page is marked by negating the page number
// (-(input_page + 1)) in the appended entry.
// NOTE(review): the function-name line (doxygen line 639) and the map
// parameter line (doxygen line 641, presumably UT_Array<ga_SubPageBlock>
// &map) are missing from this scrape -- confirm against the real header.
637 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
638 void
640  const GA_Range &range,
642 {
643  GA_Iterator it(range);
644  GA_Size output_page_offset = 0;
645  GA_Size block_size = 0;
646  GA_PageNum page_num;
647  GA_PageOff page_offset;
648 
649  while (true)
650  {
// Reaching a full output page wraps the offset for the next one.
651  if (output_page_offset == thePageSize)
652  {
653  output_page_offset = 0;
654  }
655 
656  if (block_size == 0) // need new block
657  {
658  GA_Offset block_start, block_end;
659  if (!it.blockAdvance(block_start, block_end))
660  break;
661 
662  page_num = GAgetPageNum(block_start);
663  page_offset = GAgetPageOff(block_start);
664  block_size = block_end - block_start;
665  }
666 
// Consume up to the end of the block or of the output page.
667  GA_Size output_size = SYSmin(block_size,
668  thePageSize-output_page_offset);
669 
670  if (output_page_offset == 0)
671  {
// Start of a new output page: negative page number marks the boundary.
672  map.append(ga_SubPageBlock(
673  GA_PageNum(-(page_num+1)), page_offset,
674  page_offset + output_size));
675  }
676  else
677  {
678  map.append(ga_SubPageBlock(
679  page_num, page_offset,
680  page_offset + output_size));
681  }
682 
683  page_offset += output_size;
684  block_size -= output_size;
685  output_page_offset += output_size;
686  }
687 }
688 
// Page-number flavor: given the mapping produced by the page-number
// buildOutputToInternalPageMap above, sets a bit in 'constant_flags' for
// each output page whose contributing internal pages are all constant and
// hold equal values. Returns the number of bits set. Coarser than the
// sub-page flavor below: any non-constant internal page disqualifies the
// whole output page, even if only part of it is used.
// NOTE(review): the function-name line (doxygen line 691) is missing from
// this scrape; callers name this marshallConstantFlagsForOutputPages.
689 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
690 GA_Size
692  const UT_Array<GA_PageNum> &internal_page_map,
693  UT_BitArray &constant_flags) const
694 {
695  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
696 
// output_page_flag tracks whether the output page seen so far is still
// constant; constant_value is the representative tuple to compare with.
697  GA_Size count = 0;
698  GA_Size output_page = -1;
699  bool output_page_flag = false;
700  const NotVoidType *constant_value;
701  const exint tuplesize = getTupleSize();
702 
703  constant_flags.setAllBits(false);
704  for (GA_Size i = 0; i < internal_page_map.size(); i++)
705  {
706  GA_PageNum internal_page = internal_page_map(i);
707  // A negative internal page is used to mark the start of a new
708  // output page.
709  if (internal_page < 0)
710  {
// Flush the verdict for the previous output page before starting anew.
711  if (output_page >= 0 && output_page_flag)
712  {
713  constant_flags.setBit(output_page, output_page_flag);
714  ++count;
715  }
716 
717  ++output_page;
718  UT_ASSERT_P(output_page <= constant_flags.size());
719  internal_page = -(internal_page + 1);
720  output_page_flag = isPageConstant(internal_page);
721  if (output_page_flag)
722  {
// NULL here means a constant page of zeros (handled by the nullptr
// comparison below).
723  constant_value = getPageData(internal_page);
724  }
725  }
726  else if (output_page_flag)
727  {
728  if (!isPageConstant(internal_page))
729  output_page_flag = false;
730  else
731  {
732  const NotVoidType *new_constant_value = getPageData(internal_page);
733  if ((new_constant_value==nullptr) != (constant_value==nullptr))
734  output_page_flag = false;
735  else if (constant_value != new_constant_value)
736  output_page_flag = isEqual(constant_value, new_constant_value, tuplesize);
737  }
738  }
739  }
// Flush the final output page's verdict.
740  if (output_page >= 0 && output_page_flag)
741  {
742  constant_flags.setBit(output_page, output_page_flag);
743  ++count;
744  }
745  return count;
746 }
747 
// Sub-page-block flavor: like the page-number flavor above, but uses the
// exact [start, end) spans recorded in each ga_SubPageBlock, so a
// non-constant internal page can still yield a constant output page when
// the used span happens to hold one repeated value (via isSubPageConstant
// below). Returns the number of constant output pages found.
// NOTE(review): the function-name line (doxygen line 750) is missing from
// this scrape; callers name this marshallConstantFlagsForOutputPages.
748 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
749 GA_Size
751  const UT_Array<ga_SubPageBlock> &internal_page_map,
752  UT_BitArray &constant_flags) const
753 {
754  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
755 
756  GA_Size count = 0;
757  GA_Size output_page = -1;
758  bool output_page_flag = false;
759  const NotVoidType *constant_value;
760  const exint tuplesize = getTupleSize();
761 
762  constant_flags.setAllBits(false);
763  for (GA_Size i = 0; i < internal_page_map.entries(); i++)
764  {
765  GA_PageNum internal_page = internal_page_map(i).myPage;
766  // A negative internal page is used to mark the start of a new
767  // output page.
768  if (internal_page < 0)
769  {
// Flush the verdict for the previous output page before starting anew.
770  if (output_page >= 0 && output_page_flag)
771  {
772  constant_flags.setBit(output_page, output_page_flag);
773  ++count;
774  }
775 
776  ++output_page;
777  UT_ASSERT_P(output_page <= constant_flags.size());
778  internal_page = -(internal_page + 1);
779  output_page_flag = isPageConstant(internal_page);
780  constant_value = getPageData(internal_page);
781  if (!output_page_flag)
782  {
// Non-constant page: scan the used span. The first element becomes the
// candidate value; the remaining [start+1, end) entries are compared.
783  GA_PageOff start = internal_page_map(i).myStartOffset;
784  GA_PageOff end = internal_page_map(i).myEndOffset;
785  const NotVoidType *page = constant_value;
786  constant_value += start;
787  output_page_flag = isSubPageConstant(page, start+1, end,
788  tuplesize, constant_value);
789  }
790  }
791  else if (output_page_flag)
792  {
793  const bool page_constant = isPageConstant(internal_page);
794  const NotVoidType *page = getPageData(internal_page);
795  if (page_constant)
796  {
797  if ((page==nullptr) != (constant_value==nullptr))
798  output_page_flag = false;
799  else if (constant_value != page)
800  output_page_flag = isEqual(constant_value, page, tuplesize);
801  }
802  else
803  {
804  if (!isSubPageConstant(page,
805  internal_page_map(i).myStartOffset,
806  internal_page_map(i).myEndOffset,
807  tuplesize,
808  constant_value))
809  output_page_flag = false;
810  }
811  }
812  }
// Flush the final output page's verdict.
813  if (output_page >= 0 && output_page_flag)
814  {
815  constant_flags.setBit(output_page, output_page_flag);
816  ++count;
817  }
818  return count;
819 }
820 
// Returns true if every tuple in the span [start, end) of 'page' equals
// 'value' (or equals zero when 'value' is nullptr, matching the
// constant-zero-page convention used elsewhere in this file).
// NOTE(review): the function-name line (doxygen line 823) and the
// start/end parameter line (doxygen line 825, presumably GA_PageOff
// start, GA_PageOff end) are missing from this scrape -- confirm against
// the real header.
821 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
822 bool
824  const NotVoidType *page,
826  const exint tuplesize,
827  const NotVoidType *value)
828 {
829  if (value == nullptr)
830  {
831  for (GA_PageOff cur = start; cur < end; cur++)
832  {
833  if (!isZero(page+cur*tuplesize, tuplesize))
834  return false;
835  }
836  }
837  else
838  {
839  for (GA_PageOff cur = start; cur < end; cur++)
840  {
841  if (!isEqual(page+cur*tuplesize, value, tuplesize))
842  return false;
843  }
844  }
845  return true;
846 }
847 
// Non-paged save path: writes every element of 'range' either as one flat
// uniform array (ARRAY_OF_ARRAYS == false) or as a JSON array of
// per-element tuple arrays (ARRAY_OF_ARRAYS == true). 'map'/'defvalue'
// optionally remap integer values on the way out, as in jsonWriteDataSpan.
// NOTE(review): the function-name line (doxygen line 851) is missing from
// this scrape (jsonSave above calls this as jsonSaveAsArray), and the
// declaration of 'start' (doxygen line 866, presumably GA_Offset start;
// paired with 'end' below) is also missing -- confirm against the real
// header.
848 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
849 template<bool ARRAY_OF_ARRAYS>
850 bool
852  UT_JSONWriter &w, const GA_Range &range, UT_JID jid_storage,
853  const UT_IntArray *map, int defvalue) const
854 {
855  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
856 
857  int tuplesize = getTupleSize();
858 
// Flat mode wraps everything in a single uniform array.
859  if (!ARRAY_OF_ARRAYS)
860  {
861  if (!w.beginUniformArray(tuplesize*range.getEntries(), jid_storage))
862  return false;
863  }
864 
// Scratch tuple, only needed when emitting per-element sub-arrays.
865  UT_StackBuffer<NotVoidType> buffer(ARRAY_OF_ARRAYS ? tuplesize : 0);
867 
868  GA_Offset end;
869  for (GA_Iterator it(range); it.blockAdvance(start, end); )
870  {
871  if (map)
872  {
873  for (GA_Offset ai = start; ai < end; ++ai)
874  {
875  if (ARRAY_OF_ARRAYS)
876  {
// Remap each component into the scratch tuple, then emit it as one
// sub-array; out-of-range values fall back to defvalue.
877  for (int component = 0; component < tuplesize; ++component)
878  {
879  NotVoidType v = this->template get<NotVoidType>(ai, component);
880  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
881  buffer[component] = v;
882  }
883  if (!w.jsonUniformArray(tuplesize, buffer))
884  return false;
885  }
886  else
887  {
888  for (int component = 0; component < tuplesize; ++component)
889  {
890  NotVoidType v = this->template get<NotVoidType>(ai, component);
891  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
892  if (!w.uniformWrite(v))
893  return false;
894  }
895  }
896  }
897  }
898  else
899  {
900  // No map
901  for (GA_Offset ai = start; ai < end; ++ai)
902  {
903  if (ARRAY_OF_ARRAYS)
904  {
905  for (int component = 0; component < tuplesize; ++component)
906  buffer[component] = this->template get<NotVoidType>(ai, component);
907 
908  if (!w.jsonUniformArray(tuplesize, buffer))
909  return false;
910  }
911  else
912  {
913  for (int component = 0; component < tuplesize; ++component)
914  {
915  NotVoidType v = this->template get<NotVoidType>(ai, component);
916 
917  if (!w.uniformWrite(v))
918  return false;
919  }
920  }
921  }
922  }
923  }
924 
// Array-of-arrays mode has no outer uniform array to close.
925  if (ARRAY_OF_ARRAYS)
926  return true;
927 
928  return w.endUniformArray();
929 }
930 
// Maps a GA_Storage enum value to the corresponding UT_JID type tag used
// for uniform-array output by the JSON writer. Unhandled values assert
// (in debug builds) and fall back to UT_JID_NULL.
// NOTE(review): the signature line (doxygen line 933) is missing from
// this scrape; jsonSave above calls this as GAStorageToJID(ga_storage),
// so this is presumably GAStorageToJID(GA_Storage storage) -- confirm
// against the real header.
931 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
932 UT_JID
934 {
935  switch (storage)
936  {
937  case GA_STORE_BOOL:
938  return UT_JID_BOOL;
939  case GA_STORE_INVALID:
940  return UT_JID_NULL;
941  case GA_STORE_STRING:
942  return UT_JID_STRING;
943  case GA_STORE_INT8:
944  return UT_JID_INT8;
945  case GA_STORE_UINT8:
946  return UT_JID_UINT8;
947  case GA_STORE_INT16:
948  return UT_JID_INT16;
949  case GA_STORE_INT32:
950  return UT_JID_INT32;
951  case GA_STORE_INT64:
952  return UT_JID_INT64;
953  case GA_STORE_REAL16:
954  return UT_JID_REAL16;
955  case GA_STORE_REAL32:
956  return UT_JID_REAL32;
957  case GA_STORE_REAL64:
958  return UT_JID_REAL64;
959  }
960  UT_ASSERT_MSG_P(0, "Unhandled GA_Storage value!");
961  return UT_JID_NULL;
962 }
963 
964 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
965 bool
967  UT_JSONParser &p,
968  const GA_LoadMap &map,
969  GA_AttributeOwner owner)
970 {
971  if (SYSisSame<DATA_T,void>())
972  {
973  bool success;
974  SYS_CallIf<SYSisSame<DATA_T,void>()>::call([this,&p,&map,owner,&success](){
975  // Hard case, where the storage type is not known at compile time.
976  UT_Storage storage = Base::getStorage();
977  switch (storage)
978  {
979  case UT_Storage::INT8:
980  success = this->castType<int8>().jsonLoad(p, map, owner); return;
981  case UT_Storage::INT16:
982  success = this->castType<int16>().jsonLoad(p, map, owner); return;
983  case UT_Storage::INT32:
984  success = this->castType<int32>().jsonLoad(p, map, owner); return;
985  case UT_Storage::INT64:
986  success = this->castType<int64>().jsonLoad(p, map, owner); return;
987  case UT_Storage::REAL16:
988  success = this->castType<fpreal16>().jsonLoad(p, map, owner); return;
989  case UT_Storage::REAL32:
990  success = this->castType<fpreal32>().jsonLoad(p, map, owner); return;
991  case UT_Storage::REAL64:
992  success = this->castType<fpreal64>().jsonLoad(p, map, owner); return;
993  case UT_Storage::INVALID:
994  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
995  success = false;
996  return;
997  }
998  success = false;
999  });
1000  return success;
1001  }
1002 
1003  int64 tuple_size = getTupleSize();
1004 
1005  // Cast to optimize for small tuple sizes
1006  if (TSIZE == -1 && tuple_size <= 3 && tuple_size >= 1)
1007  {
1008  bool success;
1009  SYS_CallIf<TSIZE == -1>::call([this,&p,&map,owner,tuple_size,&success](){
1010  if (tuple_size == 3)
1011  success = this->castTupleSize<3>().jsonLoad(p, map, owner);
1012  else if (tuple_size == 1)
1013  success = this->castTupleSize<1>().jsonLoad(p, map, owner);
1014  else
1015  {
1016  UT_ASSERT_P(tuple_size == 2);
1017  success = this->castTupleSize<2>().jsonLoad(p, map, owner);
1018  }
1019  });
1020  return success;
1021  }
1022 
1023  UT_WorkBuffer key;
1024  int64 page_size = -1;
1025  GA_Storage ga_storage = GA_STORE_INVALID;
1026  UT_StackBuffer<int> packing(tuple_size);
1027  int n_packing_entries = 0;
1028  UT_StackBuffer<UT_UniquePtr<UT_BitArray> > constant_page_flags(tuple_size);
1029  bool constant_page_flags_init = false;
1030 
1031  bool ok = true;
1032  bool done = false;
1033  for (UT_JSONParser::traverser mi = p.beginArray(); ok && !mi.atEnd(); ++mi)
1034  {
1035  if (!mi.getLowerKey(key))
1036  {
1037  ok = false;
1038  break;
1039  }
1040  switch (GA_PageArrayIO::getJSONTokenID(key.buffer()))
1041  {
1043  {
1044  int64 local_tuple_size = -1;
1045  ok = p.parseInteger(local_tuple_size);
1046  if (ok && local_tuple_size != tuple_size)
1047  {
1048  p.addWarning("Inconsistent tuple size specification");
1049  ok = false;
1050  }
1051  break;
1052  }
1054  ok = p.parseString(key);
1055  if (ok)
1056  ga_storage = GAstorage(key.buffer());
1057  break;
1059  // Tuple size and storage type are supposed to have already been set.
1060  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1061  {
1062  p.addWarning("Bad data type/size specification");
1063  ok = p.skipNextObject();
1064  }
1065  else
1066  {
1067  // Load as a struct of arrays
1069  for (exint component = 0; ok && !ai.atEnd(); ++component, ++ai)
1070  {
1071  if (component < tuple_size)
1072  {
1073  GA_Offset startoff = map.getLoadOffset(owner);
1074  LoadComponentArrayFunctor op(*this, startoff, component);
1075  if (GAisIntStorage(ga_storage))
1077  else
1079  }
1080  else
1081  {
1082  if (component == tuple_size)
1083  p.addWarning("Too many tuple items in data array");
1084  ok = p.skipNextObject();
1085  }
1086  }
1087  done = true;
1088  }
1089  break;
1091  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1092  {
1093  p.addWarning("Bad data type/size specification");
1094  ok = p.skipNextObject();
1095  }
1096  else
1097  {
1098  // Load as an array of structs
1100  GA_Offset offset = map.getLoadOffset(owner);
1101  GA_Size ppage = GAgetPageNum(offset);
1102  for ( ; ok && !ai.atEnd(); ++offset, ++ai)
1103  {
1104  GA_PageNum newpagenum = GAgetPageNum(offset);
1105  if (newpagenum != ppage)
1106  {
1107  // We compress previous page
1108  tryCompressPage(ppage);
1109  ppage = newpagenum;
1110  }
1111 
1112  UT_StackBuffer<NotVoidType> buffer(tuple_size);
1113  exint nread = p.parseUniformArray<NotVoidType>(buffer, tuple_size);
1114  if (nread < tuple_size)
1115  {
1116  ok = false;
1117  break;
1118  }
1119  if (nread > tuple_size)
1120  p.addWarning("Extra data found in array tuple");
1121 
1122  if (TSIZE >= 1)
1123  {
1124  setVector(offset, *(const UT_FixedVector<NotVoidType,theSafeTupleSize>*)buffer.array());
1125  }
1126  else
1127  {
1128  for (int component = 0; component < tuple_size; ++component)
1129  set(offset, component, buffer[component]);
1130  }
1131  }
1132  tryCompressPage(ppage);
1133  done = true;
1134  }
1135  break;
1137  ok = p.parseInteger(page_size);
1138  break;
1140  if (tuple_size != getTupleSize())
1141  {
1142  p.addWarning("Packing requires valid size specification");
1143  ok = p.skipNextObject();
1144  }
1145  else
1146  {
1147  // NB: p.parseUniformArray() might return a greater value
1148  // than expected, but it won't write the extra values
1149  // to packing.array().
1150  n_packing_entries = p.parseUniformArray(packing.array(), tuple_size);
1151 
1152  if (constant_page_flags_init && n_packing_entries != (tuple_size > 0 ? 1 : 0))
1153  {
1154  p.addWarning("Non-trivial packing specification must come before constant page flags");
1155  n_packing_entries = 0;
1156  ok = false;
1157  }
1158  else if (n_packing_entries >= 0)
1159  {
1160  int total_packed_size = 0;
1161  for (int i = 0; i < n_packing_entries; ++i)
1162  {
1163  total_packed_size += packing[i];
1164  }
1165  if (total_packed_size != tuple_size ||
1166  n_packing_entries > tuple_size)
1167  {
1168  p.addWarning("Invalid packing specification");
1169  n_packing_entries = -1;
1170  ok = false;
1171  }
1172  }
1173  }
1174  break;
1176  if (tuple_size != getTupleSize() ||
1177  page_size <= 0 || n_packing_entries < 0)
1178  {
1179  p.addWarning("Bad data type/size specification");
1180  ok = p.skipNextObject();
1181  }
1182  else
1183  {
1184  int i = 0;
1185  UT_BitArray scratch_array;
1186 
1187  int n_arrays = n_packing_entries ? n_packing_entries
1188  : (tuple_size > 0 ? 1 : 0);
1189  int64 n_input_pages = (map.getLoadCount(owner)+page_size-1) / page_size;
1190 
1191  for (UT_JSONParser::traverser it = p.beginArray(); !it.atEnd(); ++it, ++i)
1192  {
1193  if (i < n_arrays)
1194  {
1195  int64 n_loaded = p.parseUniformBoolArray(scratch_array, n_input_pages);
1196 
1197  // We allow an empty array when no pages are constant.
1198  if (n_loaded == 0)
1199  {
1200  constant_page_flags[i].reset(NULL);
1201  }
1202  else
1203  {
1204  constant_page_flags[i].reset(new UT_BitArray());
1205  scratch_array.swap(*constant_page_flags[i]);
1206  }
1207  }
1208  else
1209  {
1210  p.skipNextObject();
1211  UT_ASSERT(0);
1212  }
1213  }
1214  ok = (i == n_arrays);
1215  constant_page_flags_init = true;
1216  }
1217  break;
1219  // Load as an array of structs with tuples whose pages may be compressed
1220  if (done || tuple_size != getTupleSize() || ga_storage != getStorage() ||
1221  page_size <= 0 || n_packing_entries < 0)
1222  {
1223  p.addWarning("Bad data type/size specification");
1224  ok = p.skipNextObject();
1225  }
1226  else
1227  {
1228  // We default to a full vector when a GA_JDTUPLE_PACKING
1229  // field is missing.
1230  if (n_packing_entries == 0 && tuple_size > 0)
1231  {
1232  packing[0] = tuple_size;
1233  n_packing_entries = 1;
1234  }
1235  done = true;
1236  ok = jsonLoadRawPageData(p, map, owner,
1237  GA_Size(page_size),
1238  packing.array(), n_packing_entries,
1239  constant_page_flags.array());
1240  }
1241  break;
1242  default:
1243  p.addWarning("Data Array Tuple unknown key '%s'", key.buffer());
1244  break;
1245  }
1246  }
1247  if (!done)
1248  p.addWarning("Missing data for data array");
1249  return ok;
1250 }
1251 
1252 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
// Functor handed to UT_JSONParser's array-loading machinery when jsonLoad()
// reads a struct-of-arrays data section: every parsed value is written into
// one tuple component (myComponent) of the destination page array, starting
// at myStartOffset.  Writes past the destination's end are rejected so a
// malformed file cannot run off the array.
1253 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::LoadComponentArrayFunctor
1254 {
1255 public:
// NOTE(review): the Doxygen extraction dropped original lines 1256/1258
// here; per the symbol index they are the PageArray typedef and the
// constructor signature LoadComponentArrayFunctor(PageArray &dest,
// GA_Offset startoff, exint component) -- confirm against the real header.
1257 
1259  : myDest(dest)
1260  , myStartOffset(startoff)
1261  , myComponent(component)
// Cache the writable element count so set()/setArray() can bounds-check
// without re-querying dest.size() per value.
1262  , myDestSize(dest.size()-startoff)
1263  {}
1264 
// Store a single value at destination element i (component myComponent).
// Returns false once i reaches the end of the destination, which tells the
// parser to stop feeding values.
1265  template <typename T> SYS_FORCE_INLINE bool
1266  set(int64 i, T val) const
1267  {
1268  if (GA_Offset(i) >= myDestSize)
1269  return false;
1270  myDest.set(myStartOffset+(GA_Size)i, myComponent, val);
1271  return true;
1272  }
1273 
// Bulk store of size values starting at myStartOffset; clamps to the
// destination size and reports truncation by returning false.
1274  template <typename T> SYS_FORCE_INLINE bool
1275  setArray(const T *data, int64 size) const
1276  {
1277  bool outofbounds = false;
1278  if (GA_Offset(size) > myDestSize)
1279  {
1280  size = int64(myDestSize);
1281  outofbounds = true;
1282  }
1283 
1284  // Fast path for single component
1285  if (TSIZE == 1)
1286  {
1287  myDest.setRange(myStartOffset, GA_Offset(size), data);
1288  return !outofbounds;
1289  }
1290 
// Multi-component tuples: scatter each value into its component slot
// one element at a time.
1291  GA_Offset end = myStartOffset + GA_Size(size);
1292 
1293  for (GA_Offset off = myStartOffset; off < end; ++off, ++data)
1294  {
1295  myDest.set(off, myComponent, *data);
1296  }
1297 
1298  return !outofbounds;
1299  }
1300 
// NOTE(review): the member declarations (myDest, myStartOffset,
// myComponent, myDestSize; original lines 1301-1304) were also dropped by
// the extraction -- confirm against the real header.
1305 };
1306 
// Loads the raw page-data field of a "data array tuple" JSON section: the
// packed page values for this array, possibly split into sub-vectors
// (packing / n_packing_entries, each sub-vector page_size elements per
// input page) with optional per-sub-vector constant-page bit flags
// (constant_page_flags).  Returns false on any parse/read failure; on
// success returns whether the input uniform array was fully consumed.
//
// NOTE(review): this is a Doxygen text extraction; original lines 1309
// (the qualified function name GA_PageArray<...>::jsonLoadRawPageData) and
// 1320 (the declaration of the uniform-array iterator "it", presumably
// obtained from p) were swallowed by hyperlinks -- confirm against the
// real header.
1307 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1308 bool
1310  UT_JSONParser &p,
1311  const GA_LoadMap &map,
1312  GA_AttributeOwner owner,
1313  GA_Size page_size,
1314  const int *packing,
1315  int n_packing_entries,
1316  const UT_UniquePtr<UT_BitArray> *const constant_page_flags)
1317 {
1318  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1319 
1321  if (it.getErrorState())
1322  return false;
1323 
// istypematch: the stream's uniform-array element type matches our
// storage, so pages can be read as raw binary (readUniformArray);
// otherwise each value is parsed/converted (parseArrayValues).
1324  UT_JID jid = p.getUniformArrayType();
1325  bool istypematch = (jid == GAStorageToJID(getStorage()));
1326 
1327  GA_Size num_input_elements = map.getLoadCount(owner);
1328  GA_Offset load_offset = map.getLoadOffset(owner);
1329  const GA_PageNum start_page_num = GAgetPageNum(load_offset);
1330  GA_PageOff page_offset = GAgetPageOff(load_offset);
1331  const exint tuple_size = getTupleSize();
1332  const exint num_page_values = tuple_size*thePageSize;
1333 
// Scratch tuple used when a page is flagged constant: only one tuple is
// stored in the stream for the whole page.
1334  UT_StackBuffer<NotVoidType> single_tuple(tuple_size);
1335 
// Fast path: a single packed sub-vector covering the full tuple, and the
// file's page size matches our in-memory page size.
1336  if (n_packing_entries == 1 && page_size == thePageSize)
1337  {
1338  UT_ASSERT(packing[0] == tuple_size);
1339 
1340  const UT_BitArray *constpagebits = constant_page_flags[0].get();
1341 
1342  if (page_offset == GA_PageOff(0))
1343  {
1344  // Loading at the beginning of a page, making things much simpler
1345  GA_Size num_full_new_pages = (num_input_elements >> GA_PAGE_BITS);
1346  GA_PageOff end_page_offset = GAgetPageOff(GA_Offset(num_input_elements));
1347 
1348  // First, fill in all complete, full-size pages
1349  GA_PageNum pagenum = start_page_num;
1350  for (GA_Size input_pagei = 0; input_pagei < num_full_new_pages; ++input_pagei, ++pagenum)
1351  {
// Constant page: read the one stored tuple and mark the whole
// destination page constant; otherwise read the full page of values.
1352  if (constpagebits && constpagebits->getBitFast(input_pagei))
1353  {
1354  if (istypematch)
1355  {
1356  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1357  return false;
1358  }
1359  else
1360  {
1361  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1362  return false;
1363  }
1364  setPageConstant(pagenum, single_tuple.array());
1365  }
1366  else
1367  {
// hardenPageNoInit: the page is about to be fully overwritten, so
// skip initializing it.
1368  NotVoidType *data = hardenPageNoInit(pagenum);
1369  if (istypematch)
1370  {
1371  if (!it.readUniformArray(data, num_page_values))
1372  return false;
1373  }
1374  else
1375  {
1376  if (p.parseArrayValues(it, data, num_page_values) != num_page_values)
1377  return false;
1378  }
1379  }
1380  }
1381 
1382  // Handle any final incomplete or not-full-size page
1383  if (end_page_offset != GA_PageOff(0))
1384  {
1385  if (constpagebits && constpagebits->getBitFast(num_full_new_pages))
1386  {
1387  if (istypematch)
1388  {
1389  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1390  return false;
1391  }
1392  else
1393  {
1394  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1395  return false;
1396  }
// If the load reaches the very end of the array the partial page can
// be marked constant outright; otherwise existing trailing data must
// be preserved, so only the loaded prefix is filled in.
1397  if (load_offset+num_input_elements == size())
1398  setPageConstant(pagenum, single_tuple.array());
1399  else
1400  {
1401  // I don't know if this path will ever be taken; I'm guessing not.
1402 
// If the page is already constant and equal to the incoming tuple
// (or both are zero), nothing needs to be written.
1403  bool equal = false;
1404  if (isPageConstant(pagenum))
1405  {
1406  const NotVoidType *current_tuple = getPageData(pagenum);
1407  if (current_tuple)
1408  {
1409  if (isEqual(single_tuple.array(),current_tuple,tuple_size))
1410  equal = true;
1411  }
1412  else
1413  {
1414  if (isZero(single_tuple.array(),tuple_size))
1415  equal = true;
1416  }
1417  }
1418  if (!equal)
1419  {
1420  NotVoidType *data = hardenPage(pagenum);
1421  for (GA_PageOff pageoff(0); pageoff < end_page_offset; ++pageoff)
1422  {
1423  for (exint component = 0; component < tuple_size; ++component, ++data)
1424  {
1425  *data = single_tuple[component];
1426  }
1427  }
1428  }
1429  }
1430  }
1431  else
1432  {
1433  // This could be optimized to avoid a bit of redundant initialization,
1434  // but hopefully it's not too much of an issue.
1435  NotVoidType *data = hardenPage(pagenum);
1436  const exint num_left_values = tuple_size*end_page_offset;
1437  if (istypematch)
1438  {
1439  if (!it.readUniformArray(data, num_left_values))
1440  return false;
1441  }
1442  else
1443  {
1444  if (p.parseArrayValues(it, data, num_left_values) != num_left_values)
1445  return false;
1446  }
1447  }
1448  }
1449  }
1450  else
1451  {
1452  // Loading with matching packing (only 1 tuple) and matching page size,
1453  // but not loading at a page boundary.
1454 
1455  // TODO: Optimize this case for that we know that pages are the same size,
1456  // e.g. to try to preserve constant pages or load directly into destination.
1457 
// Stage each input page in a scratch buffer, then scatter the values
// element-by-element through set(), since destination pages straddle
// input pages here.
1458  UT_StackBuffer<NotVoidType> buffer(thePageSize*tuple_size);
1459  const exint num_input_pages = (num_input_elements + thePageSize-1) / thePageSize;
1460  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1461  {
1462  exint inputi = thePageSize*input_pagei;
1463  const exint num_page_elements = SYSmin(thePageSize, num_input_elements-inputi);
1464  const exint num_page_values = tuple_size*num_page_elements;
1465 
1466  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1467  if (constant_page)
1468  {
// Constant input page: one tuple in the stream, replicated to every
// element of this input page in the destination.
1469  if (istypematch)
1470  {
1471  if (!it.readUniformArray(buffer.array(), tuple_size))
1472  return false;
1473  }
1474  else
1475  {
1476  if (p.parseArrayValues(it, buffer.array(), tuple_size) != tuple_size)
1477  return false;
1478  }
1479 
1480  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1481  {
1482  for (exint component = 0; component < tuple_size; ++component)
1483  {
1484  set(load_offset+inputi, component, buffer[component]);
1485  }
1486  }
1487  }
1488  else
1489  {
1490  if (istypematch)
1491  {
1492  if (!it.readUniformArray(buffer.array(), num_page_values))
1493  return false;
1494  }
1495  else
1496  {
1497  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1498  return false;
1499  }
1500 
1501  exint i = 0;
1502  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1503  {
1504  for (exint component = 0; component < tuple_size; ++component, ++i)
1505  {
1506  set(load_offset+inputi, component, buffer[i]);
1507  }
1508  }
1509  }
1510  }
1511  }
1512  }
1513  else
1514  {
// General case: arbitrary input page size and/or multiple packed
// sub-vectors.  For each input page, each sub-vector's values are read
// in turn and scattered into the component range
// [start_component, start_component + packing[packingi]).
1515  UT_StackBuffer<NotVoidType> buffer(page_size*tuple_size);
1516  const exint num_input_pages = (num_input_elements + page_size-1) / page_size;
1517  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1518  {
1519  exint start_component = 0;
1520  for (exint packingi = 0; packingi < n_packing_entries; ++packingi)
1521  {
1522  exint inputi = page_size*input_pagei;
1523  const exint num_page_elements = SYSmin(page_size, num_input_elements-inputi);
1524  const exint input_tuple_size = packing[packingi];
1525  const exint num_page_values = input_tuple_size*num_page_elements;
1526 
// Each sub-vector carries its own constant-page flag array.
1527  const UT_BitArray *constpagebits = constant_page_flags[packingi].get();
1528 
1529  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1530  if (constant_page)
1531  {
1532  if (istypematch)
1533  {
1534  if (!it.readUniformArray(buffer.array(), input_tuple_size))
1535  return false;
1536  }
1537  else
1538  {
1539  if (p.parseArrayValues(it, buffer.array(), input_tuple_size) != input_tuple_size)
1540  return false;
1541  }
1542 
1543  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1544  {
1545  for (exint component = 0; component < input_tuple_size; ++component)
1546  {
1547  set(load_offset+inputi, start_component+component, buffer[component]);
1548  }
1549  }
1550  }
1551  else
1552  {
1553  if (istypematch)
1554  {
1555  if (!it.readUniformArray(buffer.array(), num_page_values))
1556  return false;
1557  }
1558  else
1559  {
1560  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1561  return false;
1562  }
1563 
1564  exint i = 0;
1565  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1566  {
1567  for (exint component = 0; component < input_tuple_size; ++component, ++i)
1568  {
1569  set(load_offset+inputi, start_component+component, buffer[i]);
1570  }
1571  }
1572  }
1573 
1574  start_component += input_tuple_size;
1575  }
1576  }
1577  }
1578 
// Success means the entire uniform array was consumed exactly.
1579  return it.atEnd();
1580 }
1581 
1582 #endif
bool uniformWrite(bool value)
#define UT_ASSERT_COMPILETIME(expr)
Definition: UT_Assert.h:109
The following byte represents an 8 bit integer.
bool beginUniformArray(int64 length, UT_JID id)
const_iterator begin() const
bool parseString(UT_WorkBuffer &v)
No data follows the NULL token.
GLenum GLint * range
Definition: glcorearb.h:1924
UT_Storage
Definition: UT_Storage.h:26
The following 4 bytes represent an 32 bit real (float)
SYS_FORCE_INLINE bool setArray(const T *data, int64 size) const
GA_API JDTupleToken getJSONTokenID(const char *token)
GA_Size GA_PageOff
Definition: GA_Types.h:621
bool getBitFast(exint index) const
Definition: UT_BitArray.h:311
Iteration over a range of elements.
Definition: GA_Iterator.h:28
bool jsonKeyToken(const UT_StringRef &value)
UT_JID
The UT_JID enums are used in byte-stream encoding of binary JSON.
bool getBinary() const
Return whether writing binary or ASCII JSON.
Definition: UT_JSONWriter.h:87
Class which stores the default values for a GA_Attribute.
Definition: GA_Defaults.h:35
bool jsonSave(UT_JSONWriter &w, const GA_Range &range, const GA_SaveOptions *options=nullptr, const UT_IntArray *map=nullptr, int defvalue=-1) const
const GLdouble * v
Definition: glcorearb.h:836
int64 parseUniformBoolArray(UT_BitArray &data, int64 len)
void setAllBits(bool value)
bool blockAdvance(GA_Offset &start, GA_Offset &end)
GLuint start
Definition: glcorearb.h:474
bool GAisValid(GA_Size v)
Definition: GA_Types.h:625
0x23 and 0x24 are reserved for future use (32/64 bit unsigned)
int64 parseArrayValues(iterator &it, T *data, int64 len)
GA_API const char * getJSONToken(JDTupleToken tokenID)
The merge map keeps track of information when merging details.
Definition: GA_MergeMap.h:53
bool parseInteger(int64 &v)
bool jsonStringToken(const UT_StringRef &value)
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1221
iterator beginArray()
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:72
The following byte represents an unsigned 8 bit integer.
void defragment(const GA_Defragment &defrag)
Include GA_PageArrayImpl.h to call this.
#define GA_API
Definition: GA_API.h:12
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:32
bool jsonLoad(UT_JSONParser &p, const GA_LoadMap &map, GA_AttributeOwner owner)
GA_Offset getDestEnd(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:130
GLuint buffer
Definition: glcorearb.h:659
png_uint_32 i
Definition: png.h:2877
exint size() const
Definition: UT_Array.h:444
GA_Size getLoadCount(GA_AttributeOwner owner) const
This method returns the number of elements being loaded of each type.
exint GA_Size
Defines the bit width for index and offset types in GA.
Definition: GA_Types.h:211
GA_PageOff GAgetPageOff(GA_Offset v)
Definition: GA_Types.h:636
exint numBitsSet() const
GLsizeiptr size
Definition: glcorearb.h:663
#define GA_INVALID_OFFSET
Definition: GA_Types.h:654
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:101
A range of elements in an index-map.
Definition: GA_Range.h:42
GA_Size GA_Offset
Definition: GA_Types.h:617
The following 8 bytes represent an 64 bit real (float)
static SYS_FORCE_INLINE void call(FUNCTOR functor)
The following 8 bytes represent an 64 bit integer.
long long int64
Definition: SYS_Types.h:100
GA_API const char * GAstorage(GA_Storage store)
Lookup the storage name from the storage type.
GLdouble n
Definition: glcorearb.h:2007
bool uniformBlockWrite(const int8 *value, int64 count)
Write a block of 8 bit integer values to the uniform array.
The following 2 bytes represent an 16 bit integer.
GA_PageArray< DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED > PageArray
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:102
int64 exint
Definition: SYS_Types.h:109
double fpreal64
Definition: SYS_Types.h:185
GLuint GLuint end
Definition: glcorearb.h:474
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
Traverse an array object in the parser.
GLintptr offset
Definition: glcorearb.h:664
bool skipNextObject()
Simple convenience method to skip the next object in the stream.
exint size() const
Definition: UT_BitArray.h:46
Options during loading.
Definition: GA_LoadMap.h:42
bool getErrorState() const
void void addWarning(const char *fmt,...) SYS_PRINTF_CHECK_ATTRIBUTE(2
Defragmentation of IndexMaps.
Definition: GA_Defragment.h:45
GLboolean * data
Definition: glcorearb.h:130
int int32
Definition: SYS_Types.h:28
bool jsonEndArray(bool newline=true)
The following 4 bytes represent an 32 bit integer.
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1221
void mergeGrowArrayAndCopy(const GA_MergeMap &map, GA_AttributeOwner owner, const GA_PageArray< SRC_DATA_T, SRC_TSIZE, SRC_TABLEHARDENED, SRC_PAGESHARDENED > &src, const GA_Defaults &defaults)
Include GA_PageArrayImpl.h to call this.
#define GA_PAGE_SIZE
Definition: GA_Types.h:200
GLint GLsizei count
Definition: glcorearb.h:404
bool setBit(exint index, bool value)
Definition: UT_BitArray.h:266
ga_SubPageBlock(GA_PageNum page, GA_PageOff start, GA_PageOff end)
exint entries() const
Alias of size(). size() is preferred.
Definition: UT_Array.h:446
int64 parseUniformArray(T *data, int64 len)
GLsizei const GLfloat * value
Definition: glcorearb.h:823
GA_AttributeOwner
Definition: GA_Types.h:33
#define UT_ASSERT_MSG_P(ZZ, MM)
Definition: UT_Assert.h:104
unsigned char uint8
Definition: SYS_Types.h:25
GA_Offset getLoadOffset(GA_AttributeOwner owner) const
Definition: GA_LoadMap.h:154
GA_Size getEntries() const
Get an accurate count of the entries in the range.
Definition: GA_Range.h:242
A smart pointer for unique ownership of dynamically allocated objects.
Definition: UT_UniquePtr.h:47
GA_Size GA_PageNum
Definition: GA_Types.h:620
#define GA_PAGE_BITS
Attributes may paritition their data in pages of GA_PAGE_SIZE offsets.
Definition: GA_Types.h:199
bool loadPODArray(OP_TYPE &op)
GLuint GLfloat * val
Definition: glcorearb.h:1607
bool readUniformArray(T *buffer, int64 size)
bool jsonBeginArray()
Begin a generic array object.
exint append(void)
Definition: UT_Array.h:95
bool equal(T1 a, T2 b, T3 t)
Definition: ImathFun.h:143
LoadComponentArrayFunctor(PageArray &dest, GA_Offset startoff, exint component)
const char * buffer() const
getOption("OpenEXR.storage") storage
Definition: HDK_Image.dox:276
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:856
bool jsonUniformArray(int64 length, const int8 *value)
Efficent method of writing a uniform array of int8 values.
bool endUniformArray(int64 *nwritten=0)
SYS_FORCE_INLINE bool set(int64 i, T val) const
GA_Offset getDestCapacity(GA_AttributeOwner owner) const
Convenience method to get new destination size.
Definition: GA_MergeMap.h:121
#define SYSmin(a, b)
Definition: SYS_Math.h:1366
#define UT_ASSERT_MSG(ZZ, MM)
Definition: UT_Assert.h:105
GA_Storage
Definition: GA_Types.h:48
GA_Offset getDestStart(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:128
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:324
GA_PageNum GAgetPageNum(GA_Offset v)
Definition: GA_Types.h:632
bool jsonInt(int32 value)
Write an integer value.
#define UT_IF_ASSERT(ZZ)
Definition: UT_Assert.h:120
void swap(UT_BitArray &other)
GLuint GLsizei GLsizei * length
Definition: glcorearb.h:794
GLenum src
Definition: glcorearb.h:1792
GA_Offset getDestInitCapacity(GA_AttributeOwner owner) const
Convenience method to get old destination size.
Definition: GA_MergeMap.h:117