data_types_internal.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <array>
#include <cassert>
#include <cstdint>
#include <string>
#include <type_traits>
#include <vector>

#include "boost/mp11.hpp"

#include "core/common/common.h"
#ifndef SHARED_PROVIDER
#include "core/common/type_list.h"
#include "core/framework/data_types.h"
#if !defined(ORT_MINIMAL_BUILD)
#include "onnx/defs/schema.h"
#else
#include "onnx/defs/data_type_utils.h"
#endif
#include "onnx/onnx_pb.h"
#include "onnx/onnx-operators_pb.h"
#endif

namespace onnxruntime {
namespace utils {

// The following primitives are strongly recommended for switching on the tensor input datatypes of
// kernel implementations.
//
// 1) If you need to handle all of the primitive tensor contained datatypes, the best choice is the
//    DispatchOnTensorType or DispatchOnTensorTypeWithReturn macros. Use inline wrappers so your
//    function can be invoked as function<T>().
// 2) If you only have a few types to handle, use Tensor.IsDataType<T>()/IsDataTypeString(), or
//    utils::IsPrimitiveDataType<T>() if you have a standalone MLDataType, with a short sequence of
//    if/else statements.
// 3) For something in between, we suggest using the MLTypeCallDispatcher pattern.
//
// Invoking DataTypeImpl::GetType<T>() for switching on input types is discouraged and should be avoided.
// Every primitive type carries with it an integer constant that can be used for quick switching on types.

#define DispatchOnTensorType(tensor_type, function, ...)            \
  switch (tensor_type->AsPrimitiveDataType()->GetDataType()) {      \
    case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:                \
      function<float>(__VA_ARGS__);                                 \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_BOOL:                 \
      function<bool>(__VA_ARGS__);                                  \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE:               \
      function<double>(__VA_ARGS__);                                \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_STRING:               \
      function<std::string>(__VA_ARGS__);                           \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_INT8:                 \
      function<int8_t>(__VA_ARGS__);                                \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT8:                \
      function<uint8_t>(__VA_ARGS__);                               \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_INT16:                \
      function<int16_t>(__VA_ARGS__);                               \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT16:               \
      function<uint16_t>(__VA_ARGS__);                              \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_INT32:                \
      function<int32_t>(__VA_ARGS__);                               \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT32:               \
      function<uint32_t>(__VA_ARGS__);                              \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_INT64:                \
      function<int64_t>(__VA_ARGS__);                               \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT64:               \
      function<uint64_t>(__VA_ARGS__);                              \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16:              \
      function<MLFloat16>(__VA_ARGS__);                             \
      break;                                                        \
    case ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16:             \
      function<BFloat16>(__VA_ARGS__);                              \
      break;                                                        \
    default:                                                        \
      ORT_ENFORCE(false, "Unknown tensor type of ", tensor_type);   \
  }

#define DispatchOnTensorTypeWithReturn(tensor_type, retval, function, ...) \
  switch (tensor_type->AsPrimitiveDataType()->GetDataType()) {             \
    case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:                       \
      retval = function<float>(__VA_ARGS__);                               \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_BOOL:                        \
      retval = function<bool>(__VA_ARGS__);                                \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE:                      \
      retval = function<double>(__VA_ARGS__);                              \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_STRING:                      \
      retval = function<std::string>(__VA_ARGS__);                         \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_INT8:                        \
      retval = function<int8_t>(__VA_ARGS__);                              \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT8:                       \
      retval = function<uint8_t>(__VA_ARGS__);                             \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT16:                      \
      retval = function<uint16_t>(__VA_ARGS__);                            \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_INT16:                       \
      retval = function<int16_t>(__VA_ARGS__);                             \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_INT32:                       \
      retval = function<int32_t>(__VA_ARGS__);                             \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT32:                      \
      retval = function<uint32_t>(__VA_ARGS__);                            \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_INT64:                       \
      retval = function<int64_t>(__VA_ARGS__);                             \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_UINT64:                      \
      retval = function<uint64_t>(__VA_ARGS__);                            \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16:                     \
      retval = function<MLFloat16>(__VA_ARGS__);                           \
      break;                                                               \
    case ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16:                    \
      retval = function<BFloat16>(__VA_ARGS__);                            \
      break;                                                               \
    default:                                                               \
      ORT_ENFORCE(false, "Unknown tensor type of ", tensor_type);          \
  }

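// Example (illustrative sketch only, not part of this header): dispatching a hypothetical helper
// FillWithZero<T> over every supported element type of a tensor. The helper name and surrounding
// code are assumptions made for illustration.
//
//   template <typename T>
//   void FillWithZero(Tensor& tensor) {
//     T* data = tensor.MutableData<T>();
//     std::fill(data, data + tensor.Shape().Size(), T{});
//   }
//
//   MLDataType elem_type = tensor.DataType();
//   DispatchOnTensorType(elem_type, FillWithZero, tensor);
//
// DispatchOnTensorTypeWithReturn works the same way but assigns the result of function<T>(...)
// to the provided retval variable.
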
////////////////////////////////////////////////////////////////////////////////
/// Use the following primitives if you have a few types to switch on so you
/// can write a short sequence of if/else statements.

// This is a frequently used check, so we make it a separate utility function.
inline bool IsDataTypeString(MLDataType dt_type) {
  auto prim_type = dt_type->AsPrimitiveDataType();
  return (prim_type != nullptr && prim_type->GetDataType() == ONNX_NAMESPACE::TensorProto_DataType_STRING);
}

// Tests whether MLDataType is a concrete type of PrimitiveDataTypeBase
// and that it is of type T.
template <class T>
inline bool IsPrimitiveDataType(MLDataType dt_type) {
  auto prim_type = dt_type->AsPrimitiveDataType();
  return (prim_type != nullptr && prim_type->GetDataType() == ToTensorProtoElementType<T>());
}

// Use after AsPrimitiveDataType() is successful.
// Checks whether the PrimitiveDataTypeBase is of type T.
template <class T>
inline bool IsPrimitiveDataType(const PrimitiveDataTypeBase* prim_type) {
  assert(prim_type != nullptr);
  return prim_type->GetDataType() == ToTensorProtoElementType<T>();
}

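// Example (illustrative sketch, not part of this header): a kernel that only supports a few input
// types can branch with a short if/else chain. The variable names are hypothetical.
//
//   MLDataType dt_type = input_tensor->DataType();
//   if (utils::IsPrimitiveDataType<float>(dt_type)) {
//     // handle float input
//   } else if (utils::IsPrimitiveDataType<int64_t>(dt_type)) {
//     // handle int64 input
//   } else if (utils::IsDataTypeString(dt_type)) {
//     // handle string input
//   } else {
//     return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported input type");
//   }
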
// This implementation contains a workaround for GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47226
// GCC until very recently did not support template parameter pack expansion within a lambda context.
namespace mltype_dispatcher_internal {

// T - type handled by this helper
class CallableDispatchableHelper {
  int32_t dt_type_;  // Type currently dispatched
  size_t called_;

 public:
  explicit CallableDispatchableHelper(int32_t dt_type) noexcept : dt_type_(dt_type), called_(0) {}

  // Must return an integer to be usable in an expandable context
  template <class T, class Fn, class... Args>
  int Invoke(Fn&& fn, Args&&... args) {
    if (utils::ToTensorProtoElementType<T>() == dt_type_) {
      std::forward<Fn>(fn)(std::forward<Args>(args)...);
      ++called_;
    }
    return 0;
  }

  void CheckCalledOnce() const {
    ORT_ENFORCE(called_ == 1, "Unsupported data type: ", dt_type_);
  }
};

// Default policy is to throw an exception.
// Other policies may set the second result argument accordingly.
template <class Ret>
struct UnsupportedTypeDefaultPolicy {
  void operator()(int32_t dt_type, Ret& /*result*/) const {
    ORT_THROW("Unsupported data type: ", dt_type);
  }
};

// Helper with the result type
template <class Ret, class UnsupportedPolicy>
class CallableDispatchableRetHelper {
  int32_t dt_type_;  // Type currently dispatched
  size_t called_;
  Ret result_;

 public:
  explicit CallableDispatchableRetHelper(int32_t dt_type) noexcept : dt_type_(dt_type), called_(0), result_() {}

  Ret Get() {
    // No type was invoked
    if (called_ == 0) {
      UnsupportedPolicy()(dt_type_, result_);
    }
    return result_;
  }

  // Must return an integer to be usable in an expandable context
  template <class T, class Fn, class... Args>
  int Invoke(Fn&& fn, Args&&... args) {
    if (utils::ToTensorProtoElementType<T>() == dt_type_) {
      result_ = std::forward<Fn>(fn)(std::forward<Args>(args)...);
      ++called_;
    }
    return 0;
  }
};

template <typename T>
using TensorProtoElementTypeConstant =
    std::integral_constant<ONNX_NAMESPACE::TensorProto_DataType, ToTensorProtoElementType<T>()>;

using UndefinedTensorProtoElementTypeConstant =
    std::integral_constant<ONNX_NAMESPACE::TensorProto_DataType, ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED>;

}  // namespace mltype_dispatcher_internal

/**
 * This class helps to efficiently dispatch calls to implementation function
 * objects with a tensor element type template argument.
 *
 * The constructor accepts a value corresponding to a tensor element type.
 * For example, it can be obtained from:
 *   input_tensor->GetElementType()
 *
 * The Invoke member functions will instantiate and invoke the provided
 * function object template, Fn. Fn must be default constructible. Fn must also
 * have a tensor element type template argument. This type template argument
 * will be the type that corresponds to the value given in the constructor.
 * These functions accept and forward arbitrary function arguments. They ensure
 * that Fn is called once with the type specified in the constructor.
 *
 * @tparam Types The types supported by the implementation. This should be a
 *         set of ONNX tensor element types that are supported by ORT.
 */
template <typename... Types>
class MLTypeCallDispatcher {
  using SupportedTypeList = TypeList<Types...>;
  using SupportedTensorProtoElementTypeList =
      boost::mp11::mp_transform<
          mltype_dispatcher_internal::TensorProtoElementTypeConstant, SupportedTypeList>;

  static_assert(
      boost::mp11::mp_and<
          boost::mp11::mp_is_set<SupportedTensorProtoElementTypeList>,
          boost::mp11::mp_not<
              boost::mp11::mp_set_contains<
                  SupportedTensorProtoElementTypeList,
                  mltype_dispatcher_internal::UndefinedTensorProtoElementTypeConstant>>>::value,
      "Types must map to a unique set of ONNX tensor element data types supported by ORT.");

  int32_t dt_type_;

 public:
  /**
   * Constructor.
   * @param dt_type The value corresponding to the tensor element type to be
   *        dispatched to. This can be obtained from
   *        input_tensor->GetElementType() or
   *        utils::ToTensorProtoElementType<T>().
   */
  explicit MLTypeCallDispatcher(int32_t dt_type) noexcept : dt_type_(dt_type) {}

  /**
   * Invokes Fn<T> with the specified arguments.
   *
   * @tparam Fn The function object template.
   * @tparam Args The argument types.
   */
  template <template <typename...> class Fn, typename... Args>
  void Invoke(Args&&... args) const {
    InvokeWithLeadingTemplateArgs<Fn, TypeList<>>(std::forward<Args>(args)...);
  }

  /**
   * Invokes Fn<..., T> with leading template arguments and the specified
   * arguments.
   *
   * @tparam Fn The function object template.
   * @tparam LeadingTemplateArgTypeList A type list of the leading template
   *         arguments.
   * @tparam Args The argument types.
   */
  template <template <typename...> class Fn, typename LeadingTemplateArgTypeList, typename... Args>
  void InvokeWithLeadingTemplateArgs(Args&&... args) const {
    static_assert(
        boost::mp11::mp_is_list<LeadingTemplateArgTypeList>::value,
        "LeadingTemplateArgTypeList must be a type list (e.g., onnxruntime::TypeList<T1, T2, ...>).");

    mltype_dispatcher_internal::CallableDispatchableHelper helper(dt_type_);

    // given LeadingTemplateArgTypeList is a type list L<U1, U2, ...>,
    // call helper.Invoke() with Fn<U1, U2, ..., T> for each T in Types
    static_cast<void>(std::array<int, sizeof...(Types)>{
        helper.template Invoke<Types>(
            boost::mp11::mp_apply<Fn, boost::mp11::mp_push_back<LeadingTemplateArgTypeList, Types>>(),
            std::forward<Args>(args)...)...});

    // avoid "unused parameter" warning for the case where Types is empty
    static_cast<void>(std::array<int, sizeof...(Args)>{(ORT_UNUSED_PARAMETER(args), 0)...});

    helper.CheckCalledOnce();
  }

  /**
   * Invokes Fn<T> with the specified arguments and returns the result.
   *
   * @tparam Ret The return type. Fn should return a type convertible to Ret.
   * @tparam Fn The function object template.
   * @tparam Args The argument types.
   */
  template <class Ret, template <typename...> class Fn, typename... Args>
  Ret InvokeRet(Args&&... args) const {
    return InvokeRetWithUnsupportedPolicy<
        Ret, Fn, mltype_dispatcher_internal::UnsupportedTypeDefaultPolicy<Ret>>(
        std::forward<Args>(args)...);
  }

  /**
   * Invokes Fn<T> with the specified arguments and returns the result.
   *
   * @tparam Ret The return type. Fn should return a type convertible to Ret.
   * @tparam Fn The function object template.
   * @tparam UnsupportedPolicy The policy used to handle unsupported types.
   *         See mltype_dispatcher_internal::UnsupportedTypeDefaultPolicy
   *         for an example.
   * @tparam Args The argument types.
   */
  template <class Ret, template <typename...> class Fn, class UnsupportedPolicy, typename... Args>
  Ret InvokeRetWithUnsupportedPolicy(Args&&... args) const {
    return InvokeRetWithUnsupportedPolicyAndLeadingTemplateArgs<
        Ret, Fn, UnsupportedPolicy, TypeList<>>(
        std::forward<Args>(args)...);
  }

  /**
   * Invokes Fn<..., T> with leading template arguments and the specified
   * arguments and returns the result.
   *
   * @tparam Ret The return type. Fn should return a type convertible to Ret.
   * @tparam Fn The function object template.
   * @tparam LeadingTemplateArgTypeList A type list of the leading template
   *         arguments.
   * @tparam Args The argument types.
   */
  template <class Ret, template <typename...> class Fn, typename LeadingTemplateArgTypeList, typename... Args>
  Ret InvokeRetWithLeadingTemplateArgs(Args&&... args) const {
    return InvokeRetWithUnsupportedPolicyAndLeadingTemplateArgs<
        Ret, Fn, mltype_dispatcher_internal::UnsupportedTypeDefaultPolicy<Ret>, LeadingTemplateArgTypeList>(
        std::forward<Args>(args)...);
  }

  /**
   * Invokes Fn<..., T> with leading template arguments and the specified
   * arguments and returns the result.
   *
   * @tparam Ret The return type. Fn should return a type convertible to Ret.
   * @tparam Fn The function object template.
   * @tparam UnsupportedPolicy The policy used to handle unsupported types.
   *         See mltype_dispatcher_internal::UnsupportedTypeDefaultPolicy
   *         for an example.
   * @tparam LeadingTemplateArgTypeList A type list of the leading template
   *         arguments.
   * @tparam Args The argument types.
   */
  template <class Ret,
            template <typename...> class Fn,
            class UnsupportedPolicy,
            typename LeadingTemplateArgTypeList,
            typename... Args>
  Ret InvokeRetWithUnsupportedPolicyAndLeadingTemplateArgs(Args&&... args) const {
    mltype_dispatcher_internal::CallableDispatchableRetHelper<Ret, UnsupportedPolicy> helper(dt_type_);

    // given LeadingTemplateArgTypeList is a type list L<U1, U2, ...>,
    // call helper.Invoke() with Fn<U1, U2, ..., T> for each T in Types
    static_cast<void>(std::array<int, sizeof...(Types)>{
        helper.template Invoke<Types>(
            boost::mp11::mp_apply<Fn, boost::mp11::mp_push_back<LeadingTemplateArgTypeList, Types>>(),
            std::forward<Args>(args)...)...});

    // avoid "unused parameter" warning for the case where Types is empty
    static_cast<void>(std::array<int, sizeof...(Args)>{(ORT_UNUSED_PARAMETER(args), 0)...});

    return helper.Get();
  }
};

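// Example (illustrative sketch, not part of this header): dispatching a hypothetical functor
// CastToFloatImpl over the element types a kernel supports. The functor and variable names are
// assumptions made for illustration.
//
//   template <typename T>
//   struct CastToFloatImpl {
//     void operator()(const Tensor& src, Tensor& dst) const {
//       const T* in = src.Data<T>();
//       float* out = dst.MutableData<float>();
//       for (int64_t i = 0, n = src.Shape().Size(); i < n; ++i) {
//         out[i] = static_cast<float>(in[i]);
//       }
//     }
//   };
//
//   utils::MLTypeCallDispatcher<int32_t, int64_t, double> dispatcher(src.GetElementType());
//   dispatcher.Invoke<CastToFloatImpl>(src, dst);
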
// The type MLTypeCallDispatcher<T...> given a type list L<T...>.
template <typename L>
using MLTypeCallDispatcherFromTypeList = boost::mp11::mp_apply<MLTypeCallDispatcher, L>;

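// Example (illustrative sketch, not part of this header): a dispatcher can also be built from an
// existing type list and can return a value. EnabledTypes and SumElementsImpl are hypothetical
// names used only for illustration.
//
//   using EnabledTypes = TypeList<float, double, int32_t>;
//   using Dispatcher = MLTypeCallDispatcherFromTypeList<EnabledTypes>;
//
//   template <typename T>
//   struct SumElementsImpl {
//     double operator()(const Tensor& t) const {
//       const T* data = t.Data<T>();
//       double sum = 0.0;
//       for (int64_t i = 0, n = t.Shape().Size(); i < n; ++i) sum += static_cast<double>(data[i]);
//       return sum;
//     }
//   };
//
//   Dispatcher dispatcher(t.GetElementType());
//   double total = dispatcher.InvokeRet<double, SumElementsImpl>(t);
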
namespace data_types_internal {

enum class ContainerType : uint16_t {
  kUndefined = 0,
  kTensor = 1,
  kMap = 2,
  kSequence = 3,
  kOpaque = 4,
  kOptional = 5
};

class TypeNode {
  // type_ is a TypeProto value case enum
  // that may be kTypeTensor, kTypeMap or kTypeSequence.
  // prim_type_ is a TypeProto_DataType enum whose meaning depends on type_:
  //  - for Tensor, prim_type_ is the contained element type
  //  - for Map, prim_type_ is the key type; the next entry describes the map value
  //  - for Sequence, prim_type_ is unused and has no meaning; the next entry
  //    describes the sequence element
  // Tensor is always the last entry as it describes a contained primitive type.
  ContainerType type_;
  uint16_t prim_type_;

 public:
  TypeNode(ContainerType type, int32_t prim_type) noexcept {
    type_ = type;
    prim_type_ = static_cast<uint16_t>(prim_type);
  }

  bool IsType(ContainerType type) const noexcept {
    return type_ == type;
  }

  bool IsPrimType(int32_t prim_type) const noexcept {
    return prim_type_ == static_cast<uint16_t>(prim_type);
  }
};

}  // namespace data_types_internal

////////////////////////////////////////////////////////////////////
/// Provides a generic interface to test whether an MLDataType is a Sequence,
/// Map or an Opaque type, including arbitrarily recursive definitions,
/// without querying DataTypeImpl::GetType<T> for all known complex types.

// T is the contained element type of the sequence.
// If this returns true, then we know that the runtime
// representation is std::vector<T>.
// T itself can be a runtime representation of another
// sequence, map, opaque type or a tensor;
// that is, it can be a std::vector or a std::map.
// If T is a primitive type, the sequence is tested for whether it contains
// tensors of that type.
//
// If T is an opaque type, it is only tested to be opaque, not to be exactly
// a specific opaque type. To test for a specific opaque type, use IsOpaqueType() below.
//
// This class examines the supplied MLDataType and records
// its information in a vector so any subsequent checks for Sequences and Maps
// are quick.
class ContainerChecker {
  using Cont = std::vector<data_types_internal::TypeNode>;
  Cont types_;

  // The default IsContainerOfType handles Opaque types
  template <class T>
  struct IsContainerOfType {
    static bool check(const Cont& c, size_t index) {
      if (index >= c.size()) {
        return false;
      }
      return c[index].IsType(data_types_internal::ContainerType::kOpaque);
    }
  };

  // Handles the case where the sequence element is also a sequence
  template <class T>
  struct IsContainerOfType<std::vector<T>> {
    static bool check(const Cont& c, size_t index) {
      if (index >= c.size()) {
        return false;
      }
      if (c[index].IsType(data_types_internal::ContainerType::kSequence)) {
        ORT_ENFORCE(++index < c.size(), "Sequence is missing type entry for its element");
        constexpr int32_t prim_type = ToTensorProtoElementType<T>();
        // Check if this is a primitive type and whether it matches
        if constexpr (prim_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
          return c[index].IsType(data_types_internal::ContainerType::kTensor) &&
                 c[index].IsPrimType(prim_type);
        } else {
          // T is not primitive; check the next entry for a non-primitive proto
          return IsContainerOfType<T>::check(c, index);
        }
      }
      return false;
    }
  };

  template <class K, class V>
  struct IsContainerOfType<std::map<K, V>> {
    static bool check(const Cont& c, size_t index) {
      static_assert(ToTensorProtoElementType<K>() != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED,
                    "Map Key can not be a non-primitive type");
      if (index >= c.size()) {
        return false;
      }
      if (!c[index].IsType(data_types_internal::ContainerType::kMap)) {
        return false;
      }
      constexpr int32_t key_type = ToTensorProtoElementType<K>();
      if (!c[index].IsPrimType(key_type)) {
        return false;
      }
      ORT_ENFORCE(++index < c.size(), "Map is missing type entry for its value");
      constexpr int32_t val_type = ToTensorProtoElementType<V>();
      if constexpr (val_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
        return c[index].IsType(data_types_internal::ContainerType::kTensor) &&
               c[index].IsPrimType(val_type);
      } else {
        return IsContainerOfType<V>::check(c, index);
      }
    }
  };

 public:
  explicit ContainerChecker(MLDataType);
  ~ContainerChecker() = default;

  bool IsMap() const noexcept {
    assert(!types_.empty());
    return types_[0].IsType(data_types_internal::ContainerType::kMap);
  }

  bool IsSequence() const noexcept {
    assert(!types_.empty());
    return types_[0].IsType(data_types_internal::ContainerType::kSequence);
  }

  template <class T>
  bool IsSequenceOf() const {
    assert(!types_.empty());
    return IsContainerOfType<std::vector<T>>::check(types_, 0);
  }

  template <class K, class V>
  bool IsMapOf() const {
    assert(!types_.empty());
    return IsContainerOfType<std::map<K, V>>::check(types_, 0);
  }
};

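// Example (illustrative sketch, not part of this header): probing a non-tensor input type.
// The variable name is hypothetical.
//
//   ContainerChecker checker(input_ml_type);
//   if (checker.IsSequenceOf<float>()) {
//     // the input is a sequence whose elements are float tensors
//   } else if (checker.IsMapOf<int64_t, float>()) {
//     // the input is a map with int64_t keys and float values
//   }
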
bool IsOpaqueType(MLDataType ml_type, const char* domain, const char* name);

}  // namespace utils
}  // namespace onnxruntime