HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
onnxruntime_session_options_config_keys.h
Go to the documentation of this file.
1 // Copyright (c) Microsoft Corporation. All rights reserved.
2 // Licensed under the MIT License.
3 
4 #pragma once
5 
6 /*
7  * This file defines SessionOptions Config Keys and format of the Config Values.
8  *
9  * The Naming Convention for a SessionOptions Config Key:
10  * "[Area][.[SubArea1].[SubArea2]...].[Keyname]"
11  * Such as "ep.cuda.use_arena"
12  * The Config Key cannot be empty
13  * The maximum length of the Config Key is 128
14  *
15  * The string format of a SessionOptions Config Value is defined individually for each Config.
16  * The maximum length of the Config Value is 1024
17  */
18 
// Key for disabling prepacking.
// If the config value is set to "1" then prepacking is disabled; otherwise prepacking is enabled (default value).
static const char* const kOrtSessionOptionsConfigDisablePrepacking = "session.disable_prepacking";
22 
// A value of "1" means allocators registered in the env will be used. "0" means the allocators created in the session
// will be used. Use this to override the usage of env allocators on a per-session level.
static const char* const kOrtSessionOptionsConfigUseEnvAllocators = "session.use_env_allocators";
26 
// Set to 'ORT' (case sensitive) to load an ORT format model.
// If unset, model type will default to ONNX unless inferred from filename ('.ort' == ORT format) or bytes to be ORT.
static const char* const kOrtSessionOptionsConfigLoadModelFormat = "session.load_model_format";
30 
// Set to 'ORT' (case sensitive) to save optimized model in ORT format when SessionOptions.optimized_model_path is set.
// If unset, format will default to ONNX unless optimized_model_filepath ends in '.ort'.
static const char* const kOrtSessionOptionsConfigSaveModelFormat = "session.save_model_format";
34 
// If the value is "1", flush-to-zero and denormal-as-zero are applied. The default is "0".
// When multiple sessions are created, the main thread doesn't override changes from succeeding session options,
// but threads in session thread pools follow option changes.
// When ORT runs with OpenMP, the same rule applies: only the first session option to set flush-to-zero and
// denormal-as-zero is applied to the global OpenMP thread pool, which doesn't support per-session thread pools.
// Note that an alternative to using this option at runtime is to train and export a model without denormals;
// that is recommended because turning this option on may hurt model accuracy.
static const char* const kOrtSessionOptionsConfigSetDenormalAsZero = "session.set_denormal_as_zero";
43 
// Controls whether to run a quantized model in QDQ (QuantizeLinear/DequantizeLinear) format or not.
// "0": enabled. ORT does the fusion logic for the QDQ format.
// "1": disabled. ORT doesn't do the fusion logic for the QDQ format.
// Its default value is "0".
static const char* const kOrtSessionOptionsDisableQuantQDQ = "session.disable_quant_qdq";
49 
// Controls whether to enable the Double QDQ remover and Identical Children Consolidation.
// (DQ below = DequantizeLinear, Q = QuantizeLinear; the original comment's "QD" was a typo.)
// "0": not disabled. ORT removes the middle 2 nodes from Q->(DQ->Q)->DQ pairs.
// "1": disabled. ORT doesn't remove the middle 2 nodes from Q->(DQ->Q)->DQ pairs.
// Its default value is "0".
static const char* const kOrtSessionOptionsDisableDoubleQDQRemover = "session.disable_double_qdq_remover";
55 
// If set to "1", enables the removal of QuantizeLinear/DequantizeLinear node pairs once all QDQ handling has been
// completed. e.g. If after all QDQ handling has completed and we have -> FloatOp -> Q -> DQ -> FloatOp -> the
// Q -> DQ could potentially be removed. This will provide a performance benefit by avoiding going from float to
// 8-bit and back to float, but could impact accuracy. The impact on accuracy will be model specific and depend on
// other factors like whether the model was created using Quantization Aware Training or Post Training Quantization.
// As such, it's best to test to determine if enabling this works well for your scenario.
// The default value is "0".
// Available since version 1.11.
static const char* const kOrtSessionOptionsEnableQuantQDQCleanup = "session.enable_quant_qdq_cleanup";
65 
// Enable or disable gelu approximation in graph optimization. "0": disable; "1": enable. The default is "0".
// GeluApproximation has side effects which may change the inference results. It is disabled by default due to this.
static const char* const kOrtSessionOptionsEnableGeluApproximation = "optimization.enable_gelu_approximation";
69 
#ifdef ENABLE_TRAINING
// Specifies a list of op types for memory footprint reduction.
// The value should be a ","-delimited list of triples of
// <subgraph string : optimization strategy : number of subgraphs to apply>.
// For example, "Gelu+Cast+:1:0,Dropout+:1:1".
// A valid "subgraph string" should be one subgraph representation output by ORT graph transformations.
// "optimization strategy" currently has these valid values: 0 - disabled, 1 - recompute.
// "number of subgraphs to apply" controls how many subgraphs the optimization is applied to, to avoid "oversaving"
// the memory.
static const char* const kOrtSessionOptionsMemoryOptimizerEnabler = "optimization.enable_memory_optimizer";

// Specifies the level for detecting subgraphs for memory footprint reduction.
// The value should be an integer. The default value is 0.
static const char* const kOrtSessionOptionsMemoryOptimizerProbeLevel = "optimization.enable_memory_probe_recompute_level";
#endif
85 
// Enable or disable using device allocator for allocating initialized tensor memory. "1": enable; "0": disable. The default is "0".
// Using device allocators means the memory allocation is made using malloc/new.
// NOTE(review): the sentence above appears contradictory ("device allocator" vs. malloc/new) — confirm intended semantics upstream.
static const char* const kOrtSessionOptionsUseDeviceAllocatorForInitializers = "session.use_device_allocator_for_initializers";
89 
// Configure whether to allow the inter_op/intra_op threads to spin a number of times before blocking.
// "0": thread will block if it finds no job to run.
// "1": default; thread will spin a number of times before blocking.
static const char* const kOrtSessionOptionsConfigAllowInterOpSpinning = "session.inter_op.allow_spinning";
static const char* const kOrtSessionOptionsConfigAllowIntraOpSpinning = "session.intra_op.allow_spinning";
95 
// Key for using model bytes directly for ORT format.
// If a session is created using an input byte array containing the ORT format model data,
// by default we will copy the model bytes at the time of session creation to ensure the model bytes
// buffer is valid.
// Setting this option to "1" will disable copying of the model bytes and use the model bytes directly. The caller
// has to guarantee that the model bytes are valid until the ORT session using the model bytes is destroyed.
static const char* const kOrtSessionOptionsConfigUseORTModelBytesDirectly = "session.use_ort_model_bytes_directly";
103 
/// <summary>
/// Key for using the ORT format model flatbuffer bytes directly for initializers.
/// This avoids copying the bytes and reduces peak memory usage during model loading and initialization.
/// Requires `session.use_ort_model_bytes_directly` to be true.
/// If set, the flatbuffer bytes provided when creating the InferenceSession MUST remain valid for the entire
/// duration of the InferenceSession.
/// </summary>
static const char* const kOrtSessionOptionsConfigUseORTModelBytesForInitializers =
    "session.use_ort_model_bytes_for_initializers";
113 
// This should only be specified when exporting an ORT format model for use on a different platform.
// If the ORT format model will be used on ARM platforms set to "1". For other platforms set to "0".
// Available since version 1.11.
static const char* const kOrtSessionOptionsQDQIsInt8Allowed = "session.qdqisint8allowed";
118 
// x64 SSE4.1/AVX2/AVX512 (with no VNNI) has an overflow problem with quantized matrix multiplication with U8S8.
// To avoid this we need to use the slower U8U8 matrix multiplication instead. This option, if
// turned on, uses the slower U8U8 matrix multiplications. Only effective on AVX2 or AVX512
// platforms.
static const char* const kOrtSessionOptionsAvx2PrecisionMode = "session.x64quantprecision";
124 
// Specifies how minimal build graph optimizations are handled in a full build.
// These optimizations are at the extended level or higher.
// Possible values and their effects are:
// "save": Save runtime optimizations when saving an ORT format model.
// "apply": Only apply optimizations available in a minimal build.
// ""/<unspecified>: Apply optimizations available in a full build.
// Available since version 1.11.
static const char* const kOrtSessionOptionsConfigMinimalBuildOptimizations =
    "optimization.minimal_build_optimizations";
134 
135 // Note: The options specific to an EP should be specified prior to appending that EP to the session options object in
136 // order for them to take effect.
137 
// Specifies a list of stop op types. Nodes of a type in the stop op types and nodes downstream from them will not be
// run by the NNAPI EP.
// The value should be a ","-delimited list of op types. For example, "Add,Sub".
// If not specified, the default set of stop ops is used. To specify an empty stop ops types list and disable stop op
// exclusion, set the value to "".
static const char* const kOrtSessionOptionsConfigNnapiEpPartitioningStopOps = "ep.nnapi.partitioning_stop_ops";
144 
// Enables dynamic block-sizing for multithreading.
// With a positive value, the thread pool will split a task of N iterations into blocks of size starting from:
// N / (num_of_threads * dynamic_block_base)
// As execution progresses, the size will decrease according to the diminishing residual of N,
// meaning the task will be distributed at a smaller granularity for better parallelism.
// For some models, it helps to reduce the variance of E2E inference latency and boost performance.
// The feature will not function by default; specify any positive integer, e.g. "4", to enable it.
// Available since version 1.11.
static const char* const kOrtSessionOptionsConfigDynamicBlockBase = "session.dynamic_block_base";
154 
// This option allows decreasing CPU usage between infrequent
// requests and forces any spinning thread-pool threads to stop immediately when the last of the
// concurrent Run() calls returns.
// Spinning is restarted on the next Run() call.
// Applies only to internal thread-pools.
static const char* const kOrtSessionOptionsConfigForceSpinningStop = "session.force_spinning_stop";
161 
// "1": all inconsistencies encountered during shape and type inference
// will result in failures.
// "0": in some cases warnings will be logged but processing will continue. The default.
// May be useful to expose bugs in models.
static const char* const kOrtSessionOptionsConfigStrictShapeTypeInference = "session.strict_shape_type_inference";
167 
// Specifies the file that stores the configuration for partitioning nodes among logic streams.
static const char* const kNodePartitionConfigFile = "session.node_partition_config_file";
170 
// This option allows setting affinities for intra-op threads.
// The affinity string follows the format:
// logical_processor_id,logical_processor_id;logical_processor_id,logical_processor_id
// Semicolons separate configurations among threads, while commas separate the processors the i-th thread is expected to attach to.
// e.g. 1,2,3;4,5
// specifies affinities for two threads, with the 1st thread attached to the 1st, 2nd, and 3rd processors, and the 2nd thread to the 4th and 5th.
// To ease the configuration, an "interval" is also allowed:
// e.g. 1-8;8-16;17-24
// orders that the 1st thread runs on the first eight processors, the 2nd thread runs on the next eight processors, and so forth.
// Note:
// 1. Once set, the number of thread affinities must equal intra_op_num_threads - 1, since ORT does not set affinity on the main thread, which
// is started and managed by the calling app;
// 2. For Windows, ORT will infer the group id from a logical processor id. For example, assuming there are two groups each with 64 logical processors,
// an id of 64 will be inferred as the last processor of the 1st group, while 65 will be interpreted as the 1st processor of the second group.
// Hence 64-65 is an invalid configuration, because a Windows thread cannot be attached to processors across a group boundary.
static const char* const kOrtSessionOptionsConfigIntraOpThreadAffinities = "session.intra_op_thread_affinities";