tensor.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <stddef.h>
#include <iostream>
#include <string>
#include <vector>

#include "core/common/gsl.h"
#include "core/common/common.h"
#include "core/framework/allocator.h"
#include "core/framework/tensor_shape.h"
#include "onnxruntime_config.h"
#include "core/framework/data_types.h"
#include "core/framework/data_types_internal.h"

struct OrtValue;

namespace onnxruntime {

// TODO: ensure dtype_ != nullptr
#ifdef __GNUC__
#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif
#endif
/*
  We want to keep Tensor as simple as possible: it is just a placeholder
  for a piece of memory plus shape information. The memory is owned and
  managed by the Executor / Workspace, so Tensor only uses it and never
  performs any allocation / release.
*/

class Tensor final {
 public:
  // NB! The Create() methods returning unique_ptr<Tensor> have been removed. They are still
  // available in other EPs that are dynamically linked.
  // Strive not to allocate Tensor with new/delete, as it is a shallow class and using it by value is just fine.
  // Use the InitOrtValue() methods to allocate for an OrtValue.

  Tensor() = default;  // to allow creating vector<Tensor> to support seq(tensor)

  /**
   * Create tensor with given type, shape, pre-allocated memory and allocator info.
   * This function won't check if the preallocated buffer (p_data) has enough room for the shape.
   * \param p_type Data type of the tensor.
   * \param shape Shape of the tensor.
   * \param p_data A preallocated buffer. Can be NULL if the shape is empty.
   *               Tensor does not own the data and will not delete it.
   * \param alloc Memory info describing where the buffer ('p_data') was allocated.
   * \param offset Offset in bytes to the start of the Tensor within p_data.
   * \param strides Strides span. Can be empty if the tensor is contiguous.
   */
  Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& alloc,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});
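
  /*
    A minimal usage sketch (not part of the original header). It assumes the
    standard onnxruntime CPUAllocator is available; `cpu_alloc`, `buffer` and
    `t` are placeholder names:

      AllocatorPtr cpu_alloc = std::make_shared<CPUAllocator>();
      float buffer[6] = {0, 1, 2, 3, 4, 5};      // caller-owned storage
      Tensor t(DataTypeImpl::GetType<float>(),   // element type
               TensorShape({2, 3}),              // 2 x 3 = 6 elements
               buffer,                           // pre-allocated, NOT owned
               cpu_alloc->Info());               // where the buffer lives
      // t.Data<float>()[4] == 4.0f; destroying t leaves `buffer` untouched.
  */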

  /// <summary>
  /// Creates an instance of Tensor on the heap using the appropriate constructor and
  /// initializes the OrtValue with it. The Tensor does not own the buffer.
  /// </summary>
  /// <param name="p_type">Data type of the tensor.</param>
  /// <param name="shape">Shape of the tensor.</param>
  /// <param name="p_data">Pre-allocated buffer; not owned by the Tensor.</param>
  /// <param name="location">Memory info describing where p_data was allocated.</param>
  /// <param name="ort_value">OrtValue to initialize.</param>
  /// <param name="offset">Offset in bytes to the start of the Tensor within p_data.</param>
  /// <param name="strides">Strides span. Can be empty if the tensor is contiguous.</param>
  static void InitOrtValue(MLDataType p_type, const TensorShape& shape,
                           void* p_data, const OrtMemoryInfo& location,
                           OrtValue& ort_value, ptrdiff_t offset = 0,
                           gsl::span<const int64_t> strides = {});
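
  /*
    Sketch of wrapping the same kind of externally owned buffer directly into
    an OrtValue (`buffer` and `cpu_alloc` as in the sketch above; Get<Tensor>()
    is OrtValue's standard const accessor):

      OrtValue ort_value;
      Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({2, 3}),
                           buffer, cpu_alloc->Info(), ort_value);
      const Tensor& t = ort_value.Get<Tensor>();  // non-owning view over `buffer`
  */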

  /// <summary>
  /// Creates an instance of Tensor on the heap that owns the pre-allocated buffer,
  /// and initializes the OrtValue with it.
  /// </summary>
  /// <param name="p_type">Data type of the tensor.</param>
  /// <param name="shape">Shape of the tensor.</param>
  /// <param name="p_data">Pre-allocated buffer; freed via the allocator on destruction.</param>
  /// <param name="allocator">Allocator that was used to allocate p_data and will free it.</param>
  /// <param name="ort_value">OrtValue to initialize.</param>
  /// <param name="offset">Offset in bytes to the start of the Tensor within p_data.</param>
  /// <param name="strides">Strides span. Can be empty if the tensor is contiguous.</param>
  static void InitOrtValue(MLDataType p_type, const TensorShape& shape,
                           void* p_data, std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value, ptrdiff_t offset = 0,
                           gsl::span<const int64_t> strides = {});

  /**
   * Calculates the number of bytes of storage required to hold a tensor of the
   * given element type and shape (taking any strides into account).
   */
  static size_t CalculateTensorStorageSize(MLDataType p_type,
                                           const TensorShape& shape,
                                           gsl::span<const int64_t> strides = {});
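
  /*
    For a contiguous tensor the required storage is simply element count times
    element size, e.g. a float tensor of shape {2, 3} needs 2 * 3 * sizeof(float)
    = 24 bytes. A sketch:

      size_t bytes = Tensor::CalculateTensorStorageSize(
          DataTypeImpl::GetType<float>(), TensorShape({2, 3}));  // 24
  */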

  /**
   * Deprecated. The original design was that this Tensor class would never perform
   * any allocation / release. However, this constructor allocates the buffer for the
   * given shape, and performs placement new if p_type is a string tensor.
   */
  Tensor(MLDataType p_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator,
         gsl::span<const int64_t> strides = {});

  /// <summary>
  /// Creates an instance of Tensor on the heap using the appropriate constructor,
  /// allocating its buffer from the given allocator, and initializes the OrtValue with it.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Shape of the tensor.</param>
  /// <param name="allocator">Allocator used to allocate (and later free) the buffer.</param>
  /// <param name="ort_value">OrtValue to initialize.</param>
  /// <param name="strides">Strides span. Can be empty if the tensor is contiguous.</param>
  static void InitOrtValue(MLDataType elt_type,
                           const TensorShape& shape,
                           std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value,
                           gsl::span<const int64_t> strides = {});
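
  /*
    The usual allocation path: let the allocator provide the buffer and the
    resulting Tensor own it, wrapped directly in an OrtValue (sketch;
    `cpu_alloc` as above, GetMutable<Tensor>() is OrtValue's mutable accessor):

      OrtValue ort_value;
      Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({2, 3}),
                           cpu_alloc, ort_value);
      float* out = ort_value.GetMutable<Tensor>()->MutableData<float>();
  */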

  /// <summary>
  /// Moves an existing Tensor into a new instance on the heap and
  /// initializes the OrtValue with it.
  /// </summary>
  /// <param name="tensor">Tensor to move from.</param>
  /// <param name="ort_value">OrtValue to initialize.</param>
  static void InitOrtValue(Tensor&& tensor, OrtValue& ort_value);

  /**
   * Create tensor with given type, shape, pre-allocated memory and an allocator which will be
   * used to free the pre-allocated memory.
   * This function won't check if the preallocated buffer (p_data) has enough room for the shape.
   * However, it will de-allocate the buffer when the tensor is destructed.
   * \param p_type Data type of the tensor.
   * \param shape Shape of the tensor.
   * \param p_data A preallocated buffer. Can be NULL if the shape is empty.
   *               Tensor will own the memory and will delete it when the tensor instance is destructed.
   * \param deleter Allocator used to free the pre-allocated memory.
   * \param offset Offset in bytes to the start of the Tensor within p_data.
   * \param strides Strides span. Can be empty if the tensor is contiguous.
   */
  Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, std::shared_ptr<IAllocator> deleter,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});
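
  /*
    Sketch of handing an allocator-provided buffer to the Tensor so that it is
    freed automatically on destruction (`cpu_alloc` as above):

      void* raw = cpu_alloc->Alloc(6 * sizeof(float));
      Tensor owned(DataTypeImpl::GetType<float>(), TensorShape({2, 3}),
                   raw, cpu_alloc);  // `raw` is freed when `owned` is destructed
  */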

  ~Tensor();

  // Move is allowed; copy is not.
  ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor);

  Tensor(Tensor&& other) noexcept;

  Tensor& operator=(Tensor&& other) noexcept;

  /**
     Returns the data type.
  */
  MLDataType DataType() const { return dtype_; }

  /**
     Returns the data type enum constant.
     @remarks Use utils::ToTensorProtoElementType<T> for comparison.
  */
  int32_t GetElementType() const {
    return dtype_->GetDataType();
  }

  // Checks if the tensor contains string data. This is a separate
  // interface because it is frequently used.
  bool IsDataTypeString() const {
    return utils::IsPrimitiveDataType<std::string>(dtype_);
  }

  // Checks if the Tensor contains data of type T.
  template <class T>
  bool IsDataType() const {
    return utils::IsPrimitiveDataType<T>(dtype_);
  }

  /**
     Returns the shape of the tensor.
  */
  const TensorShape& Shape() const noexcept { return shape_; }

  /**
     Returns the location of the tensor's memory.
  */
  const OrtMemoryInfo& Location() const { return alloc_info_; }

  /**
     May return nullptr if tensor size is zero.
  */
  template <typename T>
  T* MutableData() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  /**
     May return nullptr if tensor size is zero.
  */
  template <typename T>
  gsl::span<T> MutableDataAsSpan() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<size_t>(shape_.Size()));
  }
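
  /*
    Sketch: spans give bounds-aware element access without manual size
    bookkeeping (tensor `t` as in the sketches above):

      gsl::span<float> values = t.MutableDataAsSpan<float>();
      for (float& v : values) v *= 2.0f;  // values.size() == t.Shape().Size()
  */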

  template <typename T>
  const T* Data() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  template <typename T>
  gsl::span<const T> DataAsSpan() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }

  void* MutableDataRaw(MLDataType type) {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw(MLDataType type) const {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  void* MutableDataRaw() noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw() const noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  bool OwnsBuffer() const noexcept {
    return buffer_deleter_ != nullptr;
  }

  /**
   * Resizes the tensor without touching the underlying storage.
   * This requires the total size of the tensor to remain constant.
   * @warning this function is NOT thread-safe.
   */
  inline void Reshape(const TensorShape& new_shape) {
    ORT_ENFORCE(shape_.Size() == new_shape.Size(),
                "Tensor size (" + std::to_string(shape_.Size()) +
                    ") != new size (" + std::to_string(new_shape.Size()) + ")");
    shape_ = new_shape;
  }
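
  /*
    Sketch: a {2, 3} tensor can be reshaped to {3, 2} since both hold 6
    elements; reshaping to {4, 2} would trip the ORT_ENFORCE above:

      t.Reshape(TensorShape({3, 2}));  // OK: 6 == 6, data is untouched
  */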

  /**
   * Get the byte offset with respect to p_data.
   * @warning this is a temporary solution for reusing a buffer bigger than needed.
   * @warning use with caution - make sure you do a boundary check before calling this method (see view.cc).
   */
  inline ptrdiff_t ByteOffset() const {
    return byte_offset_;
  }

  /**
   * Set the byte offset with respect to p_data.
   * @warning this is a temporary solution for reusing a buffer bigger than needed.
   */
  inline void SetByteOffset(ptrdiff_t byte_offset) {
    byte_offset_ = byte_offset;
  }

  /**
     The number of bytes of data.
  */
  size_t SizeInBytes() const;

#ifdef ENABLE_STRIDED_TENSORS
  /**
   * Get the strides of the tensor.
   */
  gsl::span<const int64_t> Strides() const;

  /**
   * Returns whether the tensor is contiguous.
   */
  bool IsContiguous() const noexcept { return is_contiguous_; }

  /**
   * Set the shape and strides.
   */
  void SetShapeAndStrides(const TensorShape& new_shape, gsl::span<const int64_t> new_strides);
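
  /*
    Sketch: viewing a contiguous {2, 3} tensor `t` as its 3x2 transpose
    without copying, assuming strides are expressed in elements (the
    contiguous row-major strides of a {2, 3} tensor being {3, 1}):

      std::array<int64_t, 2> new_strides{1, 3};
      t.SetShapeAndStrides(TensorShape({3, 2}), gsl::make_span(new_strides));
      // t.IsContiguous() is now false; t.Strides() returns {1, 3}.
  */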
#endif

  // More API methods.
 private:
  void Init(MLDataType p_type,
            const TensorShape& shape,
            void* p_raw_data,
            AllocatorPtr deleter,
            ptrdiff_t offset = 0,
            gsl::span<const int64_t> strides = {});

  void ReleaseBuffer();

#ifdef ENABLE_STRIDED_TENSORS
  bool CheckIsContiguous() const;
#endif

  void* p_data_;
  /**
     If buffer_deleter_ is null, the tensor does not own the buffer;
     otherwise the tensor will use the deleter to release the buffer when
     the tensor is released.
  */
  AllocatorPtr buffer_deleter_;

  TensorShape shape_;
#ifdef ENABLE_STRIDED_TENSORS
  mutable TensorShapeVector strides_;
  bool is_contiguous_ = true;
#endif

  const PrimitiveDataTypeBase* dtype_;
  OrtMemoryInfo alloc_info_;
  ptrdiff_t byte_offset_;
};
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
}  // namespace onnxruntime