#include "onnxruntime_config.h"

namespace onnxruntime {

#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif

class Tensor final {
 public:
  Tensor(Tensor&& other) noexcept;
  Tensor& operator=(Tensor&& other) noexcept;

  /// Creates an instance of Tensor on the heap and initializes OrtValue with it.
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, void* p_data,
                           const OrtMemoryInfo& location, OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape,
                           std::shared_ptr<IAllocator> allocator, OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// Calculate the required storage for the tensor.
  static Status CalculateTensorStorageSize(MLDataType elt_type, const TensorShape& shape,
                                           size_t alignment, size_t& storage_size);

  bool IsDataTypeString() const {
    return utils::IsPrimitiveDataType<std::string>(dtype_);
  }

  template <class T>
  bool IsDataType() const {
    return utils::IsPrimitiveDataType<T>(dtype_);
  }

  template <typename T>
  T* MutableData() {
    // Fails via ORT_ENFORCE if T does not match the tensor's element type.
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  template <typename T>
  gsl::span<T> MutableDataAsSpan() {
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<size_t>(NumStorageElements()));
  }

  template <typename T>
  const T* Data() const {
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  template <typename T>
  gsl::span<const T> DataAsSpan() const {
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<size_t>(NumStorageElements()));
  }

  void* MutableDataRaw(MLDataType type) {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw(MLDataType type) const {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  void* MutableDataRaw() noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw() const noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  bool OwnsBuffer() const noexcept {
    return buffer_deleter_ != nullptr;
  }

  void SetByteOffset(ptrdiff_t byte_offset) {
    byte_offset_ = byte_offset;
  }

#ifdef ENABLE_STRIDED_TENSORS
  /// Returns the strides of the tensor.
  gsl::span<const int64_t> Strides() const;

  /// Returns true when the tensor's data is stored contiguously.
  bool IsContiguous() const noexcept { return is_contiguous_; }

  /// Updates the shape and strides of the tensor.
  void SetShapeAndStrides(const TensorShape& new_shape, gsl::span<const int64_t> new_strides);
#endif

 private:
  void Init(MLDataType elt_type, const TensorShape& shape, void* p_raw_data,
            AllocatorPtr deleter, ptrdiff_t offset = 0,
            gsl::span<const int64_t> strides = {});

  void ReleaseBuffer();

#ifdef ENABLE_STRIDED_TENSORS
  bool CheckIsContiguous() const;
#endif

#ifdef ENABLE_STRIDED_TENSORS
  bool is_contiguous_ = true;
#endif

  const PrimitiveDataTypeBase* dtype_;
  ptrdiff_t byte_offset_;
};

#pragma GCC diagnostic pop

}  // namespace onnxruntime
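
// ---------------------------------------------------------------------------
// Usage sketch (not part of tensor.h): a minimal illustration of the typed
// accessors declared above. It assumes the allocator-owning Tensor
// constructor, CPUAllocator, and DataTypeImpl::GetType<T>() declared in other
// onnxruntime headers; the include paths and the helper name below are
// illustrative assumptions, not part of this header.
// ---------------------------------------------------------------------------
#include <memory>

#include "core/framework/allocator.h"
#include "core/framework/tensor.h"

namespace {

void FillAndReadTensor() {
  using namespace onnxruntime;

  // Allocate a 2x3 float tensor whose buffer is owned by the CPU allocator.
  AllocatorPtr cpu_allocator = std::make_shared<CPUAllocator>();
  Tensor t(DataTypeImpl::GetType<float>(), TensorShape({2, 3}), cpu_allocator);

  // MutableDataAsSpan<T>() runs the ORT_ENFORCE type check, then returns a
  // span of NumStorageElements() elements starting at p_data_ + byte_offset_.
  gsl::span<float> values = t.MutableDataAsSpan<float>();
  for (size_t i = 0; i < values.size(); ++i) {
    values[i] = static_cast<float>(i);
  }

  // Const access: Data<T>() / DataAsSpan<T>() perform the same type check.
  const float* first = t.Data<float>();

  // Raw access skips the element-type check entirely.
  const void* raw = t.DataRaw();

  (void)first;
  (void)raw;
}

}  // namespace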
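
// ---------------------------------------------------------------------------
// Usage sketch (not part of tensor.h): illustrates the InitOrtValue overload
// above, which creates a Tensor on the heap over caller-owned memory and wraps
// it in an OrtValue. The OrtMemoryInfo constructor arguments and
// OrtValue::Get<Tensor>() come from other onnxruntime headers and, like the
// helper name, should be treated as assumptions.
// ---------------------------------------------------------------------------
#include "core/framework/ort_value.h"
#include "core/framework/tensor.h"

namespace {

void WrapExistingBuffer(float* existing_data /* at least 4 floats, caller-owned */) {
  using namespace onnxruntime;

  OrtMemoryInfo cpu_info(CPU, OrtAllocatorType::OrtDeviceAllocator);
  OrtValue value;

  // The Tensor does not take ownership of existing_data (OwnsBuffer() stays
  // false); the caller must keep the buffer alive as long as `value` is used.
  Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({4}),
                       existing_data, cpu_info, value);

  const Tensor& t = value.Get<Tensor>();
  ORT_ENFORCE(t.SizeInBytes() == 4 * sizeof(float));
}

}  // namespace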