#include <tensor.h>
|
| Tensor ()=default |
|
| Tensor (MLDataType p_type, const TensorShape &shape, void *p_data, const OrtMemoryInfo &alloc, ptrdiff_t offset=0, gsl::span< const int64_t > strides={}) |
|
| Tensor (MLDataType p_type, const TensorShape &shape, std::shared_ptr< IAllocator > allocator, gsl::span< const int64_t > strides={}) |
|
| Tensor (MLDataType p_type, const TensorShape &shape, void *p_data, std::shared_ptr< IAllocator > deleter, ptrdiff_t offset=0, gsl::span< const int64_t > strides={}) |
|
| ~Tensor () |
|
| ORT_DISALLOW_COPY_AND_ASSIGNMENT (Tensor) |
|
| Tensor (Tensor &&other) noexcept |
|
Tensor & | operator= (Tensor &&other) noexcept |
|
MLDataType | DataType () const |
|
int32_t | GetElementType () const |
|
bool | IsDataTypeString () const |
|
template<class T > |
bool | IsDataType () const |
|
const TensorShape & | Shape () const noexcept |
|
const OrtMemoryInfo & | Location () const |
|
template<typename T > |
T * | MutableData () |
|
template<typename T > |
gsl::span< T > | MutableDataAsSpan () |
|
template<typename T > |
const T * | Data () const |
|
template<typename T > |
gsl::span< const T > | DataAsSpan () const |
|
void * | MutableDataRaw (MLDataType type) |
|
const void * | DataRaw (MLDataType type) const |
|
void * | MutableDataRaw () noexcept |
|
const void * | DataRaw () const noexcept |
|
bool | OwnsBuffer () const noexcept |
|
void | Reshape (const TensorShape &new_shape) |
|
ptrdiff_t | ByteOffset () const |
|
void | SetByteOffset (ptrdiff_t byte_offset) |
|
size_t | SizeInBytes () const |
|
|
static void | InitOrtValue (MLDataType p_type, const TensorShape &shape, void *p_data, const OrtMemoryInfo &location, OrtValue &ort_value, ptrdiff_t offset=0, gsl::span< const int64_t > strides={}) |
| Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it. More...
|
|
static void | InitOrtValue (MLDataType p_type, const TensorShape &shape, void *p_data, std::shared_ptr< IAllocator > allocator, OrtValue &ort_value, ptrdiff_t offset=0, gsl::span< const int64_t > strides={}) |
| Creates an instance of Tensor that owns the pre-allocated buffer. More...
|
|
static size_t | CalculateTensorStorageSize (MLDataType p_type, const TensorShape &shape, gsl::span< const int64_t > strides={}) |
|
static void | InitOrtValue (MLDataType elt_type, const TensorShape &shape, std::shared_ptr< IAllocator > allocator, OrtValue &ort_value, gsl::span< const int64_t > strides={}) |
| Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it. More...
|
|
static void | InitOrtValue (Tensor &&tensor, OrtValue &ort_value) |
| Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it. More...
|
|
Definition at line 38 of file tensor.h.
onnxruntime::Tensor::Tensor() [default]
Create a tensor with the given type, shape, pre-allocated memory and allocator info. This function won't check whether the preallocated buffer (p_data) has enough room for the shape.
- Parameters
-
p_type | Data type of the tensor |
shape | Shape of the tensor |
p_data | A preallocated buffer. Can be NULL if the shape is empty. Tensor does not own the data and will not delete it |
alloc | Where the buffer('p_data') was allocated from |
offset | Offset in bytes to start of Tensor within p_data. |
strides | Strides span. Can be empty if the tensor is contiguous. |
Deprecated. The original design was that this Tensor class would not do any allocation or release. However, this constructor allocates the buffer for the shape, and performs placement new if p_type is a string tensor.
Create a tensor with the given type, shape, pre-allocated memory and an allocator which will be used to free the pre-allocated memory. This function won't check whether the preallocated buffer (p_data) has enough room for the shape, but it will de-allocate the buffer when the tensor is destructed.
- Parameters
-
p_type | Data type of the tensor |
shape | Shape of the tensor |
p_data | A preallocated buffer. Can be NULL if the shape is empty. Tensor will own the memory and will delete it when the tensor instance is destructed. |
deleter | Allocator used to free the pre-allocated memory |
offset | Offset in bytes to start of Tensor within p_data. |
strides | Strides span. Can be empty if the tensor is contiguous. |
onnxruntime::Tensor::~Tensor()
onnxruntime::Tensor::Tensor(Tensor &&other) [noexcept]
ptrdiff_t onnxruntime::Tensor::ByteOffset() const [inline]
Get the byte offset with respect to the p_data
- Warning
- this is a temporary solution for reusing the buffer bigger than needed.
-
use with caution - make sure you do boundary check before calling this method (see view.cc)
Definition at line 261 of file tensor.h.
template<typename T >
const T* onnxruntime::Tensor::Data() const [inline]
const void* onnxruntime::Tensor::DataRaw() const [inline, noexcept]
MLDataType onnxruntime::Tensor::DataType() const [inline]
Returns the data type.
Definition at line 150 of file tensor.h.
int32_t onnxruntime::Tensor::GetElementType() const [inline]
Returns the data type enum constant
Definition at line 156 of file tensor.h.
Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it.
- Parameters
-
p_type | |
shape | |
p_data | |
info | |
offset | |
strides | |
Creates an instance of Tensor that owns the pre-allocated buffer.
- Parameters
-
p_type | |
shape | |
p_data | |
allocator | |
offset | |
strides | |
Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it.
- Parameters
-
elt_type | |
shape | |
allocator | |
ort_value | |
strides | |
Creates an instance of Tensor on the heap using the appropriate __ctor and initializes OrtValue with it.
- Parameters
-
template<class T >
bool onnxruntime::Tensor::IsDataType() const [inline]
bool onnxruntime::Tensor::IsDataTypeString() const [inline]
Returns the location of the tensor's memory
Definition at line 180 of file tensor.h.
template<typename T >
T* onnxruntime::Tensor::MutableData() [inline]
May return nullptr if tensor size is zero
Definition at line 186 of file tensor.h.
template<typename T >
gsl::span<T> onnxruntime::Tensor::MutableDataAsSpan() [inline]
May return nullptr if tensor size is zero
Definition at line 197 of file tensor.h.
void* onnxruntime::Tensor::MutableDataRaw() [inline, noexcept]
onnxruntime::Tensor::ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor)
bool onnxruntime::Tensor::OwnsBuffer() const [inline, noexcept]
Resizes the tensor without touching the underlying storage. This requires the total size of the tensor to remain constant.
- Warning
- this function is NOT thread-safe.
Definition at line 249 of file tensor.h.
void onnxruntime::Tensor::SetByteOffset(ptrdiff_t byte_offset) [inline]
Set the byte offset with respect to the p_data
- Warning
- this is a temporary solution for reusing the buffer bigger than needed.
Definition at line 269 of file tensor.h.
Returns the shape of the tensor.
Definition at line 175 of file tensor.h.
size_t onnxruntime::Tensor::SizeInBytes() const
The number of bytes of data.
The documentation for this class was generated from the following file: