sparse_tensor.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#if !defined(DISABLE_SPARSE_TENSORS)

#pragma once

#include "core/framework/data_types.h"
#include "core/framework/tensor.h"

struct OrtValue;

namespace onnxruntime {

class IDataTransfer;
class DataTransferManager;

/**
 * @brief This is a Sparse Format enumeration
 */
enum class SparseFormat : uint32_t {
  kUndefined = 0x0U,        // For completeness
  kCoo = 0x1U,              // COO format: 1-D or 2-D indices
  kCsrc = 0x1U << 1,        // CSR(C) format: compressed sparse row (and column) indices
  kBlockSparse = 0x1U << 2  // Block sparse format (as in the OpenAI block-sparse kernels)
};

std::ostream& operator<<(std::ostream&, SparseFormat);

/**
 * @brief This class implements SparseTensor.
 * This class holds sparse non-zero data (values) and sparse format
 * specific indices. There are two main uses for the class (similar to that of Tensor):
 * - Represent model sparse inputs. Such inputs typically reside
 *   in user allocated buffers that are not owned by the SparseTensor instance; the instance
 *   serves as a facade that exposes the user allocated buffers. Such buffers should already
 *   contain proper values and format specific indices. Use the first constructor
 *   to instantiate SparseTensor and supply the values_data pointer. The Use*() functions can
 *   then be used to supply pointers to format specific indices. These buffers are used as is
 *   and will not be modified or deallocated by the instance. However, the lifespan of the buffers
 *   must eclipse the lifespan of the SparseTensor instance.
 *
 * - Represent sparse data that is the result of a format conversion or of a computation. Use the second
 *   constructor to supply the desired allocator. Use the Make*() format specific interfaces to supply
 *   values and format specific indices. The specified data will be copied into an internally allocated buffer.
 *
 * Internally, a SparseTensor is represented as a single contiguous buffer that
 * contains the values followed by the format specific indices. Tensor instances are used to project
 * the values and the indices onto various parts of this buffer.
 */
class SparseTensor final {
 public:
  /// <summary>
  /// This constructs an instance that points to user defined buffers.
  /// Make use of the Use*() functions to supply format specific indices that
  /// reside in the user supplied buffers. An instance constructed this way
  /// will not copy data. The lifespan of the supplied buffers is expected to eclipse
  /// the lifespan of the sparse tensor instance.
  /// </summary>
  /// <param name="elt_type">MLDataType</param>
  /// <param name="dense_shape">shape of the original tensor in dense form</param>
  /// <param name="values_shape">shape of the user supplied values. Use a {0} shape for fully sparse tensors.</param>
  /// <param name="values_data">pointer to the values. Use nullptr for fully sparse tensors.</param>
  /// <param name="location">description of the user allocated memory</param>
  SparseTensor(MLDataType elt_type,
               const TensorShape& dense_shape,
               const TensorShape& values_shape,
               void* values_data,
               const OrtMemoryInfo& location);
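
  // Example (illustrative sketch only, not part of the API): wrapping user owned
  // buffers for a 3x3 dense matrix with two non-zero values in COO format.
  // `cpu_info` (an OrtMemoryInfo describing CPU memory) and DataTypeImpl::GetType<float>()
  // are assumed to be available from the surrounding code.
  //
  //   std::vector<float> values = {1.0f, 2.0f};
  //   std::vector<int64_t> coo_indices = {1, 7};  // flat offsets into the 3x3 matrix
  //   SparseTensor sparse(DataTypeImpl::GetType<float>(),
  //                       TensorShape({3, 3}),  // dense_shape
  //                       TensorShape({2}),     // values_shape
  //                       values.data(), cpu_info);
  //   ORT_THROW_IF_ERROR(sparse.UseCooIndices(gsl::make_span(coo_indices)));
  //   // Nothing is copied: the buffers above must outlive `sparse`.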

  /// <summary>
  /// Use this constructor to hold sparse data in a buffer
  /// allocated with the specified allocator. Use the Make*() methods
  /// to populate the instance with data, which will be copied into the
  /// allocated buffer.
  /// </summary>
  /// <param name="elt_type">MLDataType</param>
  /// <param name="dense_shape">shape of the original tensor in dense form</param>
  /// <param name="allocator">allocator to use</param>
  SparseTensor(MLDataType elt_type,
               const TensorShape& dense_shape,
               std::shared_ptr<IAllocator> allocator);

  SparseTensor() noexcept;

  ~SparseTensor();

  ORT_DISALLOW_COPY_AND_ASSIGNMENT(SparseTensor);

  /// <summary>
  /// This factory function creates an instance of SparseTensor on the heap
  /// using the appropriate constructor and initializes the OrtValue instance with it.
  /// </summary>
  /// <param name="elt_type">element data type</param>
  /// <param name="dense_shape">dense shape of the sparse tensor</param>
  /// <param name="values_shape">values shape. Use {0} for fully sparse tensors.</param>
  /// <param name="values_data">pointer to a user allocated buffer. Use nullptr for fully sparse tensors.</param>
  /// <param name="location">description of the user allocated buffer</param>
  /// <param name="ort_value">default constructed input/output ort_value</param>
  static void InitOrtValue(MLDataType elt_type,
                           const TensorShape& dense_shape,
                           const TensorShape& values_shape,
                           void* values_data,
                           const OrtMemoryInfo& location,
                           OrtValue& ort_value);
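
  // Example (sketch; `cpu_info`, `values` and `coo_indices` are assumed user owned
  // buffers, as in the constructor example above, and must outlive the OrtValue):
  // wrapping a user-buffer backed sparse tensor in an OrtValue, e.g. to bind it as input.
  //
  //   OrtValue ort_value;
  //   SparseTensor::InitOrtValue(DataTypeImpl::GetType<float>(),
  //                              TensorShape({3, 3}), TensorShape({2}),
  //                              values.data(), cpu_info, ort_value);
  //   auto& sparse = *ort_value.GetMutable<SparseTensor>();
  //   ORT_THROW_IF_ERROR(sparse.UseCooIndices(gsl::make_span(coo_indices)));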

  /// <summary>
  /// This factory function creates an instance of SparseTensor on the heap
  /// using the appropriate constructor and initializes the OrtValue instance with it.
  /// </summary>
  /// <param name="elt_type">element data type</param>
  /// <param name="dense_shape">dense shape of the sparse tensor</param>
  /// <param name="allocator">allocator to use</param>
  /// <param name="ort_value">default constructed input/output ort_value</param>
  static void InitOrtValue(MLDataType elt_type,
                           const TensorShape& dense_shape,
                           std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value);
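
  // Example (sketch; `cpu_allocator` is assumed to be a std::shared_ptr<IAllocator>):
  // creating an allocator backed sparse tensor inside an OrtValue; its buffer is
  // allocated and filled later through one of the Make*() methods.
  //
  //   OrtValue ort_value;
  //   SparseTensor::InitOrtValue(DataTypeImpl::GetType<float>(),
  //                              TensorShape({3, 3}), cpu_allocator, ort_value);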

  /// <summary>
  /// The function checks that the OrtValue is allocated and
  /// fetches the contained SparseTensor instance, or throws if it
  /// does not contain one. It also checks that the SparseTensor has its
  /// sparse format set (i.e. is fully constructed).
  /// </summary>
  /// <param name="v">OrtValue instance</param>
  /// <returns>const SparseTensor reference</returns>
  static const SparseTensor& GetSparseTensorFromOrtValue(const OrtValue& v);
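
  // Example (sketch): reading a fully constructed sparse tensor back from an OrtValue.
  //
  //   const SparseTensor& out = SparseTensor::GetSparseTensorFromOrtValue(ort_value);
  //   if (out.Format() == SparseFormat::kCoo) {
  //     auto vals = out.Values().DataAsSpan<float>();
  //     const Tensor& idx = out.AsCoo().Indices();
  //     // ... consume vals/idx ...
  //   }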

  /// <summary>
  /// The function checks that the OrtValue is allocated and
  /// fetches the contained SparseTensor instance, or throws if it
  /// does not contain one. It also checks that the SparseTensor does not
  /// have its sparse format set yet and returns a non-const reference so that
  /// indices can be added to it.
  /// </summary>
  /// <param name="v">OrtValue</param>
  /// <returns>non-const reference to SparseTensor</returns>
  static SparseTensor& GetSparseTensorFromOrtValue(OrtValue& v);

  /// <summary>
  /// Returns the number of non-zero values (aka "NNZ").
  /// For block sparse formats this may include zeros within the blocks
  /// that are still counted as non-zero.
  /// </summary>
  /// <returns>nnz</returns>
  size_t NumValues() const { return static_cast<size_t>(values_.Shape().Size()); }

  /// <summary>
  /// Read only accessor to the non-zero values
  /// </summary>
  /// <returns>const reference to the values Tensor</returns>
  const Tensor& Values() const noexcept {
    return values_;
  }

  SparseTensor(SparseTensor&& o) noexcept;
  SparseTensor& operator=(SparseTensor&& o) noexcept;

  /// <summary>
  /// Returns the SparseFormat that the instance currently holds.
  /// If the value returned is kUndefined, the instance is not populated.
  /// </summary>
  /// <returns>format enum</returns>
  SparseFormat Format() const noexcept {
    return format_;
  }

  /// <summary>
  /// Returns the shape the tensor would have in dense form
  /// </summary>
  /// <returns>reference to dense_shape</returns>
  const TensorShape& DenseShape() const noexcept {
    return dense_shape_;
  }

  /// <summary>
  /// Calculates and returns how much memory this fully initialized SparseTensor data would
  /// occupy in a contiguous allocation block, or, in fact, occupies if it owns the buffer.
  /// </summary>
  /// <returns>required allocation size</returns>
  int64_t RequiredAllocationSize() const noexcept;

  /// <summary>
  /// Returns the Tensor element type enum.
  /// Useful for type dispatching.
  /// </summary>
  /// <returns>element type enum value</returns>
  int32_t GetElementType() const {
    return ml_data_type_->GetDataType();
  }

  /// <summary>
  /// Returns the element MLDataType
  /// </summary>
  /// <returns>MLDataType</returns>
  MLDataType DataType() const noexcept {
    return ml_data_type_;
  }

  /// <summary>
  /// Tests for string type
  /// </summary>
  /// <returns>true if tensor values are strings</returns>
  bool IsDataTypeString() const {
    return utils::IsPrimitiveDataType<std::string>(ml_data_type_);
  }

  /// <summary>
  /// Checks if the Tensor contains data of type T
  /// </summary>
  /// <typeparam name="T">type to check against</typeparam>
  /// <returns>true if tensor contains data of type T</returns>
  template <class T>
  bool IsDataType() const {
    return utils::IsPrimitiveDataType<T>(ml_data_type_);
  }

  const OrtMemoryInfo& Location() const noexcept { return location_; }

  /// <summary>
  /// Read only access to the COO indices
  /// </summary>
  class CooView {
   public:
    explicit CooView(const Tensor& indices) noexcept
        : indices_(indices) {}
    const Tensor& Indices() const noexcept { return indices_; }

   private:
    std::reference_wrapper<const Tensor> indices_;
  };

  /// <summary>
  /// Returns a COO index view
  /// </summary>
  /// <returns>CooView instance</returns>
  CooView AsCoo() const;

  /// <summary>
  /// Uses the COO indices contained in the user allocated buffer, along with the values buffer passed
  /// to the constructor. The buffer is used as is and its lifespan must eclipse the lifespan of the sparse
  /// tensor instance. The OrtMemoryInfo (location) of the indices is assumed to be the same as that of the values.
  ///
  /// The index size must either exactly match the number of values, in which case
  /// the index shape would be 1-D (values_count), or it must be twice the number of values,
  /// in which case its shape would be 2-D (values_count, 2).
  /// </summary>
  /// <param name="indices">user allocated buffer span. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status UseCooIndices(gsl::span<int64_t> indices);
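
  // Example (sketch): for a 2x3 dense matrix with non-zeros at (0,1) and (1,2),
  // either of these user owned index buffers is valid for two values:
  //
  //   std::vector<int64_t> flat_index = {1, 5};         // 1-D: shape {2}, flat offsets
  //   std::vector<int64_t> coord_index = {0, 1, 1, 2};  // 2-D: shape {2, 2}, (row, col) pairs
  //   ORT_THROW_IF_ERROR(sparse.UseCooIndices(gsl::make_span(flat_index)));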

  /// <summary>
  /// The method allocates a single contiguous buffer and copies the specified values
  /// and indices into it using the supplied IDataTransfer.
  ///
  /// The indices size must either exactly match the number of values, in which case
  /// the indices shape would be 1-D (values_count), or it must be twice the number of values,
  /// in which case its shape would be 2-D (values_count, 2).
  ///
  /// The values shape is supplied at construction time and its Size() must match values_count.
  /// </summary>
  /// <param name="data_transfer">data transfer to use for the copy</param>
  /// <param name="data_location">memory info describing where the source data resides</param>
  /// <param name="values_count">Use 0 for fully sparse tensors.</param>
  /// <param name="values_data">pointer to a buffer to be copied. Use nullptr for fully sparse tensors.</param>
  /// <param name="indices">indices to be copied. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeCooData(const IDataTransfer& data_transfer, const OrtMemoryInfo& data_location,
                     size_t values_count, const void* values_data, gsl::span<const int64_t> indices);
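
  // Example (sketch; `data_transfer` is an IDataTransfer implementation suitable for
  // the source and destination devices, and `cpu_info` describes where `values` and
  // `coo_indices` currently reside): copying two values and their flat COO indices
  // into the internally allocated buffer of an allocator backed instance.
  //
  //   ORT_THROW_IF_ERROR(sparse.MakeCooData(data_transfer, cpu_info,
  //                                         values.size(), values.data(),
  //                                         gsl::make_span(coo_indices)));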

  /// <summary>
  /// The method allocates a single contiguous buffer and creates instances of std::string in it, with
  /// copies of the supplied zero-terminated strings, followed by the COO indices.
  /// All data is assumed to be on CPU and the allocator supplied must be
  /// a CPU based allocator.
  /// </summary>
  /// <param name="string_count">use 0 for fully sparse tensors</param>
  /// <param name="strings">array of char* pointers. Use nullptr for fully sparse tensors.</param>
  /// <param name="indices">span of indices. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeCooStrings(size_t string_count, const char* const* strings, gsl::span<const int64_t> indices);

  /// <summary>
  /// Gives mutable access to the COO buffers so they can be populated
  /// </summary>
  class CooMutator {
   public:
    CooMutator(Tensor& values, Tensor& indices) noexcept : values_(values), indices_(indices) {}
    Tensor& Values() noexcept { return values_; }
    Tensor& Indices() noexcept { return indices_; }

   private:
    std::reference_wrapper<Tensor> values_;
    std::reference_wrapper<Tensor> indices_;
  };

  /// <summary>
  /// Allocates memory for the values and the index and returns a mutator so
  /// data can be copied into the buffer.
  /// </summary>
  /// <param name="values_count">use 0 for fully sparse tensors</param>
  /// <param name="index_count">use 0 for fully sparse tensors</param>
  /// <returns>CooMutator instance</returns>
  CooMutator MakeCooData(size_t values_count, size_t index_count);
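
  // Example (sketch): allocating room for two float values and two flat indices,
  // then filling the buffers through the returned mutator.
  //
  //   auto mutator = sparse.MakeCooData(2, 2);
  //   float* out_values = mutator.Values().MutableData<float>();
  //   int64_t* out_index = mutator.Indices().MutableData<int64_t>();
  //   out_values[0] = 1.0f; out_values[1] = 2.0f;
  //   out_index[0] = 1;     out_index[1] = 5;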

  /// <summary>
  /// Read only access to the CSR indices
  /// </summary>
  class CsrView {
   public:
    CsrView(const Tensor& inner, const Tensor& outer) noexcept
        : inner_(inner), outer_(outer) {}
    const Tensor& Inner() const noexcept { return inner_; }
    const Tensor& Outer() const noexcept { return outer_; }

   private:
    std::reference_wrapper<const Tensor> inner_;
    std::reference_wrapper<const Tensor> outer_;
  };

  /// <summary>
  /// Returns a read only view of the CSR indices
  /// </summary>
  /// <returns>CsrView instance</returns>
  CsrView AsCsr() const;

  /// <summary>
  /// This function will use the CSR indices contained within the user allocated buffers.
  /// The lifespan of the buffers must eclipse the lifespan of the sparse tensor instance.
  /// </summary>
  /// <param name="inner_index">user allocated buffer span. Use an empty span for fully sparse tensors.</param>
  /// <param name="outer_index">user allocated buffer span. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status UseCsrIndices(gsl::span<int64_t> inner_index, gsl::span<int64_t> outer_index);
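
  // Example (sketch): CSR indices for the 3x3 matrix
  //   [[0, 1, 0],
  //    [2, 0, 3],
  //    [0, 0, 0]]
  // with values {1, 2, 3}. The inner index holds one column per value and the
  // outer index holds rows + 1 row-start offsets; both buffers stay user owned.
  //
  //   std::vector<int64_t> inner = {1, 0, 2};
  //   std::vector<int64_t> outer = {0, 1, 3, 3};
  //   ORT_THROW_IF_ERROR(sparse.UseCsrIndices(gsl::make_span(inner), gsl::make_span(outer)));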

  /// <summary>
  /// The function will allocate a single contiguous buffer and will copy the values
  /// and the indices into it.
  /// </summary>
  /// <param name="data_transfer">data transfer to use for the copy</param>
  /// <param name="data_location">memory info describing where the source data resides</param>
  /// <param name="values_count">use 0 for fully sparse tensors</param>
  /// <param name="values_data">pointer to data to be copied. Use nullptr for fully sparse tensors.</param>
  /// <param name="inner_index">inner index to be copied. Use an empty span for fully sparse tensors.</param>
  /// <param name="outer_index">outer index to be copied. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeCsrData(const IDataTransfer& data_transfer,
                     const OrtMemoryInfo& data_location,
                     size_t values_count, const void* values_data,
                     gsl::span<const int64_t> inner_index,
                     gsl::span<const int64_t> outer_index);

  /// <summary>
  /// The method allocates a single contiguous buffer and creates instances of std::string in it, with
  /// copies of the supplied zero-terminated strings, followed by the CSR indices.
  /// All data is assumed to be on CPU and the allocator supplied must be
  /// a CPU based allocator.
  /// </summary>
  /// <param name="string_count">use 0 for fully sparse tensors</param>
  /// <param name="strings">array of char* pointers. Use nullptr for fully sparse tensors.</param>
  /// <param name="inner_index">inner index to be copied. Use an empty span for fully sparse tensors.</param>
  /// <param name="outer_index">outer index to be copied. Use an empty span for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeCsrStrings(size_t string_count, const char* const* strings,
                        gsl::span<const int64_t> inner_index,
                        gsl::span<const int64_t> outer_index);

  /// <summary>
  /// Gives writable access to the CSR values and indices
  /// </summary>
  class CsrMutator {
   public:
    CsrMutator(Tensor& values, Tensor& inner, Tensor& outer) noexcept
        : values_(values), inner_(inner), outer_(outer) {}
    Tensor& Values() const noexcept { return values_; }
    Tensor& Inner() const noexcept { return inner_; }
    Tensor& Outer() const noexcept { return outer_; }

   private:
    std::reference_wrapper<Tensor> values_;
    std::reference_wrapper<Tensor> inner_;
    std::reference_wrapper<Tensor> outer_;
  };

  /// <summary>
  /// Allocates memory for the values and the indices and returns a mutator so
  /// data can be populated.
  /// </summary>
  /// <param name="values_count">Use 0 for fully sparse tensors.</param>
  /// <param name="inner_index_count">Use 0 for fully sparse tensors.</param>
  /// <param name="outer_index_count">Use 0 for fully sparse tensors.</param>
  /// <returns>CsrMutator instance</returns>
  CsrMutator MakeCsrData(size_t values_count, size_t inner_index_count, size_t outer_index_count);

  /// <summary>
  /// Read only access to the block sparse indices
  /// </summary>
  class BlockSparseView {
   public:
    explicit BlockSparseView(const Tensor& indices) noexcept
        : indices_(indices) {}
    const Tensor& Indices() const noexcept { return indices_; }

   private:
    std::reference_wrapper<const Tensor> indices_;
  };

  /// <summary>
  /// Returns a block sparse index view
  /// </summary>
  /// <returns>an instance of BlockSparseView</returns>
  BlockSparseView AsBlockSparse() const;

  /// <summary>
  /// Uses the block sparse indices contained in the user allocated buffer. The shape of the index
  /// must be 2-D and must contain one tuple per value block that
  /// was supplied to the constructor. The supplied buffer's lifespan must eclipse the lifespan
  /// of the sparse tensor instance.
  /// </summary>
  /// <param name="indices_shape">Use {0} for fully sparse tensors.</param>
  /// <param name="indices_data">pointer to a user allocated buffer. Use nullptr for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status UseBlockSparseIndices(const TensorShape& indices_shape, int32_t* indices_data);

  /// <summary>
  /// The function allocates a single contiguous buffer and copies the values and the index
  /// into it. The shape of the values is expected to be at least 3-D but may contain more
  /// dimensions. At the very minimum it should be (num_blocks, block_size, block_size).
  ///
  /// The shape of the index must be at least 2-D and must contain one tuple per value
  /// block that was supplied to the constructor. Each index tuple is the
  /// (row, col) coordinate of the value block within the dense matrix.
  /// </summary>
  /// <param name="data_transfer">data transfer to use for the copy</param>
  /// <param name="data_location">memory info describing where the source data resides</param>
  /// <param name="values_shape">The shape is expected to be at least 3-D. However, use {0} for fully sparse tensors.</param>
  /// <param name="values_data">pointer to the data to be copied. Use nullptr for fully sparse tensors.</param>
  /// <param name="indices_shape">The shape is expected to be 2-D. However, you can use {0} for fully sparse tensors.</param>
  /// <param name="indices_data">pointer to the index data to be copied. Use nullptr for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeBlockSparseData(const IDataTransfer& data_transfer,
                             const OrtMemoryInfo& data_location,
                             const TensorShape& values_shape, const void* values_data,
                             const TensorShape& indices_shape, const int32_t* indices_data);
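
  // Example (sketch; `data_transfer` and `cpu_info` as in the COO example above):
  // two 2x2 value blocks, each paired with the (row, col) coordinates of the block
  // inside the dense matrix.
  //
  //   std::vector<float> blocks = {1, 2, 3, 4,  5, 6, 7, 8};  // values_shape {2, 2, 2}
  //   std::vector<int32_t> block_index = {0, 0,  1, 1};       // indices_shape {2, 2}
  //   ORT_THROW_IF_ERROR(sparse.MakeBlockSparseData(data_transfer, cpu_info,
  //                                                 TensorShape({2, 2, 2}), blocks.data(),
  //                                                 TensorShape({2, 2}), block_index.data()));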

  /// <summary>
  /// The method allocates a single contiguous buffer and creates instances of std::string in it, with
  /// copies of the supplied zero-terminated strings, followed by the block sparse indices.
  /// All data is assumed to be on CPU and the allocator supplied must be
  /// a CPU based allocator.
  /// </summary>
  /// <param name="values_shape">Use a {0} shape for fully sparse tensors.</param>
  /// <param name="strings">array of char* pointers. Use nullptr for fully sparse tensors.</param>
  /// <param name="indices_shape">Use {0} for fully sparse tensors.</param>
  /// <param name="indices_data">Use nullptr for fully sparse tensors.</param>
  /// <returns>Status</returns>
  Status MakeBlockSparseStrings(const TensorShape& values_shape, const char* const* strings,
                                const TensorShape& indices_shape, const int32_t* indices_data);

  /// <summary>
  /// Gives mutable access to the block sparse values and indices
  /// </summary>
  class BlockSparseMutator {
   public:
    BlockSparseMutator(Tensor& values, Tensor& indices) noexcept
        : values_(values), indices_(indices) {}
    Tensor& Values() noexcept { return values_; }
    Tensor& Indices() noexcept { return indices_; }

   private:
    std::reference_wrapper<Tensor> values_;
    std::reference_wrapper<Tensor> indices_;
  };

  /// <summary>
  /// Allocates memory for the values and the index and returns a mutator so
  /// data can be populated.
  /// </summary>
  /// <param name="values_shape">Shape is expected to be 3-D. Use {0} for fully sparse tensors.</param>
  /// <param name="indices_shape">Shape is expected to be 2-D. Use {0} for fully sparse tensors.</param>
  /// <returns>BlockSparseMutator instance</returns>
  BlockSparseMutator MakeBlockSparseData(const TensorShape& values_shape, const TensorShape& indices_shape);

  /// <summary>
  /// Cross-device copy. The destination tensor must have an allocator set.
  /// </summary>
  /// <param name="data_transfer_manager">data transfer manager to use for the copy</param>
  /// <param name="dst_tensor">destination sparse tensor</param>
  /// <returns>Status</returns>
  Status Copy(const DataTransferManager& data_transfer_manager, SparseTensor& dst_tensor) const;
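
  // Example (sketch; `dtm` is assumed to be the session's DataTransferManager and
  // `gpu_allocator` an IAllocator for the destination device): cross-device copy of a
  // fully populated sparse tensor `src`.
  //
  //   SparseTensor dst(src.DataType(), src.DenseShape(), gpu_allocator);
  //   ORT_THROW_IF_ERROR(src.Copy(dtm, dst));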

  /// <summary>
  /// Cross-device copy. The destination tensor must have an allocator set.
  /// </summary>
  /// <param name="data_transfer">data transfer to use for the copy</param>
  /// <param name="dst_tensor">destination sparse tensor</param>
  /// <returns>Status</returns>
  Status Copy(const IDataTransfer& data_transfer, SparseTensor& dst_tensor) const;

 private:
  Status AllocateBuffer(int64_t buffer_size, size_t num_values);
  void ReleaseBuffer();
  void* IndicesStart(int64_t values_bytes);
  const void* IndicesStart(int64_t values_bytes) const;
  Status ValidateBlockSparseShapes(const TensorShape& values_shape, const TensorShape& index_shape) const;

  std::vector<int64_t> GetCooIndexDims(size_t values_count, size_t index_size) const;
  void InitCooIndex(const TensorShape& index_shape, int64_t* index_data);

  Status ValidateCsrIndices(size_t values_count, size_t inner_size, size_t outer_size) const;
  void InitCsrIndices(size_t inner_size, const int64_t* inner, size_t outer_size, const int64_t* outer);
  void InitBlockSparseIndices(const TensorShape& indices_shape, int32_t* indices_data);

  SparseFormat format_;                        // sparse format enum value
  TensorShape dense_shape_;                    // shape of the corresponding dense tensor
  const PrimitiveDataTypeBase* ml_data_type_;  // MLDataType for the contained values
  AllocatorPtr allocator_;                     // allocator, or nullptr when using user supplied buffers
  OrtMemoryInfo location_;                     // memory info describing where the data resides. When an allocator
                                               // is supplied, location_ is obtained from the allocator.
  void* p_data_;                               // allocated buffer ptr, or nullptr when using user supplied buffers
  int64_t buffer_size_;                        // allocated buffer size, or zero when using user supplied buffers
  Tensor values_;                              // Tensor instance that describes the values buffer; it points either
                                               // to a user supplied buffer or to the beginning of p_data_, before
                                               // the format specific indices.
  std::vector<Tensor> format_data_;            // A collection of format specific indices. They point either to
                                               // user supplied buffers or to portions of the contiguous buffer p_data_.
};

}  // namespace onnxruntime

#endif  // !defined(DISABLE_SPARSE_TENSORS)