|
HDK
|
#include <onnxruntime_lite_custom_op.h>
Inheritance diagram for Ort::Custom::OrtLiteCustomFunc< Args >:
Classes | |
| struct | Kernel |
Public Types | |
| using | ComputeFn = void(*)(Args...) |
| using | ComputeFnReturnStatus = Status(*)(Args...) |
| using | MyType = OrtLiteCustomFunc< Args...> |
Public Types inherited from Ort::Custom::OrtLiteCustomOp | |
| using | ConstOptionalFloatTensor = std::optional< const Custom::Tensor< float > & > |
| using | OptionalFloatTensor = std::optional< Custom::Tensor< float >> |
Public Member Functions | |
| OrtLiteCustomFunc (const char *op_name, const char *execution_provider, ComputeFn compute_fn, ShapeInferFn shape_infer_fn={}, int start_ver=1, int end_ver=MAX_CUSTOM_OP_END_VER) | |
| OrtLiteCustomFunc (const char *op_name, const char *execution_provider, ComputeFnReturnStatus compute_fn_return_status, ShapeInferFn shape_infer_fn={}, int start_ver=1, int end_ver=MAX_CUSTOM_OP_END_VER) | |
Additional Inherited Members | |
Static Public Member Functions inherited from Ort::Custom::OrtLiteCustomOp | |
| template<size_t ith_input, size_t ith_output, typename... Ts> | |
| static std::enable_if < sizeof...(Ts)==0, std::tuple <> >::type | CreateTuple (OrtKernelContext *, ArgPtrs &, size_t, size_t, const std::string &) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, OrtKernelContext * >::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, OrtKernelContext & >::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, const TensorArray * >::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, const TensorArray & >::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, TensorArray * > ::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<size_t ith_input, size_t ith_output, typename T , typename... Ts> | |
| static std::enable_if < std::is_same< T, TensorArray & > ::value, std::tuple< T, Ts...> >::type | CreateTuple (OrtKernelContext *context, ArgPtrs &args, size_t num_input, size_t num_output, const std::string &ep) |
| template<typename... Ts> | |
| static std::enable_if < 0==sizeof...(Ts)>::type | ParseArgs (std::vector< ONNXTensorElementDataType > &, std::vector< ONNXTensorElementDataType > &) |
Public Attributes inherited from Ort::Custom::OrtLiteCustomOp | |
| const std::string | op_name_ |
| const std::string | execution_provider_ |
| std::vector < ONNXTensorElementDataType > | input_types_ |
| std::vector < ONNXTensorElementDataType > | output_types_ |
| ShapeInferFn | shape_infer_fn_ = {} |
| int | start_ver_ = 1 |
| int | end_ver_ = MAX_CUSTOM_OP_END_VER |
| void * | compute_fn_ = {} |
| void * | compute_fn_return_status_ = {} |
Static Public Attributes inherited from Ort::Custom::OrtLiteCustomOp | |
| template<typename T , typename... Ts> | |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, OrtKernelContext*>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { ParseArgs<Ts...>(input_types, output_types); } |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, OrtKernelContext&>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { ParseArgs<Ts...>(input_types, output_types); } |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, const TensorArray&>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { input_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED); ParseArgs<Ts...>(input_types, output_types); } |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, const TensorArray*>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { input_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED); ParseArgs<Ts...>(input_types, output_types); } |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, TensorArray&>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { output_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED); ParseArgs<Ts...>(input_types, output_types); } |
| template<typename T, typename... Ts> | |
| static std::enable_if< 0<=sizeof...(Ts) && std::is_same<T, TensorArray*>::value >::type ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { output_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED); ParseArgs<Ts...>(input_types, output_types); } |
| #define PARSE_INPUT_BASE(pack_type, onnx_type) |
| #define PARSE_INPUT(data_type, onnx_type) |
| #define PARSE_OUTPUT(data_type, onnx_type) |
| #define PARSE_ARGS(data_type, onnx_type) |
| PARSE_ARGS(std::string_view, ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING) |
| OrtLiteCustomOp(const char* op_name, const char* execution_provider, ShapeInferFn shape_infer_fn, int start_ver = 1, int end_ver = MAX_CUSTOM_OP_END_VER) : op_name_(op_name), execution_provider_(execution_provider), shape_infer_fn_(shape_infer_fn), start_ver_(start_ver), end_ver_(end_ver) { OrtCustomOp::version = ORT_API_VERSION; OrtCustomOp::GetName = [](const OrtCustomOp* op) { return static_cast<const OrtLiteCustomOp*>(op)->op_name_.c_str(); }; OrtCustomOp::GetExecutionProviderType = [](const OrtCustomOp* op) { return ((OrtLiteCustomOp*)op)->execution_provider_.c_str(); }; OrtCustomOp::GetInputMemoryType = [](const OrtCustomOp*, size_t) { return OrtMemTypeDefault; }; OrtCustomOp::GetInputTypeCount = [](const OrtCustomOp* op) { auto self = reinterpret_cast<const OrtLiteCustomOp*>(op); return self->input_types_.size(); }; OrtCustomOp::GetInputType = [](const OrtCustomOp* op, size_t indice) { auto self = reinterpret_cast<const OrtLiteCustomOp*>(op); return self->input_types_[indice]; }; OrtCustomOp::GetOutputTypeCount = [](const OrtCustomOp* op) { auto self = reinterpret_cast<const OrtLiteCustomOp*>(op); return self->output_types_.size(); }; OrtCustomOp::GetOutputType = [](const OrtCustomOp* op, size_t indice) { auto self = reinterpret_cast<const OrtLiteCustomOp*>(op); return self->output_types_[indice]; }; |
Definition at line 898 of file onnxruntime_lite_custom_op.h.
| using Ort::Custom::OrtLiteCustomFunc< Args >::ComputeFn = void (*)(Args...) |
Definition at line 899 of file onnxruntime_lite_custom_op.h.
| using Ort::Custom::OrtLiteCustomFunc< Args >::ComputeFnReturnStatus = Status (*)(Args...) |
Definition at line 900 of file onnxruntime_lite_custom_op.h.
| using Ort::Custom::OrtLiteCustomFunc< Args >::MyType = OrtLiteCustomFunc<Args...> |
Definition at line 901 of file onnxruntime_lite_custom_op.h.
|
inline |
Definition at line 911 of file onnxruntime_lite_custom_op.h.
|
inline |
Definition at line 951 of file onnxruntime_lite_custom_op.h.