Go to the documentation of this file.
9 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
10 #define LLVM_ANALYSIS_TENSORSPEC_H
12 #include "llvm/Config/llvm-config.h"
33 #define SUPPORTED_TENSOR_TYPES(M) \
47 #define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
49 #undef _TENSOR_TYPE_ENUM_MEMBERS
56 const std::vector<int64_t> &Shape,
58 return TensorSpec(Name, Port, getDataType<T>(),
sizeof(
T), Shape);
61 const std::string &
name()
const {
return Name; }
62 int port()
const {
return Port; }
64 const std::vector<int64_t> &
shape()
const {
return Shape; }
81 return getDataType<T>() ==
Type;
86 size_t ElementSize,
const std::vector<int64_t> &Shape);
88 template <
typename T>
static TensorType getDataType();
93 std::vector<int64_t> Shape;
95 size_t ElementSize = 0;
121 Optional<std::vector<LoggedFeatureSpec>>
123 StringRef ModelPath, StringRef SpecFileOverride = StringRef());
125 #define TFUTILS_GETDATATYPE_DEF(T, Name) \
126 template <> TensorType TensorSpec::getDataType<T>();
129 #undef TFUTILS_GETDATATYPE_DEF
132 #endif // LLVM_ANALYSIS_TENSORSPEC_H
This is an optimization pass for GlobalISel generic memory operations.
bool operator!=(const TensorSpec &Other) const
_TENSOR_TYPE_ENUM_MEMBERS(_, Name)
The instances of the Type class are immutable: once they are created, they are never changed.
bool operator==(const TensorSpec &Other) const
const std::string & getLoggingName() const
bool isElementType() const
Optional< std::string > LoggingName
size_t getElementCount() const
Get the number of elements in a tensor with this shape.
const std::string & name() const
size_t getElementByteSize() const
Get the size, in bytes, of one element.
#define TFUTILS_GETDATATYPE_DEF(T, Name)
#define SUPPORTED_TENSOR_TYPES(M)
TensorSpec encapsulates the specification of a tensor: its dimensions, or "shape" (row-major), its element type, and its element size in bytes.
Optional< std::vector< LoggedFeatureSpec > > loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName, StringRef ModelPath, StringRef SpecFileOverride=StringRef())
Load the output specs.
const std::vector< int64_t > & shape() const
static TensorSpec createSpec(const std::string &Name, const std::vector< int64_t > &Shape, int Port=0)
Optional< std::vector< StOtherPiece > > Other
Optional< TensorSpec > getTensorSpecFromJSON(LLVMContext &Ctx, const json::Value &Value)
Construct a TensorSpec from a JSON dictionary of the form: { "name": <string>, "port": <int>, "type": <string>, "shape": <array of ints> }.
size_t getTotalTensorBufferSize() const
Get the total size of a memory buffer needed to store the whole tensor.