LLVM  15.0.0git
TensorSpec.h
Go to the documentation of this file.
1 //===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
10 #define LLVM_ANALYSIS_TENSORSPEC_H
11 
12 #include "llvm/Config/llvm-config.h"
13 
14 #include "llvm/ADT/StringMap.h"
15 #include "llvm/IR/LLVMContext.h"
16 #include "llvm/Support/JSON.h"
17 
18 #include <memory>
19 #include <vector>
20 
21 namespace llvm {
22 /// TensorSpec encapsulates the specification of a tensor: its dimensions, or
23 /// "shape" (row-major), its type (see TensorSpec::getDataType specializations
24 /// for supported types), its name and port (see "TensorFlow: Large-Scale
25 /// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
26 /// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
27 ///
28 /// Known tensor types. The left part is the C type, the right is a name we
29 /// can use to identify the type (to implement TensorSpec equality checks), and
30 /// to use, if needed, when mapping to an underlying evaluator's type system.
31 /// The main requirement is that the C type we use has the same size and
32 /// encoding (e.g. endian-ness) as the one used by the evaluator.
#define SUPPORTED_TENSOR_TYPES(M)                                              \
  M(float, Float)                                                              \
  M(double, Double)                                                            \
  M(int8_t, Int8)                                                              \
  M(uint8_t, UInt8)                                                            \
  M(int16_t, Int16)                                                            \
  M(uint16_t, UInt16)                                                          \
  M(int32_t, Int32)                                                            \
  M(uint32_t, UInt32)                                                          \
  M(int64_t, Int64)                                                            \
  M(uint64_t, UInt64)

/// Tag for a tensor's element type. `Invalid` is first (value 0) so a
/// default-constructed tag is never a valid type; the remaining members are
/// generated, in order, from SUPPORTED_TENSOR_TYPES.
enum class TensorType {
  Invalid,
#define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
  // Expand one enumerator per supported type (Float, Double, Int8, ...).
  SUPPORTED_TENSOR_TYPES(_TENSOR_TYPE_ENUM_MEMBERS)
#undef _TENSOR_TYPE_ENUM_MEMBERS
};
51 
52 class TensorSpec final {
53 public:
54  template <typename T>
55  static TensorSpec createSpec(const std::string &Name,
56  const std::vector<int64_t> &Shape,
57  int Port = 0) {
58  return TensorSpec(Name, Port, getDataType<T>(), sizeof(T), Shape);
59  }
60 
61  const std::string &name() const { return Name; }
62  int port() const { return Port; }
63  TensorType type() const { return Type; }
64  const std::vector<int64_t> &shape() const { return Shape; }
65 
66  bool operator==(const TensorSpec &Other) const {
67  return Name == Other.Name && Port == Other.Port && Type == Other.Type &&
68  Shape == Other.Shape;
69  }
70 
71  bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
72 
73  /// Get the number of elements in a tensor with this shape.
74  size_t getElementCount() const { return ElementCount; }
75  /// Get the size, in bytes, of one element.
76  size_t getElementByteSize() const { return ElementSize; }
77  /// Get the total size of a memory buffer needed to store the whole tensor.
78  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
79 
80  template <typename T> bool isElementType() const {
81  return getDataType<T>() == Type;
82  }
83 
84 private:
85  TensorSpec(const std::string &Name, int Port, TensorType Type,
86  size_t ElementSize, const std::vector<int64_t> &Shape);
87 
88  template <typename T> static TensorType getDataType();
89 
90  std::string Name;
91  int Port = 0;
93  std::vector<int64_t> Shape;
94  size_t ElementCount = 0;
95  size_t ElementSize = 0;
96 };
97 
/// Construct a TensorSpec from a JSON dictionary of the form:
/// { "name": <string>,
///   "port": <int>,
///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
///   "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// SUPPORTED_TENSOR_TYPES.
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value);
107 
111  const std::string &getLoggingName() const {
112  return LoggingName ? *LoggingName : Spec.name();
113  }
114 };
115 
/// Load the output specs. If SpecFileOverride is not empty, that path is used.
/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
/// under ModelPath (the model directory).
/// The first output tensor name must match ExpectedDecisionName.
/// In case of error, the return is None and the error is logged.
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride = StringRef());
124 
125 #define TFUTILS_GETDATATYPE_DEF(T, Name) \
126  template <> TensorType TensorSpec::getDataType<T>();
128 
129 #undef TFUTILS_GETDATATYPE_DEF
130 } // namespace llvm
131 
132 #endif // LLVM_ANALYSIS_TENSORSPEC_H
llvm::TensorType::Invalid
@ Invalid
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::LoggedFeatureSpec
Definition: TensorSpec.h:108
llvm::ElementCount
Definition: TypeSize.h:404
llvm::TensorSpec::operator!=
bool operator!=(const TensorSpec &Other) const
Definition: TensorSpec.h:71
llvm::TensorType::_TENSOR_TYPE_ENUM_MEMBERS
_TENSOR_TYPE_ENUM_MEMBERS(_, Name)
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::Optional< std::string >
T
#define T
Definition: Mips16ISelLowering.cpp:341
llvm::TensorSpec::operator==
bool operator==(const TensorSpec &Other) const
Definition: TensorSpec.h:66
llvm::LoggedFeatureSpec::getLoggingName
const std::string & getLoggingName() const
Definition: TensorSpec.h:111
llvm::TensorSpec::port
int port() const
Definition: TensorSpec.h:62
JSON.h
StringMap.h
llvm::TensorSpec::isElementType
bool isElementType() const
Definition: TensorSpec.h:80
llvm::LoggedFeatureSpec::Spec
TensorSpec Spec
Definition: TensorSpec.h:109
llvm::TensorSpec::type
TensorType type() const
Definition: TensorSpec.h:63
llvm::LoggedFeatureSpec::LoggingName
Optional< std::string > LoggingName
Definition: TensorSpec.h:110
llvm::TensorSpec::getElementCount
size_t getElementCount() const
Get the number of elements in a tensor with this shape.
Definition: TensorSpec.h:74
llvm::TensorSpec::name
const std::string & name() const
Definition: TensorSpec.h:61
llvm::TensorSpec::getElementByteSize
size_t getElementByteSize() const
Get the size, in bytes, of one element.
Definition: TensorSpec.h:76
TFUTILS_GETDATATYPE_DEF
#define TFUTILS_GETDATATYPE_DEF(T, Name)
Definition: TensorSpec.h:125
llvm::TensorType
TensorType
Definition: TensorSpec.h:45
SUPPORTED_TENSOR_TYPES
#define SUPPORTED_TENSOR_TYPES(M)
TensorSpec encapsulates the specification of a tensor: its dimensions, or "shape" (row-major),...
Definition: TensorSpec.h:33
llvm::loadOutputSpecs
Optional< std::vector< LoggedFeatureSpec > > loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName, StringRef ModelPath, StringRef SpecFileOverride=StringRef())
Load the output specs.
Definition: TensorSpec.cpp:83
llvm::TargetStackID::Value
Value
Definition: TargetFrameLowering.h:27
llvm::TensorSpec::shape
const std::vector< int64_t > & shape() const
Definition: TensorSpec.h:64
LLVMContext.h
llvm::TensorSpec::createSpec
static TensorSpec createSpec(const std::string &Name, const std::vector< int64_t > &Shape, int Port=0)
Definition: TensorSpec.h:55
llvm::TensorSpec
Definition: TensorSpec.h:52
Other
Optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1237
llvm::getTensorSpecFromJSON
Optional< TensorSpec > getTensorSpecFromJSON(LLVMContext &Ctx, const json::Value &Value)
Construct a TensorSpec from a JSON dictionary of the form: { "name": <string>, "port": <int>,...
Definition: TensorSpec.cpp:45
llvm::TensorSpec::getTotalTensorBufferSize
size_t getTotalTensorBufferSize() const
Get the total size of a memory buffer needed to store the whole tensor.
Definition: TensorSpec.h:78