14#ifndef LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
15#define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
59 template <
class FType>
64 CompiledModel(std::make_unique<TGen>()) {
65 assert(CompiledModel &&
"The CompiledModel should be valid");
72 bool InputIsPresent =
true;
73 populateTensor(InputSpec.size(),
74 TensorSpec::createSpec<uint64_t>(
"model_selector", {2}),
75 Options.FeedPrefix, InputIsPresent);
80 if (
Options.ModelSelector.empty() && InputIsPresent)
82 "A model selector was not specified but the underlying model "
83 "requires selecting one because it exposes a model_selector input");
86 if (!
Options.ModelSelector.empty()) {
88 Ctx.
emitError(
"A model selector was specified but the underlying model "
89 "does not expose a model_selector input");
94 getTensor<uint64_t>(InputSpec.size())[0] =
High;
95 getTensor<uint64_t>(InputSpec.size())[1] =
Low;
101 for (
size_t I = 0;
I < InputSpec.size(); ++
I)
102 populateTensor(
I, InputSpec[
I],
Options.FeedPrefix, InputIsPresent);
104 ResultIndex = CompiledModel->LookupResultIndex(
Options.FetchPrefix.str() +
106 assert(ResultIndex >= 0 &&
"Cannot find DecisionName in inlining model");
120 bool &InputIsPresent) {
122 CompiledModel->LookupArgIndex((Prefix +
Spec.name()).str());
123 void *Buffer =
nullptr;
124 InputIsPresent =
Index >= 0;
126 Buffer = CompiledModel->arg_data(
Index);
130 void *evaluateUntyped()
override {
131 CompiledModel->Run();
132 return CompiledModel->result_data(ResultIndex);
135 int32_t ResultIndex = -1;
136 std::unique_ptr<TGen> CompiledModel;
143#define NOOP_MODEL_ERRMSG \
144 "The mock AOT-ed saved model is a compile-time stub and should not be " \
154#undef NOOP_MODEL_ERRMSG
#define NOOP_MODEL_ERRMSG
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This is an important class for using LLVM in a threaded context.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
static MD5Result hash(ArrayRef< uint8_t > Data)
Computes the hash for the given bytes.
MLModelRunner interface: abstraction of a mechanism for evaluating a ML model.
void setUpBufferForTensor(size_t Index, const TensorSpec &Spec, void *Buffer)
A mock class satisfying the interface expected by ReleaseModeModelRunner for its TGen parameter.
int LookupArgIndex(const std::string &)
NoopSavedModelImpl()=default
int LookupResultIndex(const std::string &)
ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec, StringRef DecisionName, const EmbeddedModelRunnerOptions &Options={})
FeatureNames' type should be an indexed collection of std::string, like std::array or std::vector, that has a size() method.
static bool classof(const MLModelRunner *R)
virtual ~ReleaseModeModelRunner()=default
StringRef - Represents a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
LLVM Value Representation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool isEmbeddedModelEvaluatorValid()
bool isEmbeddedModelEvaluatorValid< NoopSavedModelImpl >()
const char *const DecisionName
ReleaseModeModelRunner - production mode implementation of the MLModelRunner.
EmbeddedModelRunnerOptions & setFeedPrefix(StringRef Value)
StringRef FeedPrefix
Feed and Fetch feature prefixes - i.e.
EmbeddedModelRunnerOptions & setModelSelector(StringRef Value)
StringRef ModelSelector
ModelSelector is the name (recognized by the AOT-ed model) of a sub-model to use.
EmbeddedModelRunnerOptions & setFetchPrefix(StringRef Value)