LLVM 22.0.0git
NoInferenceModelRunner.h
Go to the documentation of this file.
1//===- NoInferenceModelRunner.h ---- noop ML model runner ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9
10#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
11#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
12
15namespace llvm {
16class TensorSpec;
17
/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, in 'development' mode, but never ask it to
/// 'run'.
22public:
24 const std::vector<TensorSpec> &Inputs);
25
26 static bool classof(const MLModelRunner *R) {
27 return R->getKind() == MLModelRunner::Kind::NoOp;
28 }
29
30private:
31 void *evaluateUntyped() override {
32 llvm_unreachable("We shouldn't call run on this model runner.");
33 }
34};
35} // namespace llvm
36#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#define LLVM_ABI
Definition Compiler.h:213
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
virtual void * evaluateUntyped()=0
MLModelRunner(const MLModelRunner &)=delete
LLVMContext & Ctx
static bool classof(const MLModelRunner *R)
LLVM_ABI NoInferenceModelRunner(LLVMContext &Ctx, const std::vector< TensorSpec > &Inputs)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.