//===- MLRegAllocPriorityAdvisor.cpp - ML priority advisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the ML priority advisor and reward injection pass
//
//===----------------------------------------------------------------------===//

13#include "AllocationOrder.h"
14#include "RegAllocGreedy.h"
27#include "llvm/CodeGen/Passes.h"
32#include "llvm/Pass.h"
33#include "llvm/PassRegistry.h"
35
36#if defined(LLVM_HAVE_TFLITE)
40#include "llvm/IR/Module.h"
41#endif
42
using namespace llvm;

46 "regalloc-priority-interactive-channel-base", cl::Hidden,
48 "Base file path for the interactive mode. The incoming filename should "
49 "have the name <regalloc-priority-interactive-channel-base>.in, while "
50 "the outgoing name should be "
51 "<regalloc-priority-interactive-channel-base>.out"));
52
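
// Usage note (illustrative; the file path is an example): with the flag set to
// /tmp/chan, the compiler writes feature tensors to /tmp/chan.out and reads
// the model's "priority" advice back from /tmp/chan.in, letting an external
// process drive prioritization. See InteractiveModelRunner for the protocol.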

using CompiledModelType = NoopSavedModelImpl;

// Options that only make sense in development mode
#ifdef LLVM_HAVE_TFLITE
#include "RegAllocScore.h"
#include "llvm/Analysis/Utils/TFUtils.h"

static cl::opt<std::string> TrainingLog(
    "regalloc-priority-training-log", cl::Hidden,
    cl::desc("Training log for the register allocator priority model"));

static cl::opt<std::string> ModelUnderTraining(
    "regalloc-priority-model", cl::Hidden,
    cl::desc("The model being trained for register allocation priority"));

#endif // #ifdef LLVM_HAVE_TFLITE

namespace llvm {

static const std::vector<int64_t> PerLiveRangeShape{1};

#define RA_PRIORITY_FEATURES_LIST(M)                                           \
  M(int64_t, li_size, PerLiveRangeShape, "size")                               \
  M(int64_t, stage, PerLiveRangeShape, "stage")                                \
  M(float, weight, PerLiveRangeShape, "weight")

#define DecisionName "priority"
static const TensorSpec DecisionSpec =
    TensorSpec::createSpec<float>(DecisionName, {1});

// Named features index.
enum FeatureIDs {
#define _FEATURE_IDX(_, name, __, ___) name,
  RA_PRIORITY_FEATURES_LIST(_FEATURE_IDX)
#undef _FEATURE_IDX
      FeatureCount
};

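// For reference: RA_PRIORITY_FEATURES_LIST(_FEATURE_IDX) expands the enum body
// to "li_size, stage, weight,", so each feature gets the tensor index used
// with getTensor<>() in getPriorityImpl() below (li_size = 0, stage = 1,
// weight = 2).
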
class MLPriorityAdvisor : public RegAllocPriorityAdvisor {
public:
  MLPriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                    SlotIndexes *const Indexes, MLModelRunner *Runner);

protected:
  const RegAllocPriorityAdvisor &getDefaultAdvisor() const {
    return static_cast<const RegAllocPriorityAdvisor &>(DefaultAdvisor);
  }

  // The assumption is that if the Runner could not be constructed, we emitted
  // an error, and we shouldn't be asking for it here.
  const MLModelRunner &getRunner() const { return *Runner; }
  float getPriorityImpl(const LiveInterval &LI) const;
  unsigned getPriority(const LiveInterval &LI) const override;

private:
  const DefaultPriorityAdvisor DefaultAdvisor;
  MLModelRunner *const Runner;
};
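
// Note: the ML advisor carries a DefaultPriorityAdvisor alongside the model
// runner; subclasses (the development-mode advisor below) use it to fall back
// to the heuristic priority when no model under training is attached.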

#define _DECL_FEATURES(type, name, shape, _)                                   \
  TensorSpec::createSpec<type>(#name, shape),

static const std::vector<TensorSpec> InputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_FEATURES)},
};
#undef _DECL_FEATURES

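// For reference, the initializer above expands to one scalar spec per feature,
// in FeatureIDs order:
//   TensorSpec::createSpec<int64_t>("li_size", PerLiveRangeShape),
//   TensorSpec::createSpec<int64_t>("stage", PerLiveRangeShape),
//   TensorSpec::createSpec<float>("weight", PerLiveRangeShape),
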
// ===================================
// Release (AOT) - specifics
// ===================================
class ReleaseModePriorityAdvisorAnalysis final
    : public RegAllocPriorityAdvisorAnalysis {
public:
  ReleaseModePriorityAdvisorAnalysis()
      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Release) {}
  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
    return R->getAdvisorMode() == AdvisorMode::Release;
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
  }

  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
    if (!Runner) {
      if (InteractiveChannelBaseName.empty())
        Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
            MF.getFunction().getContext(), InputFeatures, DecisionName);
      else
        Runner = std::make_unique<InteractiveModelRunner>(
            MF.getFunction().getContext(), InputFeatures, DecisionSpec,
            InteractiveChannelBaseName + ".out",
            InteractiveChannelBaseName + ".in");
    }
    return std::make_unique<MLPriorityAdvisor>(
        MF, RA, &getAnalysis<SlotIndexesWrapperPass>().getSI(), Runner.get());
  }
  std::unique_ptr<MLModelRunner> Runner;
};
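
// Selection sketch (assuming the -regalloc-enable-priority-advisor flag
// defined in RegAllocPriorityAdvisor.cpp): release mode would be requested
// with something like
//   llc -regalloc=greedy -regalloc-enable-priority-advisor=release ...
// createReleaseModePriorityAdvisor() below only returns this analysis when an
// embedded compiled model or an interactive channel is available.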

// ===================================
// Development mode - specifics
// ===================================
//
// Features we log
#ifdef LLVM_HAVE_TFLITE
static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});

#define _DECL_TRAIN_FEATURES(type, name, shape, _)                             \
  TensorSpec::createSpec<type>(std::string("action_") + #name, shape),

static const std::vector<TensorSpec> TrainingInputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_TRAIN_FEATURES)
     TensorSpec::createSpec<float>("action_discount", {1}),
     TensorSpec::createSpec<int32_t>("action_step_type", {1}),
     TensorSpec::createSpec<float>("action_reward", {1})}};
#undef _DECL_TRAIN_FEATURES

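// The "action_" prefix and the extra discount/step_type/reward tensors
// presumably mirror the TF-Agents-style trajectory layout expected by the
// MLGO training tooling; the model under training is queried with these specs
// in doInitialization() below.
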
class DevelopmentModePriorityAdvisor : public MLPriorityAdvisor {
public:
  DevelopmentModePriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                                 SlotIndexes *const Indexes,
                                 MLModelRunner *Runner, Logger *Log)
      : MLPriorityAdvisor(MF, RA, Indexes, Runner), Log(Log) {}

private:
  unsigned getPriority(const LiveInterval &LI) const override;
  Logger *const Log;
};

class DevelopmentModePriorityAdvisorAnalysis final
    : public RegAllocPriorityAdvisorAnalysis {
public:
  DevelopmentModePriorityAdvisorAnalysis()
      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Development) {}
  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
    return R->getAdvisorMode() == AdvisorMode::Development;
  }

  void logRewardIfNeeded(const MachineFunction &MF,
                         llvm::function_ref<float()> GetReward) override {
    if (!Log || !Log->hasAnyObservationForContext(MF.getName()))
      return;
    // The function pass manager would run all the function passes for a
    // function, so we assume the last context belongs to this function. If
    // this invariant ever changes, we can implement switching contexts at
    // that time. At this point, it'd be an error.
    if (Log->currentContext() != MF.getName()) {
      MF.getFunction().getContext().emitError(
          "The training log context shouldn't have changed.");
    }
    if (Log->hasObservationInProgress())
      Log->logReward<float>(GetReward());
  }
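
  // Note: the reward itself is produced by the caller (the reward-injection
  // side of this file's machinery): GetReward is only invoked here, and only
  // when an observation is actually in progress for this function.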

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
  }

  // Save all the logs (when requested).
  bool doInitialization(Module &M) override {
    LLVMContext &Ctx = M.getContext();
    if (ModelUnderTraining.empty() && TrainingLog.empty()) {
      Ctx.emitError("Regalloc development mode should be requested with at "
                    "least logging enabled and/or a training model");
      return false;
    }
    if (ModelUnderTraining.empty())
      Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
    else
      Runner = ModelUnderTrainingRunner::createAndEnsureValid(
          Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
    if (!Runner) {
      Ctx.emitError("Regalloc: could not set up the model runner");
      return false;
    }
    if (TrainingLog.empty())
      return false;
    std::error_code EC;
    auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
    if (EC) {
      M.getContext().emitError(EC.message() + ":" + TrainingLog);
      return false;
    }
    std::vector<TensorSpec> LFS = InputFeatures;
    if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
      append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
    // We always log the output; in particular, if we're not evaluating, we
    // don't have an output spec json file. That's why we handle the
    // 'normal' output separately.
    LFS.push_back(DecisionSpec);

    Log = std::make_unique<Logger>(std::move(OS), LFS, Reward,
                                   /*IncludeReward=*/true);
    return false;
  }
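
  // Usage sketch (the exact driver invocation is an assumption; the flags are
  // the ones defined above): logging is enabled with e.g.
  //   llc -regalloc-priority-training-log=/tmp/prio.log ...
  // and -regalloc-priority-model=<path> additionally evaluates a model under
  // training instead of relying on the default heuristic.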

  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
    if (!Runner)
      return nullptr;
    if (Log) {
      Log->switchContext(MF.getName());
    }

    return std::make_unique<DevelopmentModePriorityAdvisor>(
        MF, RA, &getAnalysis<SlotIndexesWrapperPass>().getSI(), Runner.get(),
        Log.get());
  }

  std::unique_ptr<MLModelRunner> Runner;
  std::unique_ptr<Logger> Log;
};
#endif // #ifdef LLVM_HAVE_TFLITE

} // namespace llvm

RegAllocPriorityAdvisorAnalysis *llvm::createReleaseModePriorityAdvisor() {
  return llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() ||
                 !InteractiveChannelBaseName.empty()
             ? new ReleaseModePriorityAdvisorAnalysis()
             : nullptr;
}

MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF,
                                     const RAGreedy &RA,
                                     SlotIndexes *const Indexes,
                                     MLModelRunner *Runner)
    : RegAllocPriorityAdvisor(MF, RA, Indexes),
      DefaultAdvisor(MF, RA, Indexes), Runner(std::move(Runner)) {
  assert(this->Runner);
  Runner->switchContext(MF.getName());
}

float MLPriorityAdvisor::getPriorityImpl(const LiveInterval &LI) const {
  const unsigned Size = LI.getSize();
  LiveRangeStage Stage = RA.getExtraInfo().getStage(LI);

  // Feature tensors are indexed per the FeatureIDs enum: 0 = li_size,
  // 1 = stage, 2 = weight.
  *Runner->getTensor<int64_t>(0) = static_cast<int64_t>(Size);
  *Runner->getTensor<int64_t>(1) = static_cast<int64_t>(Stage);
  *Runner->getTensor<float>(2) = static_cast<float>(LI.weight());

  return Runner->evaluate<float>();
}

unsigned MLPriorityAdvisor::getPriority(const LiveInterval &LI) const {
  return static_cast<unsigned>(getPriorityImpl(LI));
}

#ifdef LLVM_HAVE_TFLITE
RegAllocPriorityAdvisorAnalysis *llvm::createDevelopmentModePriorityAdvisor() {
  return new DevelopmentModePriorityAdvisorAnalysis();
}

unsigned
DevelopmentModePriorityAdvisor::getPriority(const LiveInterval &LI) const {
  double Prio = 0;

  if (isa<ModelUnderTrainingRunner>(getRunner())) {
    Prio = MLPriorityAdvisor::getPriorityImpl(LI);
  } else {
    Prio = getDefaultAdvisor().getPriority(LI);
  }

  if (TrainingLog.empty())
    return Prio;

  // TODO(mtrofin): when we support optional rewards, this can go away. In the
  // meantime, we log the "pretend" reward (0) for the previous observation
  // before starting a new one.
  if (Log->hasObservationInProgress())
    Log->logReward<float>(0.0);

  Log->startObservation();
  size_t CurrentFeature = 0;
  for (; CurrentFeature < InputFeatures.size(); ++CurrentFeature) {
    Log->logTensorValue(CurrentFeature,
                        reinterpret_cast<const char *>(
                            getRunner().getTensorUntyped(CurrentFeature)));
  }

  if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
    for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
         ++I, ++CurrentFeature)
      Log->logTensorValue(
          CurrentFeature,
          reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
  }

  float Ret = static_cast<float>(Prio);
  Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
  Log->endObservation();

  return static_cast<unsigned>(Prio);
}
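
// Each logged observation is laid out as: the input features, then any extra
// outputs a model under training exposes, then the "priority" decision. The
// reward slot is filled in later, by logRewardIfNeeded() or with the 0
// placeholder above when a new observation begins first.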

#endif // #ifdef LLVM_HAVE_TFLITE