//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains both the AMDGPU target machine and the CodeGen pass
/// builder. The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs in the legacy pass manager
/// pipeline. The CodeGen pass builder handles the pass pipeline for the new
/// pass manager.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUSplitModule.h"
#include "GCNDPPCombine.h"
#include "GCNNSAReassign.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIFixSGPRCopies.h"
#include "SIFixVGPRCopies.h"
#include "SIFoldOperands.h"
#include "SIFormMemoryClauses.h"
#include "SILowerControlFlow.h"
#include "SILowerSGPRSpills.h"
#include "SILowerWWMCopies.h"
#include "SIMachineScheduler.h"
#include "SIPeepholeSDWA.h"
#include "SIPostRABundler.h"
#include "SIWholeQuadMode.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

class AMDGPUCodeGenPassBuilder
    : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
  using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;

public:
  AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
                           const CGPassBuilderOption &Opts,
                           PassInstrumentationCallbacks *PIC);

  void addIRPasses(AddIRPass &) const;
  void addCodeGenPrepare(AddIRPass &) const;
  void addPreISel(AddIRPass &addPass) const;
  void addILPOpts(AddMachinePass &) const;
  void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
  Error addInstSelector(AddMachinePass &) const;
  void addPreRewrite(AddMachinePass &) const;
  void addMachineSSAOptimization(AddMachinePass &) const;
  void addPostRegAlloc(AddMachinePass &) const;
  void addPreEmitPass(AddMachinePass &) const;
  void addPreEmitRegAlloc(AddMachinePass &) const;
  Error addRegAssignmentOptimized(AddMachinePass &) const;
  void addPreRegAlloc(AddMachinePass &) const;
  void addOptimizedRegAlloc(AddMachinePass &) const;
  void addPreSched2(AddMachinePass &) const;

  /// Check if a pass is enabled given \p Opt option. The option always
  /// overrides the default if explicitly used. Otherwise, its default value is
  /// used, provided the pass is meant to run at a minimum optimization
  /// \p Level.
  bool isPassEnabled(const cl::opt<bool> &Opt,
                     CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
  void addEarlyCSEOrGVNPass(AddIRPass &) const;
  void addStraightLineScalarOptimizationPasses(AddIRPass &) const;
};
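
// Note: CodeGenPassBuilder is a CRTP base, so the builder above statically
// dispatches to these overrides rather than through virtual calls; each add*
// hook splices target passes into a fixed point of the generic codegen
// pipeline.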

class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
public:
  WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};
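
// Register allocation on AMDGPU is split into three phases that run in
// sequence: SGPRs are allocated first, then WWM registers, and finally
// per-thread VGPRs (see addRegAssignAndRewriteOptimized below). Each phase
// uses a filtered allocator driven by one of the predicates that follow.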

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                const Register Reg) {
  const SIMachineFunctionInfo *MFI =
      MRI.getMF().getInfo<SIMachineFunctionInfo>();
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
         MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
}

/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<WWMRegisterRegAlloc>>
    WWMRegAlloc("wwm-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for WWM registers"));
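
// Illustrative use (assuming a standard llc build): the allocator can be
// chosen independently per register class, e.g.
//   llc -mtriple=amdgcn-amd-amdhsa -sgpr-regalloc=greedy -vgpr-regalloc=fast
// Leaving an option at "default" derives the allocator from the -O level via
// the initializeDefault*RegisterAllocatorOnce functions below.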

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static void initializeDefaultWWMRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = WWMRegAlloc;
    WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static FunctionPass *createBasicWWMRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createGreedyWWMRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createFastWWMRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);

static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
                                               "basic register allocator",
                                               createBasicWWMRegisterAllocator);
static WWMRegisterRegAlloc
    greedyRegAllocWWMReg("greedy", "greedy register allocator",
                         createGreedyWWMRegisterAllocator);
static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
                                              createFastWWMRegisterAllocator);

static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
  return Phase == ThinOrFullLTOPhase::FullLTOPreLink ||
         Phase == ThinOrFullLTOPhase::ThinLTOPreLink;
}
} // anonymous namespace

static cl::opt<bool>
    EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
            cl::desc("Run pre-RA exec mask optimizations"),
            cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Enable library call simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

// Enable Mode register optimization.
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerExecSync(
    "amdgpu-enable-lower-exec-sync",
    cl::desc("Enable lowering of execution synchronization."), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
                     cl::desc("Enable lowering of lds to global memory pass "
                              "and asan instrument resulting IR."),
                     cl::init(true), cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<std::string>
    AMDGPUSchedStrategy("amdgpu-sched-strategy",
                        cl::desc("Select custom AMDGPU scheduling strategy."),
                        cl::Hidden, cl::init(""));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
  "amdgpu-enable-hipstdpar",
  cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
  cl::Hidden);

static cl::opt<bool>
    EnableAMDGPUAttributor("amdgpu-attributor-enable",
                           cl::desc("Enable AMDGPUAttributorPass"),
                           cl::init(true), cl::Hidden);

static cl::opt<bool> NewRegBankSelect(
    "new-reg-bank-select",
    cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
             "regbankselect"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> HasClosedWorldAssumption(
    "amdgpu-link-time-closed-world",
    cl::desc("Whether the closed-world assumption holds at link time"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnableUniformIntrinsicCombine(
    "amdgpu-enable-uniform-intrinsic-combine",
    cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
    cl::init(true), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
    "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
    createGCNMaxMemoryClauseMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
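
// These registries plug the strategies into the generic MachineScheduler, so
// any of them can be selected by name (assuming a standard llc build), e.g.:
//   llc -mtriple=amdgcn-amd-amdhsa -misched=gcn-max-ilp foo.ll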

static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.isAMDGCN())
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : CodeGenTargetMachineImpl(
          T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
          getEffectiveRelocModel(RM),
          getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.isAMDGCN()) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

ScheduleDAGInstrs *
AMDGPUTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases({"iterative", ""}, ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
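
// With the new pass manager this parser makes the strategy selectable inline
// in a pipeline string, e.g. (illustrative):
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' foo.ll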

static Expected<AMDGPUAttributorOptions>
parseAMDGPUAttributorPassOptions(StringRef Params) {
  AMDGPUAttributorOptions Result;
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
              .str(),
          inconvertibleErrorCode());
    }
  }
  return Result;
}
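
// Likewise for the attributor, e.g. (illustrative):
//   opt -passes='amdgpu-attributor<closed-world>' foo.ll
// matching the "closed-world" parameter parsed above.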

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerScalarOptimizerLateEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(InferAddressSpacesPass());
      });

  PB.registerVectorizerEndEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(InferAddressSpacesPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level,
         ThinOrFullLTOPhase Phase) {
        if (!isLTOPreLink(Phase)) {
          // When we are not using -fgpu-rdc, we can run accelerator code
          // selection relatively early, but still after linking to prevent
          // eager removal of potentially reachable symbols.
          if (EnableHipStdPar) {
            MPM.addPass(HipStdParMathFixupPass());
            MPM.addPass(HipStdParAcceleratorCodeSelectionPass());
          }
        }

        if (Level == OptimizationLevel::O0)
          return;

        // We don't want to run internalization at per-module stage.
        if (InternalizeSymbols && !isLTOPreLink(Phase)) {
          MPM.addPass(InternalizePass(mustPreserveGV));
          MPM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          MPM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());

        if (EnableUniformIntrinsicCombine)
          FPM.addPass(AMDGPUUniformIntrinsicCombinePass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
                                            OptimizationLevel Level,
                                            ThinOrFullLTOPhase Phase) {
    if (Level != OptimizationLevel::O0) {
      if (!isLTOPreLink(Phase)) {
        if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
          AMDGPUAttributorOptions Opts;
          MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
        }
      }
    }
  });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // When we are using -fgpu-rdc, we can only run accelerator code
        // selection after linking; otherwise we would end up removing
        // potentially reachable symbols that were exported as external in
        // other modules.
        if (EnableHipStdPar) {
          PM.addPass(HipStdParMathFixupPass());
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
        }
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableSwLowerLDS)
          PM.addPass(AMDGPUSwLowerLDSPass(*this));
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
        if (Level != OptimizationLevel::O0) {
          // We only want to run this with O2 or higher since inliner and SROA
          // don't run in O1.
          if (Level != OptimizationLevel::O1) {
            PM.addPass(
                createModuleToFunctionPassAdaptor(InferAddressSpacesPass()));
          }
          // Do we really need internalization in LTO?
          if (InternalizeSymbols) {
            PM.addPass(InternalizePass(mustPreserveGV));
            PM.addPass(GlobalDCEPass());
          }
          if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
            AMDGPUAttributorOptions Opt;
            if (HasClosedWorldAssumption)
              Opt.IsClosedWorld = true;
            PM.addPass(AMDGPUAttributorPass(
                *this, Opt, ThinOrFullLTOPhase::FullLTOPostLink));
          }
        }
        if (!NoKernelInfoEndLTO) {
          FunctionPassManager FPM;
          FPM.addPass(KernelInfoPrinter(this));
          PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        }
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        if (FilterName == "wwm")
          return onlyAllocateWWMRegs;
        return nullptr;
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}

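// That is, LDS, private, and region pointers use the all-ones pattern as
// their null value, while other address spaces keep the usual zero null
// pointer; addrspacecast lowering of null pointers relies on this convention.
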
bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  if (auto *Arg = dyn_cast<Argument>(V);
      Arg &&
      AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
      !Arg->hasByRefAttr())
    return AMDGPUAS::GLOBAL_ADDRESS;

  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD) // TODO: Handle invariant load like constant.
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}

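// Illustrative IR matched by the predicate above (names are hypothetical):
//   %is.shared  = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %not.shared = xor i1 %is.shared, true
//   %is.priv    = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %not.priv   = xor i1 %is.priv, true
//   %cond       = and i1 %not.shared, %not.priv
// Under %cond, %p can be refined to the global address space.
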
unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerFunctionAnalyses(FAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
}

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

ScheduleDAGInstrs *
GCNTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  Attribute SchedStrategyAttr =
      C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
  StringRef SchedStrategy = SchedStrategyAttr.isValid()
                                ? SchedStrategyAttr.getValueAsString()
                                : AMDGPUSchedStrategy;

  if (SchedStrategy == "max-ilp")
    return createGCNMaxILPMachineScheduler(C);

  if (SchedStrategy == "max-memory-clause")
    return createGCNMaxMemoryClauseMachineScheduler(C);

  if (SchedStrategy == "iterative-ilp")
    return createIterativeILPMachineScheduler(C);

  if (SchedStrategy == "iterative-minreg")
    return createMinRegScheduler(C);

  if (SchedStrategy == "iterative-maxocc")
    return createIterativeGCNMaxOccupancyMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}
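
// The strategy can thus be overridden per function; e.g. (illustrative IR) a
// function carrying
//   attributes #0 = { "amdgpu-sched-strategy"="max-ilp" }
// uses the max-ILP scheduler even if -amdgpu-sched-strategy selects another
// default for the module.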

ScheduleDAGInstrs *
GCNTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
  ScheduleDAGMI *DAG =
      new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
                                   /*RemoveKillFlags=*/true);
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
  if ((EnableVOPD.getNumOccurrences() ||
       getOptLevel() >= CodeGenOptLevel::Less) &&
      EnableVOPD)
    DAG->addMutation(createVOPDPairingMutation());
  DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
  return DAG;
}

//===----------------------------------------------------------------------===//
// AMDGPU Legacy Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createWWMRegAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&PatchableFunctionID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
    addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (TM.getTargetTriple().isAMDGCN() &&
      isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Make enqueued block runtime handles externally visible.
  addPass(createAMDGPUExportKernelRuntimeHandlesLegacyPass());

  // Lower special LDS accesses.
  if (EnableLowerExecSync)
    addPass(createAMDGPULowerExecSyncLegacyPass());

  // Lower LDS accesses to global memory pass if address sanitizer is enabled.
  if (EnableSwLowerLDS)
    addPass(createAMDGPUSwLowerLDSLegacyPass(&TM));

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().isAMDGCN()) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().isAMDGCN()) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().isAMDGCN() &&
      TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPUAttributorLegacyPass());

  if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  if (TM->getTargetTriple().isAMDGCN()) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    addPass(createAMDGPULowerBufferFatPointersPass());
    addPass(createAMDGPULowerIntrinsicsLegacyPass());
  }

  // LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here seems better,
  // as these blocks will get cleaned up by UnreachableBlockElim inserted next
  // in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// GCN Legacy Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPrepareLegacyPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(createFixIrreduciblePass());
  addPass(createUnifyLoopExitsPass());
  addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions

  addPass(createAMDGPUAnnotateUniformValuesLegacy());
  // TODO: Move this right after structurizeCFG to avoid extra divergence
  // analysis. This depends on stopping SIAnnotateControlFlow from making
  // control flow modifications.
  addPass(createSIAnnotateControlFlowLegacyPass());

  // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
  // with -new-reg-bank-select and without any of the fallback options.
  if (!getCGPassBuilderOption().EnableGlobalISelOption ||
      !isGlobalISelAbortEnabled() || !NewRegBankSelect)
    addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisLegacyID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsLegacyID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineLegacyID);
  addPass(&SILoadStoreOptimizerLegacyID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWALegacyID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSELegacyID);
    addPass(&SIFoldOperandsLegacyID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(&SIShrinkInstructionsLegacyID);
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterLegacyID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesLegacyID);
  addPass(createSILowerI1CopiesLegacyPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  if (NewRegBankSelect) {
    addPass(createAMDGPURegBankSelectPass());
    addPass(createAMDGPURegBankLegalizePass());
  } else {
    addPass(new RegBankSelect());
  }
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(&AMDGPUPrepareAGPRAllocLegacyID);
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, it seems only the BUNDLE instruction appears as the Kills
  // of the register in LiveVariables; this would trigger a failure in the
  // verifier. We should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);

  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);

  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
                  initializeDefaultWWMRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyWWMRegisterAllocator();

  return createFastWWMRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
    "and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(&SIPreAllocateWWMRegsLegacyID);

  // For allocating other wwm register operands.
  addPass(createWWMRegAllocPass(false));

  addPass(&SILowerWWMCopiesLegacyID);
  addPass(&AMDGPUReserveWWMRegsLegacyID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(false));

  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // At this point, the sgpr-regalloc has been done and it is good to have the
  // stack slot coloring to try to optimize the SGPR spill stack indices before
  // attempting the custom SGPR spill lowering.
  addPass(&StackSlotColoringID);

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(&SIPreAllocateWWMRegsLegacyID);

  // For allocating other whole wave mode registers.
  addPass(createWWMRegAllocPass(true));
  addPass(&SILowerWWMCopiesLegacyID);
  addPass(createVirtRegRewriter(false));
  addPass(&AMDGPUReserveWWMRegsLegacyID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingLegacyID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIShrinkInstructionsLegacyID);
  addPass(&SIPostRABundlerLegacyID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(&AMDGPUWaitSGPRHazardsLegacyID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

void GCNPassConfig::addPostBBSections() {
  // We run this later to avoid passes like livedebugvalues and BBSections
  // having to deal with the apparent multi-entry functions we may generate.
  addPass(&AMDGPUPreloadKernArgPrologLegacyID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         {}, {});
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  for (const auto &[_, Info] : PFS.VRegInfosNamed) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }
  for (const auto &[_, Info] : PFS.VRegInfos) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }

  for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
    Register ParsedReg;
    if (parseRegister(YamlRegStr, ParsedReg))
      return true;
    MFI->SpillPhysVGPRs.push_back(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  // Parse FirstKernArgPreloadReg separately, since it's a Register,
  // not an ArgDescriptor.
  if (YamlMFI.ArgInfo && YamlMFI.ArgInfo->FirstKernArgPreloadReg) {
    const yaml::SIArgument &A = *YamlMFI.ArgInfo->FirstKernArgPreloadReg;

    if (!A.IsRegister) {
      // For stack arguments, we don't have RegisterName.SourceRange,
      // but we should have some location info from the YAML parser.
      const MemoryBuffer &Buffer =
          *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
      // Create a minimal valid source range.
      SMLoc Loc = SMLoc::getFromPointer(Buffer.getBufferStart());
      SMRange Range(Loc, Loc);

      Error = SMDiagnostic(
          *PFS.SM, Loc, Buffer.getBufferIdentifier(), 1, 0, SourceMgr::DK_Error,
          "firstKernArgPreloadReg must be a register, not a stack location", "",
          {}, {});

      SourceRange = Range;
      return true;
    }

    Register Reg;
    if (parseNamedRegisterReference(PFS, Reg, A.RegisterName.Value, Error)) {
      SourceRange = A.RegisterName.SourceRange;
      return true;
    }

    if (!AMDGPU::SGPR_32RegClass.contains(Reg))
      return diagnoseRegisterClass(A.RegisterName);

    MFI->ArgInfo.FirstKernArgPreloadReg = Reg;
    MFI->NumUserSGPRs += YamlMFI.NumKernargPreloadSGPRs;
  }

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  if (YamlMFI.HasInitWholeWave)
    MFI->setInitWholeWave();

  return false;
}

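// Illustrative MIR fragment consumed by the parser above (field names follow
// yaml::SIMachineFunctionInfo; the register values are hypothetical):
//   machineFunctionInfo:
//     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
//     frameOffsetReg:    '$sgpr33'
//     stackPtrOffsetReg: '$sgpr32'
//     argumentInfo:
//       privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
//       workGroupIDX:         { reg: '$sgpr6' }
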
//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
    GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC)
    : CodeGenPassBuilder(TM, Opts, PIC) {
  Opt.MISchedPostRA = true;
  Opt.RequiresCodeGenSCCOrder = true;
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  // Garbage collection is not supported.
  disablePass<StackMapLivenessPass, FuncletLayoutPass,
              ShadowStackGCLoweringPass, PatchableFunctionPass>();
}

void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
  if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
    addPass(AMDGPURemoveIncompatibleFunctionsPass(TM));

  addPass(AMDGPUPrintfRuntimeBindingPass());
  if (LowerCtorDtor)
    addPass(AMDGPUCtorDtorLoweringPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(AMDGPUImageIntrinsicOptimizerPass(TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  addPass(AMDGPUAlwaysInlinePass());
  addPass(AlwaysInlinerPass());

  if (EnableLowerExecSync)
    addPass(AMDGPULowerExecSyncPass());

  if (EnableSwLowerLDS)
    addPass(AMDGPUSwLowerLDSPass(TM));

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS)
    addPass(AMDGPULowerModuleLDSPass(TM));

  // Run atomic optimizer before Atomic Expand
  if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
      AMDGPUAtomicOptimizerStrategy != ScanOptions::None)
    addPass(AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy));

  addPass(AtomicExpandPass(TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(AMDGPUPromoteAllocaPass(TM));
    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses(addPass);

    // TODO: Handle EnableAMDGPUAliasAnalysis

    // TODO: May want to move later or split into an early and late one.
    addPass(AMDGPUCodeGenPreparePass(TM));

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less) {
      addPass(createFunctionToLoopPassAdaptor(LICMPass(LICMOptions()),
                                              /*UseMemorySSA=*/true));
    }
  }

  Base::addIRPasses(addPass);

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass(addPass);
}
2177
2178void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
2179 if (TM.getOptLevel() > CodeGenOptLevel::None)
2181
2183 addPass(AMDGPULowerKernelArgumentsPass(TM));
2184
2185 Base::addCodeGenPrepare(addPass);
2186
2187 if (isPassEnabled(EnableLoadStoreVectorizer))
2188 addPass(LoadStoreVectorizerPass());
2189
2190 // This lowering has been placed after codegenprepare to take advantage of
2191 // address mode matching (which is why it isn't put with the LDS lowerings).
2192 // It could be placed anywhere before uniformity annotations (an analysis
2193 // that it changes by splitting up fat pointers into their components)
2194 // but has been put before switch lowering and CFG flattening so that those
2195 // passes can run on the more optimized control flow this pass creates in
2196 // many cases.
2197 addPass(AMDGPULowerBufferFatPointersPass(TM));
2198 addPass.requireCGSCCOrder();
2199
2200 addPass(AMDGPULowerIntrinsicsPass(TM));
2201
2202 // The LowerSwitch pass may introduce unreachable blocks that can cause
2203 // unexpected behavior for subsequent passes. Placing it here seems better, as
2204 // these blocks will get cleaned up by UnreachableBlockElim, inserted next in
2205 // the pass flow.
2206 addPass(LowerSwitchPass());
2207}
2208
2209void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2210
2211 // Require AMDGPUArgumentUsageAnalysis so that it's available during ISel.
2213
2214 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2215 addPass(FlattenCFGPass());
2216 addPass(SinkingPass());
2217 addPass(AMDGPULateCodeGenPreparePass(TM));
2218 }
2219
2220 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2221 // regions formed by them.
2222
2223 addPass(AMDGPUUnifyDivergentExitNodesPass());
2224 addPass(FixIrreduciblePass());
2225 addPass(UnifyLoopExitsPass());
2226 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2227
2228 addPass(AMDGPUAnnotateUniformValuesPass());
2229
2230 addPass(SIAnnotateControlFlowPass(TM));
2231
2232 // TODO: Move this right after structurizeCFG to avoid extra divergence
2233 // analysis. This depends on stopping SIAnnotateControlFlow from making
2234 // control flow modifications.
2235 addPass(AMDGPURewriteUndefForPHIPass());
2236
2237 if (!getCGPassBuilderOption().EnableGlobalISelOption ||
2238 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2239 addPass(LCSSAPass());
2240
2241 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2242 addPass(AMDGPUPerfHintAnalysisPass(TM));
2243
2244 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2245 // isn't this in addInstSelector?
2246 addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(),
2247 /*Force=*/true);
2248}
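// The structurization sequence above is order-sensitive: FixIrreducible and
// UnifyLoopExits first normalize the CFG so StructurizeCFG can reduce it to
// single-entry/single-exit regions, after which SIAnnotateControlFlow tags
// the divergent branches that will later be lowered to exec-mask
// manipulation.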
2249
2250void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2251 if (EnableEarlyIfConversion)
2252 addPass(EarlyIfConverterPass());
2253
2254 Base::addILPOpts(addPass);
2255}
2256
2257void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2258 CreateMCStreamer) const {
2259 // TODO: Add AsmPrinter.
2260}
2261
2262Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2263 addPass(AMDGPUISelDAGToDAGPass(TM));
2264 addPass(SIFixSGPRCopiesPass());
2265 addPass(SILowerI1CopiesPass());
2266 return Error::success();
2267}
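// Instruction selection is immediately followed by two cleanups: selection
// can produce VGPR-to-SGPR copies that are illegal for divergent values, and
// i1 values that still need lane-mask handling, which SIFixSGPRCopies and
// SILowerI1Copies respectively repair.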
2268
2269void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass) const {
2270 if (EnableRegReassign) {
2271 addPass(GCNNSAReassignPass());
2272 }
2273}
2274
2275void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2276 AddMachinePass &addPass) const {
2277 Base::addMachineSSAOptimization(addPass);
2278
2279 addPass(SIFoldOperandsPass());
2280 if (EnableDPPCombine) {
2281 addPass(GCNDPPCombinePass());
2282 }
2283 addPass(SILoadStoreOptimizerPass());
2284 if (isPassEnabled(EnableSDWAPeephole)) {
2285 addPass(SIPeepholeSDWAPass());
2286 addPass(EarlyMachineLICMPass());
2287 addPass(MachineCSEPass());
2288 addPass(SIFoldOperandsPass());
2289 }
2290 addPass(DeadMachineInstructionElimPass());
2291 addPass(SIShrinkInstructionsPass());
2292}
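// Note that SIFoldOperandsPass is added twice above: the SDWA peephole plus
// the MachineLICM/MachineCSE cleanup behind it expose fresh operand-folding
// opportunities, after which DeadMachineInstructionElim sweeps up the
// leftovers and SIShrinkInstructions picks smaller encodings.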
2293
2294void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2295 AddMachinePass &addPass) const {
2296 if (EnableDCEInRA)
2297 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2298
2299 // FIXME: when an instruction inside a bundle has a killed operand, it seems
2300 // that only the BUNDLE instruction appears as the kill of the register in
2301 // LiveVariables; this would trigger a failure in the verifier. We should fix
2302 // it and enable the verifier.
2303 if (OptVGPRLiveRange)
2304 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2305 SIOptimizeVGPRLiveRangePass());
2306
2307 // This must be run immediately after phi elimination and before
2308 // TwoAddressInstructions, otherwise the processing of the tied operand of
2309 // SI_ELSE will introduce a copy of the tied operand source after the else.
2310 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2311
2312 if (EnableRewritePartialRegUses)
2313 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2314
2315 if (isPassEnabled(EnablePreRAOptimizations))
2316 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2317
2318 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2319 // instructions that cause scheduling barriers.
2320 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2321
2322 if (OptExecMaskPreRA)
2323 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2324
2325 // This is not an essential optimization and it has a noticeable impact on
2326 // compilation time, so we only enable it from O2.
2327 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2328 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2329
2330 Base::addOptimizedRegAlloc(addPass);
2331}
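// insertPass<Anchor>(P), used throughout this function, splices P into the
// base pipeline immediately after the anchor pass instead of appending it;
// that is how SILowerControlFlow lands between PHIElimination and
// TwoAddressInstructions without restating the whole register-allocation
// pipeline.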
2332
2333void AMDGPUCodeGenPassBuilder::addPreRegAlloc(AddMachinePass &addPass) const {
2334 if (getOptLevel() != CodeGenOptLevel::None)
2335 addPass(AMDGPUPrepareAGPRAllocPass());
2336}
2337
2338Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2339 AddMachinePass &addPass) const {
2340 // TODO: Check --regalloc-npm option
2341
2342 addPass(GCNPreRALongBranchRegPass());
2343
2344 addPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}));
2345
2346 // Commit allocated register changes. This is mostly necessary because too
2347 // many things rely on the use lists of the physical registers, such as the
2348 // verifier. This is only necessary with allocators which use LiveIntervals,
2349 // since FastRegAlloc does the replacements itself.
2350 addPass(VirtRegRewriterPass(false));
2351
2352 // At this point, the sgpr-regalloc has been done and it is good to have the
2353 // stack slot coloring to try to optimize the SGPR spill stack indices before
2354 // attempting the custom SGPR spill lowering.
2355 addPass(StackSlotColoringPass());
2356
2357 // Equivalent of PEI for SGPRs.
2358 addPass(SILowerSGPRSpillsPass());
2359
2360 // To allocate WWM registers used in whole quad mode operations (for shaders).
2361 addPass(SIPreAllocateWWMRegsPass());
2362
2363 // For allocating other wwm register operands.
2364 addPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}));
2365 addPass(SILowerWWMCopiesPass());
2366 addPass(VirtRegRewriterPass(false));
2367 addPass(AMDGPUReserveWWMRegsPass());
2368
2369 // For allocating per-thread VGPRs.
2370 addPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}));
2371
2372
2373 addPreRewrite(addPass);
2374 addPass(VirtRegRewriterPass(true));
2375
2377 return Error::success();
2378}
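// In summary, optimized register assignment is staged: SGPRs are allocated
// and rewritten first so SGPR spills can be lowered (and their stack slots
// colored) before vector allocation, then the WWM registers, and finally the
// ordinary per-thread VGPRs; each VirtRegRewriterPass run commits one stage
// so the next one sees physical registers in the use lists.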
2379
2380void AMDGPUCodeGenPassBuilder::addPostRegAlloc(AddMachinePass &addPass) const {
2381 addPass(SIFixVGPRCopiesPass());
2382 if (TM.getOptLevel() > CodeGenOptLevel::None)
2383 addPass(SIOptimizeExecMaskingPass());
2384 Base::addPostRegAlloc(addPass);
2385}
2386
2387void AMDGPUCodeGenPassBuilder::addPreSched2(AddMachinePass &addPass) const {
2388 if (TM.getOptLevel() > CodeGenOptLevel::None)
2389 addPass(SIShrinkInstructionsPass());
2390 addPass(SIPostRABundlerPass());
2391}
2392
2393void AMDGPUCodeGenPassBuilder::addPreEmitPass(AddMachinePass &addPass) const {
2394 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2395 addPass(GCNCreateVOPDPass());
2396 }
2397
2398 addPass(SIMemoryLegalizerPass());
2399 addPass(SIInsertWaitcntsPass());
2400
2401 // TODO: addPass(SIModeRegisterPass());
2402
2403 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2404 // TODO: addPass(SIInsertHardClausesPass());
2405 }
2406
2407 addPass(SILateBranchLoweringPass());
2408
2409 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2410 addPass(AMDGPUSetWavePriorityPass());
2411
2412 if (TM.getOptLevel() > CodeGenOptLevel::None)
2413 addPass(SIPreEmitPeepholePass());
2414
2415 // The hazard recognizer that runs as part of the post-ra scheduler is not
2416 // guaranteed to handle all hazards correctly. This is because if there
2417 // are multiple scheduling regions in a basic block, the regions are scheduled
2418 // bottom up, so when we begin to schedule a region we don't know what
2419 // instructions were emitted directly before it.
2420 //
2421 // Here we add a stand-alone hazard recognizer pass which can handle all
2422 // cases.
2423 addPass(PostRAHazardRecognizerPass());
2424 addPass(AMDGPUWaitSGPRHazardsPass());
2425 addPass(AMDGPULowerVGPREncodingPass());
2426
2427 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2428 addPass(AMDGPUInsertDelayAluPass());
2429 }
2430
2431 addPass(BranchRelaxationPass());
2432}
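// BranchRelaxation comes last because it depends on final code size: only
// after waitcnt insertion and the hazard/delay passes above have finished
// changing the instruction stream are branch offsets stable enough to decide
// which branches must be rewritten into longer sequences.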
2433
2434bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2435 CodeGenOptLevel Level) const {
2436 if (Opt.getNumOccurrences())
2437 return Opt;
2438 if (TM.getOptLevel() < Level)
2439 return false;
2440 return Opt;
2441}
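// In effect: an option given explicitly on the command line always wins;
// otherwise its cl::init default applies, but only once TM.getOptLevel()
// reaches the requested minimum Level (e.g. EnableVOPD is queried with
// CodeGenOptLevel::Less above, i.e. on by default at -O1 and higher).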
2442
2443void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
2444 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2445 addPass(GVNPass());
2446 else
2447 addPass(EarlyCSEPass());
2448}
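// That is, the full GVN pass is reserved for CodeGenOptLevel::Aggressive
// (-O3); at lower levels the cheaper, dominator-based EarlyCSE is used
// instead.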
2449
2450void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2451 AddIRPass &addPass) const {
2452 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2453 addPass(LoopDataPrefetchPass());
2454
2455 addPass(SeparateConstOffsetFromGEPPass());
2456
2457 // ReassociateGEPs exposes more opportunities for SLSR. See
2458 // the example in reassociate-geps-and-slsr.ll.
2459 addPass(StraightLineStrengthReducePass());
2460
2461 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2462 // EarlyCSE can reuse.
2463 addEarlyCSEOrGVNPass(addPass);
2464
2465 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2466 addPass(NaryReassociatePass());
2467
2468 // NaryReassociate on GEPs creates redundant common expressions, so run
2469 // EarlyCSE after it.
2470 addPass(EarlyCSEPass());
2471}
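// A sketch of why this ordering helps, on a made-up example: given
//   %p0 = getelementptr float, ptr %base, i64 %i
//   %i1 = add i64 %i, 1
//   %p1 = getelementptr float, ptr %base, i64 %i1
// SeparateConstOffsetFromGEP peels the constant +1 out of the second GEP,
// StraightLineStrengthReduce then rewrites %p1 as %p0 plus a fixed byte
// offset, and the EarlyCSE/GVN and NaryReassociate runs above clean up the
// common subexpressions those rewrites expose.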