//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains both the AMDGPU target machine and the CodeGen pass
/// builder. The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs in the legacy pass manager
/// pipeline. The CodeGen pass builder handles the pass pipeline for the new
/// pass manager.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUSplitModule.h"
#include "GCNDPPCombine.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIFixSGPRCopies.h"
#include "SIFoldOperands.h"
#include "SILowerControlFlow.h"
#include "SILowerSGPRSpills.h"
#include "SIMachineScheduler.h"
#include "SIPeepholeSDWA.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
public:
  WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                const Register Reg) {
  const SIMachineFunctionInfo *MFI =
      MRI.getMF().getInfo<SIMachineFunctionInfo>();
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
         MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
}

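// Register allocation on AMDGPU runs as three phases over one MachineFunction
// (SGPRs first, then WWM registers, then per-thread VGPRs); the predicates
// above are the regalloc filter functions that restrict each phase to its own
// registers. The phase ordering is wired up in
// addRegAssignAndRewriteFast/Optimized further down in this file.
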
/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<WWMRegisterRegAlloc>>
    WWMRegAlloc("wwm-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for WWM registers"));

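// As a hypothetical illustration, the per-class allocators can be mixed on
// the command line:
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -wwm-regalloc=basic \
//       -vgpr-regalloc=fast kernel.ll
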
static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static void initializeDefaultWWMRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = WWMRegAlloc;
    WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static FunctionPass *createBasicWWMRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createGreedyWWMRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createFastWWMRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);

static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
                                               "basic register allocator",
                                               createBasicWWMRegisterAllocator);
static WWMRegisterRegAlloc
    greedyRegAllocWWMReg("greedy", "greedy register allocator",
                         createGreedyWWMRegisterAllocator);
static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
                                              createFastWWMRegisterAllocator);

static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
  return Phase == ThinOrFullLTOPhase::FullLTOPreLink ||
         Phase == ThinOrFullLTOPhase::ThinLTOPreLink;
}
} // anonymous namespace

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
    "amdgpu-load-store-vectorizer",
    cl::desc("Enable load store vectorizer"),
    cl::init(true),
    cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
    "amdgpu-scalarize-global-loads",
    cl::desc("Enable global load scalarization"),
    cl::init(true),
    cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
    "amdgpu-internalize-symbols",
    cl::desc("Enable elimination of non-kernel functions and unused globals"),
    cl::init(false),
    cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
    "amdgpu-early-inline-all",
    cl::desc("Inline all functions early"),
    cl::init(false),
    cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
    "amdgpu-sdwa-peephole",
    cl::desc("Enable SDWA peepholer"),
    cl::init(true));

static cl::opt<bool> EnableDPPCombine(
    "amdgpu-dpp-combine",
    cl::desc("Enable DPP combiner"),
    cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool>
    EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
                              cl::desc("Enable AMDGPU Alias Analysis"),
                              cl::init(true));

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
    "amdgpu-simplify-libcall",
    cl::desc("Enable amdgpu library simplifications"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
    "amdgpu-ir-lower-kernel-arguments",
    cl::desc("Lower kernel argument loads in IR pass"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableRegReassign(
    "amdgpu-reassign-regs",
    cl::desc("Enable register reassign optimizations on gfx10+"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

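// For example, -amdgpu-atomic-optimizer-strategy=DPP forces the DPP lowering
// for the scan, while =None disables the atomic optimizer entirely.
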
// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
    "amdgpu-mode-register",
    cl::desc("Enable mode register pass"),
    cl::init(true),
    cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

399 "amdgpu-scalar-ir-passes",
400 cl::desc("Enable scalar IR passes"),
401 cl::init(true),
402 cl::Hidden);
403
404static cl::opt<bool>
405 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
406 cl::desc("Enable lowering of lds to global memory pass "
407 "and asan instrument resulting IR."),
408 cl::init(true), cl::Hidden);
409
411 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
413 cl::Hidden);
414
416 "amdgpu-enable-pre-ra-optimizations",
417 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
418 cl::Hidden);
419
421 "amdgpu-enable-promote-kernel-arguments",
422 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
423 cl::Hidden, cl::init(true));
424
426 "amdgpu-enable-image-intrinsic-optimizer",
427 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
428 cl::Hidden);
429
static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<std::string>
    AMDGPUSchedStrategy("amdgpu-sched-strategy",
                        cl::desc("Select custom AMDGPU scheduling strategy."),
                        cl::Hidden, cl::init(""));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
    "amdgpu-enable-hipstdpar",
    cl::desc("Enable HIP Standard Parallelism Offload support"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnableAMDGPUAttributor("amdgpu-attributor-enable",
                           cl::desc("Enable AMDGPUAttributorPass"),
                           cl::init(true), cl::Hidden);

static cl::opt<bool> NewRegBankSelect(
    "new-reg-bank-select",
    cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
             "regbankselect"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> HasClosedWorldAssumption(
    "amdgpu-link-time-closed-world",
    cl::desc("Whether has closed-world assumption at link time"),
    cl::init(false), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
    "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
    createGCNMaxMemoryClauseMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);

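// Each MachineSchedRegistry above hooks into the generic scheduler selection
// machinery, so a strategy can be chosen by name, e.g. -misched=gcn-max-ilp.
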
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
         "v32:32-v48:64-v96:"
         "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
         "G1-ni:7:8:9";
}

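// For reference, the numbered address spaces in the layout string follow the
// AMDGPU convention: 0 = flat, 1 = global, 2 = region, 3 = local (LDS),
// 4 = constant, 5 = private (scratch), 6 = 32-bit constant, and 7/8/9 = the
// buffer fat/resource/strided pointers described above (marked non-integral
// via ni:7:8:9).
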
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : CodeGenTargetMachineImpl(
          T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
          getEffectiveRelocModel(RM),
          getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}

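// This parser backs the pass-registry entry for the atomic optimizer, so an
// opt pipeline string such as -passes='amdgpu-atomic-optimizer<strategy=dpp>'
// (or an empty parameter list, defaulting to Iterative) round-trips through
// here.
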
static Expected<AMDGPUAttributorOptions>
parseAMDGPUAttributorPassOptions(StringRef Params) {
  AMDGPUAttributorOptions Result;
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
              .str(),
          inconvertibleErrorCode());
    }
  }
  return Result;
}

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        if (EnableHipStdPar)
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level,
         ThinOrFullLTOPhase Phase) {
        if (!isLTOPreLink(Phase))
          PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        // We don't want to run internalization at per-module stage.
        if (InternalizeSymbols && !isLTOPreLink(Phase)) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
                                            OptimizationLevel Level,
                                            ThinOrFullLTOPhase Phase) {
    if (Level != OptimizationLevel::O0) {
      if (!isLTOPreLink(Phase))
        MPM.addPass(AMDGPUAttributorPass(*this));
    }
  });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableSwLowerLDS)
          PM.addPass(AMDGPUSwLowerLDSPass(*this));
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
        if (Level != OptimizationLevel::O0) {
          // Do we really need internalization in LTO?
          if (InternalizeSymbols) {
            PM.addPass(InternalizePass(mustPreserveGV));
            PM.addPass(GlobalDCEPass());
          }
          if (EnableAMDGPUAttributor) {
            AMDGPUAttributorOptions Opt;
            if (HasClosedWorldAssumption)
              Opt.IsClosedWorld = true;
            PM.addPass(AMDGPUAttributorPass(*this, Opt));
          }
        }
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        if (FilterName == "wwm")
          return onlyAllocateWWMRegs;
        return nullptr;
      });
}

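// The "sgpr"/"vgpr"/"wwm" names registered above let new-pass-manager regalloc
// pipelines be restricted to a single register class, e.g. (illustrative) a
// MIR pipeline element like regallocgreedy<sgpr>.
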
int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}

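// In other words, LDS, scratch, and region memory use all-ones as the "null"
// sentinel because address 0 is a valid, allocatable location there, while
// flat and global keep the conventional 0; a flat null pointer cast into
// addrspace(5) is therefore expected to lower to -1 rather than 0.
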
bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD) // TODO: Handle invariant load like constant.
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}

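// The matched predicate corresponds to IR of the following shape
// (illustrative):
//   %is.s = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %not.s = xor i1 %is.s, true
//   %is.p = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %not.p = xor i1 %is.p, true
//   %cond = and i1 %not.s, %not.p
// i.e. under %cond the flat pointer %p may be refined to a global pointer.
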
unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerFunctionAnalyses(FAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

//===----------------------------------------------------------------------===//
// AMDGPU Legacy Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(
        createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createWWMRegAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  Triple::ArchType Arch = TM.getTargetTriple().getArch();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass());

  // Lower LDS accesses to global memory pass if address sanitizer is enabled.
  if (EnableSwLowerLDS)
    addPass(createAMDGPUSwLowerLDSLegacyPass(&TM));

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can
  // cause unexpected behavior for subsequent passes. Placing it
  // here seems better, as these blocks would get cleaned up by
  // UnreachableBlockElim inserted next in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

//===----------------------------------------------------------------------===//
// GCN Legacy Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  Attribute SchedStrategyAttr =
      C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
  StringRef SchedStrategy = SchedStrategyAttr.isValid()
                                ? SchedStrategyAttr.getValueAsString()
                                : AMDGPUSchedStrategy;

  if (SchedStrategy == "max-ilp")
    return createGCNMaxILPMachineScheduler(C);

  if (SchedStrategy == "max-memory-clause")
    return createGCNMaxMemoryClauseMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

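// Note that a per-function "amdgpu-sched-strategy" attribute overrides the
// global -amdgpu-sched-strategy flag, e.g. (illustrative IR):
//   attributes #0 = { "amdgpu-sched-strategy"="max-ilp" }
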
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPrepareLegacyPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  addPass(createFixIrreduciblePass());
  addPass(createUnifyLoopExitsPass());
  addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions

  addPass(createAMDGPUAnnotateUniformValuesLegacy());
  addPass(createSIAnnotateControlFlowLegacyPass());
  // TODO: Move this right after structurizeCFG to avoid extra divergence
  // analysis. This depends on stopping SIAnnotateControlFlow from making
  // control flow modifications.
  addPass(createAMDGPURewriteUndefForPHILegacyPass());

  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisLegacyID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsLegacyID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineLegacyID);
  addPass(&SILoadStoreOptimizerLegacyID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWALegacyID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSELegacyID);
    addPass(&SIFoldOperandsLegacyID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsLegacyPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterLegacyID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesLegacyID);
  addPass(createSILowerI1CopiesLegacyPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  if (NewRegBankSelect) {
    addPass(createAMDGPURegBankSelectPass());
    addPass(createAMDGPURegBankLegalizePass());
  } else {
    addPass(new RegBankSelect());
  }
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &AMDGPUPreRAOptimizationsID);

  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
                  initializeDefaultWWMRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyWWMRegisterAllocator();

  return createFastWWMRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

1563 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1564 "and -vgpr-regalloc";
1565
bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(&SIPreAllocateWWMRegsLegacyID);

  // For allocating other wwm register operands.
  addPass(createWWMRegAllocPass(false));

  addPass(&SILowerWWMCopiesID);
  addPass(&AMDGPUReserveWWMRegsID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(false));

  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // At this point, the sgpr-regalloc has been done and it is good to have the
  // stack slot coloring to try to optimize the SGPR spill stack indices before
  // attempting the custom SGPR spill lowering.
  addPass(&StackSlotColoringID);

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(&SIPreAllocateWWMRegsLegacyID);

  // For allocating other whole wave mode registers.
  addPass(createWWMRegAllocPass(true));
  addPass(&SILowerWWMCopiesID);
  addPass(createVirtRegRewriter(false));
  addPass(&AMDGPUReserveWWMRegsID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsLegacyPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         {}, {});
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  for (const auto &[_, Info] : PFS.VRegInfosNamed) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }
  for (const auto &[_, Info] : PFS.VRegInfos) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }

  for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
    Register ParsedReg;
    if (parseRegister(YamlRegStr, ParsedReg))
      return true;
    MFI->SpillPhysVGPRs.push_back(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  if (YamlMFI.HasInitWholeWave)
    MFI->setInitWholeWave();

  return false;
}

//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
    GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC)
    : CodeGenPassBuilder(TM, Opts, PIC) {
  Opt.RequiresCodeGenSCCOrder = true;
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  // Garbage collection is not supported.
  disablePass<StackMapLivenessPass, FuncletLayoutPass,
              ShadowStackGCLoweringPass>();
}

void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
  addPass(AMDGPUPrintfRuntimeBindingPass());
  if (LowerCtorDtor)
    addPass(AMDGPUCtorDtorLoweringPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(AMDGPUImageIntrinsicOptimizerPass(TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  addPass(AMDGPUAlwaysInlinePass());
  addPass(AlwaysInlinerPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS)
    addPass(AMDGPULowerModuleLDSPass(TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(InferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None))
    addPass(AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy));

  addPass(AtomicExpandPass(&TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(AMDGPUPromoteAllocaPass(TM));
    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses(addPass);

    // TODO: Handle EnableAMDGPUAliasAnalysis

    // TODO: May want to move later or split into an early and late one.
    addPass(AMDGPUCodeGenPreparePass(TM));

    // TODO: LICM
  }

  Base::addIRPasses(addPass);

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass(addPass);
}

void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
  // AMDGPUAnnotateKernelFeaturesPass is missing here, but it will hopefully be
  // deleted soon.

  if (EnableLowerKernelArguments)
    addPass(AMDGPULowerKernelArgumentsPass(TM));

  // This lowering has been placed after codegenprepare to take advantage of
  // address mode matching (which is why it isn't put with the LDS lowerings).
  // It could be placed anywhere before uniformity annotations (an analysis
  // that it changes by splitting up fat pointers into their components)
  // but has been put before switch lowering and CFG flattening so that those
  // passes can run on the more optimized control flow this pass creates in
  // many cases.
  //
  // FIXME: This should ideally be put after the LoadStoreVectorizer.
  // However, due to some annoying facts about ResourceUsageAnalysis,
  // (especially as exercised in the resource-usage-dead-function test),
  // we need all the function passes codegenprepare all the way through
  // said resource usage analysis to run on the call graph produced
  // before codegenprepare runs (because codegenprepare will knock some
  // nodes out of the graph, which leads to function-level passes not
  // being run on them, which causes crashes in the resource usage analysis).
  addPass(AMDGPULowerBufferFatPointersPass(TM));

  Base::addCodeGenPrepare(addPass);

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(LoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
  // behavior for subsequent passes. Placing it here seems better, as these
  // blocks would get cleaned up by UnreachableBlockElim inserted next in the
  // pass flow.
  addPass(LowerSwitchPass());
}

void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(FlattenCFGPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(SinkingPass());

  addPass(AMDGPULateCodeGenPreparePass(TM));

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.

  addPass(AMDGPUUnifyDivergentExitNodesPass());
  addPass(FixIrreduciblePass());
  addPass(UnifyLoopExitsPass());
  addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));

  addPass(AMDGPUAnnotateUniformValuesPass());

  addPass(SIAnnotateControlFlowPass(TM));

  // TODO: Move this right after structurizeCFG to avoid extra divergence
  // analysis. This depends on stopping SIAnnotateControlFlow from making
  // control flow modifications.
  addPass(AMDGPURewriteUndefForPHIPass());

  addPass(LCSSAPass());

  if (TM.getOptLevel() > CodeGenOptLevel::Less)
    addPass(AMDGPUPerfHintAnalysisPass(TM));

  // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
  // isn't this in addInstSelector?
  addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>());
}

void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
  if (EnableEarlyIfConversion)
    addPass(EarlyIfConverterPass());

  Base::addILPOpts(addPass);
}

void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
                                             CreateMCStreamer) const {
  // TODO: Add AsmPrinter.
}

Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
  addPass(AMDGPUISelDAGToDAGPass(TM));
  addPass(SIFixSGPRCopiesPass());
  addPass(SILowerI1CopiesPass());
  return Error::success();
}

void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
    AddMachinePass &addPass) const {
  Base::addMachineSSAOptimization(addPass);

  addPass(SIFoldOperandsPass());
  if (EnableDPPCombine) {
    addPass(GCNDPPCombinePass());
  }
  addPass(SILoadStoreOptimizerPass());
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(SIPeepholeSDWAPass());
    addPass(EarlyMachineLICMPass());
    addPass(MachineCSEPass());
    addPass(SIFoldOperandsPass());
  }
  addPass(DeadMachineInstructionElimPass());
  addPass(SIShrinkInstructionsPass());
}

bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
                                             CodeGenOptLevel Level) const {
  if (Opt.getNumOccurrences())
    return Opt;
  if (TM.getOptLevel() < Level)
    return false;
  return Opt;
}

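// In other words: an explicit occurrence of the flag on the command line
// always wins; otherwise the flag's default only applies when compiling at or
// above the given optimization level.
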
void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
  if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(GVNPass());
  else
    addPass(EarlyCSEPass());
}

void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
    AddIRPass &addPass) const {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(LoopDataPrefetchPass());

  addPass(SeparateConstOffsetFromGEPPass());

  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(StraightLineStrengthReducePass());

  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass(addPass);

  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(NaryReassociatePass());

  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(EarlyCSEPass());
}
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:299
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:128
This file provides the interface for a simple, fast CSE pass.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Error addInstSelector(AddMachinePass &) const
void addMachineSSAOptimization(AddMachinePass &) const
void addEarlyCSEOrGVNPass(AddIRPass &) const
void addStraightLineScalarOptimizationPasses(AddIRPass &) const
AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC)
void addPreISel(AddIRPass &addPass) const
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
void addCodeGenPrepare(AddIRPass &) const
void addILPOpts(AddMachinePass &) const
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Definition: AlwaysInliner.h:32
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class provides access to building LLVM's passes.
void addILPOpts(AddMachinePass &) const
Add passes that optimize instruction level parallelism for out-of-order targets.
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void addMachineSSAOptimization(AddMachinePass &) const
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
void disablePass()
Allow the target to disable a specific pass by default.
void addIRPasses(AddIRPass &) const
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
implements a set of functionality in the TargetMachine class for targets that make use of the indepen...
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:739
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:337
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:291
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition: GVN.h:124
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:37
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:105
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:482
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:473
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:502
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:407
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:452
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:592
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:521
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
Definition: RegBankSelect.h:91
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:635
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:395
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition: Triple.h:880
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:903
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass(PassRegistry &)
void initializeSIInsertWaitcntsPass(PassRegistry &)
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition: Pass.h:76
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
char & GCNDPPCombineLegacyID
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeAMDGPUReserveWWMRegsPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
void initializeSILowerControlFlowLegacyPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
char & AMDGPUReserveWWMRegsID
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
char & SILowerControlFlowLegacyID
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition: GVN.cpp:3374
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
FunctionPass * createAMDGPURegBankLegalizePass()
char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:164
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & SIPeepholeSDWALegacyID
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:250
char & SIFoldOperandsLegacyID
FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:734
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
char & SIFixVGPRCopiesID
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1944
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3608
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
A simple and fast domtree-based CSE pass.
Definition: EarlyCSE.h:30
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
StringMap< VRegInfo * > VRegInfosNamed
Definition: MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition: MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:878
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.