AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both the AMDGPU target machine and the CodeGen pass
11/// builder. The target machine holds all of the hardware-specific information
12/// needed to emit code for SI+ GPUs with the legacy pass manager pipeline,
13/// while the CodeGen pass builder handles the pipeline for the new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
22#include "AMDGPUIGroupLP.h"
23#include "AMDGPUISelDAGToDAG.h"
24#include "AMDGPUMacroFusion.h"
26#include "AMDGPUSplitModule.h"
30#include "GCNDPPCombine.h"
32#include "GCNSchedStrategy.h"
33#include "GCNVOPDUtils.h"
34#include "R600.h"
35#include "R600TargetMachine.h"
36#include "SIFixSGPRCopies.h"
37#include "SIFoldOperands.h"
39#include "SILowerSGPRSpills.h"
41#include "SIMachineScheduler.h"
43#include "SIPeepholeSDWA.h"
61#include "llvm/CodeGen/Passes.h"
64#include "llvm/IR/IntrinsicsAMDGPU.h"
65#include "llvm/IR/PassManager.h"
72#include "llvm/Transforms/IPO.h"
95#include <optional>
96
97using namespace llvm;
98using namespace llvm::PatternMatch;
99
100namespace {
101class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
102public:
103 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
104 : RegisterRegAllocBase(N, D, C) {}
105};
106
107class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
108public:
109 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
110 : RegisterRegAllocBase(N, D, C) {}
111};
112
113class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
114public:
115 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
116 : RegisterRegAllocBase(N, D, C) {}
117};
118
119static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
120 const MachineRegisterInfo &MRI,
121 const Register Reg) {
122 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
123 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
124}
125
126static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
127 const MachineRegisterInfo &MRI,
128 const Register Reg) {
129 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
130 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
131}
132
133static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
134 const MachineRegisterInfo &MRI,
135 const Register Reg) {
136 const SIMachineFunctionInfo *MFI =
137 MRI.getMF().getInfo<SIMachineFunctionInfo>();
138 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
139 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
140 MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
141}
142
143/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
144static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
145
146/// A dummy default pass factory indicates whether the register allocator is
147/// overridden on the command line.
148static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
149static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
150static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
151
152static SGPRRegisterRegAlloc
153defaultSGPRRegAlloc("default",
154 "pick SGPR register allocator based on -O option",
155 useDefaultRegisterAllocator);
156
157static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
158 RegisterPassParser<SGPRRegisterRegAlloc>>
159SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
160 cl::desc("Register allocator to use for SGPRs"));
161
162static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
163 RegisterPassParser<VGPRRegisterRegAlloc>>
164VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
165 cl::desc("Register allocator to use for VGPRs"));
166
167static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
168 RegisterPassParser<WWMRegisterRegAlloc>>
169 WWMRegAlloc("wwm-regalloc", cl::Hidden,
170 cl::init(&useDefaultRegisterAllocator),
171 cl::desc("Register allocator to use for WWM registers"));
172
173static void initializeDefaultSGPRRegisterAllocatorOnce() {
174 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
175
176 if (!Ctor) {
177 Ctor = SGPRRegAlloc;
178 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
179 }
180}
181
182static void initializeDefaultVGPRRegisterAllocatorOnce() {
183 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
184
185 if (!Ctor) {
186 Ctor = VGPRRegAlloc;
187 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
188 }
189}
190
191static void initializeDefaultWWMRegisterAllocatorOnce() {
192 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
193
194 if (!Ctor) {
195 Ctor = WWMRegAlloc;
196 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
197 }
198}
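
The three initializers above share one idiom: a once-flag guards adopting the command-line constructor as the class-wide default, so every later query sees a stable answer even under concurrent initialization. A minimal standalone sketch of the idiom, with illustrative (non-LLVM) names:

    #include <mutex>

    using PassCtor = void *(*)();          // stand-in for FunctionPassCtor
    static std::once_flag InitFlag;        // mirrors InitializeDefault*Flag
    static PassCtor DefaultCtor = nullptr; // mirrors the registry's default

    // Adopt the command-line constructor as the default exactly once;
    // subsequent calls (from any thread) are no-ops.
    static void initializeDefaultOnce(PassCtor FromCommandLine) {
      std::call_once(InitFlag, [&] {
        if (!DefaultCtor)
          DefaultCtor = FromCommandLine;
      });
    }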
199
200static FunctionPass *createBasicSGPRRegisterAllocator() {
201 return createBasicRegisterAllocator(onlyAllocateSGPRs);
202}
203
204static FunctionPass *createGreedySGPRRegisterAllocator() {
205 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
206}
207
208static FunctionPass *createFastSGPRRegisterAllocator() {
209 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
210}
211
212static FunctionPass *createBasicVGPRRegisterAllocator() {
213 return createBasicRegisterAllocator(onlyAllocateVGPRs);
214}
215
216static FunctionPass *createGreedyVGPRRegisterAllocator() {
217 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
218}
219
220static FunctionPass *createFastVGPRRegisterAllocator() {
221 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
222}
223
224static FunctionPass *createBasicWWMRegisterAllocator() {
225 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
226}
227
228static FunctionPass *createGreedyWWMRegisterAllocator() {
229 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
230}
231
232static FunctionPass *createFastWWMRegisterAllocator() {
233 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
234}
235
236static SGPRRegisterRegAlloc basicRegAllocSGPR(
237 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
238static SGPRRegisterRegAlloc greedyRegAllocSGPR(
239 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
240
241static SGPRRegisterRegAlloc fastRegAllocSGPR(
242 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
243
244
245static VGPRRegisterRegAlloc basicRegAllocVGPR(
246 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
247static VGPRRegisterRegAlloc greedyRegAllocVGPR(
248 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
249
250static VGPRRegisterRegAlloc fastRegAllocVGPR(
251 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
252static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
253 "basic register allocator",
254 createBasicWWMRegisterAllocator);
255static WWMRegisterRegAlloc
256 greedyRegAllocWWMReg("greedy", "greedy register allocator",
257 createGreedyWWMRegisterAllocator);
258static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
259 createFastWWMRegisterAllocator);
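
With the factories registered under these names, the -sgpr-regalloc, -vgpr-regalloc, and -wwm-regalloc options above accept them directly. Illustrative invocations (foo.ll is a placeholder input):

    // llc -mtriple=amdgcn -sgpr-regalloc=fast -vgpr-regalloc=greedy foo.ll
    // llc -mtriple=amdgcn -wwm-regalloc=basic foo.ll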
260
261static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
262 return Phase == ThinOrFullLTOPhase::FullLTOPreLink ||
263 Phase == ThinOrFullLTOPhase::ThinLTOPreLink;
264}
265} // anonymous namespace
266
267static cl::opt<bool>
268EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
269 cl::desc("Run early if-conversion"),
270 cl::init(false));
271
272static cl::opt<bool>
273OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
274 cl::desc("Run pre-RA exec mask optimizations"),
275 cl::init(true));
276
277static cl::opt<bool>
278 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
279 cl::desc("Lower GPU ctor / dtors to globals on the device."),
280 cl::init(true), cl::Hidden);
281
282// Option to disable vectorizer for tests.
283static cl::opt<bool> EnableLoadStoreVectorizer(
284 "amdgpu-load-store-vectorizer",
285 cl::desc("Enable load store vectorizer"),
286 cl::init(true),
287 cl::Hidden);
288
289// Option to control global loads scalarization
290static cl::opt<bool> ScalarizeGlobal(
291 "amdgpu-scalarize-global-loads",
292 cl::desc("Enable global load scalarization"),
293 cl::init(true),
294 cl::Hidden);
295
296// Option to run internalize pass.
297static cl::opt<bool> InternalizeSymbols(
298 "amdgpu-internalize-symbols",
299 cl::desc("Enable elimination of non-kernel functions and unused globals"),
300 cl::init(false),
301 cl::Hidden);
302
303// Option to inline all early.
304static cl::opt<bool> EarlyInlineAll(
305 "amdgpu-early-inline-all",
306 cl::desc("Inline all functions early"),
307 cl::init(false),
308 cl::Hidden);
309
310static cl::opt<bool> RemoveIncompatibleFunctions(
311 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
312 cl::desc("Enable removal of functions when they "
313 "use features not supported by the target GPU"),
314 cl::init(true));
315
316static cl::opt<bool> EnableSDWAPeephole(
317 "amdgpu-sdwa-peephole",
318 cl::desc("Enable SDWA peepholer"),
319 cl::init(true));
320
321static cl::opt<bool> EnableDPPCombine(
322 "amdgpu-dpp-combine",
323 cl::desc("Enable DPP combiner"),
324 cl::init(true));
325
326// Enable address space based alias analysis
327static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
328 cl::desc("Enable AMDGPU Alias Analysis"),
329 cl::init(true));
330
331// Enable lib calls simplifications
332static cl::opt<bool> EnableLibCallSimplify(
333 "amdgpu-simplify-libcall",
334 cl::desc("Enable amdgpu library simplifications"),
335 cl::init(true),
336 cl::Hidden);
337
338static cl::opt<bool> EnableLowerKernelArguments(
339 "amdgpu-ir-lower-kernel-arguments",
340 cl::desc("Lower kernel argument loads in IR pass"),
341 cl::init(true),
342 cl::Hidden);
343
344static cl::opt<bool> EnableRegReassign(
345 "amdgpu-reassign-regs",
346 cl::desc("Enable register reassign optimizations on gfx10+"),
347 cl::init(true),
348 cl::Hidden);
349
350static cl::opt<bool> OptVGPRLiveRange(
351 "amdgpu-opt-vgpr-liverange",
352 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
353 cl::init(true), cl::Hidden);
354
355static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
356 "amdgpu-atomic-optimizer-strategy",
357 cl::desc("Select DPP or Iterative strategy for scan"),
358 cl::init(ScanOptions::Iterative),
359 cl::values(
360 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
361 clEnumValN(ScanOptions::Iterative, "Iterative",
362 "Use Iterative approach for scan"),
363 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
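
The strategy is selected with the enumerators registered above; for example (placeholder input file):

    // llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=DPP foo.ll
    // llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None foo.ll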
364
365// Enable Mode register optimization
366static cl::opt<bool> EnableSIModeRegisterPass(
367 "amdgpu-mode-register",
368 cl::desc("Enable mode register pass"),
369 cl::init(true),
370 cl::Hidden);
371
372// Enable GFX11+ s_delay_alu insertion
373static cl::opt<bool>
374 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
375 cl::desc("Enable s_delay_alu insertion"),
376 cl::init(true), cl::Hidden);
377
378// Enable GFX11+ VOPD
379static cl::opt<bool>
380 EnableVOPD("amdgpu-enable-vopd",
381 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
382 cl::init(true), cl::Hidden);
383
384// Option is used in lit tests to prevent deadcoding of patterns inspected.
385static cl::opt<bool>
386EnableDCEInRA("amdgpu-dce-in-ra",
387 cl::init(true), cl::Hidden,
388 cl::desc("Enable machine DCE inside regalloc"));
389
390static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
391 cl::desc("Adjust wave priority"),
392 cl::init(false), cl::Hidden);
393
394static cl::opt<bool> EnableScalarIRPasses(
395 "amdgpu-scalar-ir-passes",
396 cl::desc("Enable scalar IR passes"),
397 cl::init(true),
398 cl::Hidden);
399
400static cl::opt<bool>
401 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
402 cl::desc("Enable lowering of LDS to global memory and "
403 "ASan instrumentation of the resulting IR."),
404 cl::init(true), cl::Hidden);
405
406static cl::opt<bool, true> EnableLowerModuleLDS(
407 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
408 cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
409 cl::Hidden);
410
411static cl::opt<bool> EnablePreRAOptimizations(
412 "amdgpu-enable-pre-ra-optimizations",
413 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
414 cl::Hidden);
415
416static cl::opt<bool> EnablePromoteKernelArguments(
417 "amdgpu-enable-promote-kernel-arguments",
418 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
419 cl::Hidden, cl::init(true));
420
421static cl::opt<bool> EnableImageIntrinsicOptimizer(
422 "amdgpu-enable-image-intrinsic-optimizer",
423 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
424 cl::Hidden);
425
426static cl::opt<bool>
427 EnableLoopPrefetch("amdgpu-loop-prefetch",
428 cl::desc("Enable loop data prefetch on AMDGPU"),
429 cl::Hidden, cl::init(false));
430
431static cl::opt<std::string>
432 AMDGPUSchedStrategy("amdgpu-sched-strategy",
433 cl::desc("Select custom AMDGPU scheduling strategy."),
434 cl::Hidden, cl::init(""));
435
436static cl::opt<bool> EnableRewritePartialRegUses(
437 "amdgpu-enable-rewrite-partial-reg-uses",
438 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
439 cl::Hidden);
440
441static cl::opt<bool> EnableHipStdPar(
442 "amdgpu-enable-hipstdpar",
443 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
444 cl::Hidden);
445
446static cl::opt<bool>
447 EnableAMDGPUAttributor("amdgpu-attributor-enable",
448 cl::desc("Enable AMDGPUAttributorPass"),
449 cl::init(true), cl::Hidden);
450
451static cl::opt<bool> NewRegBankSelect(
452 "new-reg-bank-select",
453 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
454 "regbankselect"),
455 cl::init(false), cl::Hidden);
456
457static cl::opt<bool> HasClosedWorldAssumption(
458 "amdgpu-link-time-closed-world",
459 cl::desc("Whether to assume a closed world at link time"),
460 cl::init(false), cl::Hidden);
461
462extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
463 // Register the target
464 RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
465 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
466
543}
544
545static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
546 return std::make_unique<AMDGPUTargetObjectFile>();
547}
548
549static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
550 return new SIScheduleDAGMI(C);
551}
552
553static ScheduleDAGInstrs *
554createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
555 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
556 ScheduleDAGMILive *DAG =
557 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
558 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
559 if (ST.shouldClusterStores())
560 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
561 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
562 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
563 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
564 return DAG;
565}
566
567static ScheduleDAGInstrs *
568createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
569 ScheduleDAGMILive *DAG =
570 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
571 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
572 return DAG;
573}
574
575static ScheduleDAGInstrs *
576createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
577 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
578 ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
579 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
580 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
581 if (ST.shouldClusterStores())
582 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
583 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
584 return DAG;
585}
586
587static ScheduleDAGInstrs *
588createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
589 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
590 auto *DAG = new GCNIterativeScheduler(
591 C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
592 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
593 if (ST.shouldClusterStores())
594 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
595 return DAG;
596}
597
598static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
599 return new GCNIterativeScheduler(C,
600 GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
601}
602
603static ScheduleDAGInstrs *
604createIterativeILPMachineScheduler(MachineSchedContext *C) {
605 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
606 auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
607 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
608 if (ST.shouldClusterStores())
609 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
610 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
611 return DAG;
612}
613
614static MachineSchedRegistry
615SISchedRegistry("si", "Run SI's custom scheduler",
616 createSIMachineScheduler);
617
618static MachineSchedRegistry GCNMaxOccupancySchedRegistry(
619 "gcn-max-occupancy",
620 "Run GCN scheduler to maximize occupancy",
621 createGCNMaxOccupancyMachineScheduler);
622
623static MachineSchedRegistry
624 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
625 createGCNMaxILPMachineScheduler);
626
627static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
628 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
629 createGCNMaxMemoryClauseMachineScheduler);
630
631static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
632 "gcn-iterative-max-occupancy-experimental",
633 "Run GCN scheduler to maximize occupancy (experimental)",
634 createIterativeGCNMaxOccupancyMachineScheduler);
635
636static MachineSchedRegistry GCNMinRegSchedRegistry(
637 "gcn-iterative-minreg",
638 "Run GCN iterative scheduler for minimal register usage (experimental)",
639 createMinRegScheduler);
640
641static MachineSchedRegistry GCNILPSchedRegistry(
642 "gcn-iterative-ilp",
643 "Run GCN iterative scheduler for ILP scheduling (experimental)",
644 createIterativeILPMachineScheduler);
645
646static StringRef computeDataLayout(const Triple &TT) {
647 if (TT.getArch() == Triple::r600) {
648 // 32-bit pointers.
649 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
650 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
651 }
652
653 // 32-bit private, local, and region pointers. 64-bit global, constant and
654 // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
655 // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
656 // (address space 7), and 128-bit non-integral buffer resources (address
657 // space 8) which cannot be non-trivially accessed by LLVM memory operations
658 // like getelementptr.
659 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
660 "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
661 "v32:32-v48:64-v96:"
662 "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
663 "G1-ni:7:8:9";
664}
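
The returned layout string can be sanity-checked through the ordinary DataLayout API. A small sketch over an abbreviated subset of the amdgcn string above (the assertions restate the pointer widths described in the comment):

    #include <cassert>
    #include "llvm/IR/DataLayout.h"

    void checkAMDGCNPointerWidths() {
      llvm::DataLayout DL(
          "e-p:64:64-p3:32:32-p5:32:32-p7:160:256:256:32-ni:7:8:9");
      assert(DL.getPointerSizeInBits(0) == 64);  // flat/generic pointers
      assert(DL.getPointerSizeInBits(3) == 32);  // LDS (local)
      assert(DL.getPointerSizeInBits(5) == 32);  // private (scratch)
      assert(DL.getPointerSizeInBits(7) == 160); // buffer fat pointers
    }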
665
666static StringRef getGPUOrDefault(const Triple &TT,
667 StringRef GPU) {
668 if (!GPU.empty())
669 return GPU;
670
671 // Need to default to a target with flat support for HSA.
672 if (TT.getArch() == Triple::amdgcn)
673 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
674
675 return "r600";
676}
677
678static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
679 // The AMDGPU toolchain only supports generating shared objects, so we
680 // must always use PIC.
681 return Reloc::PIC_;
682}
683
684AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
685 StringRef CPU, StringRef FS,
686 const TargetOptions &Options,
687 std::optional<Reloc::Model> RM,
688 std::optional<CodeModel::Model> CM,
689 CodeGenOptLevel OptLevel)
690 : CodeGenTargetMachineImpl(
691 T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
692 getEffectiveRelocModel(RM),
693 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
694 TLOF(createTLOF(getTargetTriple())) {
695 initAsmInfo();
696 if (TT.getArch() == Triple::amdgcn) {
697 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
698 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
699 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
700 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
701 }
702}
703
706
708
709StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
710 Attribute GPUAttr = F.getFnAttribute("target-cpu");
711 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
712}
713
714StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
715 Attribute FSAttr = F.getFnAttribute("target-features");
716
717 return FSAttr.isValid() ? FSAttr.getValueAsString()
718 : getTargetFeatureString();
719}
720
721/// Predicate for Internalize pass.
722static bool mustPreserveGV(const GlobalValue &GV) {
723 if (const Function *F = dyn_cast<Function>(&GV))
724 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
725 F->getName().starts_with("__sanitizer_") ||
726 AMDGPU::isEntryFunctionCC(F->getCallingConv());
727
728 GV.removeDeadConstantUsers();
729 return !GV.use_empty();
730}
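
This predicate is the callback the Internalize pass consumes; a sketch of the wiring (the real call site is in the LTO callback registered further down):

    llvm::ModulePassManager MPM;
    MPM.addPass(llvm::InternalizePass(mustPreserveGV)); // keep kernels and sanitizer symbols
    MPM.addPass(llvm::GlobalDCEPass());                 // then drop whatever became dead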
731
732void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
733 AAM.registerFunctionAnalysis<AMDGPUAA>();
734}
735
736static Expected<ScanOptions>
737parseAMDGPUAtomicOptimizerParams(StringRef Params) {
738 if (Params.empty())
739 return AMDGPUAtomicOptimizerStrategy;
740 Params.consume_front("strategy=");
741 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
742 .Case("dpp", ScanOptions::DPP)
743 .Cases("iterative", "", ScanOptions::Iterative)
744 .Case("none", ScanOptions::None)
745 .Default(std::nullopt);
746 if (Result)
747 return *Result;
748 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
749}
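
This parser services the parameterized pass-pipeline syntax. A sketch of exercising it through the standard new-PM entry point (error handling elided; the pass name is registered via AMDGPUPassRegistry.def):

    llvm::PassBuilder PB;
    llvm::ModulePassManager MPM;
    llvm::cantFail(PB.parsePassPipeline(
        MPM, "function(amdgpu-atomic-optimizer<strategy=dpp>)"));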
750
751static Expected<AMDGPUAttributorOptions>
752parseAMDGPUAttributorPassOptions(StringRef Params) {
753 AMDGPUAttributorOptions Result;
754 while (!Params.empty()) {
755 StringRef ParamName;
756 std::tie(ParamName, Params) = Params.split(';');
757 if (ParamName == "closed-world") {
758 Result.IsClosedWorld = true;
759 } else {
760 return make_error<StringError>(
761 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
762 .str(),
763 inconvertibleErrorCode());
764 }
765 }
766 return Result;
767}
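
Likewise, the attributor option parsed above corresponds to pipeline strings of the form (a sketch):

    llvm::cantFail(PB.parsePassPipeline(MPM, "amdgpu-attributor<closed-world>"));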
768
769void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
770
771#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
772#include "llvm/Passes/TargetPassRegistry.inc"
773
774 PB.registerPipelineStartEPCallback(
775 [](ModulePassManager &PM, OptimizationLevel Level) {
776 if (EnableHipStdPar)
777 PM.addPass(HipStdParAcceleratorCodeSelectionPass());
778 });
779
784
785 if (Level == OptimizationLevel::O0)
786 return;
787
789
790 // We don't want to run internalization at per-module stage.
794 }
795
798 });
799
801 [](FunctionPassManager &FPM, OptimizationLevel Level) {
802 if (Level == OptimizationLevel::O0)
803 return;
804
808 });
809
810 PB.registerCGSCCOptimizerLateEPCallback(
811 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
812 if (Level == OptimizationLevel::O0)
813 return;
814
815 FunctionPassManager FPM;
816
817 // Add promote kernel arguments pass to the opt pipeline right before
818 // infer address spaces which is needed to do actual address space
819 // rewriting.
820 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
821 EnablePromoteKernelArguments)
822 FPM.addPass(AMDGPUPromoteKernelArgumentsPass());
823
824 // Add infer address spaces pass to the opt pipeline after inlining
825 // but before SROA to increase SROA opportunities.
826 FPM.addPass(InferAddressSpacesPass());
827
828 // This should run after inlining to have any chance of doing
829 // anything, and before other cleanup optimizations.
830 FPM.addPass(AMDGPULowerKernelAttributesPass());
831
832 if (Level != OptimizationLevel::O0) {
833 // Promote alloca to vector before SROA and loop unroll. If we
834 // manage to eliminate allocas before unroll we may choose to unroll
835 // less.
836 FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
837 }
838
839 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
840 });
841
842 // FIXME: Why is AMDGPUAttributor not in CGSCC?
843 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
844 OptimizationLevel Level,
845 ThinOrFullLTOPhase Phase) {
846 if (Level != OptimizationLevel::O0) {
847 if (!isLTOPreLink(Phase))
848 MPM.addPass(AMDGPUAttributorPass(*this));
849 }
850 });
851
852 PB.registerFullLinkTimeOptimizationLastEPCallback(
853 [this](ModulePassManager &PM, OptimizationLevel Level) {
854 // We want to support the -lto-partitions=N option as "best effort".
855 // For that, we need to lower LDS earlier in the pipeline before the
856 // module is partitioned for codegen.
857 if (EnableSwLowerLDS)
858 PM.addPass(AMDGPUSwLowerLDSPass(*this));
859 if (EnableLowerModuleLDS)
860 PM.addPass(AMDGPULowerModuleLDSPass(*this));
861 if (Level != OptimizationLevel::O0) {
862 // Do we really need internalization in LTO?
863 if (InternalizeSymbols) {
864 PM.addPass(InternalizePass(mustPreserveGV));
865 PM.addPass(GlobalDCEPass());
866 }
867 if (EnableAMDGPUAttributor) {
868 AMDGPUAttributorOptions Opt;
869 if (HasClosedWorldAssumption)
870 Opt.IsClosedWorld = true;
871 PM.addPass(AMDGPUAttributorPass(*this, Opt));
872 }
873 }
874 });
875
876 PB.registerRegClassFilterParsingCallback(
877 [](StringRef FilterName) -> RegAllocFilterFunc {
878 if (FilterName == "sgpr")
879 return onlyAllocateSGPRs;
880 if (FilterName == "vgpr")
881 return onlyAllocateVGPRs;
882 if (FilterName == "wwm")
883 return onlyAllocateWWMRegs;
884 return nullptr;
885 });
886}
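
The filter names registered here become usable as parameters of the new-PM register allocator passes in MIR pipelines; illustrative spellings (treat the exact syntax as an assumption):

    // llc -passes='regallocfast<filter=sgpr>' ...
    // llc -passes='regallocfast<filter=wwm>' ...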
887
888int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
889 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
890 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
891 AddrSpace == AMDGPUAS::REGION_ADDRESS)
892 ? -1
893 : 0;
894}
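
A quick illustration of the convention (getNullPointerValue is a static member, so it can be queried directly; the fragment assumes <cassert> and the AMDGPUAS definitions are available):

    // Scratch/LDS/region encode null as all-ones; every other space uses 0.
    assert(AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::LOCAL_ADDRESS) == -1);
    assert(AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::GLOBAL_ADDRESS) == 0);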
895
896bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
897 unsigned DestAS) const {
898 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
899 AMDGPU::isFlatGlobalAddrSpace(DestAS);
900}
901
902unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
903 const auto *LD = dyn_cast<LoadInst>(V);
904 if (!LD) // TODO: Handle invariant load like constant.
905 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
906
907 // It must be a generic pointer loaded.
908 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
909
910 const auto *Ptr = LD->getPointerOperand();
911 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
912 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
913 // For a generic pointer loaded from constant memory, it can be assumed to be
914 // a global pointer, since constant memory is only populated on the host
915 // side. As implied by the offload programming model, only global pointers
916 // can be referenced on the host side.
917 return AMDGPUAS::GLOBAL_ADDRESS;
918}
919
920std::pair<const Value *, unsigned>
921AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
922 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
923 switch (II->getIntrinsicID()) {
924 case Intrinsic::amdgcn_is_shared:
925 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
926 case Intrinsic::amdgcn_is_private:
927 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
928 default:
929 break;
930 }
931 return std::pair(nullptr, -1);
932 }
933 // Check the global pointer predication based on
934 // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
935 // the order of 'is_shared' and 'is_private' is not significant.
936 Value *Ptr;
937 if (match(
938 const_cast<Value *>(V),
939 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
940 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
941 m_Deferred(Ptr))))))
942 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
943
944 return std::pair(nullptr, -1);
945}
946
947unsigned
948AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
949 switch (Kind) {
950 case PseudoSourceValue::Stack:
951 case PseudoSourceValue::FixedStack:
952 return AMDGPUAS::PRIVATE_ADDRESS;
953 case PseudoSourceValue::ConstantPool:
954 case PseudoSourceValue::GOT:
955 case PseudoSourceValue::JumpTable:
956 case PseudoSourceValue::GlobalValueCallEntry:
957 case PseudoSourceValue::ExternalSymbolCallEntry:
958 return AMDGPUAS::CONSTANT_ADDRESS;
959 }
960 return AMDGPUAS::FLAT_ADDRESS;
961}
962
963bool AMDGPUTargetMachine::splitModule(
964 Module &M, unsigned NumParts,
965 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
966 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
967 // but all current users of this API don't have one ready and would need to
968 // create one anyway. Let's hide the boilerplate for now to keep it simple.
969
970 LoopAnalysisManager LAM;
971 FunctionAnalysisManager FAM;
972 CGSCCAnalysisManager CGAM;
973 ModuleAnalysisManager MAM;
974
975 PassBuilder PB(this);
976 PB.registerModuleAnalyses(MAM);
977 PB.registerFunctionAnalyses(FAM);
978 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
979
980 ModulePassManager MPM;
981 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
982 MPM.run(M, MAM);
983 return true;
984}
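
A sketch of driving the split from client code (target machine setup elided; consumePartition is a hypothetical per-part codegen job):

    void splitIntoFourParts(GCNTargetMachine &TM, llvm::Module &M) {
      bool DidSplit = TM.splitModule(
          M, /*NumParts=*/4, [](std::unique_ptr<llvm::Module> Part) {
            consumePartition(std::move(Part)); // hypothetical helper
          });
      (void)DidSplit; // always true here: the AMDGPU override never fails
    }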
985
986//===----------------------------------------------------------------------===//
987// GCN Target Machine (SI+)
988//===----------------------------------------------------------------------===//
989
990GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
991 StringRef CPU, StringRef FS,
992 const TargetOptions &Options,
993 std::optional<Reloc::Model> RM,
994 std::optional<CodeModel::Model> CM,
995 CodeGenOptLevel OL, bool JIT)
996 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
997
998const TargetSubtargetInfo *
999GCNTargetMachine::getSubtargetImpl(const Function &F) const {
1000 StringRef GPU = getGPUName(F);
1001 StringRef FS = getFeatureString(F);
1002
1003 SmallString<128> SubtargetKey(GPU);
1004 SubtargetKey.append(FS);
1005
1006 auto &I = SubtargetMap[SubtargetKey];
1007 if (!I) {
1008 // This needs to be done before we create a new subtarget since any
1009 // creation will depend on the TM and the code generation flags on the
1010 // function that reside in TargetOptions.
1011 resetTargetOptions(F);
1012 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1013 }
1014
1015 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1016
1017 return I.get();
1018}
1019
1020TargetTransformInfo
1021GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
1022 return TargetTransformInfo(GCNTTIImpl(this, F));
1023}
1024
1025Error GCNTargetMachine::buildCodeGenPipeline(
1026 ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
1027 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1028 PassInstrumentationCallbacks *PIC) {
1029 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1030 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1031}
1032
1033//===----------------------------------------------------------------------===//
1034// AMDGPU Legacy Pass Setup
1035//===----------------------------------------------------------------------===//
1036
1037std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1038 return getStandardCSEConfigForOpt(TM->getOptLevel());
1039}
1040
1041namespace {
1042
1043class GCNPassConfig final : public AMDGPUPassConfig {
1044public:
1045 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1046 : AMDGPUPassConfig(TM, PM) {
1047 // It is necessary to know the register usage of the entire call graph. We
1048 // allow calls without EnableAMDGPUFunctionCalls if they are marked
1049 // noinline, so this is always required.
1050 setRequiresCodeGenSCCOrder(true);
1051 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1052 }
1053
1054 GCNTargetMachine &getGCNTargetMachine() const {
1055 return getTM<GCNTargetMachine>();
1056 }
1057
1058 ScheduleDAGInstrs *
1059 createMachineScheduler(MachineSchedContext *C) const override;
1060
1061 ScheduleDAGMI *
1062 createPostMachineScheduler(MachineSchedContext *C) const override {
1063 ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
1064 C, std::make_unique<PostGenericScheduler>(C),
1065 /*RemoveKillFlags=*/true);
1066 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1067 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1068 if (ST.shouldClusterStores())
1069 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1070 DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
1071 DAG->addMutation(
1072 createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
1073 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1074 DAG->addMutation(createVOPDPairingMutation());
1075 return DAG;
1076 }
1077
1078 bool addPreISel() override;
1079 void addMachineSSAOptimization() override;
1080 bool addILPOpts() override;
1081 bool addInstSelector() override;
1082 bool addIRTranslator() override;
1083 void addPreLegalizeMachineIR() override;
1084 bool addLegalizeMachineIR() override;
1085 void addPreRegBankSelect() override;
1086 bool addRegBankSelect() override;
1087 void addPreGlobalInstructionSelect() override;
1088 bool addGlobalInstructionSelect() override;
1089 void addFastRegAlloc() override;
1090 void addOptimizedRegAlloc() override;
1091
1092 FunctionPass *createSGPRAllocPass(bool Optimized);
1093 FunctionPass *createVGPRAllocPass(bool Optimized);
1094 FunctionPass *createWWMRegAllocPass(bool Optimized);
1095 FunctionPass *createRegAllocPass(bool Optimized) override;
1096
1097 bool addRegAssignAndRewriteFast() override;
1098 bool addRegAssignAndRewriteOptimized() override;
1099
1100 bool addPreRewrite() override;
1101 void addPostRegAlloc() override;
1102 void addPreSched2() override;
1103 void addPreEmitPass() override;
1104};
1105
1106} // end anonymous namespace
1107
1108AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
1109 : TargetPassConfig(TM, PM) {
1110 // Exceptions and StackMaps are not supported, so these passes will never do
1111 // anything.
1112 disablePass(&StackMapLivenessID);
1113 disablePass(&FuncletLayoutID);
1114 // Garbage collection is not supported.
1115 disablePass(&GCLoweringID);
1116 disablePass(&ShadowStackGCLoweringID);
1117}
1118
1119void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
1120 if (getOptLevel() == CodeGenOptLevel::Aggressive)
1121 addPass(createGVNPass());
1122 else
1123 addPass(createEarlyCSEPass());
1124}
1125
1126void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
1127 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
1128 addPass(createLoopDataPrefetchPass());
1129 addPass(createSeparateConstOffsetFromGEPPass());
1130 // ReassociateGEPs exposes more opportunities for SLSR. See
1131 // the example in reassociate-geps-and-slsr.ll.
1132 addPass(createStraightLineStrengthReducePass());
1133 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1134 // EarlyCSE can reuse.
1135 addEarlyCSEOrGVNPass();
1136 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1137 addPass(createNaryReassociatePass());
1138 // NaryReassociate on GEPs creates redundant common expressions, so run
1139 // EarlyCSE after it.
1140 addPass(createEarlyCSEPass());
1141}
1142
1145
1149
1150 // There is no reason to run these.
1154
1156 if (LowerCtorDtor)
1158
1161
1162 // This can be disabled by passing ::Disable here or on the command line
1163 // with --expand-variadics-override=disable.
1165
1166 // Function calls are not supported, so make sure we inline everything.
1169
1170 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1171 if (Arch == Triple::r600)
1173
1174 // Replace OpenCL enqueued block function pointers with global variables.
1176
1177 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1178 if (EnableSwLowerLDS)
1180
1181 // Runs before PromoteAlloca so the latter can account for function uses
1184 }
1185
1188
1189 // Run atomic optimizer before Atomic Expand
1194 }
1195
1197
1200
1203
1207 AAResults &AAR) {
1208 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1209 AAR.addAAResult(WrapperPass->getResult());
1210 }));
1211 }
1212
1214 // TODO: May want to move later or split into an early and late one.
1216 }
1217
1218 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1219 // have expanded.
1222 }
1223
1225
1226 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1227 // example, GVN can combine
1228 //
1229 // %0 = add %a, %b
1230 // %1 = add %b, %a
1231 //
1232 // and
1233 //
1234 // %0 = shl nsw %a, 2
1235 // %1 = shl %a, 2
1236 //
1237 // but EarlyCSE can do neither of them.
1240}
1241
1244 // FIXME: This pass adds 2 hacky attributes that can be replaced with an
1245 // analysis, and should be removed.
1247 }
1248
1252
1254 // This lowering has been placed after codegenprepare to take advantage of
1255 // address mode matching (which is why it isn't put with the LDS lowerings).
1256 // It could be placed anywhere before uniformity annotations (an analysis
1257 // that it changes by splitting up fat pointers into their components)
1258 // but has been put before switch lowering and CFG flattening so that those
1259 // passes can run on the more optimized control flow this pass creates in
1260 // many cases.
1261 //
1262 // FIXME: This should ideally be put after the LoadStoreVectorizer.
1263 // However, due to some annoying facts about ResourceUsageAnalysis,
1264 // (especially as exercised in the resource-usage-dead-function test),
1265 // we need all the function passes codegenprepare all the way through
1266 // said resource usage analysis to run on the call graph produced
1267 // before codegenprepare runs (because codegenprepare will knock some
1268 // nodes out of the graph, which leads to function-level passes not
1269 // being run on them, which causes crashes in the resource usage analysis).
1271 // In accordance with the above FIXME, manually force all the
1272 // function-level passes into a CGSCCPassManager.
1273 addPass(new DummyCGSCCPass());
1274 }
1275
1277
1280
1281 // LowerSwitch pass may introduce unreachable blocks that can
1282 // cause unexpected behavior for subsequent passes. Placing it
1283 // here seems better, so that these blocks get cleaned up by
1284 // UnreachableBlockElim inserted next in the pass flow.
1286}
1287
1288bool AMDGPUPassConfig::addPreISel() {
1289 if (TM->getOptLevel() > CodeGenOptLevel::None)
1290 addPass(createFlattenCFGPass());
1291 return false;
1292}
1293
1294bool AMDGPUPassConfig::addInstSelector() {
1295 addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
1296 return false;
1297}
1298
1299bool AMDGPUPassConfig::addGCPasses() {
1300 // Do nothing. GC is not supported.
1301 return false;
1302}
1303
1304ScheduleDAGInstrs *
1305AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
1306 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1307 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
1308 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1309 if (ST.shouldClusterStores())
1310 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1311 return DAG;
1312}
1313
1314//===----------------------------------------------------------------------===//
1315// GCN Legacy Pass Setup
1316//===----------------------------------------------------------------------===//
1317
1318ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1319 MachineSchedContext *C) const {
1320 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1321 if (ST.enableSIScheduler())
1322 return createSIMachineScheduler(C);
1323
1324 Attribute SchedStrategyAttr =
1325 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1326 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1327 ? SchedStrategyAttr.getValueAsString()
1328 : AMDGPUSchedStrategy;
1329
1330 if (SchedStrategy == "max-ilp")
1331 return createGCNMaxILPMachineScheduler(C);
1332
1333 if (SchedStrategy == "max-memory-clause")
1334 return createGCNMaxMemoryClauseMachineScheduler(C);
1335
1336 return createGCNMaxOccupancyMachineScheduler(C);
1337}
1338
1339bool GCNPassConfig::addPreISel() {
1340 AMDGPUPassConfig::addPreISel();
1341
1342 if (TM->getOptLevel() > CodeGenOptLevel::None)
1343 addPass(createSinkingPass());
1344
1345 if (TM->getOptLevel() > CodeGenOptLevel::None)
1346 addPass(createAMDGPULateCodeGenPrepareLegacyPass());
1347
1348 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1349 // regions formed by them.
1351 addPass(createFixIrreduciblePass());
1352 addPass(createUnifyLoopExitsPass());
1353 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1354
1357 // TODO: Move this right after structurizeCFG to avoid extra divergence
1358 // analysis. This depends on stopping SIAnnotateControlFlow from making
1359 // control flow modifications.
1361
1362 addPass(createLCSSAPass());
1363
1364 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1366
1367 return false;
1368}
1369
1370void GCNPassConfig::addMachineSSAOptimization() {
1371 TargetPassConfig::addMachineSSAOptimization();
1372
1373 // We want to fold operands after PeepholeOptimizer has run (or as part of
1374 // it), because it will eliminate extra copies making it easier to fold the
1375 // real source operand. We want to eliminate dead instructions after, so that
1376 // we see fewer uses of the copies. We then need to clean up the dead
1377 // instructions leftover after the operands are folded as well.
1378 //
1379 // XXX - Can we get away without running DeadMachineInstructionElim again?
1380 addPass(&SIFoldOperandsLegacyID);
1381 if (EnableDPPCombine)
1382 addPass(&GCNDPPCombineLegacyID);
1383 addPass(&SILoadStoreOptimizerLegacyID);
1384 if (isPassEnabled(EnableSDWAPeephole)) {
1385 addPass(&SIPeepholeSDWALegacyID);
1386 addPass(&EarlyMachineLICMID);
1387 addPass(&MachineCSELegacyID);
1388 addPass(&SIFoldOperandsLegacyID);
1389 }
1390 addPass(&DeadMachineInstructionElimID);
1391 addPass(createSIShrinkInstructionsLegacyPass());
1392}
1393
1394bool GCNPassConfig::addILPOpts() {
1395 if (EnableEarlyIfConversion)
1396 addPass(&EarlyIfConverterLegacyID);
1397
1398 TargetPassConfig::addILPOpts();
1399 return false;
1400}
1401
1402bool GCNPassConfig::addInstSelector() {
1403 AMDGPUPassConfig::addInstSelector();
1404 addPass(&SIFixSGPRCopiesLegacyID);
1405 addPass(createSILowerI1CopiesLegacyPass());
1406 return false;
1407}
1408
1409bool GCNPassConfig::addIRTranslator() {
1410 addPass(new IRTranslator(getOptLevel()));
1411 return false;
1412}
1413
1414void GCNPassConfig::addPreLegalizeMachineIR() {
1415 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1416 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1417 addPass(new Localizer());
1418}
1419
1420bool GCNPassConfig::addLegalizeMachineIR() {
1421 addPass(new Legalizer());
1422 return false;
1423}
1424
1425void GCNPassConfig::addPreRegBankSelect() {
1426 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1427 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1428 addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
1429}
1430
1431bool GCNPassConfig::addRegBankSelect() {
1432 if (NewRegBankSelect) {
1433 addPass(createAMDGPURegBankSelectPass());
1434 addPass(createAMDGPURegBankLegalizePass());
1435 } else {
1436 addPass(new RegBankSelect());
1437 }
1438 return false;
1439}
1440
1441void GCNPassConfig::addPreGlobalInstructionSelect() {
1442 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1443 addPass(createAMDGPURegBankCombiner(IsOptNone));
1444}
1445
1446bool GCNPassConfig::addGlobalInstructionSelect() {
1447 addPass(new InstructionSelect(getOptLevel()));
1448 return false;
1449}
1450
1451void GCNPassConfig::addFastRegAlloc() {
1452 // FIXME: We have to disable the verifier here because of PHIElimination +
1453 // TwoAddressInstructions disabling it.
1454
1455 // This must be run immediately after phi elimination and before
1456 // TwoAddressInstructions, otherwise the processing of the tied operand of
1457 // SI_ELSE will introduce a copy of the tied operand source after the else.
1458 insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);
1459
1460 insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1461
1462 TargetPassConfig::addFastRegAlloc();
1463}
1464
1465void GCNPassConfig::addOptimizedRegAlloc() {
1466 if (EnableDCEInRA)
1467 insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1468
1469 // FIXME: when an instruction has a Killed operand, and the instruction is
1470 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1471 // the register in LiveVariables, this would trigger a failure in verifier,
1472 // we should fix it and enable the verifier.
1473 if (OptVGPRLiveRange)
1474 insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);
1475
1476 // This must be run immediately after phi elimination and before
1477 // TwoAddressInstructions, otherwise the processing of the tied operand of
1478 // SI_ELSE will introduce a copy of the tied operand source after the else.
1479 insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);
1480
1481 if (EnableRewritePartialRegUses)
1482 insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);
1483
1484 if (isPassEnabled(EnablePreRAOptimizations))
1485 insertPass(&MachineSchedulerID, &GCNPreRAOptimizationsID);
1486
1487 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1488 // instructions that cause scheduling barriers.
1489 insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
1490
1491 if (OptExecMaskPreRA)
1492 insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
1493
1494 // This is not an essential optimization and it has a noticeable impact on
1495 // compilation time, so we only enable it from O2.
1496 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1497 insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
1498
1499 TargetPassConfig::addOptimizedRegAlloc();
1500}
1501
1502bool GCNPassConfig::addPreRewrite() {
1503 if (EnableRegReassign)
1504 addPass(&GCNNSAReassignID);
1505 return true;
1506}
1507
1508FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1509 // Initialize the global default.
1510 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1511 initializeDefaultSGPRRegisterAllocatorOnce);
1512
1513 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1514 if (Ctor != useDefaultRegisterAllocator)
1515 return Ctor();
1516
1517 if (Optimized)
1518 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1519
1520 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1521}
1522
1523FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1524 // Initialize the global default.
1525 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1526 initializeDefaultVGPRRegisterAllocatorOnce);
1527
1528 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1529 if (Ctor != useDefaultRegisterAllocator)
1530 return Ctor();
1531
1532 if (Optimized)
1533 return createGreedyVGPRRegisterAllocator();
1534
1535 return createFastVGPRRegisterAllocator();
1536}
1537
1538FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1539 // Initialize the global default.
1540 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1541 initializeDefaultWWMRegisterAllocatorOnce);
1542
1543 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1544 if (Ctor != useDefaultRegisterAllocator)
1545 return Ctor();
1546
1547 if (Optimized)
1548 return createGreedyWWMRegisterAllocator();
1549
1550 return createFastWWMRegisterAllocator();
1551}
1552
1553FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1554 llvm_unreachable("should not be used");
1555}
1556
1557static const char RegAllocOptNotSupportedMessage[] =
1558 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1559 "and -vgpr-regalloc";
1560
1561bool GCNPassConfig::addRegAssignAndRewriteFast() {
1562 if (!usingDefaultRegAlloc())
1563 report_fatal_error(RegAllocOptNotSupportedMessage);
1564
1565 addPass(&GCNPreRALongBranchRegID);
1566
1567 addPass(createSGPRAllocPass(false));
1568
1569 // Equivalent of PEI for SGPRs.
1570 addPass(&SILowerSGPRSpillsLegacyID);
1571
1572 // To allocate WWM registers used in whole quad mode operations (for shaders).
1573 addPass(&SIPreAllocateWWMRegsLegacyID);
1574
1575 // For allocating other wwm register operands.
1576 addPass(createWWMRegAllocPass(false));
1577
1578 addPass(&SILowerWWMCopiesID);
1579 addPass(&AMDGPUReserveWWMRegsID);
1580
1581 // For allocating per-thread VGPRs.
1582 addPass(createVGPRAllocPass(false));
1583
1584 return true;
1585}
1586
1587bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1588 if (!usingDefaultRegAlloc())
1589 report_fatal_error(RegAllocOptNotSupportedMessage);
1590
1591 addPass(&GCNPreRALongBranchRegID);
1592
1593 addPass(createSGPRAllocPass(true));
1594
1595 // Commit allocated register changes. This is mostly necessary because too
1596 // many things rely on the use lists of the physical registers, such as the
1597 // verifier. This is only necessary with allocators which use LiveIntervals,
1598 // since FastRegAlloc does the replacements itself.
1599 addPass(createVirtRegRewriter(false));
1600
1601 // At this point, the sgpr-regalloc has been done and it is good to have the
1602 // stack slot coloring to try to optimize the SGPR spill stack indices before
1603 // attempting the custom SGPR spill lowering.
1604 addPass(&StackSlotColoringID);
1605
1606 // Equivalent of PEI for SGPRs.
1607 addPass(&SILowerSGPRSpillsLegacyID);
1608
1609 // To allocate WWM registers used in whole quad mode operations (for shaders).
1610 addPass(&SIPreAllocateWWMRegsLegacyID);
1611
1612 // For allocating other whole wave mode registers.
1613 addPass(createWWMRegAllocPass(true));
1614 addPass(&SILowerWWMCopiesID);
1615 addPass(createVirtRegRewriter(false));
1616 addPass(&AMDGPUReserveWWMRegsID);
1617
1618 // For allocating per-thread VGPRs.
1619 addPass(createVGPRAllocPass(true));
1620
1621 addPreRewrite();
1622 addPass(&VirtRegRewriterID);
1623
1625
1626 return true;
1627}
1628
1629void GCNPassConfig::addPostRegAlloc() {
1630 addPass(&SIFixVGPRCopiesID);
1631 if (getOptLevel() > CodeGenOptLevel::None)
1632 addPass(&SIOptimizeExecMaskingID);
1633 TargetPassConfig::addPostRegAlloc();
1634}
1635
1636void GCNPassConfig::addPreSched2() {
1637 if (TM->getOptLevel() > CodeGenOptLevel::None)
1638 addPass(&SIShrinkInstructionsLegacyID);
1639 addPass(&SIPostRABundlerID);
1640}
1641
1642void GCNPassConfig::addPreEmitPass() {
1643 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1644 addPass(&GCNCreateVOPDID);
1645 addPass(createSIMemoryLegalizerPass());
1646 addPass(createSIInsertWaitcntsPass());
1647
1648 addPass(createSIModeRegisterPass());
1649
1650 if (getOptLevel() > CodeGenOptLevel::None)
1651 addPass(&SIInsertHardClausesID);
1652
1653 addPass(&SILateBranchLoweringPassID);
1654 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1655 addPass(createAMDGPUSetWavePriorityPass());
1656 if (getOptLevel() > CodeGenOptLevel::None)
1657 addPass(&SIPreEmitPeepholeID);
1658 // The hazard recognizer that runs as part of the post-ra scheduler does not
1659 // guarantee to be able to handle all hazards correctly. This is because if there
1660 // are multiple scheduling regions in a basic block, the regions are scheduled
1661 // bottom up, so when we begin to schedule a region we don't know what
1662 // instructions were emitted directly before it.
1663 //
1664 // Here we add a stand-alone hazard recognizer pass which can handle all
1665 // cases.
1666 addPass(&PostRAHazardRecognizerID);
1667
1668 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1669 addPass(&AMDGPUInsertDelayAluID);
1670
1671 addPass(&BranchRelaxationPassID);
1672}
1673
1674TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
1675 return new GCNPassConfig(*this, PM);
1676}
1677
1678void GCNTargetMachine::registerMachineRegisterInfoCallback(
1679 MachineFunction &MF) const {
1680 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1681 MF.getRegInfo().addDelegate(MFI);
1682}
1683
1684MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
1685 BumpPtrAllocator &Allocator, const Function &F,
1686 const TargetSubtargetInfo *STI) const {
1687 return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
1688 Allocator, F, static_cast<const GCNSubtarget *>(STI));
1689}
1690
1690yaml::MachineFunctionInfo *
1691GCNTargetMachine::createDefaultFuncInfoYAML() const {
1692 return new yaml::SIMachineFunctionInfo();
1693}
1694
1695yaml::MachineFunctionInfo *
1696GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
1697 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1698 return new yaml::SIMachineFunctionInfo(
1699 *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
1700}
1701
1702bool GCNTargetMachine::parseMachineFunctionInfo(
1703 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1704 SMDiagnostic &Error, SMRange &SourceRange) const {
1705 const yaml::SIMachineFunctionInfo &YamlMFI =
1706 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1707 MachineFunction &MF = PFS.MF;
1708 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1709 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1710
1711 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1712 return true;
1713
1714 if (MFI->Occupancy == 0) {
1715 // Fixup the subtarget dependent default value.
1716 MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
1717 }
1718
1719 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1720 Register TempReg;
1721 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1722 SourceRange = RegName.SourceRange;
1723 return true;
1724 }
1725 RegVal = TempReg;
1726
1727 return false;
1728 };
1729
1730 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1731 Register &RegVal) {
1732 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1733 };
1734
1735 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1736 return true;
1737
1738 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1739 return true;
1740
1741 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1742 MFI->LongBranchReservedReg))
1743 return true;
1744
1745 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1746 // Create a diagnostic for the register string literal.
1747 const MemoryBuffer &Buffer =
1748 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1749 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1750 RegName.Value.size(), SourceMgr::DK_Error,
1751 "incorrect register class for field", RegName.Value,
1752 {}, {});
1753 SourceRange = RegName.SourceRange;
1754 return true;
1755 };
1756
1757 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1758 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1759 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1760 return true;
1761
1762 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1763 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1764 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1765 }
1766
1767 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1768 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1769 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1770 }
1771
1772 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1773 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1774 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1775 }
1776
1777 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1778 Register ParsedReg;
1779 if (parseRegister(YamlReg, ParsedReg))
1780 return true;
1781
1782 MFI->reserveWWMRegister(ParsedReg);
1783 }
1784
1785 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1786 MFI->setFlag(Info->VReg, Info->Flags);
1787 }
1788 for (const auto &[_, Info] : PFS.VRegInfos) {
1789 MFI->setFlag(Info->VReg, Info->Flags);
1790 }
1791
1792 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1793 Register ParsedReg;
1794 if (parseRegister(YamlRegStr, ParsedReg))
1795 return true;
1796 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1797 }
1798
1799 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1800 const TargetRegisterClass &RC,
1801 ArgDescriptor &Arg, unsigned UserSGPRs,
1802 unsigned SystemSGPRs) {
1803 // Skip parsing if it's not present.
1804 if (!A)
1805 return false;
1806
1807 if (A->IsRegister) {
1808 Register Reg;
1809 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1810 SourceRange = A->RegisterName.SourceRange;
1811 return true;
1812 }
1813 if (!RC.contains(Reg))
1814 return diagnoseRegisterClass(A->RegisterName);
1815 Arg = ArgDescriptor::createRegister(Reg);
1816 } else
1817 Arg = ArgDescriptor::createStack(A->StackOffset);
1818 // Check and apply the optional mask.
1819 if (A->Mask)
1820 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1821
1822 MFI->NumUserSGPRs += UserSGPRs;
1823 MFI->NumSystemSGPRs += SystemSGPRs;
1824 return false;
1825 };
1826
1827 if (YamlMFI.ArgInfo &&
1828 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1829 AMDGPU::SGPR_128RegClass,
1830 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1831 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1832 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1833 2, 0) ||
1834 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1835 MFI->ArgInfo.QueuePtr, 2, 0) ||
1836 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1837 AMDGPU::SReg_64RegClass,
1838 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1839 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1840 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1841 2, 0) ||
1842 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1843 AMDGPU::SReg_64RegClass,
1844 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1845 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1846 AMDGPU::SGPR_32RegClass,
1847 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1848 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1849 AMDGPU::SGPR_32RegClass,
1850 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1851 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1852 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1853 0, 1) ||
1854 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1855 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1856 0, 1) ||
1857 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
1858 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
1859 0, 1) ||
1860 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
1861 AMDGPU::SGPR_32RegClass,
1862 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
1863 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
1864 AMDGPU::SGPR_32RegClass,
1865 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
1866 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
1867 AMDGPU::SReg_64RegClass,
1868 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
1869 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
1870 AMDGPU::SReg_64RegClass,
1871 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
1872 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
1873 AMDGPU::VGPR_32RegClass,
1874 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
1875 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
1876 AMDGPU::VGPR_32RegClass,
1877 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
1878 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
1879 AMDGPU::VGPR_32RegClass,
1880 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
1881 return true;
1882
1883 if (ST.hasIEEEMode())
1884 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
1885 if (ST.hasDX10ClampMode())
1886 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
1887
1888 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
1889 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
1890 ? DenormalMode::IEEE
1891 : DenormalMode::PreserveSign;
1892 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
1893 ? DenormalMode::IEEE
1894 : DenormalMode::PreserveSign;
1895
1896 MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
1897 ? DenormalMode::IEEE
1898 : DenormalMode::PreserveSign;
1899 MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
1900 ? DenormalMode::IEEE
1901 : DenormalMode::PreserveSign;
1902
1903 if (YamlMFI.HasInitWholeWave)
1904 MFI->setInitWholeWave();
1905
1906 return false;
1907}
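
For reference, the YAML fields consumed above appear in MIR input roughly as follows (a hand-written sketch in comments; register choices are illustrative):

    // machineFunctionInfo:
    //   scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
    //   frameOffsetReg:    '$sgpr33'
    //   stackPtrOffsetReg: '$sgpr32'
    //   wwmReservedRegs:   [ '$vgpr2' ]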
1908
1909//===----------------------------------------------------------------------===//
1910// AMDGPU CodeGen Pass Builder interface.
1911//===----------------------------------------------------------------------===//
1912
1913AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
1914 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
1915 PassInstrumentationCallbacks *PIC)
1916 : CodeGenPassBuilder(TM, Opts, PIC) {
1917 Opt.RequiresCodeGenSCCOrder = true;
1918 // Exceptions and StackMaps are not supported, so these passes will never do
1919 // anything.
1920 // Garbage collection is not supported.
1921 disablePass<StackMapLivenessPass, FuncletLayoutPass,
1922 ShadowStackGCLoweringPass>();
1923}
1924
1925void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
1926 // TODO: Missing AMDGPURemoveIncompatibleFunctions
1927
1929 if (LowerCtorDtor)
1930 addPass(AMDGPUCtorDtorLoweringPass());
1931
1934
1935 // This can be disabled by passing ::Disable here or on the command line
1936 // with --expand-variadics-override=disable.
1938
1939 addPass(AMDGPUAlwaysInlinePass());
1940 addPass(AlwaysInlinerPass());
1941
1942 // TODO: Missing OpenCLEnqueuedBlockLowering
1943
1944 // Runs before PromoteAlloca so the latter can account for function uses
1946 addPass(AMDGPULowerModuleLDSPass(TM));
1947
1949 addPass(InferAddressSpacesPass());
1950
1951 // Run atomic optimizer before Atomic Expand
1955
1956 // FIXME: Adding atomic-expand manages to break -passes=atomic-expand
1957 // addPass(AtomicExpandPass(TM));
1958
1960 addPass(AMDGPUPromoteAllocaPass(TM));
1963
1964 // TODO: Handle EnableAMDGPUAliasAnalysis
1965
1966 // TODO: May want to move later or split into an early and late one.
1967 addPass(AMDGPUCodeGenPreparePass(TM));
1968
1969 // TODO: LICM
1970 }
1971
1972 Base::addIRPasses(addPass);
1973
1974 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1975 // example, GVN can combine
1976 //
1977 // %0 = add %a, %b
1978 // %1 = add %b, %a
1979 //
1980 // and
1981 //
1982 // %0 = shl nsw %a, 2
1983 // %1 = shl %a, 2
1984 //
1985 // but EarlyCSE can do neither of them.
1987 addEarlyCSEOrGVNPass(addPass);
1988}
1989
1990void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
1991 // AMDGPUAnnotateKernelFeaturesPass is missing here, but it will hopefully be
1992 // deleted soon.
1993
1996
1997 // This lowering has been placed after codegenprepare to take advantage of
1998 // address mode matching (which is why it isn't put with the LDS lowerings).
1999 // It could be placed anywhere before uniformity annotations (an analysis
2000 // that it changes by splitting up fat pointers into their components)
2001 // but has been put before switch lowering and CFG flattening so that those
2002 // passes can run on the more optimized control flow this pass creates in
2003 // many cases.
2004 //
2005 // FIXME: This should ideally be put after the LoadStoreVectorizer.
2006 // However, due to some annoying facts about ResourceUsageAnalysis,
2007 // (especially as exercised in the resource-usage-dead-function test),
2008 // we need all the function passes codegenprepare all the way through
2009 // said resource usage analysis to run on the call graph produced
2010 // before codegenprepare runs (because codegenprepare will knock some
2011 // nodes out of the graph, which leads to function-level passes not
2012 // being run on them, which causes crashes in the resource usage analysis).
2014
2015 Base::addCodeGenPrepare(addPass);
2016
2018 addPass(LoadStoreVectorizerPass());
2019
2020 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
2021 // behavior for subsequent passes. Placing it here seems better, as these
2022 // blocks then get cleaned up by UnreachableBlockElim, inserted next in the
2023 // pass flow.
2024 addPass(LowerSwitchPass());
2025}
2026
2027void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2028
2030 addPass(FlattenCFGPass());
2031
2033 addPass(SinkingPass());
2034
2036
2037 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2038 // regions formed by them.
2039
2041 addPass(FixIrreduciblePass());
2042 addPass(UnifyLoopExitsPass());
2043 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2044
2046
2047 addPass(SIAnnotateControlFlowPass(TM));
2048
2049 // TODO: Move this right after structurizeCFG to avoid extra divergence
2050 // analysis. This depends on stopping SIAnnotateControlFlow from making
2051 // control flow modifications.
2053
2054 addPass(LCSSAPass());
2055
2058
2059 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2060 // isn't this in addInstSelector?
2062}
2063
2064void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2066 addPass(EarlyIfConverterPass());
2067
2068 Base::addILPOpts(addPass);
2069}
2070
2071void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2072 CreateMCStreamer) const {
2073 // TODO: Add AsmPrinter.
2074}
2075
2076Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2077 addPass(AMDGPUISelDAGToDAGPass(TM));
2078 addPass(SIFixSGPRCopiesPass());
2079 addPass(SILowerI1CopiesPass());
2080 return Error::success();
2081}
2082
2084 AddMachinePass &addPass) const {
2086
2087 addPass(SIFoldOperandsPass());
2088 if (EnableDPPCombine) {
2089 addPass(GCNDPPCombinePass());
2090 }
2091 addPass(SILoadStoreOptimizerPass());
2093 addPass(SIPeepholeSDWAPass());
2094 addPass(EarlyMachineLICMPass());
2095 addPass(MachineCSEPass());
2096 addPass(SIFoldOperandsPass());
2097 }
2099 addPass(SIShrinkInstructionsPass());
2100}
2101
2103 CodeGenOptLevel Level) const {
2104 if (Opt.getNumOccurrences())
2105 return Opt;
2106 if (TM.getOptLevel() < Level)
2107 return false;
2108 return Opt;
2109}
2110
2113 addPass(GVNPass());
2114 else
2115 addPass(EarlyCSEPass());
2116}
2117
2119 AddIRPass &addPass) const {
2121 addPass(LoopDataPrefetchPass());
2122
2124
2125 // ReassociateGEPs exposes more opportunities for SLSR. See
2126 // the example in reassociate-geps-and-slsr.ll.
2128
2129 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
2130 // EarlyCSE can reuse.
2131 addEarlyCSEOrGVNPass(addPass);
2132
2133 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2134 addPass(NaryReassociatePass());
2135
2136 // NaryReassociate on GEPs creates redundant common expressions, so run
2137 // EarlyCSE after it.
2138 addPass(EarlyCSEPass());
2139}
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:299
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:128
This file provides the interface for a simple, fast CSE pass.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Error addInstSelector(AddMachinePass &) const
void addMachineSSAOptimization(AddMachinePass &) const
void addEarlyCSEOrGVNPass(AddIRPass &) const
void addStraightLineScalarOptimizationPasses(AddIRPass &) const
AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC)
void addPreISel(AddIRPass &addPass) const
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
void addCodeGenPrepare(AddIRPass &) const
void addILPOpts(AddMachinePass &) const
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Definition: AlwaysInliner.h:32
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class provides access to building LLVM's passes.
void addILPOpts(AddMachinePass &) const
Add passes that optimize instruction level parallelism for out-of-order targets.
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void addMachineSSAOptimization(AddMachinePass &) const
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
void disablePass()
Allow the target to disable a specific pass by default.
void addIRPasses(AddIRPass &) const
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
implements a set of functionality in the TargetMachine class for targets that make use of the indepen...
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:739
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:337
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:291
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition: GVN.h:117
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:37
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:105
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:482
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:473
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:502
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:407
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:452
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:592
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:521
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
Definition: RegBankSelect.h:91
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:635
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:383
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:903
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition: Pass.h:76
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & SIOptimizeVGPRLiveRangeLegacyID
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
char & GCNDPPCombineLegacyID
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeAMDGPUReserveWWMRegsPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
char & AMDGPUReserveWWMRegsID
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
FunctionPass * createAMDGPURegBankLegalizePass()
char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:164
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3352
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & SIPeepholeSDWALegacyID
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:250
char & SIFoldOperandsLegacyID
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:734
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
char & SIFixVGPRCopiesID
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1944
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3608
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
A simple and fast domtree-based CSE pass.
Definition: EarlyCSE.h:30
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
StringMap< VRegInfo * > VRegInfosNamed
Definition: MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition: MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:878
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.