LLVM 20.0.0git
AMDGPUTargetMachine.cpp
Go to the documentation of this file.
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
22#include "AMDGPUIGroupLP.h"
23#include "AMDGPUISelDAGToDAG.h"
24#include "AMDGPUMacroFusion.h"
28#include "AMDGPUSplitModule.h"
32#include "GCNDPPCombine.h"
34#include "GCNSchedStrategy.h"
35#include "GCNVOPDUtils.h"
36#include "R600.h"
37#include "R600TargetMachine.h"
38#include "SIFixSGPRCopies.h"
39#include "SIFoldOperands.h"
41#include "SILowerSGPRSpills.h"
43#include "SIMachineScheduler.h"
45#include "SIPeepholeSDWA.h"
64#include "llvm/CodeGen/Passes.h"
67#include "llvm/IR/IntrinsicsAMDGPU.h"
68#include "llvm/IR/PassManager.h"
75#include "llvm/Transforms/IPO.h"
98#include <optional>
99
100using namespace llvm;
101using namespace llvm::PatternMatch;
102
103namespace {
104class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
105public:
106 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
107 : RegisterRegAllocBase(N, D, C) {}
108};
109
110class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
111public:
112 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
113 : RegisterRegAllocBase(N, D, C) {}
114};
115
116class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
117public:
118 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
119 : RegisterRegAllocBase(N, D, C) {}
120};
121
122static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
124 const Register Reg) {
125 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
126 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
127}
128
129static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
131 const Register Reg) {
132 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
133 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
134}
135
136static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
138 const Register Reg) {
139 const SIMachineFunctionInfo *MFI =
140 MRI.getMF().getInfo<SIMachineFunctionInfo>();
141 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
142 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
144}
145
146/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
147static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
148
149/// A dummy default pass factory indicates whether the register allocator is
150/// overridden on the command line.
151static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
152static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
153static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
154
155static SGPRRegisterRegAlloc
156defaultSGPRRegAlloc("default",
157 "pick SGPR register allocator based on -O option",
159
160static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
162SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
163 cl::desc("Register allocator to use for SGPRs"));
164
165static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
167VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
168 cl::desc("Register allocator to use for VGPRs"));
169
170static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
172 WWMRegAlloc("wwm-regalloc", cl::Hidden,
174 cl::desc("Register allocator to use for WWM registers"));
175
176static void initializeDefaultSGPRRegisterAllocatorOnce() {
177 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
178
179 if (!Ctor) {
180 Ctor = SGPRRegAlloc;
181 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
182 }
183}
184
185static void initializeDefaultVGPRRegisterAllocatorOnce() {
186 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
187
188 if (!Ctor) {
189 Ctor = VGPRRegAlloc;
190 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
191 }
192}
193
194static void initializeDefaultWWMRegisterAllocatorOnce() {
195 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
196
197 if (!Ctor) {
198 Ctor = WWMRegAlloc;
199 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
200 }
201}
202
203static FunctionPass *createBasicSGPRRegisterAllocator() {
204 return createBasicRegisterAllocator(onlyAllocateSGPRs);
205}
206
207static FunctionPass *createGreedySGPRRegisterAllocator() {
208 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
209}
210
211static FunctionPass *createFastSGPRRegisterAllocator() {
212 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
213}
214
215static FunctionPass *createBasicVGPRRegisterAllocator() {
216 return createBasicRegisterAllocator(onlyAllocateVGPRs);
217}
218
219static FunctionPass *createGreedyVGPRRegisterAllocator() {
220 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
221}
222
223static FunctionPass *createFastVGPRRegisterAllocator() {
224 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
225}
226
227static FunctionPass *createBasicWWMRegisterAllocator() {
228 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
229}
230
231static FunctionPass *createGreedyWWMRegisterAllocator() {
232 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
233}
234
235static FunctionPass *createFastWWMRegisterAllocator() {
236 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
237}
238
239static SGPRRegisterRegAlloc basicRegAllocSGPR(
240 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
241static SGPRRegisterRegAlloc greedyRegAllocSGPR(
242 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
243
244static SGPRRegisterRegAlloc fastRegAllocSGPR(
245 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
246
247
248static VGPRRegisterRegAlloc basicRegAllocVGPR(
249 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
250static VGPRRegisterRegAlloc greedyRegAllocVGPR(
251 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
252
253static VGPRRegisterRegAlloc fastRegAllocVGPR(
254 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
255static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
256 "basic register allocator",
257 createBasicWWMRegisterAllocator);
258static WWMRegisterRegAlloc
259 greedyRegAllocWWMReg("greedy", "greedy register allocator",
260 createGreedyWWMRegisterAllocator);
261static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
262 createFastWWMRegisterAllocator);
263
265 return Phase == ThinOrFullLTOPhase::FullLTOPreLink ||
266 Phase == ThinOrFullLTOPhase::ThinLTOPreLink;
267}
268} // anonymous namespace
269
270static cl::opt<bool>
272 cl::desc("Run early if-conversion"),
273 cl::init(false));
274
275static cl::opt<bool>
276OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
277 cl::desc("Run pre-RA exec mask optimizations"),
278 cl::init(true));
279
280static cl::opt<bool>
281 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
282 cl::desc("Lower GPU ctor / dtors to globals on the device."),
283 cl::init(true), cl::Hidden);
284
285// Option to disable vectorizer for tests.
287 "amdgpu-load-store-vectorizer",
288 cl::desc("Enable load store vectorizer"),
289 cl::init(true),
290 cl::Hidden);
291
292// Option to control global loads scalarization
294 "amdgpu-scalarize-global-loads",
295 cl::desc("Enable global load scalarization"),
296 cl::init(true),
297 cl::Hidden);
298
299// Option to run internalize pass.
301 "amdgpu-internalize-symbols",
302 cl::desc("Enable elimination of non-kernel functions and unused globals"),
303 cl::init(false),
304 cl::Hidden);
305
306// Option to inline all early.
308 "amdgpu-early-inline-all",
309 cl::desc("Inline all functions early"),
310 cl::init(false),
311 cl::Hidden);
312
314 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
315 cl::desc("Enable removal of functions when they"
316 "use features not supported by the target GPU"),
317 cl::init(true));
318
320 "amdgpu-sdwa-peephole",
321 cl::desc("Enable SDWA peepholer"),
322 cl::init(true));
323
325 "amdgpu-dpp-combine",
326 cl::desc("Enable DPP combiner"),
327 cl::init(true));
328
329// Enable address space based alias analysis
331 cl::desc("Enable AMDGPU Alias Analysis"),
332 cl::init(true));
333
334// Enable lib calls simplifications
336 "amdgpu-simplify-libcall",
337 cl::desc("Enable amdgpu library simplifications"),
338 cl::init(true),
339 cl::Hidden);
340
342 "amdgpu-ir-lower-kernel-arguments",
343 cl::desc("Lower kernel argument loads in IR pass"),
344 cl::init(true),
345 cl::Hidden);
346
348 "amdgpu-reassign-regs",
349 cl::desc("Enable register reassign optimizations on gfx10+"),
350 cl::init(true),
351 cl::Hidden);
352
354 "amdgpu-opt-vgpr-liverange",
355 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
356 cl::init(true), cl::Hidden);
357
359 "amdgpu-atomic-optimizer-strategy",
360 cl::desc("Select DPP or Iterative strategy for scan"),
361 cl::init(ScanOptions::Iterative),
363 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
364 clEnumValN(ScanOptions::Iterative, "Iterative",
365 "Use Iterative approach for scan"),
366 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
367
368// Enable Mode register optimization
370 "amdgpu-mode-register",
371 cl::desc("Enable mode register pass"),
372 cl::init(true),
373 cl::Hidden);
374
375// Enable GFX11+ s_delay_alu insertion
376static cl::opt<bool>
377 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
378 cl::desc("Enable s_delay_alu insertion"),
379 cl::init(true), cl::Hidden);
380
381// Enable GFX11+ VOPD
382static cl::opt<bool>
383 EnableVOPD("amdgpu-enable-vopd",
384 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
385 cl::init(true), cl::Hidden);
386
387// Option is used in lit tests to prevent deadcoding of patterns inspected.
388static cl::opt<bool>
389EnableDCEInRA("amdgpu-dce-in-ra",
390 cl::init(true), cl::Hidden,
391 cl::desc("Enable machine DCE inside regalloc"));
392
393static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
394 cl::desc("Adjust wave priority"),
395 cl::init(false), cl::Hidden);
396
398 "amdgpu-scalar-ir-passes",
399 cl::desc("Enable scalar IR passes"),
400 cl::init(true),
401 cl::Hidden);
402
403static cl::opt<bool>
404 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
405 cl::desc("Enable lowering of lds to global memory pass "
406 "and asan instrument resulting IR."),
407 cl::init(true), cl::Hidden);
408
410 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
412 cl::Hidden);
413
415 "amdgpu-enable-pre-ra-optimizations",
416 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
417 cl::Hidden);
418
420 "amdgpu-enable-promote-kernel-arguments",
421 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
422 cl::Hidden, cl::init(true));
423
425 "amdgpu-enable-image-intrinsic-optimizer",
426 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
427 cl::Hidden);
428
429static cl::opt<bool>
430 EnableLoopPrefetch("amdgpu-loop-prefetch",
431 cl::desc("Enable loop data prefetch on AMDGPU"),
432 cl::Hidden, cl::init(false));
433
435 AMDGPUSchedStrategy("amdgpu-sched-strategy",
436 cl::desc("Select custom AMDGPU scheduling strategy."),
437 cl::Hidden, cl::init(""));
438
440 "amdgpu-enable-rewrite-partial-reg-uses",
441 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
442 cl::Hidden);
443
445 "amdgpu-enable-hipstdpar",
446 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
447 cl::Hidden);
448
449static cl::opt<bool>
450 EnableAMDGPUAttributor("amdgpu-attributor-enable",
451 cl::desc("Enable AMDGPUAttributorPass"),
452 cl::init(true), cl::Hidden);
453
455 "new-reg-bank-select",
456 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
457 "regbankselect"),
458 cl::init(false), cl::Hidden);
459
461 "amdgpu-link-time-closed-world",
462 cl::desc("Whether has closed-world assumption at link time"),
463 cl::init(false), cl::Hidden);
464
466 // Register the target
469
547}
548
549static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
550 return std::make_unique<AMDGPUTargetObjectFile>();
551}
552
554 return new SIScheduleDAGMI(C);
555}
556
557static ScheduleDAGInstrs *
559 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
560 ScheduleDAGMILive *DAG =
561 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
562 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
563 if (ST.shouldClusterStores())
564 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
565 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
566 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
567 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
568 return DAG;
569}
570
571static ScheduleDAGInstrs *
573 ScheduleDAGMILive *DAG =
574 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
575 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
576 return DAG;
577}
578
579static ScheduleDAGInstrs *
581 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
583 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
584 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
585 if (ST.shouldClusterStores())
586 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
587 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
588 return DAG;
589}
590
591static ScheduleDAGInstrs *
593 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
594 auto *DAG = new GCNIterativeScheduler(
596 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
597 if (ST.shouldClusterStores())
598 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
599 return DAG;
600}
601
603 return new GCNIterativeScheduler(C,
605}
606
607static ScheduleDAGInstrs *
609 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
611 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
612 if (ST.shouldClusterStores())
613 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
614 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
615 return DAG;
616}
617
619SISchedRegistry("si", "Run SI's custom scheduler",
621
624 "Run GCN scheduler to maximize occupancy",
626
628 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
630
632 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
634
636 "gcn-iterative-max-occupancy-experimental",
637 "Run GCN scheduler to maximize occupancy (experimental)",
639
641 "gcn-iterative-minreg",
642 "Run GCN iterative scheduler for minimal register usage (experimental)",
644
646 "gcn-iterative-ilp",
647 "Run GCN iterative scheduler for ILP scheduling (experimental)",
649
651 if (TT.getArch() == Triple::r600) {
652 // 32-bit pointers.
653 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
654 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
655 }
656
657 // 32-bit private, local, and region pointers. 64-bit global, constant and
658 // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
659 // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
660 // (address space 7), and 128-bit non-integral buffer resourcees (address
661 // space 8) which cannot be non-trivilally accessed by LLVM memory operations
662 // like getelementptr.
663 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
664 "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
665 "v32:32-v48:64-v96:"
666 "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
667 "G1-ni:7:8:9";
668}
669
672 if (!GPU.empty())
673 return GPU;
674
675 // Need to default to a target with flat support for HSA.
676 if (TT.getArch() == Triple::amdgcn)
677 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
678
679 return "r600";
680}
681
682static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
683 // The AMDGPU toolchain only supports generating shared objects, so we
684 // must always use PIC.
685 return Reloc::PIC_;
686}
687
689 StringRef CPU, StringRef FS,
690 const TargetOptions &Options,
691 std::optional<Reloc::Model> RM,
692 std::optional<CodeModel::Model> CM,
693 CodeGenOptLevel OptLevel)
695 T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
697 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
698 TLOF(createTLOF(getTargetTriple())) {
699 initAsmInfo();
700 if (TT.getArch() == Triple::amdgcn) {
701 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
703 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
705 }
706}
707
710
712
714 Attribute GPUAttr = F.getFnAttribute("target-cpu");
715 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
716}
717
719 Attribute FSAttr = F.getFnAttribute("target-features");
720
721 return FSAttr.isValid() ? FSAttr.getValueAsString()
723}
724
725/// Predicate for Internalize pass.
726static bool mustPreserveGV(const GlobalValue &GV) {
727 if (const Function *F = dyn_cast<Function>(&GV))
728 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
729 F->getName().starts_with("__sanitizer_") ||
730 AMDGPU::isEntryFunctionCC(F->getCallingConv());
731
733 return !GV.use_empty();
734}
735
738}
739
742 if (Params.empty())
744 Params.consume_front("strategy=");
745 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
746 .Case("dpp", ScanOptions::DPP)
747 .Cases("iterative", "", ScanOptions::Iterative)
748 .Case("none", ScanOptions::None)
749 .Default(std::nullopt);
750 if (Result)
751 return *Result;
752 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
753}
754
758 while (!Params.empty()) {
759 StringRef ParamName;
760 std::tie(ParamName, Params) = Params.split(';');
761 if (ParamName == "closed-world") {
762 Result.IsClosedWorld = true;
763 } else {
764 return make_error<StringError>(
765 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
766 .str(),
768 }
769 }
770 return Result;
771}
772
774
775#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
777
779 [](ModulePassManager &PM, OptimizationLevel Level) {
780 if (EnableHipStdPar)
782 });
783
788
789 if (Level == OptimizationLevel::O0)
790 return;
791
793
794 // We don't want to run internalization at per-module stage.
798 }
799
802 });
803
805 [](FunctionPassManager &FPM, OptimizationLevel Level) {
806 if (Level == OptimizationLevel::O0)
807 return;
808
812 });
813
815 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
816 if (Level == OptimizationLevel::O0)
817 return;
818
820
821 // Add promote kernel arguments pass to the opt pipeline right before
822 // infer address spaces which is needed to do actual address space
823 // rewriting.
824 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
827
828 // Add infer address spaces pass to the opt pipeline after inlining
829 // but before SROA to increase SROA opportunities.
831
832 // This should run after inlining to have any chance of doing
833 // anything, and before other cleanup optimizations.
835
836 if (Level != OptimizationLevel::O0) {
837 // Promote alloca to vector before SROA and loop unroll. If we
838 // manage to eliminate allocas before unroll we may choose to unroll
839 // less.
841 }
842
843 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
844 });
845
846 // FIXME: Why is AMDGPUAttributor not in CGSCC?
848 OptimizationLevel Level,
850 if (Level != OptimizationLevel::O0) {
851 if (!isLTOPreLink(Phase))
852 MPM.addPass(AMDGPUAttributorPass(*this));
853 }
854 });
855
857 [this](ModulePassManager &PM, OptimizationLevel Level) {
858 // We want to support the -lto-partitions=N option as "best effort".
859 // For that, we need to lower LDS earlier in the pipeline before the
860 // module is partitioned for codegen.
862 PM.addPass(AMDGPUSwLowerLDSPass(*this));
865 if (Level != OptimizationLevel::O0) {
866 // Do we really need internalization in LTO?
867 if (InternalizeSymbols) {
870 }
874 Opt.IsClosedWorld = true;
875 PM.addPass(AMDGPUAttributorPass(*this, Opt));
876 }
877 }
878 });
879
881 [](StringRef FilterName) -> RegAllocFilterFunc {
882 if (FilterName == "sgpr")
883 return onlyAllocateSGPRs;
884 if (FilterName == "vgpr")
885 return onlyAllocateVGPRs;
886 if (FilterName == "wwm")
887 return onlyAllocateWWMRegs;
888 return nullptr;
889 });
890}
891
892int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
893 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
894 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
895 AddrSpace == AMDGPUAS::REGION_ADDRESS)
896 ? -1
897 : 0;
898}
899
901 unsigned DestAS) const {
902 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
904}
905
907 const auto *LD = dyn_cast<LoadInst>(V);
908 if (!LD) // TODO: Handle invariant load like constant.
910
911 // It must be a generic pointer loaded.
912 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
913
914 const auto *Ptr = LD->getPointerOperand();
915 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
917 // For a generic pointer loaded from the constant memory, it could be assumed
918 // as a global pointer since the constant memory is only populated on the
919 // host side. As implied by the offload programming model, only global
920 // pointers could be referenced on the host side.
922}
923
924std::pair<const Value *, unsigned>
926 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
927 switch (II->getIntrinsicID()) {
928 case Intrinsic::amdgcn_is_shared:
929 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
930 case Intrinsic::amdgcn_is_private:
931 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
932 default:
933 break;
934 }
935 return std::pair(nullptr, -1);
936 }
937 // Check the global pointer predication based on
938 // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
939 // the order of 'is_shared' and 'is_private' is not significant.
940 Value *Ptr;
941 if (match(
942 const_cast<Value *>(V),
943 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
944 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
945 m_Deferred(Ptr))))))
946 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
947
948 return std::pair(nullptr, -1);
949}
950
951unsigned
953 switch (Kind) {
963 }
965}
966
968 Module &M, unsigned NumParts,
969 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
970 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
971 // but all current users of this API don't have one ready and would need to
972 // create one anyway. Let's hide the boilerplate for now to keep it simple.
973
978
979 PassBuilder PB(this);
983
985 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
986 MPM.run(M, MAM);
987 return true;
988}
989
990//===----------------------------------------------------------------------===//
991// GCN Target Machine (SI+)
992//===----------------------------------------------------------------------===//
993
995 StringRef CPU, StringRef FS,
996 const TargetOptions &Options,
997 std::optional<Reloc::Model> RM,
998 std::optional<CodeModel::Model> CM,
999 CodeGenOptLevel OL, bool JIT)
1000 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1001
1002const TargetSubtargetInfo *
1004 StringRef GPU = getGPUName(F);
1006
1007 SmallString<128> SubtargetKey(GPU);
1008 SubtargetKey.append(FS);
1009
1010 auto &I = SubtargetMap[SubtargetKey];
1011 if (!I) {
1012 // This needs to be done before we create a new subtarget since any
1013 // creation will depend on the TM and the code generation flags on the
1014 // function that reside in TargetOptions.
1016 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1017 }
1018
1019 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1020
1021 return I.get();
1022}
1023
1026 return TargetTransformInfo(GCNTTIImpl(this, F));
1027}
1028
1031 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1033 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1034 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1035}
1036
1037//===----------------------------------------------------------------------===//
1038// AMDGPU Legacy Pass Setup
1039//===----------------------------------------------------------------------===//
1040
1041std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1043}
1044
1045namespace {
1046
1047class GCNPassConfig final : public AMDGPUPassConfig {
1048public:
1049 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1050 : AMDGPUPassConfig(TM, PM) {
1051 // It is necessary to know the register usage of the entire call graph. We
1052 // allow calls without EnableAMDGPUFunctionCalls if they are marked
1053 // noinline, so this is always required.
1054 setRequiresCodeGenSCCOrder(true);
1055 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1056 }
1057
1058 GCNTargetMachine &getGCNTargetMachine() const {
1059 return getTM<GCNTargetMachine>();
1060 }
1061
1063 createMachineScheduler(MachineSchedContext *C) const override;
1064
1066 createPostMachineScheduler(MachineSchedContext *C) const override {
1068 C, std::make_unique<PostGenericScheduler>(C),
1069 /*RemoveKillFlags=*/true);
1070 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1072 if (ST.shouldClusterStores())
1074 DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
1075 DAG->addMutation(
1076 createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
1077 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1079 return DAG;
1080 }
1081
1082 bool addPreISel() override;
1083 void addMachineSSAOptimization() override;
1084 bool addILPOpts() override;
1085 bool addInstSelector() override;
1086 bool addIRTranslator() override;
1087 void addPreLegalizeMachineIR() override;
1088 bool addLegalizeMachineIR() override;
1089 void addPreRegBankSelect() override;
1090 bool addRegBankSelect() override;
1091 void addPreGlobalInstructionSelect() override;
1092 bool addGlobalInstructionSelect() override;
1093 void addFastRegAlloc() override;
1094 void addOptimizedRegAlloc() override;
1095
1096 FunctionPass *createSGPRAllocPass(bool Optimized);
1097 FunctionPass *createVGPRAllocPass(bool Optimized);
1098 FunctionPass *createWWMRegAllocPass(bool Optimized);
1099 FunctionPass *createRegAllocPass(bool Optimized) override;
1100
1101 bool addRegAssignAndRewriteFast() override;
1102 bool addRegAssignAndRewriteOptimized() override;
1103
1104 bool addPreRewrite() override;
1105 void addPostRegAlloc() override;
1106 void addPreSched2() override;
1107 void addPreEmitPass() override;
1108};
1109
1110} // end anonymous namespace
1111
1113 : TargetPassConfig(TM, PM) {
1114 // Exceptions and StackMaps are not supported, so these passes will never do
1115 // anything.
1118 // Garbage collection is not supported.
1121}
1122
1126 else
1128}
1129
1134 // ReassociateGEPs exposes more opportunities for SLSR. See
1135 // the example in reassociate-geps-and-slsr.ll.
1137 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1138 // EarlyCSE can reuse.
1140 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1142 // NaryReassociate on GEPs creates redundant common expressions, so run
1143 // EarlyCSE after it.
1145}
1146
1149
1153
1154 // There is no reason to run these.
1158
1160 if (LowerCtorDtor)
1162
1165
1166 // This can be disabled by passing ::Disable here or on the command line
1167 // with --expand-variadics-override=disable.
1169
1170 // Function calls are not supported, so make sure we inline everything.
1173
1174 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1175 if (Arch == Triple::r600)
1177
1178 // Replace OpenCL enqueued block function pointers with global variables.
1180
1181 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1182 if (EnableSwLowerLDS)
1184
1185 // Runs before PromoteAlloca so the latter can account for function uses
1188 }
1189
1192
1193 // Run atomic optimizer before Atomic Expand
1198 }
1199
1201
1204
1207
1211 AAResults &AAR) {
1212 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1213 AAR.addAAResult(WrapperPass->getResult());
1214 }));
1215 }
1216
1218 // TODO: May want to move later or split into an early and late one.
1220 }
1221
1222 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1223 // have expanded.
1226 }
1227
1229
1230 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1231 // example, GVN can combine
1232 //
1233 // %0 = add %a, %b
1234 // %1 = add %b, %a
1235 //
1236 // and
1237 //
1238 // %0 = shl nsw %a, 2
1239 // %1 = shl %a, 2
1240 //
1241 // but EarlyCSE can do neither of them.
1244}
1245
1248 // FIXME: This pass adds 2 hacky attributes that can be replaced with an
1249 // analysis, and should be removed.
1251 }
1252
1256
1258 // This lowering has been placed after codegenprepare to take advantage of
1259 // address mode matching (which is why it isn't put with the LDS lowerings).
1260 // It could be placed anywhere before uniformity annotations (an analysis
1261 // that it changes by splitting up fat pointers into their components)
1262 // but has been put before switch lowering and CFG flattening so that those
1263 // passes can run on the more optimized control flow this pass creates in
1264 // many cases.
1265 //
1266 // FIXME: This should ideally be put after the LoadStoreVectorizer.
1267 // However, due to some annoying facts about ResourceUsageAnalysis,
1268 // (especially as exercised in the resource-usage-dead-function test),
1269 // we need all the function passes codegenprepare all the way through
1270 // said resource usage analysis to run on the call graph produced
1271 // before codegenprepare runs (because codegenprepare will knock some
1272 // nodes out of the graph, which leads to function-level passes not
1273 // being run on them, which causes crashes in the resource usage analysis).
1275 // In accordance with the above FIXME, manually force all the
1276 // function-level passes into a CGSCCPassManager.
1277 addPass(new DummyCGSCCPass());
1278 }
1279
1281
1284
1285 // LowerSwitch pass may introduce unreachable blocks that can
1286 // cause unexpected behavior for subsequent passes. Placing it
1287 // here seems better that these blocks would get cleaned up by
1288 // UnreachableBlockElim inserted next in the pass flow.
1290}
1291
1295 return false;
1296}
1297
1300 return false;
1301}
1302
1304 // Do nothing. GC is not supported.
1305 return false;
1306}
1307
1310 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1312 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1313 if (ST.shouldClusterStores())
1314 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1315 return DAG;
1316}
1317
1318//===----------------------------------------------------------------------===//
1319// GCN Legacy Pass Setup
1320//===----------------------------------------------------------------------===//
1321
1322ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1323 MachineSchedContext *C) const {
1324 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1325 if (ST.enableSIScheduler())
1327
1328 Attribute SchedStrategyAttr =
1329 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1330 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1331 ? SchedStrategyAttr.getValueAsString()
1333
1334 if (SchedStrategy == "max-ilp")
1336
1337 if (SchedStrategy == "max-memory-clause")
1339
1341}
1342
1343bool GCNPassConfig::addPreISel() {
1345
1346 if (TM->getOptLevel() > CodeGenOptLevel::None)
1347 addPass(createSinkingPass());
1348
1349 if (TM->getOptLevel() > CodeGenOptLevel::None)
1351
1352 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1353 // regions formed by them.
1355 addPass(createFixIrreduciblePass());
1356 addPass(createUnifyLoopExitsPass());
1357 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1358
1361 // TODO: Move this right after structurizeCFG to avoid extra divergence
1362 // analysis. This depends on stopping SIAnnotateControlFlow from making
1363 // control flow modifications.
1365
1366 addPass(createLCSSAPass());
1367
1368 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1370
1371 return false;
1372}
1373
1374void GCNPassConfig::addMachineSSAOptimization() {
1376
1377 // We want to fold operands after PeepholeOptimizer has run (or as part of
1378 // it), because it will eliminate extra copies making it easier to fold the
1379 // real source operand. We want to eliminate dead instructions after, so that
1380 // we see fewer uses of the copies. We then need to clean up the dead
1381 // instructions leftover after the operands are folded as well.
1382 //
1383 // XXX - Can we get away without running DeadMachineInstructionElim again?
1384 addPass(&SIFoldOperandsLegacyID);
1385 if (EnableDPPCombine)
1386 addPass(&GCNDPPCombineLegacyID);
1388 if (isPassEnabled(EnableSDWAPeephole)) {
1389 addPass(&SIPeepholeSDWALegacyID);
1390 addPass(&EarlyMachineLICMID);
1391 addPass(&MachineCSELegacyID);
1392 addPass(&SIFoldOperandsLegacyID);
1393 }
1396}
1397
1398bool GCNPassConfig::addILPOpts() {
1400 addPass(&EarlyIfConverterLegacyID);
1401
1403 return false;
1404}
1405
1406bool GCNPassConfig::addInstSelector() {
1408 addPass(&SIFixSGPRCopiesLegacyID);
1410 return false;
1411}
1412
1413bool GCNPassConfig::addIRTranslator() {
1414 addPass(new IRTranslator(getOptLevel()));
1415 return false;
1416}
1417
1418void GCNPassConfig::addPreLegalizeMachineIR() {
1419 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1420 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1421 addPass(new Localizer());
1422}
1423
1424bool GCNPassConfig::addLegalizeMachineIR() {
1425 addPass(new Legalizer());
1426 return false;
1427}
1428
1429void GCNPassConfig::addPreRegBankSelect() {
1430 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1431 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1433}
1434
1435bool GCNPassConfig::addRegBankSelect() {
1436 if (NewRegBankSelect) {
1439 } else {
1440 addPass(new RegBankSelect());
1441 }
1442 return false;
1443}
1444
1445void GCNPassConfig::addPreGlobalInstructionSelect() {
1446 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1447 addPass(createAMDGPURegBankCombiner(IsOptNone));
1448}
1449
1450bool GCNPassConfig::addGlobalInstructionSelect() {
1451 addPass(new InstructionSelect(getOptLevel()));
1452 return false;
1453}
1454
1455void GCNPassConfig::addFastRegAlloc() {
1456 // FIXME: We have to disable the verifier here because of PHIElimination +
1457 // TwoAddressInstructions disabling it.
1458
1459 // This must be run immediately after phi elimination and before
1460 // TwoAddressInstructions, otherwise the processing of the tied operand of
1461 // SI_ELSE will introduce a copy of the tied operand source after the else.
1463
1465
1467}
1468
1469void GCNPassConfig::addOptimizedRegAlloc() {
1470 if (EnableDCEInRA)
1472
1473 // FIXME: when an instruction has a Killed operand, and the instruction is
1474 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1475 // the register in LiveVariables, this would trigger a failure in verifier,
1476 // we should fix it and enable the verifier.
1477 if (OptVGPRLiveRange)
1479
1480 // This must be run immediately after phi elimination and before
1481 // TwoAddressInstructions, otherwise the processing of the tied operand of
1482 // SI_ELSE will introduce a copy of the tied operand source after the else.
1484
1487
1488 if (isPassEnabled(EnablePreRAOptimizations))
1490
1491 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1492 // instructions that cause scheduling barriers.
1494
1495 if (OptExecMaskPreRA)
1497
1498 // This is not an essential optimization and it has a noticeable impact on
1499 // compilation time, so we only enable it from O2.
1500 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1502
1504}
1505
1506bool GCNPassConfig::addPreRewrite() {
1508 addPass(&GCNNSAReassignID);
1509 return true;
1510}
1511
1512FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1513 // Initialize the global default.
1514 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1515 initializeDefaultSGPRRegisterAllocatorOnce);
1516
1517 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1518 if (Ctor != useDefaultRegisterAllocator)
1519 return Ctor();
1520
1521 if (Optimized)
1522 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1523
1524 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1525}
1526
1527FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1528 // Initialize the global default.
1529 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1530 initializeDefaultVGPRRegisterAllocatorOnce);
1531
1532 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1533 if (Ctor != useDefaultRegisterAllocator)
1534 return Ctor();
1535
1536 if (Optimized)
1537 return createGreedyVGPRRegisterAllocator();
1538
1539 return createFastVGPRRegisterAllocator();
1540}
1541
1542FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1543 // Initialize the global default.
1544 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1545 initializeDefaultWWMRegisterAllocatorOnce);
1546
1547 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1548 if (Ctor != useDefaultRegisterAllocator)
1549 return Ctor();
1550
1551 if (Optimized)
1552 return createGreedyWWMRegisterAllocator();
1553
1554 return createFastWWMRegisterAllocator();
1555}
1556
1557FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1558 llvm_unreachable("should not be used");
1559}
1560
1562 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1563 "and -vgpr-regalloc";
1564
1565bool GCNPassConfig::addRegAssignAndRewriteFast() {
1566 if (!usingDefaultRegAlloc())
1568
1569 addPass(&GCNPreRALongBranchRegID);
1570
1571 addPass(createSGPRAllocPass(false));
1572
1573 // Equivalent of PEI for SGPRs.
1574 addPass(&SILowerSGPRSpillsLegacyID);
1575
1576 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1578
1579 // For allocating other wwm register operands.
1580 addPass(createWWMRegAllocPass(false));
1581
1582 addPass(&SILowerWWMCopiesID);
1583 addPass(&AMDGPUReserveWWMRegsID);
1584
1585 // For allocating per-thread VGPRs.
1586 addPass(createVGPRAllocPass(false));
1587
1588 return true;
1589}
1590
1591bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1592 if (!usingDefaultRegAlloc())
1594
1595 addPass(&GCNPreRALongBranchRegID);
1596
1597 addPass(createSGPRAllocPass(true));
1598
1599 // Commit allocated register changes. This is mostly necessary because too
1600 // many things rely on the use lists of the physical registers, such as the
1601 // verifier. This is only necessary with allocators which use LiveIntervals,
1602 // since FastRegAlloc does the replacements itself.
1603 addPass(createVirtRegRewriter(false));
1604
1605 // At this point, the sgpr-regalloc has been done and it is good to have the
1606 // stack slot coloring to try to optimize the SGPR spill stack indices before
1607 // attempting the custom SGPR spill lowering.
1608 addPass(&StackSlotColoringID);
1609
1610 // Equivalent of PEI for SGPRs.
1611 addPass(&SILowerSGPRSpillsLegacyID);
1612
1613 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1615
1616 // For allocating other whole wave mode registers.
1617 addPass(createWWMRegAllocPass(true));
1618 addPass(&SILowerWWMCopiesID);
1619 addPass(createVirtRegRewriter(false));
1620 addPass(&AMDGPUReserveWWMRegsID);
1621
1622 // For allocating per-thread VGPRs.
1623 addPass(createVGPRAllocPass(true));
1624
1625 addPreRewrite();
1626 addPass(&VirtRegRewriterID);
1627
1629
1630 return true;
1631}
1632
1633void GCNPassConfig::addPostRegAlloc() {
1634 addPass(&SIFixVGPRCopiesID);
1635 if (getOptLevel() > CodeGenOptLevel::None)
1636 addPass(&SIOptimizeExecMaskingID);
1638}
1639
1640void GCNPassConfig::addPreSched2() {
1641 if (TM->getOptLevel() > CodeGenOptLevel::None)
1643 addPass(&SIPostRABundlerID);
1644}
1645
1646void GCNPassConfig::addPreEmitPass() {
1647 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1648 addPass(&GCNCreateVOPDID);
1649 addPass(createSIMemoryLegalizerPass());
1650 addPass(createSIInsertWaitcntsPass());
1651
1652 addPass(createSIModeRegisterPass());
1653
1654 if (getOptLevel() > CodeGenOptLevel::None)
1655 addPass(&SIInsertHardClausesID);
1656
1658 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1660 if (getOptLevel() > CodeGenOptLevel::None)
1661 addPass(&SIPreEmitPeepholeID);
1662 // The hazard recognizer that runs as part of the post-ra scheduler does not
1663 // guarantee to be able handle all hazards correctly. This is because if there
1664 // are multiple scheduling regions in a basic block, the regions are scheduled
1665 // bottom up, so when we begin to schedule a region we don't know what
1666 // instructions were emitted directly before it.
1667 //
1668 // Here we add a stand-alone hazard recognizer pass which can handle all
1669 // cases.
1670 addPass(&PostRAHazardRecognizerID);
1671
1672 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1673 addPass(&AMDGPUInsertDelayAluID);
1674
1675 addPass(&BranchRelaxationPassID);
1677}
1678
1680 return new GCNPassConfig(*this, PM);
1681}
1682
1684 MachineFunction &MF) const {
1686 MF.getRegInfo().addDelegate(MFI);
1687}
1688
1690 BumpPtrAllocator &Allocator, const Function &F,
1691 const TargetSubtargetInfo *STI) const {
1692 return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
1693 Allocator, F, static_cast<const GCNSubtarget *>(STI));
1694}
1695
1697 return new yaml::SIMachineFunctionInfo();
1698}
1699
1703 return new yaml::SIMachineFunctionInfo(
1704 *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
1705}
1706
1709 SMDiagnostic &Error, SMRange &SourceRange) const {
1710 const yaml::SIMachineFunctionInfo &YamlMFI =
1711 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1712 MachineFunction &MF = PFS.MF;
1714 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1715
1716 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1717 return true;
1718
1719 if (MFI->Occupancy == 0) {
1720 // Fixup the subtarget dependent default value.
1721 MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
1722 }
1723
1724 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1725 Register TempReg;
1726 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1727 SourceRange = RegName.SourceRange;
1728 return true;
1729 }
1730 RegVal = TempReg;
1731
1732 return false;
1733 };
1734
1735 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1736 Register &RegVal) {
1737 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1738 };
1739
1740 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1741 return true;
1742
1743 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1744 return true;
1745
1746 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1747 MFI->LongBranchReservedReg))
1748 return true;
1749
1750 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1751 // Create a diagnostic for a the register string literal.
1752 const MemoryBuffer &Buffer =
1753 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1754 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1755 RegName.Value.size(), SourceMgr::DK_Error,
1756 "incorrect register class for field", RegName.Value,
1757 {}, {});
1758 SourceRange = RegName.SourceRange;
1759 return true;
1760 };
1761
1762 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1763 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1764 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1765 return true;
1766
1767 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1768 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1769 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1770 }
1771
1772 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1773 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1774 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1775 }
1776
1777 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1778 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1779 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1780 }
1781
1782 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1783 Register ParsedReg;
1784 if (parseRegister(YamlReg, ParsedReg))
1785 return true;
1786
1787 MFI->reserveWWMRegister(ParsedReg);
1788 }
1789
1790 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1791 MFI->setFlag(Info->VReg, Info->Flags);
1792 }
1793 for (const auto &[_, Info] : PFS.VRegInfos) {
1794 MFI->setFlag(Info->VReg, Info->Flags);
1795 }
1796
1797 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1798 Register ParsedReg;
1799 if (parseRegister(YamlRegStr, ParsedReg))
1800 return true;
1801 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1802 }
1803
1804 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1805 const TargetRegisterClass &RC,
1806 ArgDescriptor &Arg, unsigned UserSGPRs,
1807 unsigned SystemSGPRs) {
1808 // Skip parsing if it's not present.
1809 if (!A)
1810 return false;
1811
1812 if (A->IsRegister) {
1813 Register Reg;
1814 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1815 SourceRange = A->RegisterName.SourceRange;
1816 return true;
1817 }
1818 if (!RC.contains(Reg))
1819 return diagnoseRegisterClass(A->RegisterName);
1821 } else
1822 Arg = ArgDescriptor::createStack(A->StackOffset);
1823 // Check and apply the optional mask.
1824 if (A->Mask)
1825 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1826
1827 MFI->NumUserSGPRs += UserSGPRs;
1828 MFI->NumSystemSGPRs += SystemSGPRs;
1829 return false;
1830 };
1831
1832 if (YamlMFI.ArgInfo &&
1833 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1834 AMDGPU::SGPR_128RegClass,
1835 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1836 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1837 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1838 2, 0) ||
1839 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1840 MFI->ArgInfo.QueuePtr, 2, 0) ||
1841 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1842 AMDGPU::SReg_64RegClass,
1843 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1844 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1845 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1846 2, 0) ||
1847 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1848 AMDGPU::SReg_64RegClass,
1849 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1850 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1851 AMDGPU::SGPR_32RegClass,
1852 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1853 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1854 AMDGPU::SGPR_32RegClass,
1855 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1856 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1857 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1858 0, 1) ||
1859 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1860 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1861 0, 1) ||
1862 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
1863 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
1864 0, 1) ||
1865 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
1866 AMDGPU::SGPR_32RegClass,
1867 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
1868 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
1869 AMDGPU::SGPR_32RegClass,
1870 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
1871 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
1872 AMDGPU::SReg_64RegClass,
1873 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
1874 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
1875 AMDGPU::SReg_64RegClass,
1876 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
1877 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
1878 AMDGPU::VGPR_32RegClass,
1879 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
1880 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
1881 AMDGPU::VGPR_32RegClass,
1882 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
1883 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
1884 AMDGPU::VGPR_32RegClass,
1885 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
1886 return true;
1887
1888 if (ST.hasIEEEMode())
1889 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
1890 if (ST.hasDX10ClampMode())
1891 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
1892
1893 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
1894 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
1897 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
1900
1907
1908 if (YamlMFI.HasInitWholeWave)
1909 MFI->setInitWholeWave();
1910
1911 return false;
1912}
1913
1914//===----------------------------------------------------------------------===//
1915// AMDGPU CodeGen Pass Builder interface.
1916//===----------------------------------------------------------------------===//
1917
1919 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
1921 : CodeGenPassBuilder(TM, Opts, PIC) {
1923 // Exceptions and StackMaps are not supported, so these passes will never do
1924 // anything.
1925 // Garbage collection is not supported.
1926 disablePass<StackMapLivenessPass, FuncletLayoutPass,
1928}
1929
1930void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
1933
1935 if (LowerCtorDtor)
1936 addPass(AMDGPUCtorDtorLoweringPass());
1937
1940
1941 // This can be disabled by passing ::Disable here or on the command line
1942 // with --expand-variadics-override=disable.
1944
1945 addPass(AMDGPUAlwaysInlinePass());
1946 addPass(AlwaysInlinerPass());
1947
1949
1950 // Runs before PromoteAlloca so the latter can account for function uses
1952 addPass(AMDGPULowerModuleLDSPass(TM));
1953
1955 addPass(InferAddressSpacesPass());
1956
1957 // Run atomic optimizer before Atomic Expand
1961
1962 addPass(AtomicExpandPass(&TM));
1963
1965 addPass(AMDGPUPromoteAllocaPass(TM));
1968
1969 // TODO: Handle EnableAMDGPUAliasAnalysis
1970
1971 // TODO: May want to move later or split into an early and late one.
1972 addPass(AMDGPUCodeGenPreparePass(TM));
1973
1974 // TODO: LICM
1975 }
1976
1977 Base::addIRPasses(addPass);
1978
1979 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1980 // example, GVN can combine
1981 //
1982 // %0 = add %a, %b
1983 // %1 = add %b, %a
1984 //
1985 // and
1986 //
1987 // %0 = shl nsw %a, 2
1988 // %1 = shl %a, 2
1989 //
1990 // but EarlyCSE can do neither of them.
1992 addEarlyCSEOrGVNPass(addPass);
1993}
1994
1995void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
1996 // AMDGPUAnnotateKernelFeaturesPass is missing here, but it will hopefully be
1997 // deleted soon.
1998
2001
2002 // This lowering has been placed after codegenprepare to take advantage of
2003 // address mode matching (which is why it isn't put with the LDS lowerings).
2004 // It could be placed anywhere before uniformity annotations (an analysis
2005 // that it changes by splitting up fat pointers into their components)
2006 // but has been put before switch lowering and CFG flattening so that those
2007 // passes can run on the more optimized control flow this pass creates in
2008 // many cases.
2009 //
2010 // FIXME: This should ideally be put after the LoadStoreVectorizer.
2011 // However, due to some annoying facts about ResourceUsageAnalysis,
2012 // (especially as exercised in the resource-usage-dead-function test),
2013 // we need all the function passes codegenprepare all the way through
2014 // said resource usage analysis to run on the call graph produced
2015 // before codegenprepare runs (because codegenprepare will knock some
2016 // nodes out of the graph, which leads to function-level passes not
2017 // being run on them, which causes crashes in the resource usage analysis).
2019
2020 Base::addCodeGenPrepare(addPass);
2021
2023 addPass(LoadStoreVectorizerPass());
2024
2025 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
2026 // behavior for subsequent passes. Placing it here seems better that these
2027 // blocks would get cleaned up by UnreachableBlockElim inserted next in the
2028 // pass flow.
2029 addPass(LowerSwitchPass());
2030}
2031
2032void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2033
2035 addPass(FlattenCFGPass());
2036
2038 addPass(SinkingPass());
2039
2041
2042 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2043 // regions formed by them.
2044
2046 addPass(FixIrreduciblePass());
2047 addPass(UnifyLoopExitsPass());
2048 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2049
2051
2052 addPass(SIAnnotateControlFlowPass(TM));
2053
2054 // TODO: Move this right after structurizeCFG to avoid extra divergence
2055 // analysis. This depends on stopping SIAnnotateControlFlow from making
2056 // control flow modifications.
2058
2059 addPass(LCSSAPass());
2060
2063
2064 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2065 // isn't this in addInstSelector?
2067}
2068
2069void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2071 addPass(EarlyIfConverterPass());
2072
2073 Base::addILPOpts(addPass);
2074}
2075
2076void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2077 CreateMCStreamer) const {
2078 // TODO: Add AsmPrinter.
2079}
2080
2081Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2082 addPass(AMDGPUISelDAGToDAGPass(TM));
2083 addPass(SIFixSGPRCopiesPass());
2084 addPass(SILowerI1CopiesPass());
2085 return Error::success();
2086}
2087
2089 AddMachinePass &addPass) const {
2091
2092 addPass(SIFoldOperandsPass());
2093 if (EnableDPPCombine) {
2094 addPass(GCNDPPCombinePass());
2095 }
2096 addPass(SILoadStoreOptimizerPass());
2098 addPass(SIPeepholeSDWAPass());
2099 addPass(EarlyMachineLICMPass());
2100 addPass(MachineCSEPass());
2101 addPass(SIFoldOperandsPass());
2102 }
2104 addPass(SIShrinkInstructionsPass());
2105}
2106
2108 CodeGenOptLevel Level) const {
2109 if (Opt.getNumOccurrences())
2110 return Opt;
2111 if (TM.getOptLevel() < Level)
2112 return false;
2113 return Opt;
2114}
2115
2118 addPass(GVNPass());
2119 else
2120 addPass(EarlyCSEPass());
2121}
2122
2124 AddIRPass &addPass) const {
2126 addPass(LoopDataPrefetchPass());
2127
2129
2130 // ReassociateGEPs exposes more opportunities for SLSR. See
2131 // the example in reassociate-geps-and-slsr.ll.
2133
2134 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
2135 // EarlyCSE can reuse.
2136 addEarlyCSEOrGVNPass(addPass);
2137
2138 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2139 addPass(NaryReassociatePass());
2140
2141 // NaryReassociate on GEPs creates redundant common expressions, so run
2142 // EarlyCSE after it.
2143 addPass(EarlyCSEPass());
2144}
unsigned const MachineRegisterInfo * MRI
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry("gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause", createGCNMaxMemoryClauseMachineScheduler)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSwLowerLDS("amdgpu-enable-sw-lower-lds", cl::desc("Enable lowering of lds to global memory pass " "and asan instrument resulting IR."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > HasClosedWorldAssumption("amdgpu-link-time-closed-world", cl::desc("Whether has closed-world assumption at link time"), cl::init(false), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< std::string > AMDGPUSchedStrategy("amdgpu-sched-strategy", cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden, cl::init(""))
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:299
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:128
This file provides the interface for a simple, fast CSE pass.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Error addInstSelector(AddMachinePass &) const
void addMachineSSAOptimization(AddMachinePass &) const
void addEarlyCSEOrGVNPass(AddIRPass &) const
void addStraightLineScalarOptimizationPasses(AddIRPass &) const
AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC)
void addPreISel(AddIRPass &addPass) const
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
void addCodeGenPrepare(AddIRPass &) const
void addILPOpts(AddMachinePass &) const
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Definition: AlwaysInliner.h:32
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class provides access to building LLVM's passes.
void addILPOpts(AddMachinePass &) const
Add passes that optimize instruction level parallelism for out-of-order targets.
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void addMachineSSAOptimization(AddMachinePass &) const
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
void disablePass()
Allow the target to disable a specific pass by default.
void addIRPasses(AddIRPass &) const
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
implements a set of functionality in the TargetMachine class for targets that make use of the indepen...
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:739
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:337
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:291
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition: GVN.h:124
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:37
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:105
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:482
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:473
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:502
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:407
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:452
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:592
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:521
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
Definition: RegBankSelect.h:91
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:635
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:383
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition: Triple.h:868
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:903
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass(PassRegistry &)
void initializeSIInsertWaitcntsPass(PassRegistry &)
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition: Pass.h:76
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
char & GCNDPPCombineLegacyID
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeAMDGPUReserveWWMRegsPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
char & AMDGPUReserveWWMRegsID
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition: GVN.cpp:3374
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
FunctionPass * createAMDGPURegBankLegalizePass()
char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:164
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & SIPeepholeSDWALegacyID
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:250
char & SIFoldOperandsLegacyID
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:734
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
char & SIFixVGPRCopiesID
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1944
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3608
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
A simple and fast domtree-based CSE pass.
Definition: EarlyCSE.h:30
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
StringMap< VRegInfo * > VRegInfosNamed
Definition: MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition: MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:878
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.