//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains both the AMDGPU target machine and the CodeGen pass
/// builder. The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs in the legacy pass manager
/// pipeline. The CodeGen pass builder handles the pass pipeline for the new
/// pass manager.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPURegBankSelect.h"
#include "AMDGPUSplitModule.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIFixSGPRCopies.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;
namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));


static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);


static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // anonymous namespace
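
// Illustrative usage (not part of the original source): the SGPR and VGPR
// allocators registered above are selected independently on the llc command
// line, e.g.
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -vgpr-regalloc=fast foo.ll
// Leaving either option at "default" picks the allocator from the -O level.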

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
    "amdgpu-load-store-vectorizer",
    cl::desc("Enable load store vectorizer"),
    cl::init(true),
    cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
    "amdgpu-scalarize-global-loads",
    cl::desc("Enable global load scalarization"),
    cl::init(true),
    cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
    "amdgpu-internalize-symbols",
    cl::desc("Enable elimination of non-kernel functions and unused globals"),
    cl::init(false),
    cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
    "amdgpu-early-inline-all",
    cl::desc("Inline all functions early"),
    cl::init(false),
    cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
    "amdgpu-sdwa-peephole",
    cl::desc("Enable SDWA peepholer"),
    cl::init(true));

static cl::opt<bool> EnableDPPCombine(
    "amdgpu-dpp-combine",
    cl::desc("Enable DPP combiner"),
    cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
    cl::desc("Enable AMDGPU Alias Analysis"),
    cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
    "amdgpu-late-structurize",
    cl::desc("Enable late CFG structurization"),
    cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
    cl::Hidden);

// Disable structurizer-based control-flow lowering in order to test convergence
// control tokens. This should eventually be replaced by the wave-transform.
static cl::opt<bool, true> DisableStructurizer(
    "amdgpu-disable-structurizer",
    cl::desc("Disable structurizer for experiments; produces unusable code"),
    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
    "amdgpu-simplify-libcall",
    cl::desc("Enable amdgpu library simplifications"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
    "amdgpu-ir-lower-kernel-arguments",
    cl::desc("Lower kernel argument loads in IR pass"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableRegReassign(
    "amdgpu-reassign-regs",
    cl::desc("Enable register reassign optimizations on gfx10+"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
    "amdgpu-mode-register",
    cl::desc("Enable mode register pass"),
    cl::init(true),
    cl::Hidden);

// Enable GFX11.5+ s_singleuse_vdst insertion
static cl::opt<bool>
    EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
                              cl::desc("Enable s_singleuse_vdst insertion"),
                              cl::init(false), cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
    "amdgpu-scalar-ir-passes",
    cl::desc("Enable scalar IR passes"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool, true> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"),
    cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds),
    cl::init(true), cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<bool> EnableMaxIlpSchedStrategy(
    "amdgpu-enable-max-ilp-scheduling-strategy",
    cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
    "amdgpu-enable-hipstdpar",
    cl::desc("Enable HIP Standard Parallelism Offload support"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnableAMDGPUAttributor("amdgpu-attributor-enable",
                           cl::desc("Enable AMDGPUAttributorPass"),
                           cl::init(true), cl::Hidden);
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);

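// Illustrative usage (not part of the original source): each registry above is
// selectable by name through the generic machine-scheduler option, e.g.
//   llc -mtriple=amdgcn -misched=gcn-max-ilp foo.ll
// which overrides the scheduler the target would otherwise pick in
// createMachineScheduler().
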
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
         "v32:32-v48:64-v96:"
         "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
         "G1-ni:7:8:9";
}
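
// Illustrative reading of the layout string (not part of the original source):
// in IR it surfaces as, e.g.,
//   target datalayout = "e-p:64:64-...-p5:32:32-...-A5-G1-ni:7:8:9"
// where "p5:32:32" gives 32-bit private pointers, "A5" makes allocas return
// addrspace(5) pointers, "G1" defaults globals to addrspace(1), and
// "ni:7:8:9" marks the buffer address spaces as non-integral.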

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
bool AMDGPUTargetMachine::EnableStructurizerWorkarounds = true;
bool AMDGPUTargetMachine::DisableStructurizer = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
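
// Illustrative usage (not part of the original source): this parser services
// pass-pipeline strings of the form
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' foo.ll
// with "iterative", "none", or an empty parameter list as the alternatives.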

Expected<AMDGPUAttributorOptions>
parseAMDGPUAttributorPassOptions(StringRef Params) {
  AMDGPUAttributorOptions Result;
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
              .str(),
          inconvertibleErrorCode());
    }
  }
  return Result;
}
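
// Illustrative usage (not part of the original source): the only recognized
// parameter is the closed-world flag, e.g.
//   opt -passes='amdgpu-attributor<closed-world>' foo.ll
// Any other parameter name produces the error above.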

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        if (EnableHipStdPar)
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback(
      [this](ModulePassManager &MPM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0) {
          MPM.addPass(AMDGPUAttributorPass(*this));
        }
      });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
        if (EnableAMDGPUAttributor && Level != OptimizationLevel::O0)
          PM.addPass(AMDGPUAttributorPass(*this));
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        return nullptr;
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}
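
// Illustrative consequence (not part of the original source): because LDS,
// scratch, and region use the all-ones null sentinel, a cast such as
//   %p = addrspacecast ptr null to ptr addrspace(5)
// yields the -1 (0xFFFFFFFF) sentinel in addrspace(5), not a zero pointer.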

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}
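
// Illustrative IR (not part of the original source) that the matcher above
// recognizes:
//   %s   = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %pr  = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %ns  = xor i1 %s, true
//   %npr = xor i1 %pr, true
//   %g   = and i1 %ns, %npr      ; %g true implies %p is a global pointer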

unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerFunctionAnalyses(FAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}
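
// Illustrative call site (not part of the original source), assuming an
// existing GCNTargetMachine TM and Module M:
//   TM.splitModule(M, /*NumParts=*/4, [](std::unique_ptr<Module> MPart) {
//     // Hand each MPart to a separate codegen job.
//   });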

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

//===----------------------------------------------------------------------===//
// AMDGPU Legacy Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = createGenericSchedPostRA(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(
        createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  Triple::ArchType Arch = TM.getTargetTriple().getArch();
  if (RemoveIncompatibleFunctions && Arch == Triple::amdgcn)
    addPass(createAMDGPURemoveIncompatibleFunctionsLegacyPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here means these
  // blocks will be cleaned up by the UnreachableBlockElim pass inserted next
  // in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

//===----------------------------------------------------------------------===//
// GCN Legacy Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  if (EnableMaxIlpSchedStrategy)
    return createGCNMaxILPMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPrepareLegacyPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize && !DisableStructurizer) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValuesLegacy());
  if (!LateCFGStructurize && !DisableStructurizer) {
    addPass(createSIAnnotateControlFlowLegacyPass());
    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(createAMDGPURewriteUndefForPHILegacyPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisLegacyID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsLegacyPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesLegacyID);
  addPass(createSILowerI1CopiesLegacyPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new AMDGPURegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}
void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &GCNPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  addPass(&SILowerWWMCopiesID);
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(false));

  addPass(&SILowerWWMCopiesID);
  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsLegacyPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertSingleUseVDSTID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         std::nullopt, std::nullopt);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  return false;
}
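
// Illustrative MIR input (not part of the original source) exercising the
// parser above; field keys are assumptions based on the YAML mapping in
// yaml::SIMachineFunctionInfo:
//   machineFunctionInfo:
//     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
//     frameOffsetReg:    '$sgpr33'
//     stackPtrOffsetReg: '$sgpr32'
// Each register must be in the class checked above, or the SMDiagnostic
// "incorrect register class for field" is emitted.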

//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
    GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC)
    : CodeGenPassBuilder(TM, Opts, PIC) {
  Opt.RequiresCodeGenSCCOrder = true;
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  // Garbage collection is not supported.
  disablePass<StackMapLivenessPass, FuncletLayoutPass,
              ShadowStackGCLoweringPass>();
}

void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
  // TODO: Missing AMDGPURemoveIncompatibleFunctions

  addPass(AMDGPUPrintfRuntimeBindingPass());
  if (LowerCtorDtor)
    addPass(AMDGPUCtorDtorLoweringPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(AMDGPUImageIntrinsicOptimizerPass(TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  addPass(AMDGPUAlwaysInlinePass());
  addPass(AlwaysInlinerPass());

  // TODO: Missing OpenCLEnqueuedBlockLowering

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS)
    addPass(AMDGPULowerModuleLDSPass(TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(InferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None))
    addPass(AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy));

  // FIXME: Adding atomic-expand manages to break -passes=atomic-expand
  // addPass(AtomicExpandPass(TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(AMDGPUPromoteAllocaPass(TM));
    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses(addPass);

    // TODO: Handle EnableAMDGPUAliasAnalysis

    // TODO: May want to move later or split into an early and late one.
    addPass(AMDGPUCodeGenPreparePass(TM));

    // TODO: LICM
  }

  Base::addIRPasses(addPass);

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass(addPass);
}

void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
  // AMDGPUAnnotateKernelFeaturesPass is missing here, but it will hopefully be
  // deleted soon.

  if (EnableLowerKernelArguments)
    addPass(AMDGPULowerKernelArgumentsPass(TM));

  // This lowering has been placed after codegenprepare to take advantage of
  // address mode matching (which is why it isn't put with the LDS lowerings).
  // It could be placed anywhere before uniformity annotations (an analysis
  // that it changes by splitting up fat pointers into their components)
  // but has been put before switch lowering and CFG flattening so that those
  // passes can run on the more optimized control flow this pass creates in
  // many cases.
  //
  // FIXME: This should ideally be put after the LoadStoreVectorizer.
  // However, due to some annoying facts about ResourceUsageAnalysis,
  // (especially as exercised in the resource-usage-dead-function test),
  // we need all the function passes codegenprepare all the way through
  // said resource usage analysis to run on the call graph produced
  // before codegenprepare runs (because codegenprepare will knock some
  // nodes out of the graph, which leads to function-level passes not
  // being run on them, which causes crashes in the resource usage analysis).
  addPass(AMDGPULowerBufferFatPointersPass(TM));

  Base::addCodeGenPrepare(addPass);

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(LoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here means these
  // blocks will be cleaned up by the UnreachableBlockElim pass inserted next
  // in the pass flow.
  addPass(LowerSwitchPass());
}

void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
  const bool LateCFGStructurize = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  const bool DisableStructurizer = AMDGPUTargetMachine::DisableStructurizer;
  const bool EnableStructurizerWorkarounds =
      AMDGPUTargetMachine::EnableStructurizerWorkarounds;

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(FlattenCFGPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(SinkingPass());

  addPass(AMDGPULateCodeGenPreparePass(TM));

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.

  addPass(AMDGPUUnifyDivergentExitNodesPass());

  if (!LateCFGStructurize && !DisableStructurizer) {
    if (EnableStructurizerWorkarounds) {
      addPass(FixIrreduciblePass());
      addPass(UnifyLoopExitsPass());
    }

    addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
  }

  addPass(AMDGPUAnnotateUniformValuesPass());

  if (!LateCFGStructurize && !DisableStructurizer) {
    addPass(SIAnnotateControlFlowPass(TM));

    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(AMDGPURewriteUndefForPHIPass());
  }

  addPass(LCSSAPass());

  if (TM.getOptLevel() > CodeGenOptLevel::Less)
    addPass(AMDGPUPerfHintAnalysisPass(TM));

  // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
  // isn't this in addInstSelector?
  addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>());
}

void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
                                             CreateMCStreamer) const {
  // TODO: Add AsmPrinter.
}

Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
  addPass(AMDGPUISelDAGToDAGPass(TM));
  addPass(SIFixSGPRCopiesPass());
  addPass(SILowerI1CopiesPass());
  return Error::success();
}

bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
                                             CodeGenOptLevel Level) const {
  if (Opt.getNumOccurrences())
    return Opt;
  if (TM.getOptLevel() < Level)
    return false;
  return Opt;
}

void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
  if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(GVNPass());
  else
    addPass(EarlyCSEPass());
}

void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
    AddIRPass &addPass) const {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(LoopDataPrefetchPass());

  addPass(SeparateConstOffsetFromGEPPass());

  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(StraightLineStrengthReducePass());

  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass(addPass);

  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(NaryReassociatePass());

  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(EarlyCSEPass());
}
unsigned const MachineRegisterInfo * MRI
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static cl::opt< bool, true > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
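As a rough illustration of how such a predicate is consumed (a sketch, not this file's actual pipeline), InternalizePass takes the callback and GlobalDCE then deletes what internalization left unreferenced; the keep-only-main predicate is an assumption for demonstration:

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO/GlobalDCE.h"
#include "llvm/Transforms/IPO/Internalize.h"
using namespace llvm;

static void addInternalization(ModulePassManager &MPM) {
  // Internalize every symbol the predicate does not explicitly preserve.
  MPM.addPass(InternalizePass(
      [](const GlobalValue &GV) { return GV.getName() == "main"; }));
  // Delete definitions that internalization made dead.
  MPM.addPass(GlobalDCEPass());
}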
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
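A minimal sketch of how one of these scheduler variants gets registered so it becomes selectable with -misched=<name>; the name, description, and factory below are placeholders:

#include "llvm/CodeGen/MachineScheduler.h"
using namespace llvm;

static ScheduleDAGInstrs *createMyScheduler(MachineSchedContext *C) {
  // Reuse the standard converging scheduler as the baseline implementation.
  return createGenericSchedLive(C);
}

static MachineSchedRegistry
    MySchedRegistry("my-sched", "Illustrative scheduler registration",
                    createMyScheduler);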
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file declares a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inline "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:216
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:131
This file provides the interface for a simple, fast CSE pass.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
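The bare analysis-manager declarations above are the standard new-pass-manager boilerplate; a self-contained sketch of that setup (the O2 pipeline choice is illustrative, and TM may be null for host-only pipelines):

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

static void runDefaultPipeline(Module &M, TargetMachine *TM) {
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(TM);
  // Register every analysis each manager may be asked for, then wire the
  // managers together through proxies.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(OptimizationLevel::O2);
  MPM.run(M, MAM);
}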
This header defines various interfaces for pass management in LLVM.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-{sgpr|vgpr}-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Error addInstSelector(AddMachinePass &) const
void addEarlyCSEOrGVNPass(AddIRPass &) const
void addStraightLineScalarOptimizationPasses(AddIRPass &) const
AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC)
void addPreISel(AddIRPass &addPass) const
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
void addCodeGenPrepare(AddIRPass &) const
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
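A sketch of driving this entry point, assuming the caller already holds a GCNTargetMachine TM; the part count and callback body are illustrative:

#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void splitForParallelCodegen(GCNTargetMachine &TM, Module &M) {
  bool DidSplit =
      TM.splitModule(M, /*NumParts=*/4, [](std::unique_ptr<Module> MPart) {
        // Each partition arrives as its own Module; a real driver would hand
        // it to a codegen thread or serialize it to disk.
        outs() << "partition: " << MPart->getModuleIdentifier() << "\n";
      });
  if (!DidSplit) {
    // Splitting was declined; compile M as a single unit instead.
  }
}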
Inlines functions marked as "always_inline".
Definition: AlwaysInliner.h:32
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class provides access to building LLVM's passes.
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
void disablePass()
Allow the target to disable a specific pass by default.
void addIRPasses(AddIRPass &) const
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:337
Tagged union holding either a T or a Error.
Definition: Error.h:481
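The usual consumption pattern for these types, sketched against the strategy parser documented above: test the Expected, propagate the Error unconsumed, otherwise take the value.

static Error applyStrategy(StringRef Params) {
  Expected<ScanOptions> StrategyOrErr =
      parseAMDGPUAtomicOptimizerStrategy(Params);
  if (!StrategyOrErr)
    return StrategyOrErr.takeError(); // Errors must be handled or passed on.
  ScanOptions Strategy = *StrategyOrErr;
  (void)Strategy; // A real caller would configure the pass with this value.
  return Error::success();
}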
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:278
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition: GVN.h:117
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:37
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:106
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:481
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:472
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:499
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:406
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:588
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:517
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
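A sketch of how a target's registerPassBuilderCallbacks override typically uses these extension points; the passes added here are placeholders, not this file's actual pipeline:

#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO/GlobalDCE.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"
using namespace llvm;

static void registerMyCallbacks(PassBuilder &PB) {
  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0)
          PM.addPass(GlobalDCEPass()); // placeholder module pass
      });
  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        FPM.addPass(EarlyCSEPass()); // placeholder function pass
      });
}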
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:458
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
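A sketch of these matchers in combination: m_Value binds X on first use, m_Deferred requires the same value to appear again, and the commutative m_c_And accepts either operand order.

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Recognize `X & ~X` in either operand order (an always-zero value).
static bool isAndWithOwnComplement(const Value *V) {
  const Value *X;
  return match(V, m_c_And(m_Value(X), m_Not(m_Deferred(X))));
}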
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
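cl::location is what lets a flag write through to external storage, as the structurizer options above do; a minimal sketch with a hypothetical flag name and global:

#include "llvm/Support/CommandLine.h"

// External storage the option writes into (hypothetical).
static bool DisableFancyOpt = false;

// The <bool, true> form of cl::opt stores nothing itself; the parsed value
// lands directly in DisableFancyOpt.
static llvm::cl::opt<bool, true>
    DisableFancyOptFlag("my-disable-fancy-opt",
                        llvm::cl::desc("Disable the fancy optimization"),
                        llvm::cl::location(DisableFancyOpt), llvm::cl::Hidden);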
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
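A sketch of wiring a named filter into the new pass manager through the registerRegClassFilterParsingCallback hook documented above; the filter name and accept-everything body are placeholders:

#include "llvm/CodeGen/RegAllocCommon.h"
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

static void registerMyRegAllocFilter(PassBuilder &PB) {
  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "my-class")
          return [](const TargetRegisterInfo &, const MachineRegisterInfo &,
                    const Register) {
            return true; // placeholder: allocate every virtual register
          };
        return nullptr; // unknown name: let other parsers try
      });
}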
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:848
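A sketch of the adaptor in use: it lets a function pass sit inside a module-level pipeline, running once per function (EarlyCSEPass here is just an example payload).

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"
using namespace llvm;

static void addPerFunctionCleanups(ModulePassManager &MPM) {
  // The adaptor maps the wrapped function pass over every function in the
  // module.
  MPM.addPass(createModuleToFunctionPassAdaptor(EarlyCSEPass()));
}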
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true, the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
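A sketch of that note in context; the pass-config class is a stand-in, but the real AMDGPUPassConfig hook has the same shape:

#include "AMDGPUMacroFusion.h"
#include "llvm/CodeGen/MachineScheduler.h"
using namespace llvm;

class MyPassConfig { // stand-in for a TargetPassConfig subclass
public:
  ScheduleDAGInstrs *createMachineScheduler(MachineSchedContext *C) const {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    // Install macro fusion as a post-processing step on the DAG builder.
    DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
    return DAG;
  }
};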
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG, ready for instruction scheduling.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to load c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
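call_once pairs with a once_flag for thread-safe one-time initialization, which is how the default register-allocator factories on this page are set up; a minimal sketch:

#include "llvm/Support/Threading.h"

static llvm::once_flag InitMyDefaultsFlag;

static void initMyDefaultsOnce() {
  // The lambda body runs exactly once across all threads; later calls
  // return immediately.
  llvm::call_once(InitMyDefaultsFlag, [] {
    // one-time setup goes here
  });
}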
void initializeSILowerSGPRSpillsPass(PassRegistry &)
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
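A sketch of the callback form, assuming the AMDGPU AA wrapper documented above is in the pass pipeline; the factory name is hypothetical, though the splice-in pattern is the conventional one:

#include "AMDGPUAliasAnalysis.h"
using namespace llvm;

ImmutablePass *createMyExternalAAWrapper() { // hypothetical factory name
  return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
    // If the target AA wrapper already ran, splice its result into the
    // function's AAResults aggregate.
    if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
      AAR.addAAResult(WrapperPass->getResult());
  });
}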
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3597
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
A simple and fast domtree-based CSE pass.
Definition: EarlyCSE.h:30
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:874
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.