1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both the AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for the new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
22#include "AMDGPUIGroupLP.h"
23#include "AMDGPUISelDAGToDAG.h"
24#include "AMDGPUMacroFusion.h"
26#include "AMDGPURegBankSelect.h"
27#include "AMDGPUSplitModule.h"
32#include "GCNSchedStrategy.h"
33#include "GCNVOPDUtils.h"
34#include "R600.h"
35#include "R600TargetMachine.h"
36#include "SIFixSGPRCopies.h"
38#include "SIMachineScheduler.h"
51#include "llvm/CodeGen/Passes.h"
54#include "llvm/IR/IntrinsicsAMDGPU.h"
55#include "llvm/IR/PassManager.h"
62#include "llvm/Transforms/IPO.h"
80#include <optional>
81
82using namespace llvm;
83using namespace llvm::PatternMatch;
84
85namespace {
86class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
87public:
88 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
89 : RegisterRegAllocBase(N, D, C) {}
90};
91
92class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
93public:
94 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
95 : RegisterRegAllocBase(N, D, C) {}
96};
97
98static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
99 const MachineRegisterInfo &MRI,
100 const Register Reg) {
101 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
102 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
103}
104
105static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
106 const MachineRegisterInfo &MRI,
107 const Register Reg) {
108 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
109 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
110}
111
112/// -{sgpr|vgpr}-regalloc=... command line option.
113static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
114
115/// A dummy default pass factory indicates whether the register allocator is
116/// overridden on the command line.
117static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
118static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
119
120static SGPRRegisterRegAlloc
121defaultSGPRRegAlloc("default",
122 "pick SGPR register allocator based on -O option",
123 useDefaultRegisterAllocator);
124
125static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
126 RegisterPassParser<SGPRRegisterRegAlloc>>
127SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
128 cl::desc("Register allocator to use for SGPRs"));
129
130static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
131 RegisterPassParser<VGPRRegisterRegAlloc>>
132VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
133 cl::desc("Register allocator to use for VGPRs"));
134
135
136static void initializeDefaultSGPRRegisterAllocatorOnce() {
137 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
138
139 if (!Ctor) {
140 Ctor = SGPRRegAlloc;
141 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
142 }
143}
144
145static void initializeDefaultVGPRRegisterAllocatorOnce() {
146 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
147
148 if (!Ctor) {
149 Ctor = VGPRRegAlloc;
150 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
151 }
152}
153
154static FunctionPass *createBasicSGPRRegisterAllocator() {
155 return createBasicRegisterAllocator(onlyAllocateSGPRs);
156}
157
158static FunctionPass *createGreedySGPRRegisterAllocator() {
159 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
160}
161
162static FunctionPass *createFastSGPRRegisterAllocator() {
163 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
164}
165
166static FunctionPass *createBasicVGPRRegisterAllocator() {
167 return createBasicRegisterAllocator(onlyAllocateVGPRs);
168}
169
170static FunctionPass *createGreedyVGPRRegisterAllocator() {
171 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
172}
173
174static FunctionPass *createFastVGPRRegisterAllocator() {
175 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
176}
177
178static SGPRRegisterRegAlloc basicRegAllocSGPR(
179 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
180static SGPRRegisterRegAlloc greedyRegAllocSGPR(
181 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
182
183static SGPRRegisterRegAlloc fastRegAllocSGPR(
184 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
185
186
187static VGPRRegisterRegAlloc basicRegAllocVGPR(
188 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
189static VGPRRegisterRegAlloc greedyRegAllocVGPR(
190 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
191
192static VGPRRegisterRegAlloc fastRegAllocVGPR(
193 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
194} // anonymous namespace
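With both registries defined, the two allocators can be selected independently per register class. A usage sketch (not part of this file; target and flags are illustrative):

  llc -mtriple=amdgcn -mcpu=gfx900 -sgpr-regalloc=basic -vgpr-regalloc=greedy in.ll -o out.s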
195
196static cl::opt<bool>
197EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
198 cl::desc("Run early if-conversion"),
199 cl::init(false));
200
201static cl::opt<bool>
202OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
203 cl::desc("Run pre-RA exec mask optimizations"),
204 cl::init(true));
205
206static cl::opt<bool>
207 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
208 cl::desc("Lower GPU ctor / dtors to globals on the device."),
209 cl::init(true), cl::Hidden);
210
211// Option to disable vectorizer for tests.
212static cl::opt<bool> EnableLoadStoreVectorizer(
213 "amdgpu-load-store-vectorizer",
214 cl::desc("Enable load store vectorizer"),
215 cl::init(true),
216 cl::Hidden);
217
218// Option to control global loads scalarization
219static cl::opt<bool> ScalarizeGlobal(
220 "amdgpu-scalarize-global-loads",
221 cl::desc("Enable global load scalarization"),
222 cl::init(true),
223 cl::Hidden);
224
225// Option to run internalize pass.
226static cl::opt<bool> InternalizeSymbols(
227 "amdgpu-internalize-symbols",
228 cl::desc("Enable elimination of non-kernel functions and unused globals"),
229 cl::init(false),
230 cl::Hidden);
231
232// Option to inline all early.
233static cl::opt<bool> EarlyInlineAll(
234 "amdgpu-early-inline-all",
235 cl::desc("Inline all functions early"),
236 cl::init(false),
237 cl::Hidden);
238
239static cl::opt<bool> RemoveIncompatibleFunctions(
240 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
241 cl::desc("Enable removal of functions when they "
242 "use features not supported by the target GPU"),
243 cl::init(true));
244
245static cl::opt<bool> EnableSDWAPeephole(
246 "amdgpu-sdwa-peephole",
247 cl::desc("Enable SDWA peepholer"),
248 cl::init(true));
249
250static cl::opt<bool> EnableDPPCombine(
251 "amdgpu-dpp-combine",
252 cl::desc("Enable DPP combiner"),
253 cl::init(true));
254
255// Enable address space based alias analysis
256static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
257 cl::desc("Enable AMDGPU Alias Analysis"),
258 cl::init(true));
259
260// Option to run late CFG structurizer
261static cl::opt<bool, true> LateCFGStructurize(
262 "amdgpu-late-structurize",
263 cl::desc("Enable late CFG structurization"),
264 cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
265 cl::Hidden);
266
267// Disable structurizer-based control-flow lowering in order to test convergence
268// control tokens. This should eventually be replaced by the wave-transform.
269static cl::opt<bool, true> DisableStructurizer(
270 "amdgpu-disable-structurizer",
271 cl::desc("Disable structurizer for experiments; produces unusable code"),
272 cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);
273
274// Enable lib calls simplifications
275static cl::opt<bool> EnableLibCallSimplify(
276 "amdgpu-simplify-libcall",
277 cl::desc("Enable amdgpu library simplifications"),
278 cl::init(true),
279 cl::Hidden);
280
281static cl::opt<bool> EnableLowerKernelArguments(
282 "amdgpu-ir-lower-kernel-arguments",
283 cl::desc("Lower kernel argument loads in IR pass"),
284 cl::init(true),
285 cl::Hidden);
286
287static cl::opt<bool> EnableRegReassign(
288 "amdgpu-reassign-regs",
289 cl::desc("Enable register reassign optimizations on gfx10+"),
290 cl::init(true),
291 cl::Hidden);
292
293static cl::opt<bool> OptVGPRLiveRange(
294 "amdgpu-opt-vgpr-liverange",
295 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
296 cl::init(true), cl::Hidden);
297
298static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
299 "amdgpu-atomic-optimizer-strategy",
300 cl::desc("Select DPP or Iterative strategy for scan"),
301 cl::init(ScanOptions::Iterative),
302 cl::values(
303 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
304 clEnumValN(ScanOptions::Iterative, "Iterative",
305 "Use Iterative approach for scan"),
306 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
307
308// Enable Mode register optimization
309static cl::opt<bool> EnableSIModeRegisterPass(
310 "amdgpu-mode-register",
311 cl::desc("Enable mode register pass"),
312 cl::init(true),
313 cl::Hidden);
314
315// Enable GFX11.5+ s_singleuse_vdst insertion
316static cl::opt<bool>
317 EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
318 cl::desc("Enable s_singleuse_vdst insertion"),
319 cl::init(false), cl::Hidden);
320
321// Enable GFX11+ s_delay_alu insertion
322static cl::opt<bool>
323 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
324 cl::desc("Enable s_delay_alu insertion"),
325 cl::init(true), cl::Hidden);
326
327// Enable GFX11+ VOPD
328static cl::opt<bool>
329 EnableVOPD("amdgpu-enable-vopd",
330 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
331 cl::init(true), cl::Hidden);
332
333// Option is used in lit tests to prevent deadcoding of patterns inspected.
334static cl::opt<bool>
335EnableDCEInRA("amdgpu-dce-in-ra",
336 cl::init(true), cl::Hidden,
337 cl::desc("Enable machine DCE inside regalloc"));
338
339static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
340 cl::desc("Adjust wave priority"),
341 cl::init(false), cl::Hidden);
342
343static cl::opt<bool> EnableScalarIRPasses(
344 "amdgpu-scalar-ir-passes",
345 cl::desc("Enable scalar IR passes"),
346 cl::init(true),
347 cl::Hidden);
348
349static cl::opt<bool, true> EnableStructurizerWorkarounds(
350 "amdgpu-enable-structurizer-workarounds",
351 cl::desc("Enable workarounds for the StructurizeCFG pass"),
352 cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds),
353 cl::init(true), cl::Hidden);
354
355static cl::opt<bool, true> EnableLowerModuleLDS(
356 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
357 cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
358 cl::Hidden);
359
360static cl::opt<bool> EnablePreRAOptimizations(
361 "amdgpu-enable-pre-ra-optimizations",
362 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
363 cl::Hidden);
364
365static cl::opt<bool> EnablePromoteKernelArguments(
366 "amdgpu-enable-promote-kernel-arguments",
367 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
368 cl::Hidden, cl::init(true));
369
370static cl::opt<bool> EnableImageIntrinsicOptimizer(
371 "amdgpu-enable-image-intrinsic-optimizer",
372 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
373 cl::Hidden);
374
375static cl::opt<bool>
376 EnableLoopPrefetch("amdgpu-loop-prefetch",
377 cl::desc("Enable loop data prefetch on AMDGPU"),
378 cl::Hidden, cl::init(false));
379
380static cl::opt<bool> EnableMaxIlpSchedStrategy(
381 "amdgpu-enable-max-ilp-scheduling-strategy",
382 cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
383 cl::Hidden, cl::init(false));
384
385static cl::opt<bool> EnableRewritePartialRegUses(
386 "amdgpu-enable-rewrite-partial-reg-uses",
387 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
388 cl::Hidden);
389
390static cl::opt<bool> EnableHipStdPar(
391 "amdgpu-enable-hipstdpar",
392 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
393 cl::Hidden);
394
395static cl::opt<bool>
396 EnableAMDGPUAttributor("amdgpu-attributor-enable",
397 cl::desc("Enable AMDGPUAttributorPass"),
398 cl::init(true), cl::Hidden);
399
400extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
401 // Register the target
402 RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
403 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
404
479}
480
481static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
482 return std::make_unique<AMDGPUTargetObjectFile>();
483}
484
485static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
486 return new SIScheduleDAGMI(C);
487}
488
489static ScheduleDAGInstrs *
490createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
491 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
492 ScheduleDAGMILive *DAG =
493 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
494 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
495 if (ST.shouldClusterStores())
496 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
497 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
498 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
499 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
500 return DAG;
501}
502
503static ScheduleDAGInstrs *
504createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
505 ScheduleDAGMILive *DAG =
506 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
507 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
508 return DAG;
509}
510
511static ScheduleDAGInstrs *
512createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
513 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
514 auto DAG = new GCNIterativeScheduler(C,
515 GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
516 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
517 if (ST.shouldClusterStores())
518 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
519 return DAG;
520}
521
522static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
523 return new GCNIterativeScheduler(C,
524 GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
525}
526
527static ScheduleDAGInstrs *
528createIterativeILPMachineScheduler(MachineSchedContext *C) {
529 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
530 auto DAG = new GCNIterativeScheduler(C,
531 GCNIterativeScheduler::SCHEDULE_ILP);
532 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
533 if (ST.shouldClusterStores())
534 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
535 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
536 return DAG;
537}
538
539static MachineSchedRegistry
540SISchedRegistry("si", "Run SI's custom scheduler",
541 createSIMachineScheduler);
542
543static MachineSchedRegistry GCNMaxOccupancySchedRegistry(
544 "gcn-max-occupancy",
545 "Run GCN scheduler to maximize occupancy",
546 createGCNMaxOccupancyMachineScheduler);
547
548static MachineSchedRegistry
549 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
550 createGCNMaxILPMachineScheduler);
551
552static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
553 "gcn-iterative-max-occupancy-experimental",
554 "Run GCN scheduler to maximize occupancy (experimental)",
555 createIterativeGCNMaxOccupancyMachineScheduler);
556
557static MachineSchedRegistry GCNMinRegSchedRegistry(
558 "gcn-iterative-minreg",
559 "Run GCN iterative scheduler for minimal register usage (experimental)",
560 createMinRegScheduler);
561
562static MachineSchedRegistry GCNILPSchedRegistry(
563 "gcn-iterative-ilp",
564 "Run GCN iterative scheduler for ILP scheduling (experimental)",
565 createIterativeILPMachineScheduler);
566
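A usage sketch for these registrations, assuming the generic -misched=<name> switch that MachineSchedRegistry entries plug into:

  llc -mtriple=amdgcn -mcpu=gfx90a -misched=gcn-max-ilp in.ll -o out.s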
567static StringRef computeDataLayout(const Triple &TT) {
568 if (TT.getArch() == Triple::r600) {
569 // 32-bit pointers.
570 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
571 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
572 }
573
574 // 32-bit private, local, and region pointers. 64-bit global, constant and
575 // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
576 // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
577 // (address space 7), and 128-bit non-integral buffer resources (address
578 // space 8) which cannot be non-trivially accessed by LLVM memory operations
579 // like getelementptr.
580 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
581 "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
582 "v32:32-v48:64-v96:"
583 "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
584 "G1-ni:7:8:9";
585}
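To illustrate what the p<N>:size:abi[:pref[:idx]] and ni components encode, here is a minimal self-contained C++ sketch (the probe function and the abbreviated layout string are illustrative; DataLayout is the standard LLVM API):

  #include <cassert>
  #include "llvm/IR/DataLayout.h"

  // Sketch: probe a few pointer properties encoded by the amdgcn layout string.
  void probeAMDGPUDataLayout() {
    llvm::DataLayout DL("e-p:64:64-p5:32:32-p7:160:256:256:32-ni:7:8:9");
    assert(DL.getPointerSizeInBits(0) == 64);  // flat pointers are 64-bit
    assert(DL.getPointerSizeInBits(5) == 32);  // private (scratch) is 32-bit
    assert(DL.getPointerSizeInBits(7) == 160); // buffer fat pointers: 160 bits
    assert(DL.getIndexSizeInBits(7) == 32);    // ... indexed by 32-bit offsets
    assert(DL.isNonIntegralAddressSpace(7));   // ni:7: no stable integer form
  }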
586
587static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT,
588 StringRef GPU) {
589 if (!GPU.empty())
590 return GPU;
591
592 // Need to default to a target with flat support for HSA.
593 if (TT.getArch() == Triple::amdgcn)
594 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
595
596 return "r600";
597}
598
599static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
600 // The AMDGPU toolchain only supports generating shared objects, so we
601 // must always use PIC.
602 return Reloc::PIC_;
603}
604
605AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
606 StringRef CPU, StringRef FS,
607 const TargetOptions &Options,
608 std::optional<Reloc::Model> RM,
609 std::optional<CodeModel::Model> CM,
610 CodeGenOptLevel OptLevel)
611 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
612 FS, Options, getEffectiveRelocModel(RM),
613 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
614 TLOF(createTLOF(getTargetTriple())) {
615 initAsmInfo();
616 if (TT.getArch() == Triple::amdgcn) {
617 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
618 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
619 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
620 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
621 }
622}
623
629
630AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
631
632StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
633 Attribute GPUAttr = F.getFnAttribute("target-cpu");
634 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
635}
636
637StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
638 Attribute FSAttr = F.getFnAttribute("target-features");
639
640 return FSAttr.isValid() ? FSAttr.getValueAsString()
641 : getTargetFeatureString();
642}
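Both values are ordinary per-function IR attributes, e.g. (illustrative IR mirroring what the two accessors above read):

  define amdgpu_kernel void @k() #0 { ret void }
  attributes #0 = { "target-cpu"="gfx90a" "target-features"="+wavefrontsize64" }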
643
644/// Predicate for Internalize pass.
645static bool mustPreserveGV(const GlobalValue &GV) {
646 if (const Function *F = dyn_cast<Function>(&GV))
647 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
648 F->getName().starts_with("__sanitizer_") ||
649 AMDGPU::isEntryFunctionCC(F->getCallingConv());
650
651 GV.removeDeadConstantUsers();
652 return !GV.use_empty();
653}
654
655void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
656 AAM.registerFunctionAnalysis<AMDGPUAA>();
657}
658
659static Expected<ScanOptions>
660parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
661 if (Params.empty())
662 return ScanOptions::Iterative;
663 Params.consume_front("strategy=");
664 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
665 .Case("dpp", ScanOptions::DPP)
666 .Cases("iterative", "", ScanOptions::Iterative)
667 .Case("none", ScanOptions::None)
668 .Default(std::nullopt);
669 if (Result)
670 return *Result;
671 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
672}
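Given this parser, the strategy can also be chosen via a new-pass-manager pass string; a sketch, assuming the pass is registered under the name amdgpu-atomic-optimizer in AMDGPUPassRegistry.def:

  opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' in.ll -S -o out.ll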
673
674Expected<AMDGPUAttributorOptions>
675parseAMDGPUAttributorPassOptions(StringRef Params) {
676 AMDGPUAttributorOptions Result;
677 while (!Params.empty()) {
678 StringRef ParamName;
679 std::tie(ParamName, Params) = Params.split(';');
680 if (ParamName == "closed-world") {
681 Result.IsClosedWorld = true;
682 } else {
683 return make_error<StringError>(
684 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
685 .str(),
686 inconvertibleErrorCode());
687 }
688 }
689 return Result;
690}
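Correspondingly, a sketch of enabling the closed-world assumption through a pass string (pass name assumed from the registry):

  opt -passes='amdgpu-attributor<closed-world>' in.ll -S -o out.ll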
691
693void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
694#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
695#include "llvm/Passes/TargetPassRegistry.inc"
696
697 PB.registerPipelineStartEPCallback(
698 [](ModulePassManager &PM, OptimizationLevel Level) {
699 FunctionPassManager FPM;
700 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
701 if (EnableHipStdPar)
702 PM.addPass(HipStdParAcceleratorCodeSelectionPass());
703 });
704
705 PB.registerPipelineEarlySimplificationEPCallback(
706 [](ModulePassManager &PM, OptimizationLevel Level) {
707 PM.addPass(AMDGPUPrintfRuntimeBindingPass());
708
709 if (Level == OptimizationLevel::O0)
710 return;
711
712 PM.addPass(AMDGPUUnifyMetadataPass());
713
714 if (InternalizeSymbols) {
715 PM.addPass(InternalizePass(mustPreserveGV));
716 PM.addPass(GlobalDCEPass());
717 }
718
719 if (EarlyInlineAll && !EnableFunctionCalls)
720 PM.addPass(AMDGPUAlwaysInlinePass());
721 });
722
723 PB.registerPeepholeEPCallback(
724 [](FunctionPassManager &FPM, OptimizationLevel Level) {
725 if (Level == OptimizationLevel::O0)
726 return;
727
728 FPM.addPass(AMDGPUUseNativeCallsPass());
729 if (EnableLibCallSimplify)
730 FPM.addPass(AMDGPUSimplifyLibCallsPass());
731 });
732
734 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
735 if (Level == OptimizationLevel::O0)
736 return;
737
738 FunctionPassManager FPM;
739
740 // Add promote kernel arguments pass to the opt pipeline right before
741 // infer address spaces which is needed to do actual address space
742 // rewriting.
743 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
744 EnablePromoteKernelArguments)
745 FPM.addPass(AMDGPUPromoteKernelArgumentsPass());
746
747 // Add infer address spaces pass to the opt pipeline after inlining
748 // but before SROA to increase SROA opportunities.
749 FPM.addPass(InferAddressSpacesPass());
750
751 // This should run after inlining to have any chance of doing
752 // anything, and before other cleanup optimizations.
753 FPM.addPass(AMDGPULowerKernelAttributesPass());
754
755 if (Level != OptimizationLevel::O0) {
756 // Promote alloca to vector before SROA and loop unroll. If we
757 // manage to eliminate allocas before unroll we may choose to unroll
758 // less.
759 FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
760 }
761
762 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
763 });
764
765 // FIXME: Why is AMDGPUAttributor not in CGSCC?
766 PB.registerOptimizerLastEPCallback(
767 [this](ModulePassManager &MPM, OptimizationLevel Level) {
768 if (Level != OptimizationLevel::O0) {
769 MPM.addPass(AMDGPUAttributorPass(*this));
770 }
771 });
772
773 PB.registerFullLinkTimeOptimizationLastEPCallback(
774 [this](ModulePassManager &PM, OptimizationLevel Level) {
775 // We want to support the -lto-partitions=N option as "best effort".
776 // For that, we need to lower LDS earlier in the pipeline before the
777 // module is partitioned for codegen.
778 if (EnableLowerModuleLDS)
779 PM.addPass(AMDGPULowerModuleLDSPass(*this));
780 if (EnableAMDGPUAttributor && Level != OptimizationLevel::O0)
781 PM.addPass(AMDGPUAttributorPass(*this));
782 });
783
784 PB.registerRegClassFilterParsingCallback(
785 [](StringRef FilterName) -> RegAllocFilterFunc {
786 if (FilterName == "sgpr")
787 return onlyAllocateSGPRs;
788 if (FilterName == "vgpr")
789 return onlyAllocateVGPRs;
790 return nullptr;
791 });
792}
793
794int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
795 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
796 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
797 AddrSpace == AMDGPUAS::REGION_ADDRESS)
798 ? -1
799 : 0;
800}
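Concretely, a null pointer in the local, private, or region address spaces lowers to an all-ones bit pattern rather than zero, e.g. (illustrative IR):

  ; addrspace(5) is PRIVATE_ADDRESS, so the cast result is 0xFFFFFFFF, not 0.
  %p = addrspacecast ptr null to ptr addrspace(5)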
801
802bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
803 unsigned DestAS) const {
804 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
805 AMDGPU::isFlatGlobalAddrSpace(DestAS);
806}
807
808unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
809 const auto *LD = dyn_cast<LoadInst>(V);
810 if (!LD)
811 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
812
813 // It must be a generic pointer loaded.
814 assert(V->getType()->isPointerTy() &&
815 V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
816
817 const auto *Ptr = LD->getPointerOperand();
818 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
819 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
820 // For a generic pointer loaded from the constant memory, it could be assumed
821 // as a global pointer since the constant memory is only populated on the
822 // host side. As implied by the offload programming model, only global
823 // pointers could be referenced on the host side.
824 return AMDGPUAS::GLOBAL_ADDRESS;
825}
826
827std::pair<const Value *, unsigned>
828AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
829 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
830 switch (II->getIntrinsicID()) {
831 case Intrinsic::amdgcn_is_shared:
832 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
833 case Intrinsic::amdgcn_is_private:
834 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
835 default:
836 break;
837 }
838 return std::pair(nullptr, -1);
839 }
840 // Check the global pointer predication based on
841 // (!is_shared(p) && !is_private(p)). Note that logical 'and' is commutative and
842 // the order of 'is_shared' and 'is_private' is not significant.
843 Value *Ptr;
844 if (match(
845 const_cast<Value *>(V),
846 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
847 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
848 m_Deferred(Ptr))))))
849 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
850
851 return std::pair(nullptr, -1);
852}
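For reference, the IR shape matched above looks like this (illustrative; the 'not' and 'and' operands may appear in either order):

  %is.shared  = call i1 @llvm.amdgcn.is.shared(ptr %p)
  %is.private = call i1 @llvm.amdgcn.is.private(ptr %p)
  %not.shared  = xor i1 %is.shared, true
  %not.private = xor i1 %is.private, true
  %global = and i1 %not.shared, %not.private ; => pair(%p, GLOBAL_ADDRESS)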
853
854unsigned
855AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
856 switch (Kind) {
857 case PseudoSourceValue::Stack:
858 case PseudoSourceValue::FixedStack:
859 return AMDGPUAS::PRIVATE_ADDRESS;
860 case PseudoSourceValue::ConstantPool:
861 case PseudoSourceValue::GOT:
862 case PseudoSourceValue::JumpTable:
863 case PseudoSourceValue::GlobalValueCallEntry:
864 case PseudoSourceValue::ExternalSymbolCallEntry:
865 return AMDGPUAS::CONSTANT_ADDRESS;
866 }
867 return AMDGPUAS::FLAT_ADDRESS;
868}
869
871 Module &M, unsigned NumParts,
872 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
873 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
874 // but all current users of this API don't have one ready and would need to
875 // create one anyway. Let's hide the boilerplate for now to keep it simple.
876
877 LoopAnalysisManager LAM;
878 FunctionAnalysisManager FAM;
879 CGSCCAnalysisManager CGAM;
880 ModuleAnalysisManager MAM;
881
882 PassBuilder PB(this);
883 PB.registerModuleAnalyses(MAM);
884 PB.registerFunctionAnalyses(FAM);
885 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
886
887 ModulePassManager MPM;
888 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
889 MPM.run(M, MAM);
890 return true;
891}
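A caller-side sketch of this callback-based API (writePart is a hypothetical consumer; only splitModule itself is defined here):

  // Sketch: split M into 4 partitions, handing each to a consumer.
  bool Ok = TM.splitModule(M, /*NumParts=*/4,
                           [&](std::unique_ptr<Module> MPart) {
                             writePart(std::move(MPart)); // hypothetical
                           });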
892
893//===----------------------------------------------------------------------===//
894// GCN Target Machine (SI+)
895//===----------------------------------------------------------------------===//
896
897GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
898 StringRef CPU, StringRef FS,
899 const TargetOptions &Options,
900 std::optional<Reloc::Model> RM,
901 std::optional<CodeModel::Model> CM,
902 CodeGenOptLevel OL, bool JIT)
903 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
904
905const TargetSubtargetInfo *
906GCNTargetMachine::getSubtargetImpl(const Function &F) const {
907 StringRef GPU = getGPUName(F);
908 StringRef FS = getFeatureString(F);
909
910 SmallString<128> SubtargetKey(GPU);
911 SubtargetKey.append(FS);
912
913 auto &I = SubtargetMap[SubtargetKey];
914 if (!I) {
915 // This needs to be done before we create a new subtarget since any
916 // creation will depend on the TM and the code generation flags on the
917 // function that reside in TargetOptions.
918 resetTargetOptions(F);
919 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
920 }
921
922 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
923
924 return I.get();
925}
926
927TargetTransformInfo
928GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
929 return TargetTransformInfo(GCNTTIImpl(this, F));
930}
931
932Error GCNTargetMachine::buildCodeGenPipeline(
933 ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
934 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
935 PassInstrumentationCallbacks *PIC) {
936 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
937 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
938}
939
940//===----------------------------------------------------------------------===//
941// AMDGPU Legacy Pass Setup
942//===----------------------------------------------------------------------===//
943
944std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
945 return getStandardCSEConfigForOpt(TM->getOptLevel());
946}
947
948namespace {
949
950class GCNPassConfig final : public AMDGPUPassConfig {
951public:
952 GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
953 : AMDGPUPassConfig(TM, PM) {
954 // It is necessary to know the register usage of the entire call graph. We
955 // allow calls without EnableAMDGPUFunctionCalls if they are marked
956 // noinline, so this is always required.
957 setRequiresCodeGenSCCOrder(true);
958 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
959 }
960
961 GCNTargetMachine &getGCNTargetMachine() const {
962 return getTM<GCNTargetMachine>();
963 }
964
966 createMachineScheduler(MachineSchedContext *C) const override;
967
968 ScheduleDAGInstrs *
969 createPostMachineScheduler(MachineSchedContext *C) const override {
970 ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
971 C, std::make_unique<PostGenericScheduler>(C),
972 /*RemoveKillFlags=*/true);
973 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
974 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
975 if (ST.shouldClusterStores())
976 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
977 DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
978 DAG->addMutation(
979 createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
980 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
981 DAG->addMutation(createVOPDPairingMutation());
982 return DAG;
983 }
984
985 bool addPreISel() override;
986 void addMachineSSAOptimization() override;
987 bool addILPOpts() override;
988 bool addInstSelector() override;
989 bool addIRTranslator() override;
990 void addPreLegalizeMachineIR() override;
991 bool addLegalizeMachineIR() override;
992 void addPreRegBankSelect() override;
993 bool addRegBankSelect() override;
994 void addPreGlobalInstructionSelect() override;
995 bool addGlobalInstructionSelect() override;
996 void addFastRegAlloc() override;
997 void addOptimizedRegAlloc() override;
998
999 FunctionPass *createSGPRAllocPass(bool Optimized);
1000 FunctionPass *createVGPRAllocPass(bool Optimized);
1001 FunctionPass *createRegAllocPass(bool Optimized) override;
1002
1003 bool addRegAssignAndRewriteFast() override;
1004 bool addRegAssignAndRewriteOptimized() override;
1005
1006 void addPreRegAlloc() override;
1007 bool addPreRewrite() override;
1008 void addPostRegAlloc() override;
1009 void addPreSched2() override;
1010 void addPreEmitPass() override;
1011};
1012
1013} // end anonymous namespace
1014
1015AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
1016 : TargetPassConfig(TM, PM) {
1017 // Exceptions and StackMaps are not supported, so these passes will never do
1018 // anything.
1019 disablePass(&StackMapLivenessID);
1020 disablePass(&FuncletLayoutID);
1021 // Garbage collection is not supported.
1022 disablePass(&GCLoweringID);
1023 disablePass(&ShadowStackGCLoweringID);
1024}
1025
1026void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
1027 if (TM->getOptLevel() == CodeGenOptLevel::Aggressive)
1028 addPass(createGVNPass());
1029 else
1030 addPass(createEarlyCSEPass());
1031}
1032
1033void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
1034 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
1035 addPass(createLoopDataPrefetchPass());
1036 addPass(createSeparateConstOffsetFromGEPPass());
1037 // ReassociateGEPs exposes more opportunities for SLSR. See
1038 // the example in reassociate-geps-and-slsr.ll.
1039 addPass(createStraightLineStrengthReducePass());
1040 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
1041 // EarlyCSE can reuse.
1042 addEarlyCSEOrGVNPass();
1043 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1044 addPass(createNaryReassociatePass());
1045 // NaryReassociate on GEPs creates redundant common expressions, so run
1046 // EarlyCSE after it.
1047 addPass(createEarlyCSEPass());
1048}
1049
1052
1056
1057 // There is no reason to run these.
1061
1063 if (LowerCtorDtor)
1065
1068
1069 // This can be disabled by passing ::Disable here or on the command line
1070 // with --expand-variadics-override=disable.
1072
1073 // Function calls are not supported, so make sure we inline everything.
1076
1077 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1078 if (Arch == Triple::r600)
1080
1081 // Replace OpenCL enqueued block function pointers with global variables.
1083
1084 // Runs before PromoteAlloca so the latter can account for function uses
1087 }
1088
1091
1092 // Run atomic optimizer before Atomic Expand
1097 }
1098
1100
1103
1106
1109 addPass(createExternalAAWrapperPass([](Pass &P, Function &,
1110 AAResults &AAR) {
1111 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1112 AAR.addAAResult(WrapperPass->getResult());
1113 }));
1114 }
1115
1117 // TODO: May want to move later or split into an early and late one.
1119 }
1120
1121 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1122 // have expanded.
1125 }
1126
1128
1129 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1130 // example, GVN can combine
1131 //
1132 // %0 = add %a, %b
1133 // %1 = add %b, %a
1134 //
1135 // and
1136 //
1137 // %0 = shl nsw %a, 2
1138 // %1 = shl %a, 2
1139 //
1140 // but EarlyCSE can do neither of them.
1143}
1144
1147 // FIXME: This pass adds 2 hacky attributes that can be replaced with an
1148 // analysis, and should be removed.
1150 }
1151
1155
1157 // This lowering has been placed after codegenprepare to take advantage of
1158 // address mode matching (which is why it isn't put with the LDS lowerings).
1159 // It could be placed anywhere before uniformity annotations (an analysis
1160 // that it changes by splitting up fat pointers into their components)
1161 // but has been put before switch lowering and CFG flattening so that those
1162 // passes can run on the more optimized control flow this pass creates in
1163 // many cases.
1164 //
1165 // FIXME: This should ideally be put after the LoadStoreVectorizer.
1166 // However, due to some annoying facts about ResourceUsageAnalysis,
1167 // (especially as exercised in the resource-usage-dead-function test),
1168 // we need all the function passes codegenprepare all the way through
1169 // said resource usage analysis to run on the call graph produced
1170 // before codegenprepare runs (because codegenprepare will knock some
1171 // nodes out of the graph, which leads to function-level passes not
1172 // being run on them, which causes crashes in the resource usage analysis).
1174 // In accordance with the above FIXME, manually force all the
1175 // function-level passes into a CGSCCPassManager.
1176 addPass(new DummyCGSCCPass());
1177 }
1178
1180
1183
1184 // The LowerSwitch pass may introduce unreachable blocks that can
1185 // cause unexpected behavior for subsequent passes. Placing it here
1186 // seems better, as these blocks would get cleaned up by
1187 // UnreachableBlockElim, which is inserted next in the pass flow.
1189}
1190
1191bool AMDGPUPassConfig::addPreISel() {
1192 if (TM->getOptLevel() > CodeGenOptLevel::None)
1193 addPass(createFlattenCFGPass());
1194 return false;
1195}
1196
1197bool AMDGPUPassConfig::addInstSelector() {
1198 addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
1199 return false;
1200}
1201
1202bool AMDGPUPassConfig::addGCPasses() {
1203 // Do nothing. GC is not supported.
1204 return false;
1205}
1206
1207ScheduleDAGInstrs *AMDGPUPassConfig::createMachineScheduler(
1208 MachineSchedContext *C) const {
1209 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1210 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
1211 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1212 if (ST.shouldClusterStores())
1213 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1214 return DAG;
1215}
1216
1217//===----------------------------------------------------------------------===//
1218// GCN Legacy Pass Setup
1219//===----------------------------------------------------------------------===//
1220
1221ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1222 MachineSchedContext *C) const {
1223 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1224 if (ST.enableSIScheduler())
1225 return createSIMachineScheduler(C);
1226
1227 if (EnableMaxIlpSchedStrategy)
1228 return createGCNMaxILPMachineScheduler(C);
1229
1230 return createGCNMaxOccupancyMachineScheduler(C);
1231}
1232
1233bool GCNPassConfig::addPreISel() {
1234 AMDGPUPassConfig::addPreISel();
1235
1236 if (TM->getOptLevel() > CodeGenOptLevel::None)
1237 addPass(createSinkingPass());
1238
1239 if (TM->getOptLevel() > CodeGenOptLevel::None)
1240 addPass(createAMDGPULateCodeGenPrepareLegacyPass());
1241
1242 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1243 // regions formed by them.
1244 addPass(&AMDGPUUnifyDivergentExitNodesID);
1245 if (!LateCFGStructurize && !DisableStructurizer) {
1246 if (EnableStructurizerWorkarounds) {
1247 addPass(createFixIrreduciblePass());
1248 addPass(createUnifyLoopExitsPass());
1249 }
1250 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1251 }
1255 // TODO: Move this right after structurizeCFG to avoid extra divergence
1256 // analysis. This depends on stopping SIAnnotateControlFlow from making
1257 // control flow modifications.
1259 }
1260 addPass(createLCSSAPass());
1261
1262 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1264
1265 return false;
1266}
1267
1268void GCNPassConfig::addMachineSSAOptimization() {
1269 TargetPassConfig::addMachineSSAOptimization();
1270
1271 // We want to fold operands after PeepholeOptimizer has run (or as part of
1272 // it), because it will eliminate extra copies making it easier to fold the
1273 // real source operand. We want to eliminate dead instructions after, so that
1274 // we see fewer uses of the copies. We then need to clean up the dead
1275 // instructions leftover after the operands are folded as well.
1276 //
1277 // XXX - Can we get away without running DeadMachineInstructionElim again?
1278 addPass(&SIFoldOperandsID);
1279 if (EnableDPPCombine)
1280 addPass(&GCNDPPCombineID);
1281 addPass(&SILoadStoreOptimizerID);
1282 if (isPassEnabled(EnableSDWAPeephole)) {
1283 addPass(&SIPeepholeSDWAID);
1284 addPass(&EarlyMachineLICMID);
1285 addPass(&MachineCSEID);
1286 addPass(&SIFoldOperandsID);
1287 }
1288 addPass(&DeadMachineInstructionElimID);
1289 addPass(createSIShrinkInstructionsLegacyPass());
1290}
1291
1292bool GCNPassConfig::addILPOpts() {
1293 if (EnableEarlyIfConversion)
1294 addPass(&EarlyIfConverterID);
1295
1296 TargetPassConfig::addILPOpts();
1297 return false;
1298}
1299
1300bool GCNPassConfig::addInstSelector() {
1301 AMDGPUPassConfig::addInstSelector();
1302 addPass(&SIFixSGPRCopiesLegacyID);
1303 addPass(createSILowerI1CopiesLegacyPass());
1304 return false;
1305}
1306
1307bool GCNPassConfig::addIRTranslator() {
1308 addPass(new IRTranslator(getOptLevel()));
1309 return false;
1310}
1311
1312void GCNPassConfig::addPreLegalizeMachineIR() {
1313 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1314 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1315 addPass(new Localizer());
1316}
1317
1318bool GCNPassConfig::addLegalizeMachineIR() {
1319 addPass(new Legalizer());
1320 return false;
1321}
1322
1323void GCNPassConfig::addPreRegBankSelect() {
1324 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1325 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1326 addPass(new AMDGPUGlobalISelDivergenceLowering());
1327}
1328
1329bool GCNPassConfig::addRegBankSelect() {
1330 addPass(new AMDGPURegBankSelect());
1331 return false;
1332}
1333
1334void GCNPassConfig::addPreGlobalInstructionSelect() {
1335 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1336 addPass(createAMDGPURegBankCombiner(IsOptNone));
1337}
1338
1339bool GCNPassConfig::addGlobalInstructionSelect() {
1340 addPass(new InstructionSelect(getOptLevel()));
1341 return false;
1342}
1343
1344void GCNPassConfig::addPreRegAlloc() {
1345 if (LateCFGStructurize) {
1346 addPass(createAMDGPUMachineCFGStructurizerPass());
1347 }
1348}
1349
1350void GCNPassConfig::addFastRegAlloc() {
1351 // FIXME: We have to disable the verifier here because of PHIElimination +
1352 // TwoAddressInstructions disabling it.
1353
1354 // This must be run immediately after phi elimination and before
1355 // TwoAddressInstructions, otherwise the processing of the tied operand of
1356 // SI_ELSE will introduce a copy of the tied operand source after the else.
1357 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1358
1359 insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1360
1361 TargetPassConfig::addFastRegAlloc();
1362}
1363
1364void GCNPassConfig::addOptimizedRegAlloc() {
1365 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1366 // instructions that cause scheduling barriers.
1367 insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
1368
1369 if (OptExecMaskPreRA)
1370 insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
1371
1372 if (isPassEnabled(EnableRewritePartialRegUses))
1373 insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);
1374
1375 if (isPassEnabled(EnablePreRAOptimizations))
1376 insertPass(&MachineSchedulerID, &AMDGPUPreRAOptimizationsID);
1377
1378 // This is not an essential optimization and it has a noticeable impact on
1379 // compilation time, so we only enable it from O2.
1380 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1381 insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
1382
1383 // FIXME: when an instruction has a Killed operand, and the instruction is
1384 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1385 // the register in LiveVariables, this would trigger a failure in verifier,
1386 // we should fix it and enable the verifier.
1387 if (OptVGPRLiveRange)
1388 insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
1389 // This must be run immediately after phi elimination and before
1390 // TwoAddressInstructions, otherwise the processing of the tied operand of
1391 // SI_ELSE will introduce a copy of the tied operand source after the else.
1392 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1393
1394 if (EnableDCEInRA)
1395 insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1396
1397 TargetPassConfig::addOptimizedRegAlloc();
1398}
1399
1400bool GCNPassConfig::addPreRewrite() {
1401 addPass(&SILowerWWMCopiesID);
1402 if (EnableRegReassign)
1403 addPass(&GCNNSAReassignID);
1404 return true;
1405}
1406
1407FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1408 // Initialize the global default.
1409 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1410 initializeDefaultSGPRRegisterAllocatorOnce);
1411
1412 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1413 if (Ctor != useDefaultRegisterAllocator)
1414 return Ctor();
1415
1416 if (Optimized)
1417 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1418
1419 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1420}
1421
1422FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1423 // Initialize the global default.
1424 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1425 initializeDefaultVGPRRegisterAllocatorOnce);
1426
1427 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1428 if (Ctor != useDefaultRegisterAllocator)
1429 return Ctor();
1430
1431 if (Optimized)
1432 return createGreedyVGPRRegisterAllocator();
1433
1434 return createFastVGPRRegisterAllocator();
1435}
1436
1437FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1438 llvm_unreachable("should not be used");
1439}
1440
1441static const char RegAllocOptNotSupportedMessage[] =
1442 "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";
1443
1444bool GCNPassConfig::addRegAssignAndRewriteFast() {
1445 if (!usingDefaultRegAlloc())
1446 report_fatal_error(RegAllocOptNotSupportedMessage);
1447
1448 addPass(&GCNPreRALongBranchRegID);
1449
1450 addPass(createSGPRAllocPass(false));
1451
1452 // Equivalent of PEI for SGPRs.
1453 addPass(&SILowerSGPRSpillsID);
1454 addPass(&SIPreAllocateWWMRegsID);
1455
1456 addPass(createVGPRAllocPass(false));
1457
1458 addPass(&SILowerWWMCopiesID);
1459 return true;
1460}
1461
1462bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1463 if (!usingDefaultRegAlloc())
1464 report_fatal_error(RegAllocOptNotSupportedMessage);
1465
1466 addPass(&GCNPreRALongBranchRegID);
1467
1468 addPass(createSGPRAllocPass(true));
1469
1470 // Commit allocated register changes. This is mostly necessary because too
1471 // many things rely on the use lists of the physical registers, such as the
1472 // verifier. This is only necessary with allocators which use LiveIntervals,
1473 // since FastRegAlloc does the replacements itself.
1474 addPass(createVirtRegRewriter(false));
1475
1476 // Equivalent of PEI for SGPRs.
1477 addPass(&SILowerSGPRSpillsID);
1478 addPass(&SIPreAllocateWWMRegsID);
1479
1480 addPass(createVGPRAllocPass(true));
1481
1482 addPreRewrite();
1483 addPass(&VirtRegRewriterID);
1484
1485 addPass(&AMDGPUMarkLastScratchLoadID);
1486
1487 return true;
1488}
1489
1490void GCNPassConfig::addPostRegAlloc() {
1491 addPass(&SIFixVGPRCopiesID);
1492 if (getOptLevel() > CodeGenOptLevel::None)
1493 addPass(&SIOptimizeExecMaskingID);
1494 TargetPassConfig::addPostRegAlloc();
1495}
1496
1497void GCNPassConfig::addPreSched2() {
1498 if (TM->getOptLevel() > CodeGenOptLevel::None)
1499 addPass(createSIShrinkInstructionsLegacyPass());
1500 addPass(&SIPostRABundlerID);
1501}
1502
1503void GCNPassConfig::addPreEmitPass() {
1504 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1505 addPass(&GCNCreateVOPDID);
1506 addPass(createSIMemoryLegalizerPass());
1507 addPass(createSIInsertWaitcntsPass());
1508
1509 addPass(createSIModeRegisterPass());
1510
1511 if (getOptLevel() > CodeGenOptLevel::None)
1512 addPass(&SIInsertHardClausesID);
1513
1514 addPass(&SILateBranchLoweringPassID);
1515 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1516 addPass(createAMDGPUSetWavePriorityPass());
1517 if (getOptLevel() > CodeGenOptLevel::None)
1518 addPass(&SIPreEmitPeepholeID);
1519 // The hazard recognizer that runs as part of the post-ra scheduler does not
1520 // guarantee to be able to handle all hazards correctly. This is because if there
1521 // are multiple scheduling regions in a basic block, the regions are scheduled
1522 // bottom up, so when we begin to schedule a region we don't know what
1523 // instructions were emitted directly before it.
1524 //
1525 // Here we add a stand-alone hazard recognizer pass which can handle all
1526 // cases.
1527 addPass(&PostRAHazardRecognizerID);
1528
1529 if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
1530 addPass(&AMDGPUInsertSingleUseVDSTID);
1531
1532 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1533 addPass(&AMDGPUInsertDelayAluID);
1534
1535 addPass(&BranchRelaxationPassID);
1536}
1537
1538TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
1539 return new GCNPassConfig(*this, PM);
1540}
1541
1542void GCNTargetMachine::registerMachineRegisterInfoCallback(
1543 MachineFunction &MF) const {
1544 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1545 MF.getRegInfo().addDelegate(MFI);
1546}
1547
1549 BumpPtrAllocator &Allocator, const Function &F,
1550 const TargetSubtargetInfo *STI) const {
1551 return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
1552 Allocator, F, static_cast<const GCNSubtarget *>(STI));
1553}
1554
1555yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
1556 return new yaml::SIMachineFunctionInfo();
1557}
1558
1559yaml::MachineFunctionInfo *
1560GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
1561 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1562 return new yaml::SIMachineFunctionInfo(
1563 *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
1564}
1565
1566bool GCNTargetMachine::parseMachineFunctionInfo(
1567 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1568 SMDiagnostic &Error, SMRange &SourceRange) const {
1569 const yaml::SIMachineFunctionInfo &YamlMFI =
1570 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1571 MachineFunction &MF = PFS.MF;
1572 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1573 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1574
1575 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1576 return true;
1577
1578 if (MFI->Occupancy == 0) {
1579 // Fixup the subtarget dependent default value.
1580 MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
1581 }
1582
1583 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1584 Register TempReg;
1585 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1586 SourceRange = RegName.SourceRange;
1587 return true;
1588 }
1589 RegVal = TempReg;
1590
1591 return false;
1592 };
1593
1594 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1595 Register &RegVal) {
1596 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1597 };
1598
1599 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1600 return true;
1601
1602 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1603 return true;
1604
1605 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1606 MFI->LongBranchReservedReg))
1607 return true;
1608
1609 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1610 // Create a diagnostic for the register string literal.
1611 const MemoryBuffer &Buffer =
1612 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1613 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1614 RegName.Value.size(), SourceMgr::DK_Error,
1615 "incorrect register class for field", RegName.Value,
1616 std::nullopt, std::nullopt);
1617 SourceRange = RegName.SourceRange;
1618 return true;
1619 };
1620
1621 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1622 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1623 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1624 return true;
1625
1626 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1627 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1628 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1629 }
1630
1631 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1632 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1633 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1634 }
1635
1636 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1637 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1638 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1639 }
1640
1641 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1642 Register ParsedReg;
1643 if (parseRegister(YamlReg, ParsedReg))
1644 return true;
1645
1646 MFI->reserveWWMRegister(ParsedReg);
1647 }
1648
1649 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1650 const TargetRegisterClass &RC,
1651 ArgDescriptor &Arg, unsigned UserSGPRs,
1652 unsigned SystemSGPRs) {
1653 // Skip parsing if it's not present.
1654 if (!A)
1655 return false;
1656
1657 if (A->IsRegister) {
1658 Register Reg;
1659 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1660 SourceRange = A->RegisterName.SourceRange;
1661 return true;
1662 }
1663 if (!RC.contains(Reg))
1664 return diagnoseRegisterClass(A->RegisterName);
1665 Arg = ArgDescriptor::createRegister(Reg);
1666 } else
1667 Arg = ArgDescriptor::createStack(A->StackOffset);
1668 // Check and apply the optional mask.
1669 if (A->Mask)
1670 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1671
1672 MFI->NumUserSGPRs += UserSGPRs;
1673 MFI->NumSystemSGPRs += SystemSGPRs;
1674 return false;
1675 };
1676
1677 if (YamlMFI.ArgInfo &&
1678 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1679 AMDGPU::SGPR_128RegClass,
1680 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1681 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1682 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1683 2, 0) ||
1684 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1685 MFI->ArgInfo.QueuePtr, 2, 0) ||
1686 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1687 AMDGPU::SReg_64RegClass,
1688 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1689 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1690 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1691 2, 0) ||
1692 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1693 AMDGPU::SReg_64RegClass,
1694 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1695 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1696 AMDGPU::SGPR_32RegClass,
1697 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1698 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1699 AMDGPU::SGPR_32RegClass,
1700 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1701 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1702 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1703 0, 1) ||
1704 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1705 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1706 0, 1) ||
1707 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
1708 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
1709 0, 1) ||
1710 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
1711 AMDGPU::SGPR_32RegClass,
1712 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
1713 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
1714 AMDGPU::SGPR_32RegClass,
1715 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
1716 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
1717 AMDGPU::SReg_64RegClass,
1718 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
1719 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
1720 AMDGPU::SReg_64RegClass,
1721 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
1722 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
1723 AMDGPU::VGPR_32RegClass,
1724 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
1725 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
1726 AMDGPU::VGPR_32RegClass,
1727 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
1728 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
1729 AMDGPU::VGPR_32RegClass,
1730 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
1731 return true;
1732
1733 if (ST.hasIEEEMode())
1734 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
1735 if (ST.hasDX10ClampMode())
1736 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
1737
1738 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
1739 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
1740 ? DenormalMode::IEEE
1741 : DenormalMode::PreserveSign;
1742 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
1743 ? DenormalMode::IEEE
1744 : DenormalMode::PreserveSign;
1745
1746 MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
1747 ? DenormalMode::IEEE
1748 : DenormalMode::PreserveSign;
1749 MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
1750 ? DenormalMode::IEEE
1751 : DenormalMode::PreserveSign;
1752
1753 return false;
1754}
1755
1756//===----------------------------------------------------------------------===//
1757// AMDGPU CodeGen Pass Builder interface.
1758//===----------------------------------------------------------------------===//
1759
1760AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
1761 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
1762 PassInstrumentationCallbacks *PIC)
1763 : CodeGenPassBuilder(TM, Opts, PIC) {
1764 Opt.RequiresCodeGenSCCOrder = true;
1765 // Exceptions and StackMaps are not supported, so these passes will never do
1766 // anything.
1767 // Garbage collection is not supported.
1768 disablePass<StackMapLivenessPass, FuncletLayoutPass,
1769 ShadowStackGCLoweringPass>();
1770}
1771
1772void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
1773 Base::addCodeGenPrepare(addPass);
1774
1775 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
1776 // behavior for subsequent passes. Placing it here seems better, as these
1777 // blocks would get cleaned up by UnreachableBlockElim, which is inserted next
1778 // in the pass flow.
1779 addPass(LowerSwitchPass());
1780}
1781
1782void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
1787
1788 if (TM.getOptLevel() > CodeGenOptLevel::None)
1789 addPass(FlattenCFGPass());
1790
1791 if (TM.getOptLevel() > CodeGenOptLevel::None)
1792 addPass(SinkingPass());
1793
1795
1796 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1797 // regions formed by them.
1798
1800
1803 addPass(FixIrreduciblePass());
1804 addPass(UnifyLoopExitsPass());
1805 }
1806
1807 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
1808 }
1809
1811
1813 addPass(SIAnnotateControlFlowPass(TM));
1814
1815 // TODO: Move this right after structurizeCFG to avoid extra divergence
1816 // analysis. This depends on stopping SIAnnotateControlFlow from making
1817 // control flow modifications.
1819 }
1820
1821 addPass(LCSSAPass());
1822
1825
1826 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
1827 // isn't this in addInstSelector?
1828 addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>());
1829}
1830
1831void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
1832 CreateMCStreamer) const {
1833 // TODO: Add AsmPrinter.
1834}
1835
1836Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
1837 addPass(AMDGPUISelDAGToDAGPass(TM));
1838 addPass(SIFixSGPRCopiesPass());
1839 addPass(SILowerI1CopiesPass());
1840 return Error::success();
1841}
unsigned const MachineRegisterInfo * MRI
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static cl::opt< bool, true > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:216
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:131
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-{sgpr|vgpr}-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Error addInstSelector(AddMachinePass &) const
AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC)
void addPreISel(AddIRPass &addPass) const
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
void addCodeGenPrepare(AddIRPass &) const
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class provides access to building LLVM's passes.
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
void disablePass()
Allow the target to disable a specific pass by default.
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:337
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:278
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:37
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:106
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:481
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:472
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:499
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:406
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:588
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:517
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
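As a hedged sketch of how the PassBuilder members above fit together in the standard new-pass-manager setup (the function name and the TargetMachine pointer are assumptions for the example):
#include "llvm/IR/Module.h"
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

// Minimal new-PM boilerplate: create the four analysis managers, let the
// PassBuilder register analyses and cross-wire the proxies, then run a
// default pipeline. TM may be null for target-independent pipelines.
void runDefaultPipeline(TargetMachine *TM, Module &M) {
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB(TM);

  // Extension point: passes added here run at the start of the pipeline.
  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level) {
        // Target- or tool-specific passes would be added to MPM here.
      });

  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(OptimizationLevel::O2);
  MPM.run(M, MAM);
}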
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
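A small illustrative use of the StringRef helpers above, e.g. for walking a comma-separated feature string; the function itself is hypothetical.
#include "llvm/ADT/StringRef.h"
#include <tuple>
using namespace llvm;

// Consumes "+a,-b,+c" style strings one element at a time.
static void walkFeatures(StringRef FS) {
  while (!FS.empty()) {
    StringRef Feature;
    std::tie(Feature, FS) = FS.split(','); // substring before/after first ','
    bool Enabled = Feature.consume_front("+"); // strip '+' if present
    (void)Enabled; // a real caller would act on Feature/Enabled here
  }
}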
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:458
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
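A minimal sketch of an enumerated cl::opt in the style of the options in this file; the option name and enum are hypothetical.
#include "llvm/Support/CommandLine.h"
using namespace llvm;

namespace {
enum class DemoMode { Off, Fast, Precise };
} // namespace

// -demo-mode=off|fast|precise, hidden, defaulting to 'off'.
static cl::opt<DemoMode> DemoModeOpt(
    "demo-mode", cl::desc("Select the demo mode"), cl::Hidden,
    cl::init(DemoMode::Off),
    cl::values(clEnumValN(DemoMode::Off, "off", "Disable the demo"),
               clEnumValN(DemoMode::Fast, "fast", "Fast path"),
               clEnumValN(DemoMode::Precise, "precise", "Precise path")));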
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:848
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
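Illustrative formatv call (the message is made up): formatv returns a lazily-formatted object, and .str() materializes it.
#include "llvm/Support/FormatVariadic.h"
#include <string>

// "{0}" and "{1}" are replaced positionally by the trailing arguments.
std::string Msg = llvm::formatv("{0} took {1} ms", "scheduler", 42).str();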
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
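A hedged sketch of the wiring the note above refers to, inside a hypothetical createMachineScheduler-style factory:
#include "llvm/CodeGen/MachineScheduler.h"
using namespace llvm;

// Assumes createAMDGPUMacroFusionDAGMutation() is visible
// (declared in AMDGPUMacroFusion.h).
static ScheduleDAGInstrs *createSchedulerWithFusion(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}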
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
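Illustrative once-initialization with llvm::call_once; the flag and setup body are placeholders.
#include "llvm/Support/Threading.h"

// Guards a one-time registration across threads.
static llvm::once_flag InitOnceFlag;

static void ensureInitialized() {
  llvm::call_once(InitOnceFlag, []() {
    // One-time setup runs here exactly once, even with concurrent callers.
  });
}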
void initializeSILowerSGPRSpillsPass(PassRegistry &)
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3597
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
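For illustration, the two kinds combine into a DenormalMode value; note the constructor order is Output, then Input.
#include "llvm/ADT/FloatingPointMode.h"

// Flush results to zero (sign-preserving) while keeping IEEE inputs.
static constexpr llvm::DenormalMode FlushOutputs(
    llvm::DenormalMode::PreserveSign, // Output
    llvm::DenormalMode::IEEE);        // Input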
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:874
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.