//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPURegBankSelect.h"
#include "AMDGPUSplitModule.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}
static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}
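
// Note the second argument to createFastRegisterAllocator above: only the
// VGPR allocator, which runs last, clears virtual registers (true). The SGPR
// allocator passes false so that still-unallocated VGPR virtual registers
// survive into the second allocation phase.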

static SGPRRegisterRegAlloc basicRegAllocSGPR(
  "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
  "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
  "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
  "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
  "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
  "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // anonymous namespace
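
// The two allocators can be chosen independently on the command line through
// the options registered above, e.g. (illustrative invocation):
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -vgpr-regalloc=fast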

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
            cl::desc("Run pre-RA exec mask optimizations"),
            cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

// Disable structurizer-based control-flow lowering in order to test convergence
// control tokens. This should eventually be replaced by the wave-transform.
static cl::opt<bool, true> DisableStructurizer(
    "amdgpu-disable-structurizer",
    cl::desc("Disable structurizer for experiments; produces unusable code"),
    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
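
// For example (illustrative invocation):
//   llc -amdgpu-atomic-optimizer-strategy=DPP
// selects the DPP-based scan, while "=None" disables the optimizer entirely.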

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Enable GFX11.5+ s_singleuse_vdst insertion
static cl::opt<bool>
    EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
                              cl::desc("Enable s_singleuse_vdst insertion"),
                              cl::init(false), cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<bool> EnableMaxIlpSchedStrategy(
    "amdgpu-enable-max-ilp-scheduling-strategy",
    cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
  "amdgpu-enable-hipstdpar",
  cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
  cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}
static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}
static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
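
// Each registry entry above makes its scheduler selectable by name through
// the generic machine-scheduler option, e.g. (illustrative invocation):
//   llc -mtriple=amdgcn -misched=gcn-max-ilp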

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
         "v32:32-v48:64-v96:"
         "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
         "G1-ni:7:8:9";
}
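
// Reading one entry as an example: "p7:160:256:256:32" declares address
// space 7 pointers to be 160 bits wide with 256-bit ABI and preferred
// alignment and a 32-bit index width, matching the fat buffer pointer
// layout described in the comment above.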

static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT,
                                               StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
bool AMDGPUTargetMachine::DisableStructurizer = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
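
// With the new pass manager, this parser allows the strategy to be supplied
// as a pass parameter, e.g. (a sketch of the expected syntax; the pass name
// itself is registered in AMDGPUPassRegistry.def):
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>'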

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        if (EnableHipStdPar)
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback(
      [this](ModulePassManager &MPM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0) {
          MPM.addPass(AMDGPUAttributorPass(*this));
        }
      });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegClassFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        return nullptr;
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}
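
// Illustrative consequence: an "addrspacecast ptr null to ptr addrspace(5)"
// must materialize the all-ones private null (-1) rather than 0, since 0 is
// a valid address in the private, local and region address spaces.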

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}
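
// The matched IR shape looks like this (illustrative):
//   %s   = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %ns  = xor i1 %s, true
//   %pr  = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %npr = xor i1 %pr, true
//   %ok  = and i1 %ns, %npr
// Under the %ok predicate, %p may be treated as a global pointer.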

unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerFunctionAnalyses(FAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}
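
// A caller drives this with one callback invocation per partition, e.g.
// (sketch):
//   TM->splitModule(M, 4, [&](std::unique_ptr<Module> MPart) {
//     // Codegen MPart to its own output stream.
//   });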

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(
        createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&PatchableFunctionID);
  disablePass(&ShrinkWrapID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  Triple::ArchType Arch = TM.getTargetTriple().getArch();
  if (RemoveIncompatibleFunctions && Arch == Triple::amdgcn)
    addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can
  // cause unexpected behavior for subsequent passes. Placing it
  // here seems better, as these blocks will get cleaned up by
  // UnreachableBlockElim, which is inserted next in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

MachineFunctionInfo *R600TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return R600MachineFunctionInfo::create<R600MachineFunctionInfo>(
      Allocator, F, static_cast<const R600Subtarget *>(STI));
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  if (EnableMaxIlpSchedStrategy)
    return createGCNMaxILPMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPreparePass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize && !DisableStructurizer) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize && !DisableStructurizer) {
    addPass(createSIAnnotateControlFlowPass());
    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(createAMDGPURewriteUndefForPHILegacyPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new AMDGPURegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}
void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &AMDGPUPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  addPass(&SILowerWWMCopiesID);
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}
1408 "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";
1409
1410bool GCNPassConfig::addRegAssignAndRewriteFast() {
1411 if (!usingDefaultRegAlloc())
1413
1414 addPass(&GCNPreRALongBranchRegID);
1415
1416 addPass(createSGPRAllocPass(false));
1417
1418 // Equivalent of PEI for SGPRs.
1419 addPass(&SILowerSGPRSpillsID);
1420 addPass(&SIPreAllocateWWMRegsID);
1421
1422 addPass(createVGPRAllocPass(false));
1423
1424 addPass(&SILowerWWMCopiesID);
1425 return true;
1426}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}
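
// Taken together, the optimized flow is: reserve the long-branch register,
// allocate SGPRs, lower SGPR spills (the SGPR equivalent of PEI), pre-allocate
// WWM registers, allocate VGPRs, then rewrite virtual registers; VGPR copy
// fixup happens afterwards in addPostRegAlloc().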

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertSingleUseVDSTID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         std::nullopt, std::nullopt);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  return false;
}
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
static cl::opt< bool > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:220
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
ModulePassManager MPM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const char LLVMTargetMachineRef TM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:391
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:481
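Error and Expected form LLVM's mandatory-checking error model: an Expected<T> must be tested before its value is read, and a failed one must have its Error consumed or propagated. A minimal sketch of the consuming idiom; parseCount is a hypothetical producer, not an API from this file:

  #include "llvm/Support/Error.h"
  using namespace llvm;

  Expected<int> parseCount(StringRef S); // hypothetical producer of Expected<T>

  Error consumeCount(StringRef S) {
    Expected<int> C = parseCount(S);
    if (!C)
      return C.takeError(); // propagate; an unchecked Error must not be dropped
    int Count = *C;         // dereferencing is safe only after the check above
    (void)Count;
    return Error::success();
  }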
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:277
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
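In the AMDGPU backend the usual consumer of getInfo is the SI per-function state object; a one-line sketch of the idiom, assuming MF is a MachineFunction compiled for a GCN subtarget:

  // Inside some MachineFunctionPass::runOnMachineFunction(MachineFunction &MF):
  // fetch the target-private per-function state kept alongside the function.
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();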
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:106
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:481
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:472
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:499
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:406
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerRegClassFilterParsingCallback(const std::function< RegClassFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:588
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:517
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
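Taken together, these PassBuilder hooks follow the standard new-pass-manager setup: construct the four analysis managers, register the analyses, cross-wire the proxies, then attach extension-point callbacks before building a pipeline. A minimal sketch; the pass added at the extension point is a placeholder, not what this backend registers:

  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Transforms/IPO/GlobalDCE.h"
  using namespace llvm;

  void runSketchPipeline(Module &M) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    // Extension point: run extra passes at the very start of the pipeline.
    PB.registerPipelineStartEPCallback(
        [](ModulePassManager &MPM, OptimizationLevel Level) {
          if (Level != OptimizationLevel::O0)
            MPM.addPass(GlobalDCEPass()); // placeholder pass
        });

    ModulePassManager MPM =
        PB.buildPerModuleDefaultPipeline(OptimizationLevel::O2);
    MPM.run(M, MAM);
  }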
This class manages callback registration and provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
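SmallString's append is handy for building feature strings like the ones getFeatureString returns; a tiny sketch (the feature names are illustrative, not a statement about this target's features):

  #include "llvm/ADT/SmallString.h"
  using namespace llvm;

  SmallString<128> makeFeatures() {
    SmallString<128> FS;           // lives on the stack until it grows past 128 bytes
    FS.append("+wavefrontsize64"); // illustrative feature names
    FS.append(",+cumode");         // feature strings are comma separated
    return FS;
  }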
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
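The StringRef/StringSwitch pair above is the usual way string options like these are decoded; a sketch under the assumption of invented option spellings, not the backend's actual ones:

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/StringSwitch.h"
  using namespace llvm;

  enum class SchedKind { Default, Iterative, MaxOcc, Unknown };

  SchedKind parseSchedKind(StringRef Name) {
    Name.consume_front("amdgpu-"); // strip an optional prefix in place
    return StringSwitch<SchedKind>(Name)
        .Case("default", SchedKind::Default)
        .Cases("iterative", "iter", SchedKind::Iterative)
        .Case("max-occupancy", SchedKind::MaxOcc)
        .Default(SchedKind::Unknown);
  }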
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
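use_empty combines naturally with Constant::removeDeadConstantUsers (listed earlier): dead constant expressions can keep a global's use list non-empty, so they are scrubbed before the emptiness test. A hedged sketch of that pattern:

  #include "llvm/IR/GlobalValue.h"
  using namespace llvm;

  // Returns true if GV has no live users once dead ConstantExprs are removed.
  bool isEffectivelyUnused(GlobalValue &GV) {
    GV.removeDeadConstantUsers();
    return GV.use_empty();
  }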
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:415
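A plausible use of isFlatGlobalAddrSpace is deciding whether an address-space cast can be dropped, in the spirit of the isNoopAddrSpaceCast override listed earlier; a sketch, assuming the helper sits in the llvm::AMDGPU namespace as shown above:

  // Sketch: a cast is a no-op when both sides live in the flat/global aperture.
  bool isNoopCast(unsigned SrcAS, unsigned DestAS) {
    return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
           AMDGPU::isFlatGlobalAddrSpace(DestAS);
  }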
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
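These matchers compose; for example, the following sketch recognizes an and-with-complement of the same value in either operand order, using all four matchers listed above:

  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Matches V against (X & ~X) or (~X & X): m_Deferred reuses the value
  // bound by the earlier m_Value within the same match() call.
  bool isAndOfComplement(Value *V) {
    Value *X;
    return match(V, m_c_And(m_Value(X), m_Not(m_Deferred(X))));
  }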
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
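These cl helpers combine into the usual option-definition idiom; a sketch with invented flag names (the surrounding file defines real options in the same shape):

  #include "llvm/Support/CommandLine.h"
  using namespace llvm;

  static bool ExternalStorage; // written through cl::location

  // Hypothetical flag: hidden from -help, default false, value stored externally.
  static cl::opt<bool, true>
      EnableFoo("amdgpu-enable-foo", cl::ReallyHidden,
                cl::desc("Enable the hypothetical foo transform"),
                cl::location(ExternalStorage), cl::init(false));

  // cl::values builds the accepted spellings for an enumerated option.
  enum class FooMode { Off, Fast, Exact };
  static cl::opt<FooMode> FooModeOpt(
      "amdgpu-foo-mode", cl::init(FooMode::Off),
      cl::desc("Select foo strategy (hypothetical)"),
      cl::values(clEnumValN(FooMode::Off, "off", "Disabled"),
                 clEnumValN(FooMode::Fast, "fast", "Fast approximation"),
                 clEnumValN(FooMode::Exact, "exact", "Exact but slow")));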
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:506
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:848
void initializeAMDGPULateCodeGenPreparePass(PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
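inconvertibleErrorCode is the usual std::error_code placeholder when constructing string-only errors; a sketch using createStringError from llvm/Support/Error.h:

  #include "llvm/Support/Error.h"
  using namespace llvm;

  Error failParse(StringRef What) {
    // The code is a placeholder: callers are expected to read the message.
    return createStringError(inconvertibleErrorCode(),
                             "failed to parse '%s'", What.str().c_str());
  }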
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:379
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
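createGenericSchedLive and addMutation (listed above) are typically combined inside a target's createMachineScheduler override; a hedged sketch of that shape, using the clustering mutations and the public TII/TRI members documented in this section:

  #include "llvm/CodeGen/MachineScheduler.h"
  using namespace llvm;

  // Sketch of a createMachineScheduler-style hook: start from the generic
  // live-interval scheduler and bolt on load/store clustering mutations.
  ScheduleDAGInstrs *createSched(MachineSchedContext *C) {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }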
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & AMDGPUPerfHintAnalysisID
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeSIAnnotateControlFlowPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
FunctionPass * createAMDGPUAnnotateUniformValues()
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
FunctionPass * createSILowerI1CopiesPass()
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
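These initialize* functions are all invoked once against the global registry, conventionally from the target's initializer; a representative sketch (the wrapper function name is invented, and the three passes are an arbitrary sample from the list in this section):

  void initSketchPasses() {
    PassRegistry &PR = *PassRegistry::getPassRegistry();
    initializeGlobalISel(PR);          // passes linked into the GlobalISel library
    initializeSIFixSGPRCopiesPass(PR); // a couple of representative SI passes
    initializeSIFoldOperandsPass(PR);
  }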
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to load c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
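call_once paired with an llvm::once_flag is the standard guard for one-time setup such as installing default register allocators; a generic sketch:

  #include "llvm/Support/Threading.h"

  static llvm::once_flag InitFlag;

  void ensureInitialized() {
    // The callable runs exactly once, even under concurrent callers.
    llvm::call_once(InitFlag, [] { /* one-time setup */ });
  }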
void initializeSILowerSGPRSpillsPass(PassRegistry &)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
void initializeSIFixSGPRCopiesPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
void initializeSILowerI1CopiesPass(PassRegistry &)
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3598
FunctionPass * createAMDGPULateCodeGenPreparePass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
char & SIFixSGPRCopiesID
FunctionPass * createSIAnnotateControlFlowPass()
Create the annotation pass.
std::function< bool(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC)> RegClassFilterFunc
Filter function for register classes during regalloc.
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
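The RegisterTargetMachine helper is constructed during target initialization to wire a TargetMachine factory into a Target; the conventional shape, using the GCN target handle listed above (the entry-point name here is hypothetical):

  // Conventionally run from the target's LLVMInitialize* entry point:
  // the local's constructor registers the GCNTargetMachine factory.
  void initializeHypotheticalTarget() {
    RegisterTargetMachine<GCNTargetMachine> X(getTheGCNTarget());
  }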
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.