LLVM 22.0.0git
AMDGPUTargetMachine.cpp
Go to the documentation of this file.
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
25#include "AMDGPUIGroupLP.h"
26#include "AMDGPUISelDAGToDAG.h"
28#include "AMDGPUMacroFusion.h"
35#include "AMDGPUSplitModule.h"
40#include "GCNDPPCombine.h"
42#include "GCNNSAReassign.h"
46#include "GCNSchedStrategy.h"
47#include "GCNVOPDUtils.h"
48#include "R600.h"
49#include "R600TargetMachine.h"
50#include "SIFixSGPRCopies.h"
51#include "SIFixVGPRCopies.h"
52#include "SIFoldOperands.h"
53#include "SIFormMemoryClauses.h"
55#include "SILowerControlFlow.h"
56#include "SILowerSGPRSpills.h"
57#include "SILowerWWMCopies.h"
59#include "SIMachineScheduler.h"
63#include "SIPeepholeSDWA.h"
64#include "SIPostRABundler.h"
67#include "SIWholeQuadMode.h"
88#include "llvm/CodeGen/Passes.h"
92#include "llvm/IR/IntrinsicsAMDGPU.h"
93#include "llvm/IR/PassManager.h"
102#include "llvm/Transforms/IPO.h"
127#include <optional>
128
129using namespace llvm;
130using namespace llvm::PatternMatch;
131
132namespace {
133//===----------------------------------------------------------------------===//
134// AMDGPU CodeGen Pass Builder interface.
135//===----------------------------------------------------------------------===//
136
137class AMDGPUCodeGenPassBuilder
138 : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
139 using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;
140
141public:
142 AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
143 const CGPassBuilderOption &Opts,
144 PassInstrumentationCallbacks *PIC);
145
146 void addIRPasses(PassManagerWrapper &PMW) const;
147 void addCodeGenPrepare(PassManagerWrapper &PMW) const;
148 void addPreISel(PassManagerWrapper &PMW) const;
149 void addILPOpts(PassManagerWrapper &PMWM) const;
150 void addAsmPrinter(PassManagerWrapper &PMW, CreateMCStreamer) const;
151 Error addInstSelector(PassManagerWrapper &PMW) const;
152 void addPreRewrite(PassManagerWrapper &PMW) const;
153 void addMachineSSAOptimization(PassManagerWrapper &PMW) const;
154 void addPostRegAlloc(PassManagerWrapper &PMW) const;
155 void addPreEmitPass(PassManagerWrapper &PMWM) const;
156 void addPreEmitRegAlloc(PassManagerWrapper &PMW) const;
157 Error addRegAssignmentOptimized(PassManagerWrapper &PMW) const;
158 void addPreRegAlloc(PassManagerWrapper &PMW) const;
159 void addOptimizedRegAlloc(PassManagerWrapper &PMW) const;
160 void addPreSched2(PassManagerWrapper &PMW) const;
161 void addPostBBSections(PassManagerWrapper &PMW) const;
162
163 /// Check if a pass is enabled given \p Opt option. The option always
164 /// overrides defaults if explicitly used. Otherwise its default will be used
165 /// given that a pass shall work at an optimization \p Level minimum.
166 bool isPassEnabled(const cl::opt<bool> &Opt,
167 CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
168 void addEarlyCSEOrGVNPass(PassManagerWrapper &PMW) const;
169 void addStraightLineScalarOptimizationPasses(PassManagerWrapper &PMW) const;
170};
171
172class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
173public:
174 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
175 : RegisterRegAllocBase(N, D, C) {}
176};
177
178class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
179public:
180 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
181 : RegisterRegAllocBase(N, D, C) {}
182};
183
184class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
185public:
186 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
187 : RegisterRegAllocBase(N, D, C) {}
188};
189
190static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
192 const Register Reg) {
193 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
194 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
195}
196
197static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
199 const Register Reg) {
200 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
201 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
202}
203
204static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
206 const Register Reg) {
207 const SIMachineFunctionInfo *MFI =
208 MRI.getMF().getInfo<SIMachineFunctionInfo>();
209 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
210 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
212}
213
214/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
215static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
216
217/// A dummy default pass factory indicates whether the register allocator is
218/// overridden on the command line.
219static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
220static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
221static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
222
223static SGPRRegisterRegAlloc
224defaultSGPRRegAlloc("default",
225 "pick SGPR register allocator based on -O option",
227
228static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
230SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
231 cl::desc("Register allocator to use for SGPRs"));
232
233static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
235VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
236 cl::desc("Register allocator to use for VGPRs"));
237
238static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
240 WWMRegAlloc("wwm-regalloc", cl::Hidden,
242 cl::desc("Register allocator to use for WWM registers"));
243
244static void initializeDefaultSGPRRegisterAllocatorOnce() {
245 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
246
247 if (!Ctor) {
248 Ctor = SGPRRegAlloc;
249 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
250 }
251}
252
253static void initializeDefaultVGPRRegisterAllocatorOnce() {
254 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
255
256 if (!Ctor) {
257 Ctor = VGPRRegAlloc;
258 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
259 }
260}
261
262static void initializeDefaultWWMRegisterAllocatorOnce() {
263 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
264
265 if (!Ctor) {
266 Ctor = WWMRegAlloc;
267 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
268 }
269}
270
271static FunctionPass *createBasicSGPRRegisterAllocator() {
272 return createBasicRegisterAllocator(onlyAllocateSGPRs);
273}
274
275static FunctionPass *createGreedySGPRRegisterAllocator() {
276 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
277}
278
279static FunctionPass *createFastSGPRRegisterAllocator() {
280 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
281}
282
283static FunctionPass *createBasicVGPRRegisterAllocator() {
284 return createBasicRegisterAllocator(onlyAllocateVGPRs);
285}
286
287static FunctionPass *createGreedyVGPRRegisterAllocator() {
288 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
289}
290
291static FunctionPass *createFastVGPRRegisterAllocator() {
292 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
293}
294
295static FunctionPass *createBasicWWMRegisterAllocator() {
296 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
297}
298
299static FunctionPass *createGreedyWWMRegisterAllocator() {
300 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
301}
302
303static FunctionPass *createFastWWMRegisterAllocator() {
304 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
305}
306
307static SGPRRegisterRegAlloc basicRegAllocSGPR(
308 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
309static SGPRRegisterRegAlloc greedyRegAllocSGPR(
310 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
311
312static SGPRRegisterRegAlloc fastRegAllocSGPR(
313 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
314
315
316static VGPRRegisterRegAlloc basicRegAllocVGPR(
317 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
318static VGPRRegisterRegAlloc greedyRegAllocVGPR(
319 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
320
321static VGPRRegisterRegAlloc fastRegAllocVGPR(
322 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
323static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
324 "basic register allocator",
325 createBasicWWMRegisterAllocator);
326static WWMRegisterRegAlloc
327 greedyRegAllocWWMReg("greedy", "greedy register allocator",
328 createGreedyWWMRegisterAllocator);
329static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
330 createFastWWMRegisterAllocator);
331
335}
336} // anonymous namespace
337
338static cl::opt<bool>
340 cl::desc("Run early if-conversion"),
341 cl::init(false));
342
343static cl::opt<bool>
344OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
345 cl::desc("Run pre-RA exec mask optimizations"),
346 cl::init(true));
347
348static cl::opt<bool>
349 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
350 cl::desc("Lower GPU ctor / dtors to globals on the device."),
351 cl::init(true), cl::Hidden);
352
353// Option to disable vectorizer for tests.
355 "amdgpu-load-store-vectorizer",
356 cl::desc("Enable load store vectorizer"),
357 cl::init(true),
358 cl::Hidden);
359
360// Option to control global loads scalarization
362 "amdgpu-scalarize-global-loads",
363 cl::desc("Enable global load scalarization"),
364 cl::init(true),
365 cl::Hidden);
366
367// Option to run internalize pass.
369 "amdgpu-internalize-symbols",
370 cl::desc("Enable elimination of non-kernel functions and unused globals"),
371 cl::init(false),
372 cl::Hidden);
373
374// Option to inline all early.
376 "amdgpu-early-inline-all",
377 cl::desc("Inline all functions early"),
378 cl::init(false),
379 cl::Hidden);
380
382 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
383 cl::desc("Enable removal of functions when they"
384 "use features not supported by the target GPU"),
385 cl::init(true));
386
388 "amdgpu-sdwa-peephole",
389 cl::desc("Enable SDWA peepholer"),
390 cl::init(true));
391
393 "amdgpu-dpp-combine",
394 cl::desc("Enable DPP combiner"),
395 cl::init(true));
396
397// Enable address space based alias analysis
399 cl::desc("Enable AMDGPU Alias Analysis"),
400 cl::init(true));
401
402// Enable lib calls simplifications
404 "amdgpu-simplify-libcall",
405 cl::desc("Enable amdgpu library simplifications"),
406 cl::init(true),
407 cl::Hidden);
408
410 "amdgpu-ir-lower-kernel-arguments",
411 cl::desc("Lower kernel argument loads in IR pass"),
412 cl::init(true),
413 cl::Hidden);
414
416 "amdgpu-reassign-regs",
417 cl::desc("Enable register reassign optimizations on gfx10+"),
418 cl::init(true),
419 cl::Hidden);
420
422 "amdgpu-opt-vgpr-liverange",
423 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
424 cl::init(true), cl::Hidden);
425
427 "amdgpu-atomic-optimizer-strategy",
428 cl::desc("Select DPP or Iterative strategy for scan"),
431 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
433 "Use Iterative approach for scan"),
434 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
435
436// Enable Mode register optimization
438 "amdgpu-mode-register",
439 cl::desc("Enable mode register pass"),
440 cl::init(true),
441 cl::Hidden);
442
443// Enable GFX11+ s_delay_alu insertion
444static cl::opt<bool>
445 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
446 cl::desc("Enable s_delay_alu insertion"),
447 cl::init(true), cl::Hidden);
448
449// Enable GFX11+ VOPD
450static cl::opt<bool>
451 EnableVOPD("amdgpu-enable-vopd",
452 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
453 cl::init(true), cl::Hidden);
454
455// Option is used in lit tests to prevent deadcoding of patterns inspected.
456static cl::opt<bool>
457EnableDCEInRA("amdgpu-dce-in-ra",
458 cl::init(true), cl::Hidden,
459 cl::desc("Enable machine DCE inside regalloc"));
460
461static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
462 cl::desc("Adjust wave priority"),
463 cl::init(false), cl::Hidden);
464
466 "amdgpu-scalar-ir-passes",
467 cl::desc("Enable scalar IR passes"),
468 cl::init(true),
469 cl::Hidden);
470
472 "amdgpu-enable-lower-exec-sync",
473 cl::desc("Enable lowering of execution synchronization."), cl::init(true),
474 cl::Hidden);
475
476static cl::opt<bool>
477 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
478 cl::desc("Enable lowering of lds to global memory pass "
479 "and asan instrument resulting IR."),
480 cl::init(true), cl::Hidden);
481
483 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
485 cl::Hidden);
486
488 "amdgpu-enable-pre-ra-optimizations",
489 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
490 cl::Hidden);
491
493 "amdgpu-enable-promote-kernel-arguments",
494 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
495 cl::Hidden, cl::init(true));
496
498 "amdgpu-enable-image-intrinsic-optimizer",
499 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
500 cl::Hidden);
501
502static cl::opt<bool>
503 EnableLoopPrefetch("amdgpu-loop-prefetch",
504 cl::desc("Enable loop data prefetch on AMDGPU"),
505 cl::Hidden, cl::init(false));
506
508 AMDGPUSchedStrategy("amdgpu-sched-strategy",
509 cl::desc("Select custom AMDGPU scheduling strategy."),
510 cl::Hidden, cl::init(""));
511
513 "amdgpu-enable-rewrite-partial-reg-uses",
514 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
515 cl::Hidden);
516
518 "amdgpu-enable-hipstdpar",
519 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
520 cl::Hidden);
521
522static cl::opt<bool>
523 EnableAMDGPUAttributor("amdgpu-attributor-enable",
524 cl::desc("Enable AMDGPUAttributorPass"),
525 cl::init(true), cl::Hidden);
526
528 "new-reg-bank-select",
529 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
530 "regbankselect"),
531 cl::init(false), cl::Hidden);
532
534 "amdgpu-link-time-closed-world",
535 cl::desc("Whether has closed-world assumption at link time"),
536 cl::init(false), cl::Hidden);
537
539 "amdgpu-enable-uniform-intrinsic-combine",
540 cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
541 cl::init(true), cl::Hidden);
542
544 // Register the target
547
632}
633
634static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
635 return std::make_unique<AMDGPUTargetObjectFile>();
636}
637
641
642static ScheduleDAGInstrs *
644 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
645 ScheduleDAGMILive *DAG =
646 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
647 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
648 if (ST.shouldClusterStores())
649 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
651 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
652 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
653 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
654 return DAG;
655}
656
657static ScheduleDAGInstrs *
659 ScheduleDAGMILive *DAG =
660 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
662 return DAG;
663}
664
665static ScheduleDAGInstrs *
667 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
669 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
670 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
671 if (ST.shouldClusterStores())
672 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
673 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
674 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
675 return DAG;
676}
677
678static ScheduleDAGInstrs *
680 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
681 auto *DAG = new GCNIterativeScheduler(
683 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
684 if (ST.shouldClusterStores())
685 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
687 return DAG;
688}
689
696
697static ScheduleDAGInstrs *
699 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
701 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
702 if (ST.shouldClusterStores())
703 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
704 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
706 return DAG;
707}
708
710SISchedRegistry("si", "Run SI's custom scheduler",
712
715 "Run GCN scheduler to maximize occupancy",
717
719 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
721
723 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
725
727 "gcn-iterative-max-occupancy-experimental",
728 "Run GCN scheduler to maximize occupancy (experimental)",
730
732 "gcn-iterative-minreg",
733 "Run GCN iterative scheduler for minimal register usage (experimental)",
735
737 "gcn-iterative-ilp",
738 "Run GCN iterative scheduler for ILP scheduling (experimental)",
740
743 if (!GPU.empty())
744 return GPU;
745
746 // Need to default to a target with flat support for HSA.
747 if (TT.isAMDGCN())
748 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
749
750 return "r600";
751}
752
754 // The AMDGPU toolchain only supports generating shared objects, so we
755 // must always use PIC.
756 return Reloc::PIC_;
757}
758
760 StringRef CPU, StringRef FS,
761 const TargetOptions &Options,
762 std::optional<Reloc::Model> RM,
763 std::optional<CodeModel::Model> CM,
766 T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
768 OptLevel),
770 initAsmInfo();
771 if (TT.isAMDGCN()) {
772 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
774 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
776 }
777}
778
781
783
785 Attribute GPUAttr = F.getFnAttribute("target-cpu");
786 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
787}
788
790 Attribute FSAttr = F.getFnAttribute("target-features");
791
792 return FSAttr.isValid() ? FSAttr.getValueAsString()
794}
795
798 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
800 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
801 if (ST.shouldClusterStores())
802 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
803 return DAG;
804}
805
806/// Predicate for Internalize pass.
807static bool mustPreserveGV(const GlobalValue &GV) {
808 if (const Function *F = dyn_cast<Function>(&GV))
809 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
810 F->getName().starts_with("__sanitizer_") ||
811 AMDGPU::isEntryFunctionCC(F->getCallingConv());
812
814 return !GV.use_empty();
815}
816
820
823 if (Params.empty())
825 Params.consume_front("strategy=");
826 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
827 .Case("dpp", ScanOptions::DPP)
828 .Cases({"iterative", ""}, ScanOptions::Iterative)
829 .Case("none", ScanOptions::None)
830 .Default(std::nullopt);
831 if (Result)
832 return *Result;
833 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
834}
835
839 while (!Params.empty()) {
840 StringRef ParamName;
841 std::tie(ParamName, Params) = Params.split(';');
842 if (ParamName == "closed-world") {
843 Result.IsClosedWorld = true;
844 } else {
846 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
847 .str(),
849 }
850 }
851 return Result;
852}
853
855
856#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
858
859 PB.registerScalarOptimizerLateEPCallback(
860 [](FunctionPassManager &FPM, OptimizationLevel Level) {
861 if (Level == OptimizationLevel::O0)
862 return;
863
865 });
866
867 PB.registerVectorizerEndEPCallback(
868 [](FunctionPassManager &FPM, OptimizationLevel Level) {
869 if (Level == OptimizationLevel::O0)
870 return;
871
873 });
874
875 PB.registerPipelineEarlySimplificationEPCallback(
878 if (!isLTOPreLink(Phase)) {
879 // When we are not using -fgpu-rdc, we can run accelerator code
880 // selection relatively early, but still after linking to prevent
881 // eager removal of potentially reachable symbols.
882 if (EnableHipStdPar) {
885 }
887 }
888
889 if (Level == OptimizationLevel::O0)
890 return;
891
892 // We don't want to run internalization at per-module stage.
896 }
897
900 });
901
902 PB.registerPeepholeEPCallback(
903 [](FunctionPassManager &FPM, OptimizationLevel Level) {
904 if (Level == OptimizationLevel::O0)
905 return;
906
910
913 });
914
915 PB.registerCGSCCOptimizerLateEPCallback(
916 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
917 if (Level == OptimizationLevel::O0)
918 return;
919
921
922 // Add promote kernel arguments pass to the opt pipeline right before
923 // infer address spaces which is needed to do actual address space
924 // rewriting.
925 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
928
929 // Add infer address spaces pass to the opt pipeline after inlining
930 // but before SROA to increase SROA opportunities.
932
933 // This should run after inlining to have any chance of doing
934 // anything, and before other cleanup optimizations.
936
937 if (Level != OptimizationLevel::O0) {
938 // Promote alloca to vector before SROA and loop unroll. If we
939 // manage to eliminate allocas before unroll we may choose to unroll
940 // less.
942 }
943
944 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
945 });
946
947 // FIXME: Why is AMDGPUAttributor not in CGSCC?
948 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
949 OptimizationLevel Level,
951 if (Level != OptimizationLevel::O0) {
952 if (!isLTOPreLink(Phase)) {
953 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
955 MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
956 }
957 }
958 }
959 });
960
961 PB.registerFullLinkTimeOptimizationLastEPCallback(
962 [this](ModulePassManager &PM, OptimizationLevel Level) {
963 // When we are using -fgpu-rdc, we can only run accelerator code
964 // selection after linking to prevent, otherwise we end up removing
965 // potentially reachable symbols that were exported as external in other
966 // modules.
967 if (EnableHipStdPar) {
970 }
971 // We want to support the -lto-partitions=N option as "best effort".
972 // For that, we need to lower LDS earlier in the pipeline before the
973 // module is partitioned for codegen.
977 PM.addPass(AMDGPUSwLowerLDSPass(*this));
980 if (Level != OptimizationLevel::O0) {
981 // We only want to run this with O2 or higher since inliner and SROA
982 // don't run in O1.
983 if (Level != OptimizationLevel::O1) {
984 PM.addPass(
986 }
987 // Do we really need internalization in LTO?
988 if (InternalizeSymbols) {
991 }
992 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
995 Opt.IsClosedWorld = true;
998 }
999 }
1000 if (!NoKernelInfoEndLTO) {
1002 FPM.addPass(KernelInfoPrinter(this));
1003 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
1004 }
1005 });
1006
1007 PB.registerRegClassFilterParsingCallback(
1008 [](StringRef FilterName) -> RegAllocFilterFunc {
1009 if (FilterName == "sgpr")
1010 return onlyAllocateSGPRs;
1011 if (FilterName == "vgpr")
1012 return onlyAllocateVGPRs;
1013 if (FilterName == "wwm")
1014 return onlyAllocateWWMRegs;
1015 return nullptr;
1016 });
1017}
1018
1019int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
1020 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1021 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1022 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1023 ? -1
1024 : 0;
1025}
1026
1028 unsigned DestAS) const {
1029 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
1031}
1032
1034 if (auto *Arg = dyn_cast<Argument>(V);
1035 Arg &&
1036 AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
1037 !Arg->hasByRefAttr())
1039
1040 const auto *LD = dyn_cast<LoadInst>(V);
1041 if (!LD) // TODO: Handle invariant load like constant.
1043
1044 // It must be a generic pointer loaded.
1045 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
1046
1047 const auto *Ptr = LD->getPointerOperand();
1048 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
1050 // For a generic pointer loaded from the constant memory, it could be assumed
1051 // as a global pointer since the constant memory is only populated on the
1052 // host side. As implied by the offload programming model, only global
1053 // pointers could be referenced on the host side.
1055}
1056
1057std::pair<const Value *, unsigned>
1059 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1060 switch (II->getIntrinsicID()) {
1061 case Intrinsic::amdgcn_is_shared:
1062 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
1063 case Intrinsic::amdgcn_is_private:
1064 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
1065 default:
1066 break;
1067 }
1068 return std::pair(nullptr, -1);
1069 }
1070 // Check the global pointer predication based on
1071 // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
1072 // the order of 'is_shared' and 'is_private' is not significant.
1073 Value *Ptr;
1074 if (match(
1075 const_cast<Value *>(V),
1078 m_Deferred(Ptr))))))
1079 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
1080
1081 return std::pair(nullptr, -1);
1082}
1083
1084unsigned
1099
1101 Module &M, unsigned NumParts,
1102 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1103 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
1104 // but all current users of this API don't have one ready and would need to
1105 // create one anyway. Let's hide the boilerplate for now to keep it simple.
1106
1111
1112 PassBuilder PB(this);
1113 PB.registerModuleAnalyses(MAM);
1114 PB.registerFunctionAnalyses(FAM);
1115 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
1116
1118 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
1119 MPM.run(M, MAM);
1120 return true;
1121}
1122
1123//===----------------------------------------------------------------------===//
1124// GCN Target Machine (SI+)
1125//===----------------------------------------------------------------------===//
1126
1128 StringRef CPU, StringRef FS,
1129 const TargetOptions &Options,
1130 std::optional<Reloc::Model> RM,
1131 std::optional<CodeModel::Model> CM,
1132 CodeGenOptLevel OL, bool JIT)
1133 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1134
1135const TargetSubtargetInfo *
1137 StringRef GPU = getGPUName(F);
1139
1140 SmallString<128> SubtargetKey(GPU);
1141 SubtargetKey.append(FS);
1142
1143 auto &I = SubtargetMap[SubtargetKey];
1144 if (!I) {
1145 // This needs to be done before we create a new subtarget since any
1146 // creation will depend on the TM and the code generation flags on the
1147 // function that reside in TargetOptions.
1149 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1150 }
1151
1152 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1153
1154 return I.get();
1155}
1156
1159 return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
1160}
1161
1164 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1166 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1167 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1168}
1169
1172 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1173 if (ST.enableSIScheduler())
1175
1176 Attribute SchedStrategyAttr =
1177 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1178 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1179 ? SchedStrategyAttr.getValueAsString()
1181
1182 if (SchedStrategy == "max-ilp")
1184
1185 if (SchedStrategy == "max-memory-clause")
1187
1188 if (SchedStrategy == "iterative-ilp")
1190
1191 if (SchedStrategy == "iterative-minreg")
1192 return createMinRegScheduler(C);
1193
1194 if (SchedStrategy == "iterative-maxocc")
1196
1198}
1199
1202 ScheduleDAGMI *DAG =
1203 new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
1204 /*RemoveKillFlags=*/true);
1205 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1207 if (ST.shouldClusterStores())
1210 if ((EnableVOPD.getNumOccurrences() ||
1212 EnableVOPD)
1216 return DAG;
1217}
1218//===----------------------------------------------------------------------===//
1219// AMDGPU Legacy Pass Setup
1220//===----------------------------------------------------------------------===//
1221
1222std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1223 return getStandardCSEConfigForOpt(TM->getOptLevel());
1224}
1225
1226namespace {
1227
1228class GCNPassConfig final : public AMDGPUPassConfig {
1229public:
1230 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1231 : AMDGPUPassConfig(TM, PM) {
1232 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1233 }
1234
1235 GCNTargetMachine &getGCNTargetMachine() const {
1236 return getTM<GCNTargetMachine>();
1237 }
1238
1239 bool addPreISel() override;
1240 void addMachineSSAOptimization() override;
1241 bool addILPOpts() override;
1242 bool addInstSelector() override;
1243 bool addIRTranslator() override;
1244 void addPreLegalizeMachineIR() override;
1245 bool addLegalizeMachineIR() override;
1246 void addPreRegBankSelect() override;
1247 bool addRegBankSelect() override;
1248 void addPreGlobalInstructionSelect() override;
1249 bool addGlobalInstructionSelect() override;
1250 void addPreRegAlloc() override;
1251 void addFastRegAlloc() override;
1252 void addOptimizedRegAlloc() override;
1253
1254 FunctionPass *createSGPRAllocPass(bool Optimized);
1255 FunctionPass *createVGPRAllocPass(bool Optimized);
1256 FunctionPass *createWWMRegAllocPass(bool Optimized);
1257 FunctionPass *createRegAllocPass(bool Optimized) override;
1258
1259 bool addRegAssignAndRewriteFast() override;
1260 bool addRegAssignAndRewriteOptimized() override;
1261
1262 bool addPreRewrite() override;
1263 void addPostRegAlloc() override;
1264 void addPreSched2() override;
1265 void addPreEmitPass() override;
1266 void addPostBBSections() override;
1267};
1268
1269} // end anonymous namespace
1270
1272 : TargetPassConfig(TM, PM) {
1273 // Exceptions and StackMaps are not supported, so these passes will never do
1274 // anything.
1277 // Garbage collection is not supported.
1280}
1281
1288
1293 // ReassociateGEPs exposes more opportunities for SLSR. See
1294 // the example in reassociate-geps-and-slsr.ll.
1296 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1297 // EarlyCSE can reuse.
1299 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1301 // NaryReassociate on GEPs creates redundant common expressions, so run
1302 // EarlyCSE after it.
1304}
1305
1308
1309 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
1311
1312 // There is no reason to run these.
1316
1318 if (LowerCtorDtor)
1320
1321 if (TM.getTargetTriple().isAMDGCN() &&
1324
1327
1328 // This can be disabled by passing ::Disable here or on the command line
1329 // with --expand-variadics-override=disable.
1331
1332 // Function calls are not supported, so make sure we inline everything.
1335
1336 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1337 if (TM.getTargetTriple().getArch() == Triple::r600)
1339
1340 // Make enqueued block runtime handles externally visible.
1342
1343 // Lower special LDS accesses.
1346
1347 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1348 if (EnableSwLowerLDS)
1350
1351 // Runs before PromoteAlloca so the latter can account for function uses
1354 }
1355
1356 // Run atomic optimizer before Atomic Expand
1357 if ((TM.getTargetTriple().isAMDGCN()) &&
1358 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1361 }
1362
1364
1365 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1367
1370
1374 AAResults &AAR) {
1375 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1376 AAR.addAAResult(WrapperPass->getResult());
1377 }));
1378 }
1379
1380 if (TM.getTargetTriple().isAMDGCN()) {
1381 // TODO: May want to move later or split into an early and late one.
1383 }
1384
1385 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1386 // have expanded.
1387 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1389 }
1390
1392
1393 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1394 // example, GVN can combine
1395 //
1396 // %0 = add %a, %b
1397 // %1 = add %b, %a
1398 //
1399 // and
1400 //
1401 // %0 = shl nsw %a, 2
1402 // %1 = shl %a, 2
1403 //
1404 // but EarlyCSE can do neither of them.
1407}
1408
1410 if (TM->getTargetTriple().isAMDGCN() &&
1411 TM->getOptLevel() > CodeGenOptLevel::None)
1413
1414 if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
1416
1418
1421
1422 if (TM->getTargetTriple().isAMDGCN()) {
1423 // This lowering has been placed after codegenprepare to take advantage of
1424 // address mode matching (which is why it isn't put with the LDS lowerings).
1425 // It could be placed anywhere before uniformity annotations (an analysis
1426 // that it changes by splitting up fat pointers into their components)
1427 // but has been put before switch lowering and CFG flattening so that those
1428 // passes can run on the more optimized control flow this pass creates in
1429 // many cases.
1432 }
1433
1434 // LowerSwitch pass may introduce unreachable blocks that can
1435 // cause unexpected behavior for subsequent passes. Placing it
1436 // here seems better that these blocks would get cleaned up by
1437 // UnreachableBlockElim inserted next in the pass flow.
1439}
1440
1442 if (TM->getOptLevel() > CodeGenOptLevel::None)
1444 return false;
1445}
1446
1451
1453 // Do nothing. GC is not supported.
1454 return false;
1455}
1456
1457//===----------------------------------------------------------------------===//
1458// GCN Legacy Pass Setup
1459//===----------------------------------------------------------------------===//
1460
1461bool GCNPassConfig::addPreISel() {
1463
1464 if (TM->getOptLevel() > CodeGenOptLevel::None)
1465 addPass(createSinkingPass());
1466
1467 if (TM->getOptLevel() > CodeGenOptLevel::None)
1469
1470 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1471 // regions formed by them.
1473 addPass(createFixIrreduciblePass());
1474 addPass(createUnifyLoopExitsPass());
1475 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1476
1479 // TODO: Move this right after structurizeCFG to avoid extra divergence
1480 // analysis. This depends on stopping SIAnnotateControlFlow from making
1481 // control flow modifications.
1483
1484 // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
1485 // with -new-reg-bank-select and without any of the fallback options.
1487 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
1488 addPass(createLCSSAPass());
1489
1490 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1492
1493 return false;
1494}
1495
1496void GCNPassConfig::addMachineSSAOptimization() {
1498
1499 // We want to fold operands after PeepholeOptimizer has run (or as part of
1500 // it), because it will eliminate extra copies making it easier to fold the
1501 // real source operand. We want to eliminate dead instructions after, so that
1502 // we see fewer uses of the copies. We then need to clean up the dead
1503 // instructions leftover after the operands are folded as well.
1504 //
1505 // XXX - Can we get away without running DeadMachineInstructionElim again?
1506 addPass(&SIFoldOperandsLegacyID);
1507 if (EnableDPPCombine)
1508 addPass(&GCNDPPCombineLegacyID);
1510 if (isPassEnabled(EnableSDWAPeephole)) {
1511 addPass(&SIPeepholeSDWALegacyID);
1512 addPass(&EarlyMachineLICMID);
1513 addPass(&MachineCSELegacyID);
1514 addPass(&SIFoldOperandsLegacyID);
1515 }
1518}
1519
1520bool GCNPassConfig::addILPOpts() {
1522 addPass(&EarlyIfConverterLegacyID);
1523
1525 return false;
1526}
1527
1528bool GCNPassConfig::addInstSelector() {
1530 addPass(&SIFixSGPRCopiesLegacyID);
1532 return false;
1533}
1534
1535bool GCNPassConfig::addIRTranslator() {
1536 addPass(new IRTranslator(getOptLevel()));
1537 return false;
1538}
1539
1540void GCNPassConfig::addPreLegalizeMachineIR() {
1541 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1542 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1543 addPass(new Localizer());
1544}
1545
1546bool GCNPassConfig::addLegalizeMachineIR() {
1547 addPass(new Legalizer());
1548 return false;
1549}
1550
1551void GCNPassConfig::addPreRegBankSelect() {
1552 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1553 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1555}
1556
1557bool GCNPassConfig::addRegBankSelect() {
1558 if (NewRegBankSelect) {
1561 } else {
1562 addPass(new RegBankSelect());
1563 }
1564 return false;
1565}
1566
1567void GCNPassConfig::addPreGlobalInstructionSelect() {
1568 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1569 addPass(createAMDGPURegBankCombiner(IsOptNone));
1570}
1571
1572bool GCNPassConfig::addGlobalInstructionSelect() {
1573 addPass(new InstructionSelect(getOptLevel()));
1574 return false;
1575}
1576
1577void GCNPassConfig::addFastRegAlloc() {
1578 // FIXME: We have to disable the verifier here because of PHIElimination +
1579 // TwoAddressInstructions disabling it.
1580
1581 // This must be run immediately after phi elimination and before
1582 // TwoAddressInstructions, otherwise the processing of the tied operand of
1583 // SI_ELSE will introduce a copy of the tied operand source after the else.
1585
1587
1589}
1590
1591void GCNPassConfig::addPreRegAlloc() {
1592 if (getOptLevel() != CodeGenOptLevel::None)
1594}
1595
1596void GCNPassConfig::addOptimizedRegAlloc() {
1597 if (EnableDCEInRA)
1599
1600 // FIXME: when an instruction has a Killed operand, and the instruction is
1601 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1602 // the register in LiveVariables, this would trigger a failure in verifier,
1603 // we should fix it and enable the verifier.
1604 if (OptVGPRLiveRange)
1606
1607 // This must be run immediately after phi elimination and before
1608 // TwoAddressInstructions, otherwise the processing of the tied operand of
1609 // SI_ELSE will introduce a copy of the tied operand source after the else.
1611
1614
1615 if (isPassEnabled(EnablePreRAOptimizations))
1617
1618 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1619 // instructions that cause scheduling barriers.
1621
1622 if (OptExecMaskPreRA)
1624
1625 // This is not an essential optimization and it has a noticeable impact on
1626 // compilation time, so we only enable it from O2.
1627 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1629
1631}
1632
1633bool GCNPassConfig::addPreRewrite() {
1635 addPass(&GCNNSAReassignID);
1636
1638 return true;
1639}
1640
1641FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1642 // Initialize the global default.
1643 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1644 initializeDefaultSGPRRegisterAllocatorOnce);
1645
1646 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1647 if (Ctor != useDefaultRegisterAllocator)
1648 return Ctor();
1649
1650 if (Optimized)
1651 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1652
1653 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1654}
1655
1656FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1657 // Initialize the global default.
1658 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1659 initializeDefaultVGPRRegisterAllocatorOnce);
1660
1661 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1662 if (Ctor != useDefaultRegisterAllocator)
1663 return Ctor();
1664
1665 if (Optimized)
1666 return createGreedyVGPRRegisterAllocator();
1667
1668 return createFastVGPRRegisterAllocator();
1669}
1670
1671FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1672 // Initialize the global default.
1673 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1674 initializeDefaultWWMRegisterAllocatorOnce);
1675
1676 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1677 if (Ctor != useDefaultRegisterAllocator)
1678 return Ctor();
1679
1680 if (Optimized)
1681 return createGreedyWWMRegisterAllocator();
1682
1683 return createFastWWMRegisterAllocator();
1684}
1685
1686FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1687 llvm_unreachable("should not be used");
1688}
1689
1691 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1692 "and -vgpr-regalloc";
1693
1694bool GCNPassConfig::addRegAssignAndRewriteFast() {
1695 if (!usingDefaultRegAlloc())
1697
1698 addPass(&GCNPreRALongBranchRegID);
1699
1700 addPass(createSGPRAllocPass(false));
1701
1702 // Equivalent of PEI for SGPRs.
1703 addPass(&SILowerSGPRSpillsLegacyID);
1704
1705 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1707
1708 // For allocating other wwm register operands.
1709 addPass(createWWMRegAllocPass(false));
1710
1711 addPass(&SILowerWWMCopiesLegacyID);
1713
1714 // For allocating per-thread VGPRs.
1715 addPass(createVGPRAllocPass(false));
1716
1717 return true;
1718}
1719
1720bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1721 if (!usingDefaultRegAlloc())
1723
1724 addPass(&GCNPreRALongBranchRegID);
1725
1726 addPass(createSGPRAllocPass(true));
1727
1728 // Commit allocated register changes. This is mostly necessary because too
1729 // many things rely on the use lists of the physical registers, such as the
1730 // verifier. This is only necessary with allocators which use LiveIntervals,
1731 // since FastRegAlloc does the replacements itself.
1732 addPass(createVirtRegRewriter(false));
1733
1734 // At this point, the sgpr-regalloc has been done and it is good to have the
1735 // stack slot coloring to try to optimize the SGPR spill stack indices before
1736 // attempting the custom SGPR spill lowering.
1737 addPass(&StackSlotColoringID);
1738
1739 // Equivalent of PEI for SGPRs.
1740 addPass(&SILowerSGPRSpillsLegacyID);
1741
1742 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1744
1745 // For allocating other whole wave mode registers.
1746 addPass(createWWMRegAllocPass(true));
1747 addPass(&SILowerWWMCopiesLegacyID);
1748 addPass(createVirtRegRewriter(false));
1750
1751 // For allocating per-thread VGPRs.
1752 addPass(createVGPRAllocPass(true));
1753
1754 addPreRewrite();
1755 addPass(&VirtRegRewriterID);
1756
1758
1759 return true;
1760}
1761
1762void GCNPassConfig::addPostRegAlloc() {
1763 addPass(&SIFixVGPRCopiesID);
1764 if (getOptLevel() > CodeGenOptLevel::None)
1767}
1768
1769void GCNPassConfig::addPreSched2() {
1770 if (TM->getOptLevel() > CodeGenOptLevel::None)
1772 addPass(&SIPostRABundlerLegacyID);
1773}
1774
1775void GCNPassConfig::addPreEmitPass() {
1776 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1777 addPass(&GCNCreateVOPDID);
1778 addPass(createSIMemoryLegalizerPass());
1779 addPass(createSIInsertWaitcntsPass());
1780
1781 addPass(createSIModeRegisterPass());
1782
1783 if (getOptLevel() > CodeGenOptLevel::None)
1784 addPass(&SIInsertHardClausesID);
1785
1787 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1789 if (getOptLevel() > CodeGenOptLevel::None)
1790 addPass(&SIPreEmitPeepholeID);
1791 // The hazard recognizer that runs as part of the post-ra scheduler does not
1792 // guarantee to be able handle all hazards correctly. This is because if there
1793 // are multiple scheduling regions in a basic block, the regions are scheduled
1794 // bottom up, so when we begin to schedule a region we don't know what
1795 // instructions were emitted directly before it.
1796 //
1797 // Here we add a stand-alone hazard recognizer pass which can handle all
1798 // cases.
1799 addPass(&PostRAHazardRecognizerID);
1800
1802
1804
1805 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1806 addPass(&AMDGPUInsertDelayAluID);
1807
1808 addPass(&BranchRelaxationPassID);
1809}
1810
1811void GCNPassConfig::addPostBBSections() {
1812 // We run this later to avoid passes like livedebugvalues and BBSections
1813 // having to deal with the apparent multi-entry functions we may generate.
1815}
1816
1818 return new GCNPassConfig(*this, PM);
1819}
1820
1826
1833
1837
1844
1847 SMDiagnostic &Error, SMRange &SourceRange) const {
1848 const yaml::SIMachineFunctionInfo &YamlMFI =
1849 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1850 MachineFunction &MF = PFS.MF;
1852 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1853
1854 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1855 return true;
1856
1857 if (MFI->Occupancy == 0) {
1858 // Fixup the subtarget dependent default value.
1859 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1860 }
1861
1862 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1863 Register TempReg;
1864 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1865 SourceRange = RegName.SourceRange;
1866 return true;
1867 }
1868 RegVal = TempReg;
1869
1870 return false;
1871 };
1872
1873 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1874 Register &RegVal) {
1875 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1876 };
1877
1878 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1879 return true;
1880
1881 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1882 return true;
1883
1884 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1885 MFI->LongBranchReservedReg))
1886 return true;
1887
1888 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1889 // Create a diagnostic for a the register string literal.
1890 const MemoryBuffer &Buffer =
1891 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1892 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1893 RegName.Value.size(), SourceMgr::DK_Error,
1894 "incorrect register class for field", RegName.Value,
1895 {}, {});
1896 SourceRange = RegName.SourceRange;
1897 return true;
1898 };
1899
1900 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1901 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1902 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1903 return true;
1904
1905 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1906 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1907 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1908 }
1909
1910 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1911 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1912 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1913 }
1914
1915 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1916 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1917 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1918 }
1919
1920 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1921 Register ParsedReg;
1922 if (parseRegister(YamlReg, ParsedReg))
1923 return true;
1924
1925 MFI->reserveWWMRegister(ParsedReg);
1926 }
1927
1928 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1929 MFI->setFlag(Info->VReg, Info->Flags);
1930 }
1931 for (const auto &[_, Info] : PFS.VRegInfos) {
1932 MFI->setFlag(Info->VReg, Info->Flags);
1933 }
1934
1935 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1936 Register ParsedReg;
1937 if (parseRegister(YamlRegStr, ParsedReg))
1938 return true;
1939 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1940 }
1941
1942 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1943 const TargetRegisterClass &RC,
1944 ArgDescriptor &Arg, unsigned UserSGPRs,
1945 unsigned SystemSGPRs) {
1946 // Skip parsing if it's not present.
1947 if (!A)
1948 return false;
1949
1950 if (A->IsRegister) {
1951 Register Reg;
1952 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1953 SourceRange = A->RegisterName.SourceRange;
1954 return true;
1955 }
1956 if (!RC.contains(Reg))
1957 return diagnoseRegisterClass(A->RegisterName);
1959 } else
1960 Arg = ArgDescriptor::createStack(A->StackOffset);
1961 // Check and apply the optional mask.
1962 if (A->Mask)
1963 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1964
1965 MFI->NumUserSGPRs += UserSGPRs;
1966 MFI->NumSystemSGPRs += SystemSGPRs;
1967 return false;
1968 };
1969
1970 if (YamlMFI.ArgInfo &&
1971 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1972 AMDGPU::SGPR_128RegClass,
1973 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1974 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1975 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1976 2, 0) ||
1977 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1978 MFI->ArgInfo.QueuePtr, 2, 0) ||
1979 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1980 AMDGPU::SReg_64RegClass,
1981 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1982 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1983 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1984 2, 0) ||
1985 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1986 AMDGPU::SReg_64RegClass,
1987 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1988 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1989 AMDGPU::SGPR_32RegClass,
1990 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1991 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1992 AMDGPU::SGPR_32RegClass,
1993 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1994 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1995 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1996 0, 1) ||
1997 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1998 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1999 0, 1) ||
2000 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
2001 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
2002 0, 1) ||
2003 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
2004 AMDGPU::SGPR_32RegClass,
2005 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
2006 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
2007 AMDGPU::SGPR_32RegClass,
2008 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
2009 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
2010 AMDGPU::SReg_64RegClass,
2011 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
2012 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
2013 AMDGPU::SReg_64RegClass,
2014 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
2015 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
2016 AMDGPU::VGPR_32RegClass,
2017 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
2018 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
2019 AMDGPU::VGPR_32RegClass,
2020 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
2021 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
2022 AMDGPU::VGPR_32RegClass,
2023 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
2024 return true;
2025
2026 // Parse FirstKernArgPreloadReg separately, since it's a Register,
2027 // not ArgDescriptor.
2028 if (YamlMFI.ArgInfo && YamlMFI.ArgInfo->FirstKernArgPreloadReg) {
2029 const yaml::SIArgument &A = *YamlMFI.ArgInfo->FirstKernArgPreloadReg;
2030
2031 if (!A.IsRegister) {
2032 // For stack arguments, we don't have RegisterName.SourceRange,
2033 // but we should have some location info from the YAML parser
2034 const MemoryBuffer &Buffer =
2035 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
2036 // Create a minimal valid source range
2038 SMRange Range(Loc, Loc);
2039
2041 *PFS.SM, Loc, Buffer.getBufferIdentifier(), 1, 0, SourceMgr::DK_Error,
2042 "firstKernArgPreloadReg must be a register, not a stack location", "",
2043 {}, {});
2044
2045 SourceRange = Range;
2046 return true;
2047 }
2048
2049 Register Reg;
2050 if (parseNamedRegisterReference(PFS, Reg, A.RegisterName.Value, Error)) {
2051 SourceRange = A.RegisterName.SourceRange;
2052 return true;
2053 }
2054
2055 if (!AMDGPU::SGPR_32RegClass.contains(Reg))
2056 return diagnoseRegisterClass(A.RegisterName);
2057
2058 MFI->ArgInfo.FirstKernArgPreloadReg = Reg;
2059 MFI->NumUserSGPRs += YamlMFI.NumKernargPreloadSGPRs;
2060 }
2061
2062 if (ST.hasIEEEMode())
2063 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
2064 if (ST.hasDX10ClampMode())
2065 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
2066
2067 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
2068 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
2071 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
2074
2081
2082 if (YamlMFI.HasInitWholeWave)
2083 MFI->setInitWholeWave();
2084
2085 return false;
2086}
2087
2088//===----------------------------------------------------------------------===//
2089// AMDGPU CodeGen Pass Builder interface.
2090//===----------------------------------------------------------------------===//
2091
2092AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
2093 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
2095 : CodeGenPassBuilder(TM, Opts, PIC) {
2096 Opt.MISchedPostRA = true;
2097 Opt.RequiresCodeGenSCCOrder = true;
2098 // Exceptions and StackMaps are not supported, so these passes will never do
2099 // anything.
2100 // Garbage collection is not supported.
2101 disablePass<StackMapLivenessPass, FuncletLayoutPass,
2103}
2104
2105void AMDGPUCodeGenPassBuilder::addIRPasses(PassManagerWrapper &PMW) const {
2106 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN()) {
2107 flushFPMsToMPM(PMW);
2108 addModulePass(AMDGPURemoveIncompatibleFunctionsPass(TM), PMW);
2109 }
2110
2111 flushFPMsToMPM(PMW);
2112 addModulePass(AMDGPUPrintfRuntimeBindingPass(), PMW);
2113 if (LowerCtorDtor)
2114 addModulePass(AMDGPUCtorDtorLoweringPass(), PMW);
2115
2116 if (isPassEnabled(EnableImageIntrinsicOptimizer))
2117 addFunctionPass(AMDGPUImageIntrinsicOptimizerPass(TM), PMW);
2118
2120 addFunctionPass(AMDGPUUniformIntrinsicCombinePass(), PMW);
2121 // This can be disabled by passing ::Disable here or on the command line
2122 // with --expand-variadics-override=disable.
2123 flushFPMsToMPM(PMW);
2125
2126 addModulePass(AMDGPUAlwaysInlinePass(), PMW);
2127 addModulePass(AlwaysInlinerPass(), PMW);
2128
2129 addModulePass(AMDGPUExportKernelRuntimeHandlesPass(), PMW);
2130
2132 addModulePass(AMDGPULowerExecSyncPass(), PMW);
2133
2134 if (EnableSwLowerLDS)
2135 addModulePass(AMDGPUSwLowerLDSPass(TM), PMW);
2136
2137 // Runs before PromoteAlloca so the latter can account for function uses
2139 addModulePass(AMDGPULowerModuleLDSPass(TM), PMW);
2140
2141 // Run atomic optimizer before Atomic Expand
2142 if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
2144 addFunctionPass(
2146
2147 addFunctionPass(AtomicExpandPass(TM), PMW);
2148
2149 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2150 addFunctionPass(AMDGPUPromoteAllocaPass(TM), PMW);
2151 if (isPassEnabled(EnableScalarIRPasses))
2152 addStraightLineScalarOptimizationPasses(PMW);
2153
2154 // TODO: Handle EnableAMDGPUAliasAnalysis
2155
2156 // TODO: May want to move later or split into an early and late one.
2157 addFunctionPass(AMDGPUCodeGenPreparePass(TM), PMW);
2158
2159 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
2160 // have expanded.
2161 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2163 /*UseMemorySSA=*/true),
2164 PMW);
2165 }
2166 }
2167
2168 Base::addIRPasses(PMW);
2169
2170 // EarlyCSE is not always strong enough to clean up what LSR produces. For
2171 // example, GVN can combine
2172 //
2173 // %0 = add %a, %b
2174 // %1 = add %b, %a
2175 //
2176 // and
2177 //
2178 // %0 = shl nsw %a, 2
2179 // %1 = shl %a, 2
2180 //
2181 // but EarlyCSE can do neither of them.
2182 if (isPassEnabled(EnableScalarIRPasses))
2183 addEarlyCSEOrGVNPass(PMW);
2184}
2185
2186void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(
2187 PassManagerWrapper &PMW) const {
2188 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2189 flushFPMsToMPM(PMW);
2190 addModulePass(AMDGPUPreloadKernelArgumentsPass(TM), PMW);
2191 }
2192
2194 addFunctionPass(AMDGPULowerKernelArgumentsPass(TM), PMW);
2195
2196 Base::addCodeGenPrepare(PMW);
2197
2198 if (isPassEnabled(EnableLoadStoreVectorizer))
2199 addFunctionPass(LoadStoreVectorizerPass(), PMW);
2200
2201 // This lowering has been placed after codegenprepare to take advantage of
2202 // address mode matching (which is why it isn't put with the LDS lowerings).
2203 // It could be placed anywhere before uniformity annotations (an analysis
2204 // that it changes by splitting up fat pointers into their components)
2205 // but has been put before switch lowering and CFG flattening so that those
2206 // passes can run on the more optimized control flow this pass creates in
2207 // many cases.
2208 flushFPMsToMPM(PMW);
2209 addModulePass(AMDGPULowerBufferFatPointersPass(TM), PMW);
2210 flushFPMsToMPM(PMW);
2211 requireCGSCCOrder(PMW);
2212
2213 addModulePass(AMDGPULowerIntrinsicsPass(TM), PMW);
2214
2215 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
2216 // behavior for subsequent passes. Placing it here seems better that these
2217 // blocks would get cleaned up by UnreachableBlockElim inserted next in the
2218 // pass flow.
2219 addFunctionPass(LowerSwitchPass(), PMW);
2220}
2221
2222void AMDGPUCodeGenPassBuilder::addPreISel(PassManagerWrapper &PMW) const {
2223
2224 // Require AMDGPUArgumentUsageAnalysis so that it's available during ISel.
2225 flushFPMsToMPM(PMW);
2227 PMW);
2228
2229 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2230 addFunctionPass(FlattenCFGPass(), PMW);
2231 addFunctionPass(SinkingPass(), PMW);
2232 addFunctionPass(AMDGPULateCodeGenPreparePass(TM), PMW);
2233 }
2234
2235 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2236 // regions formed by them.
2237
2238 addFunctionPass(AMDGPUUnifyDivergentExitNodesPass(), PMW);
2239 addFunctionPass(FixIrreduciblePass(), PMW);
2240 addFunctionPass(UnifyLoopExitsPass(), PMW);
2241 addFunctionPass(StructurizeCFGPass(/*SkipUniformRegions=*/false), PMW);
2242
2243 addFunctionPass(AMDGPUAnnotateUniformValuesPass(), PMW);
2244
2245 addFunctionPass(SIAnnotateControlFlowPass(TM), PMW);
2246
2247 // TODO: Move this right after structurizeCFG to avoid extra divergence
2248 // analysis. This depends on stopping SIAnnotateControlFlow from making
2249 // control flow modifications.
2250 addFunctionPass(AMDGPURewriteUndefForPHIPass(), PMW);
2251
2253 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2254 addFunctionPass(LCSSAPass(), PMW);
2255
2256 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2257 flushFPMsToMPM(PMW);
2258 addModulePass(AMDGPUPerfHintAnalysisPass(TM), PMW);
2259 }
2260
2261 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2262 // isn't this in addInstSelector?
2264 /*Force=*/true);
2265}
2266
2267void AMDGPUCodeGenPassBuilder::addILPOpts(PassManagerWrapper &PMW) const {
2269 addMachineFunctionPass(EarlyIfConverterPass(), PMW);
2270
2271 Base::addILPOpts(PMW);
2272}
2273
2274void AMDGPUCodeGenPassBuilder::addAsmPrinter(PassManagerWrapper &PMW,
2275 CreateMCStreamer) const {
2276 // TODO: Add AsmPrinter.
2277}
2278
2279Error AMDGPUCodeGenPassBuilder::addInstSelector(PassManagerWrapper &PMW) const {
2280 addMachineFunctionPass(AMDGPUISelDAGToDAGPass(TM), PMW);
2281 addMachineFunctionPass(SIFixSGPRCopiesPass(), PMW);
2282 addMachineFunctionPass(SILowerI1CopiesPass(), PMW);
2283 return Error::success();
2284}
2285
2286void AMDGPUCodeGenPassBuilder::addPreRewrite(PassManagerWrapper &PMW) const {
2287 if (EnableRegReassign) {
2288 addMachineFunctionPass(GCNNSAReassignPass(), PMW);
2289 }
2290}
2291
2292void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2293 PassManagerWrapper &PMW) const {
2294 Base::addMachineSSAOptimization(PMW);
2295
2296 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2297 if (EnableDPPCombine) {
2298 addMachineFunctionPass(GCNDPPCombinePass(), PMW);
2299 }
2300 addMachineFunctionPass(SILoadStoreOptimizerPass(), PMW);
2301 if (isPassEnabled(EnableSDWAPeephole)) {
2302 addMachineFunctionPass(SIPeepholeSDWAPass(), PMW);
2303 addMachineFunctionPass(EarlyMachineLICMPass(), PMW);
2304 addMachineFunctionPass(MachineCSEPass(), PMW);
2305 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2306 }
2307 addMachineFunctionPass(DeadMachineInstructionElimPass(), PMW);
2308 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2309}
2310
2311void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2312 PassManagerWrapper &PMW) const {
2313 if (EnableDCEInRA)
2314 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2315
2316 // FIXME: when an instruction has a Killed operand, and the instruction is
2317 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
2318 // the register in LiveVariables, this would trigger a failure in verifier,
2319 // we should fix it and enable the verifier.
2320 if (OptVGPRLiveRange)
2321 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2323
2324 // This must be run immediately after phi elimination and before
2325 // TwoAddressInstructions, otherwise the processing of the tied operand of
2326 // SI_ELSE will introduce a copy of the tied operand source after the else.
2327 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2328
2330 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2331
2332 if (isPassEnabled(EnablePreRAOptimizations))
2333 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2334
2335 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2336 // instructions that cause scheduling barriers.
2337 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2338
2339 if (OptExecMaskPreRA)
2340 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2341
2342 // This is not an essential optimization and it has a noticeable impact on
2343 // compilation time, so we only enable it from O2.
2344 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2345 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2346
2347 Base::addOptimizedRegAlloc(PMW);
2348}
2349
2350void AMDGPUCodeGenPassBuilder::addPreRegAlloc(PassManagerWrapper &PMW) const {
2351 if (getOptLevel() != CodeGenOptLevel::None)
2352 addMachineFunctionPass(AMDGPUPrepareAGPRAllocPass(), PMW);
2353}
2354
2355Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2356 PassManagerWrapper &PMW) const {
2357 // TODO: Check --regalloc-npm option
2358
2359 addMachineFunctionPass(GCNPreRALongBranchRegPass(), PMW);
2360
2361 addMachineFunctionPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}), PMW);
2362
2363 // Commit allocated register changes. This is mostly necessary because too
2364 // many things rely on the use lists of the physical registers, such as the
2365 // verifier. This is only necessary with allocators which use LiveIntervals,
2366 // since FastRegAlloc does the replacements itself.
2367 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2368
2369 // At this point, the sgpr-regalloc has been done and it is good to have the
2370 // stack slot coloring to try to optimize the SGPR spill stack indices before
2371 // attempting the custom SGPR spill lowering.
2372 addMachineFunctionPass(StackSlotColoringPass(), PMW);
2373
2374 // Equivalent of PEI for SGPRs.
2375 addMachineFunctionPass(SILowerSGPRSpillsPass(), PMW);
2376
2377 // To Allocate wwm registers used in whole quad mode operations (for shaders).
2378 addMachineFunctionPass(SIPreAllocateWWMRegsPass(), PMW);
2379
2380 // For allocating other wwm register operands.
2381 addMachineFunctionPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}), PMW);
2382 addMachineFunctionPass(SILowerWWMCopiesPass(), PMW);
2383 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2384 addMachineFunctionPass(AMDGPUReserveWWMRegsPass(), PMW);
2385
2386 // For allocating per-thread VGPRs.
2387 addMachineFunctionPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}), PMW);
2388
2389 addPreRewrite(PMW);
2390 addMachineFunctionPass(VirtRegRewriterPass(true), PMW);
2391
2392 addMachineFunctionPass(AMDGPUMarkLastScratchLoadPass(), PMW);
2393 return Error::success();
2394}
2395
2396void AMDGPUCodeGenPassBuilder::addPostRegAlloc(PassManagerWrapper &PMW) const {
2397 addMachineFunctionPass(SIFixVGPRCopiesPass(), PMW);
2398 if (TM.getOptLevel() > CodeGenOptLevel::None)
2399 addMachineFunctionPass(SIOptimizeExecMaskingPass(), PMW);
2400 Base::addPostRegAlloc(PMW);
2401}
2402
2403void AMDGPUCodeGenPassBuilder::addPreSched2(PassManagerWrapper &PMW) const {
2404 if (TM.getOptLevel() > CodeGenOptLevel::None)
2405 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2406 addMachineFunctionPass(SIPostRABundlerPass(), PMW);
2407}
2408
2409void AMDGPUCodeGenPassBuilder::addPostBBSections(
2410 PassManagerWrapper &PMW) const {
2411 // We run this later to avoid passes like livedebugvalues and BBSections
2412 // having to deal with the apparent multi-entry functions we may generate.
2413 addMachineFunctionPass(AMDGPUPreloadKernArgPrologPass(), PMW);
2414}
2415
2416void AMDGPUCodeGenPassBuilder::addPreEmitPass(PassManagerWrapper &PMW) const {
2417 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2418 addMachineFunctionPass(GCNCreateVOPDPass(), PMW);
2419 }
2420
2421 addMachineFunctionPass(SIMemoryLegalizerPass(), PMW);
2422 addMachineFunctionPass(SIInsertWaitcntsPass(), PMW);
2423
2424 addMachineFunctionPass(SIModeRegisterPass(), PMW);
2425
2426 if (TM.getOptLevel() > CodeGenOptLevel::None)
2427 addMachineFunctionPass(SIInsertHardClausesPass(), PMW);
2428
2429 addMachineFunctionPass(SILateBranchLoweringPass(), PMW);
2430
2431 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2432 addMachineFunctionPass(AMDGPUSetWavePriorityPass(), PMW);
2433
2434 if (TM.getOptLevel() > CodeGenOptLevel::None)
2435 addMachineFunctionPass(SIPreEmitPeepholePass(), PMW);
2436
2437 // The hazard recognizer that runs as part of the post-ra scheduler does not
2438 // guarantee to be able handle all hazards correctly. This is because if there
2439 // are multiple scheduling regions in a basic block, the regions are scheduled
2440 // bottom up, so when we begin to schedule a region we don't know what
2441 // instructions were emitted directly before it.
2442 //
2443 // Here we add a stand-alone hazard recognizer pass which can handle all
2444 // cases.
2445 addMachineFunctionPass(PostRAHazardRecognizerPass(), PMW);
2446 addMachineFunctionPass(AMDGPUWaitSGPRHazardsPass(), PMW);
2447 addMachineFunctionPass(AMDGPULowerVGPREncodingPass(), PMW);
2448
2449 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2450 addMachineFunctionPass(AMDGPUInsertDelayAluPass(), PMW);
2451 }
2452
2453 addMachineFunctionPass(BranchRelaxationPass(), PMW);
2454}
2455
2456bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2457 CodeGenOptLevel Level) const {
2458 if (Opt.getNumOccurrences())
2459 return Opt;
2460 if (TM.getOptLevel() < Level)
2461 return false;
2462 return Opt;
2463}
2464
2465void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(
2466 PassManagerWrapper &PMW) const {
2467 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2468 addFunctionPass(GVNPass(), PMW);
2469 else
2470 addFunctionPass(EarlyCSEPass(), PMW);
2471}
2472
2473void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2474 PassManagerWrapper &PMW) const {
2476 addFunctionPass(LoopDataPrefetchPass(), PMW);
2477
2478 addFunctionPass(SeparateConstOffsetFromGEPPass(), PMW);
2479
2480 // ReassociateGEPs exposes more opportunities for SLSR. See
2481 // the example in reassociate-geps-and-slsr.ll.
2482 addFunctionPass(StraightLineStrengthReducePass(), PMW);
2483
2484 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
2485 // EarlyCSE can reuse.
2486 addEarlyCSEOrGVNPass(PMW);
2487
2488 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2489 addFunctionPass(NaryReassociatePass(), PMW);
2490
2491 // NaryReassociate on GEPs creates redundant common expressions, so run
2492 // EarlyCSE after it.
2493 addFunctionPass(EarlyCSEPass(), PMW);
2494}
unsigned const MachineRegisterInfo * MRI
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
static std::unique_ptr< TargetLoweringObjectFile > createTLOF(const Triple &TT)
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
Analyzes how many registers and other resources are used by functions.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry("gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause", createGCNMaxMemoryClauseMachineScheduler)
static Reloc::Model getEffectiveRelocModel()
static cl::opt< bool > EnableUniformIntrinsicCombine("amdgpu-enable-uniform-intrinsic-combine", cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSwLowerLDS("amdgpu-enable-sw-lower-lds", cl::desc("Enable lowering of lds to global memory pass " "and asan instrument resulting IR."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > HasClosedWorldAssumption("amdgpu-link-time-closed-world", cl::desc("Whether has closed-world assumption at link time"), cl::init(false), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< std::string > AMDGPUSchedStrategy("amdgpu-sched-strategy", cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden, cl::init(""))
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLowerExecSync("amdgpu-enable-lower-exec-sync", cl::desc("Enable lowering of execution synchronization."), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfoImplBase conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
Interfaces for producing common pass manager configurations.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_READNONE
Definition Compiler.h:315
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
DXIL Legalizer
This file provides the interface for a simple, fast CSE pass.
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
This file provides the interface for LLVM's Loop Data Prefetching Pass.
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
SI Machine Scheduler interface.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
static cl::opt< cl::boolOrDefault > EnableGlobalISelOption("global-isel", cl::Hidden, cl::desc("Enable the \"global\" instruction selector"))
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
std::unique_ptr< TargetLoweringObjectFile > TLOF
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
This class provides access to building LLVM's passes.
CodeGenTargetMachineImpl(const Target &T, StringRef DataLayoutString, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOptLevel OL)
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
static ErrorSuccess success()
Create a success value.
Definition Error.h:336
Tagged union holding either a T or a Error.
Definition Error.h:485
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
const SIRegisterInfo * getRegisterInfo() const override
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override
Similar to createMachineScheduler but used when postRA machine scheduling is enabled.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition GVN.h:128
Pass to remove unused function declarations.
Definition GlobalDCE.h:38
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition Internalize.h:37
Converts loops into loop-closed SSA form.
Definition LCSSA.h:38
Performs Loop Invariant Code Motion Pass.
Definition LICM.h:66
This pass implements the localization mechanism described at the top of this file.
Definition Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
const char * getBufferStart() const
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static LLVM_ABI const OptimizationLevel O0
Disable as many optimizations as possible.
static LLVM_ABI const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition SourceMgr.h:297
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
Represents a range in source code.
Definition SMLoc.h:47
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
const TargetRegisterInfo * TRI
Target processor register info.
Move instructions into successor blocks when possible.
Definition Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition SmallString.h:68
unsigned getMainFileID() const
Definition SourceMgr.h:148
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition SourceMgr.h:141
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
bool consume_front(char Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition StringRef.h:637
A switch()-like statement whose cases are string literals.
StringSwitch & Cases(std::initializer_list< StringLiteral > CaseStrings, T Value)
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
TargetOptions Options
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel OptLevel
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetPassConfig(TargetMachine &TM, PassManagerBase &PM)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
LLVM Value Representation.
Definition Value.h:75
bool use_empty() const
Definition Value.h:346
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_READNONE constexpr bool isModuleEntryFunctionCC(CallingConv::ID CC)
LLVM_READNONE constexpr bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
template class LLVM_TEMPLATE_ABI opt< bool >
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
LLVM_ABI FunctionPass * createFlattenCFGPass()
std::unique_ptr< ScheduleDAGMutation > createAMDGPUBarrierLatencyDAGMutation(MachineFunction *MF)
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
LLVM_ABI char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
LLVM_ABI char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
LLVM_ABI Pass * createLCSSAPass()
Definition LCSSA.cpp:525
void initializeAMDGPUMarkLastScratchLoadLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluLegacyPass(PassRegistry &)
void initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry &)
char & GCNPreRAOptimizationsID
LLVM_ABI char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeSIInsertHardClausesLegacyPass(PassRegistry &)
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
void initializeGCNPreRAOptimizationsLegacyPass(PassRegistry &)
void initializeSILowerWWMCopiesLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeGCNRewritePartialRegUsesLegacyPass(llvm::PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
char & GCNRewritePartialRegUsesID
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
LLVM_ABI std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition Error.cpp:98
void initializeAMDGPULowerVGPREncodingLegacyPass(PassRegistry &)
char & AMDGPUWaitSGPRHazardsLegacyID
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
LLVM_ABI Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
LLVM_ABI FunctionPass * createNaryReassociatePass()
char & AMDGPUReserveWWMRegsLegacyID
void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &)
LLVM_ABI char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & SIOptimizeExecMaskingLegacyID
LLVM_ABI char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUExportKernelRuntimeHandlesLegacyPass()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeAMDGPUAsmPrinterPass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
PassManager< LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &, CGSCCUpdateResult & > CGSCCPassManager
The CGSCC pass manager.
LLVM_ABI std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition CSEInfo.cpp:89
Target & getTheR600Target()
The target for R600 GPUs.
LLVM_ABI char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
LLVM_ABI Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
LLVM_ABI char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:386
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
LLVM_ABI char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
FunctionPass * createAMDGPUUniformIntrinsicCombineLegacyPass()
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition Pass.h:77
@ FullLTOPreLink
Full LTO prelink phase.
Definition Pass.h:85
@ FullLTOPostLink
Full LTO postlink (backend compile) phase.
Definition Pass.h:87
@ ThinLTOPreLink
ThinLTO prelink (summary) phase.
Definition Pass.h:81
char & AMDGPUUnifyDivergentExitNodesID
void initializeAMDGPUPrepareAGPRAllocLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
LLVM_ABI char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
void initializeSIModeRegisterLegacyPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
void initializeAMDGPUPreloadKernelArgumentsLegacyPass(PassRegistry &)
char & SILateBranchLoweringPassID
FunctionToLoopPassAdaptor createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA=false)
A function to deduce a loop pass type and wrap it in the templated adaptor.
LLVM_ABI char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
LLVM_ABI FunctionPass * createSinkingPass()
Definition Sink.cpp:275
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeSIMemoryLegalizerLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerIntrinsicsLegacyPass()
void initializeR600MachineCFGStructurizerPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition CodeGen.h:111
char & GCNDPPCombineLegacyID
PassManager< Module > ModulePassManager
Convenience typedef for a pass manager over modules.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUTargetMach...
LLVM_ABI char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
void initializeGCNPreRALongBranchRegLegacyPass(PassRegistry &)
char & SILowerWWMCopiesLegacyID
LLVM_ABI FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
LLVM_ABI FunctionPass * createFixIrreduciblePass()
void initializeR600EmitClauseMarkersPass(PassRegistry &)
LLVM_ABI char & FuncletLayoutID
This pass lays out funclets contiguously.
LLVM_ABI char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPULowerExecSyncLegacyPass(PassRegistry &)
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
void initializeAMDGPUExportKernelRuntimeHandlesLegacyPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
void initializeSIInsertWaitcntsLegacyPass(PassRegistry &)
ModulePass * createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *)
ModulePass * createAMDGPUPrintfRuntimeBinding()
LLVM_ABI char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
LLVM_ABI Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringLegacyPass(PassRegistry &)
void initializeSILowerControlFlowLegacyPass(PassRegistry &)
void initializeSIFormMemoryClausesLegacyPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
Error make_error(ArgTs &&... Args)
Make a Error instance representing failure using the given error info type.
Definition Error.h:340
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
void initializeAMDGPUArgumentUsageInfoWrapperLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
void initializeAMDGPUReserveWWMRegsLegacyPass(PassRegistry &)
char & SIPreEmitPeepholeID
char & SIPostRABundlerLegacyID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
LLVM_ABI FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
LLVM_ABI void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
char & SILowerControlFlowLegacyID
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeGCNCreateVOPDLegacyPass(PassRegistry &)
void initializeAMDGPUUniformIntrinsicCombineLegacyPass(PassRegistry &)
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
void initializeSIFixVGPRCopiesLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
void initializeAMDGPULowerIntrinsicsLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition GVN.cpp:3402
void initializeAMDGPURewriteAGPRCopyMFMALegacyPass(PassRegistry &)
void initializeSIPostRABundlerLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankLegalizePass()
LLVM_ABI char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
char & SIWholeQuadModeID
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
PassManager< Function > FunctionPassManager
Convenience typedef for a pass manager over functions.
LLVM_ABI char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void initializeSIOptimizeExecMaskingLegacyPass(PassRegistry &)
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition Threading.h:86
FunctionPass * createSILowerI1CopiesLegacyPass()
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
char & SIPeepholeSDWALegacyID
LLVM_ABI char & VirtRegRewriterID
VirtRegRewriter pass.
char & SIFixVGPRCopiesID
char & SIFoldOperandsLegacyID
void initializeGCNNSAReassignLegacyPass(PassRegistry &)
char & AMDGPUPrepareAGPRAllocLegacyID
LLVM_ABI FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
void initializeR600VectorRegMergerPass(PassRegistry &)
char & AMDGPURewriteAGPRCopyMFMALegacyID
ModulePass * createAMDGPULowerExecSyncLegacyPass()
char & AMDGPULowerVGPREncodingLegacyID
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
LLVM_ABI char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
LLVM_ABI FunctionPass * createStraightLineStrengthReducePass()
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
LLVM_ABI FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
void initializeSIWholeQuadModeLegacyPass(PassRegistry &)
LLVM_ABI char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
LLVM_ABI llvm::cl::opt< bool > NoKernelInfoEndLTO
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
void initializeAMDGPUResourceUsageAnalysisWrapperPassPass(PassRegistry &)
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
LLVM_ABI char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
LLVM_ABI char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
void initializeSIPreEmitPeepholeLegacyPass(PassRegistry &)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
char & AMDGPUPerfHintAnalysisLegacyID
LLVM_ABI ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
char & GCNPreRALongBranchRegID
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
A simple and fast domtree-based CSE pass.
Definition EarlyCSE.h:31
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
static FuncInfoTy * create(BumpPtrAllocator &Allocator, const Function &F, const SubtargetTy *STI)
Factory function: default behavior is to call new using the supplied allocator.
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
StringMap< VRegInfo * > VRegInfosNamed
Definition MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation,...
A utility pass template to force an analysis result to be available.
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition Threading.h:67
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.