Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
Warning: line 112, column 5
Value stored to 'Ctor' is never read
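
The warning points at initializeDefaultSGPRRegisterAllocatorOnce(), where the local 'Ctor' is reassigned inside the if-block but never read again before the function returns. A minimal way to silence the report, assuming the assignment exists only to mirror the value handed to setDefault() and has no other side effect, is to drop the redundant store. The sketch below is illustrative only, not the committed fix:

    static void initializeDefaultSGPRRegisterAllocatorOnce() {
      RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

      if (!Ctor) {
        // setDefault() records the allocator chosen via -sgpr-regalloc; the local
        // copy is not read after this point, so no reassignment is needed.
        SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
      }
    }

The sibling initializeDefaultVGPRRegisterAllocatorOnce() in the same file follows the same pattern and would presumably warrant the same treatment.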

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUTargetMachine.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 
-o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The AMDGPU target machine contains all of the hardware specific
11/// information needed to emit code for SI+ GPUs.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPUTargetMachine.h"
16#include "AMDGPU.h"
17#include "AMDGPUAliasAnalysis.h"
18#include "AMDGPUExportClustering.h"
19#include "AMDGPUMacroFusion.h"
20#include "AMDGPUTargetObjectFile.h"
21#include "AMDGPUTargetTransformInfo.h"
22#include "GCNIterativeScheduler.h"
23#include "GCNSchedStrategy.h"
24#include "R600.h"
25#include "R600TargetMachine.h"
26#include "SIMachineFunctionInfo.h"
27#include "SIMachineScheduler.h"
28#include "TargetInfo/AMDGPUTargetInfo.h"
29#include "llvm/Analysis/CGSCCPassManager.h"
30#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
31#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
32#include "llvm/CodeGen/GlobalISel/Legalizer.h"
33#include "llvm/CodeGen/GlobalISel/Localizer.h"
34#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
35#include "llvm/CodeGen/MIRParser/MIParser.h"
36#include "llvm/CodeGen/Passes.h"
37#include "llvm/CodeGen/RegAllocRegistry.h"
38#include "llvm/CodeGen/TargetPassConfig.h"
39#include "llvm/IR/IntrinsicsAMDGPU.h"
40#include "llvm/IR/LegacyPassManager.h"
41#include "llvm/IR/PassManager.h"
42#include "llvm/IR/PatternMatch.h"
43#include "llvm/InitializePasses.h"
44#include "llvm/MC/TargetRegistry.h"
45#include "llvm/Passes/PassBuilder.h"
46#include "llvm/Transforms/IPO.h"
47#include "llvm/Transforms/IPO/AlwaysInliner.h"
48#include "llvm/Transforms/IPO/GlobalDCE.h"
49#include "llvm/Transforms/IPO/Internalize.h"
50#include "llvm/Transforms/IPO/PassManagerBuilder.h"
51#include "llvm/Transforms/Scalar.h"
52#include "llvm/Transforms/Scalar/GVN.h"
53#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
54#include "llvm/Transforms/Utils.h"
55#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
56#include "llvm/Transforms/Vectorize.h"
57
58using namespace llvm;
59
60namespace {
61class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
62public:
63 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
64 : RegisterRegAllocBase(N, D, C) {}
65};
66
67class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
68public:
69 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
70 : RegisterRegAllocBase(N, D, C) {}
71};
72
73static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
74 const TargetRegisterClass &RC) {
75 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
76}
77
78static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
79 const TargetRegisterClass &RC) {
80 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
81}
82
83
84/// -{sgpr|vgpr}-regalloc=... command line option.
85static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
86
87/// A dummy default pass factory indicates whether the register allocator is
88/// overridden on the command line.
89static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
90static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
91
92static SGPRRegisterRegAlloc
93defaultSGPRRegAlloc("default",
94 "pick SGPR register allocator based on -O option",
95 useDefaultRegisterAllocator);
96
97static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
98 RegisterPassParser<SGPRRegisterRegAlloc>>
99SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
100 cl::desc("Register allocator to use for SGPRs"));
101
102static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
103 RegisterPassParser<VGPRRegisterRegAlloc>>
104VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
105 cl::desc("Register allocator to use for VGPRs"));
106
107
108static void initializeDefaultSGPRRegisterAllocatorOnce() {
109 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
110
111 if (!Ctor) {
112 Ctor = SGPRRegAlloc;
Value stored to 'Ctor' is never read
113 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
114 }
115}
116
117static void initializeDefaultVGPRRegisterAllocatorOnce() {
118 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
119
120 if (!Ctor) {
121 Ctor = VGPRRegAlloc;
122 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
123 }
124}
125
126static FunctionPass *createBasicSGPRRegisterAllocator() {
127 return createBasicRegisterAllocator(onlyAllocateSGPRs);
128}
129
130static FunctionPass *createGreedySGPRRegisterAllocator() {
131 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
132}
133
134static FunctionPass *createFastSGPRRegisterAllocator() {
135 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
136}
137
138static FunctionPass *createBasicVGPRRegisterAllocator() {
139 return createBasicRegisterAllocator(onlyAllocateVGPRs);
140}
141
142static FunctionPass *createGreedyVGPRRegisterAllocator() {
143 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
144}
145
146static FunctionPass *createFastVGPRRegisterAllocator() {
147 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
148}
149
150static SGPRRegisterRegAlloc basicRegAllocSGPR(
151 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
152static SGPRRegisterRegAlloc greedyRegAllocSGPR(
153 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
154
155static SGPRRegisterRegAlloc fastRegAllocSGPR(
156 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
157
158
159static VGPRRegisterRegAlloc basicRegAllocVGPR(
160 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
161static VGPRRegisterRegAlloc greedyRegAllocVGPR(
162 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
163
164static VGPRRegisterRegAlloc fastRegAllocVGPR(
165 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
166}
167
168static cl::opt<bool> EnableSROA(
169 "amdgpu-sroa",
170 cl::desc("Run SROA after promote alloca pass"),
171 cl::ReallyHidden,
172 cl::init(true));
173
174static cl::opt<bool>
175EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
176 cl::desc("Run early if-conversion"),
177 cl::init(false));
178
179static cl::opt<bool>
180OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
181 cl::desc("Run pre-RA exec mask optimizations"),
182 cl::init(true));
183
184// Option to disable vectorizer for tests.
185static cl::opt<bool> EnableLoadStoreVectorizer(
186 "amdgpu-load-store-vectorizer",
187 cl::desc("Enable load store vectorizer"),
188 cl::init(true),
189 cl::Hidden);
190
191// Option to control global loads scalarization
192static cl::opt<bool> ScalarizeGlobal(
193 "amdgpu-scalarize-global-loads",
194 cl::desc("Enable global load scalarization"),
195 cl::init(true),
196 cl::Hidden);
197
198// Option to run internalize pass.
199static cl::opt<bool> InternalizeSymbols(
200 "amdgpu-internalize-symbols",
201 cl::desc("Enable elimination of non-kernel functions and unused globals"),
202 cl::init(false),
203 cl::Hidden);
204
205// Option to inline all early.
206static cl::opt<bool> EarlyInlineAll(
207 "amdgpu-early-inline-all",
208 cl::desc("Inline all functions early"),
209 cl::init(false),
210 cl::Hidden);
211
212static cl::opt<bool> EnableSDWAPeephole(
213 "amdgpu-sdwa-peephole",
214 cl::desc("Enable SDWA peepholer"),
215 cl::init(true));
216
217static cl::opt<bool> EnableDPPCombine(
218 "amdgpu-dpp-combine",
219 cl::desc("Enable DPP combiner"),
220 cl::init(true));
221
222// Enable address space based alias analysis
223static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
224 cl::desc("Enable AMDGPU Alias Analysis"),
225 cl::init(true));
226
227// Option to run late CFG structurizer
228static cl::opt<bool, true> LateCFGStructurize(
229 "amdgpu-late-structurize",
230 cl::desc("Enable late CFG structurization"),
231 cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
232 cl::Hidden);
233
234// Enable lib calls simplifications
235static cl::opt<bool> EnableLibCallSimplify(
236 "amdgpu-simplify-libcall",
237 cl::desc("Enable amdgpu library simplifications"),
238 cl::init(true),
239 cl::Hidden);
240
241static cl::opt<bool> EnableLowerKernelArguments(
242 "amdgpu-ir-lower-kernel-arguments",
243 cl::desc("Lower kernel argument loads in IR pass"),
244 cl::init(true),
245 cl::Hidden);
246
247static cl::opt<bool> EnableRegReassign(
248 "amdgpu-reassign-regs",
249 cl::desc("Enable register reassign optimizations on gfx10+"),
250 cl::init(true),
251 cl::Hidden);
252
253static cl::opt<bool> OptVGPRLiveRange(
254 "amdgpu-opt-vgpr-liverange",
255 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
256 cl::init(true), cl::Hidden);
257
258// Enable atomic optimization
259static cl::opt<bool> EnableAtomicOptimizations(
260 "amdgpu-atomic-optimizations",
261 cl::desc("Enable atomic optimizations"),
262 cl::init(false),
263 cl::Hidden);
264
265// Enable Mode register optimization
266static cl::opt<bool> EnableSIModeRegisterPass(
267 "amdgpu-mode-register",
268 cl::desc("Enable mode register pass"),
269 cl::init(true),
270 cl::Hidden);
271
272// Option is used in lit tests to prevent deadcoding of patterns inspected.
273static cl::opt<bool>
274EnableDCEInRA("amdgpu-dce-in-ra",
275 cl::init(true), cl::Hidden,
276 cl::desc("Enable machine DCE inside regalloc"));
277
278static cl::opt<bool> EnableScalarIRPasses(
279 "amdgpu-scalar-ir-passes",
280 cl::desc("Enable scalar IR passes"),
281 cl::init(true),
282 cl::Hidden);
283
284static cl::opt<bool> EnableStructurizerWorkarounds(
285 "amdgpu-enable-structurizer-workarounds",
286 cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
287 cl::Hidden);
288
289static cl::opt<bool> EnableLDSReplaceWithPointer(
290 "amdgpu-enable-lds-replace-with-pointer",
291 cl::desc("Enable LDS replace with pointer pass"), cl::init(false),
292 cl::Hidden);
293
294static cl::opt<bool, true> EnableLowerModuleLDS(
295 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
296 cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
297 cl::Hidden);
298
299static cl::opt<bool> EnablePreRAOptimizations(
300 "amdgpu-enable-pre-ra-optimizations",
301 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
302 cl::Hidden);
303
304static cl::opt<bool> EnablePromoteKernelArguments(
305 "amdgpu-enable-promote-kernel-arguments",
306 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
307 cl::Hidden, cl::init(true));
308
309extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
310 // Register the target
311 RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
312 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
313
314 PassRegistry *PR = PassRegistry::getPassRegistry();
315 initializeR600ClauseMergePassPass(*PR);
316 initializeR600ControlFlowFinalizerPass(*PR);
317 initializeR600PacketizerPass(*PR);
318 initializeR600ExpandSpecialInstrsPassPass(*PR);
319 initializeR600VectorRegMergerPass(*PR);
320 initializeGlobalISel(*PR);
321 initializeAMDGPUDAGToDAGISelPass(*PR);
322 initializeGCNDPPCombinePass(*PR);
323 initializeSILowerI1CopiesPass(*PR);
324 initializeSILowerSGPRSpillsPass(*PR);
325 initializeSIFixSGPRCopiesPass(*PR);
326 initializeSIFixVGPRCopiesPass(*PR);
327 initializeSIFoldOperandsPass(*PR);
328 initializeSIPeepholeSDWAPass(*PR);
329 initializeSIShrinkInstructionsPass(*PR);
330 initializeSIOptimizeExecMaskingPreRAPass(*PR);
331 initializeSIOptimizeVGPRLiveRangePass(*PR);
332 initializeSILoadStoreOptimizerPass(*PR);
333 initializeAMDGPUFixFunctionBitcastsPass(*PR);
334 initializeAMDGPUCtorDtorLoweringPass(*PR);
335 initializeAMDGPUAlwaysInlinePass(*PR);
336 initializeAMDGPUAttributorPass(*PR);
337 initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
338 initializeAMDGPUAnnotateUniformValuesPass(*PR);
339 initializeAMDGPUArgumentUsageInfoPass(*PR);
340 initializeAMDGPUAtomicOptimizerPass(*PR);
341 initializeAMDGPULowerKernelArgumentsPass(*PR);
342 initializeAMDGPUPromoteKernelArgumentsPass(*PR);
343 initializeAMDGPULowerKernelAttributesPass(*PR);
344 initializeAMDGPULowerIntrinsicsPass(*PR);
345 initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
346 initializeAMDGPUPostLegalizerCombinerPass(*PR);
347 initializeAMDGPUPreLegalizerCombinerPass(*PR);
348 initializeAMDGPURegBankCombinerPass(*PR);
349 initializeAMDGPUPromoteAllocaPass(*PR);
350 initializeAMDGPUPromoteAllocaToVectorPass(*PR);
351 initializeAMDGPUCodeGenPreparePass(*PR);
352 initializeAMDGPULateCodeGenPreparePass(*PR);
353 initializeAMDGPUPropagateAttributesEarlyPass(*PR);
354 initializeAMDGPUPropagateAttributesLatePass(*PR);
355 initializeAMDGPUReplaceLDSUseWithPointerPass(*PR);
356 initializeAMDGPULowerModuleLDSPass(*PR);
357 initializeAMDGPURewriteOutArgumentsPass(*PR);
358 initializeAMDGPUUnifyMetadataPass(*PR);
359 initializeSIAnnotateControlFlowPass(*PR);
360 initializeSIInsertHardClausesPass(*PR);
361 initializeSIInsertWaitcntsPass(*PR);
362 initializeSIModeRegisterPass(*PR);
363 initializeSIWholeQuadModePass(*PR);
364 initializeSILowerControlFlowPass(*PR);
365 initializeSIPreEmitPeepholePass(*PR);
366 initializeSILateBranchLoweringPass(*PR);
367 initializeSIMemoryLegalizerPass(*PR);
368 initializeSIOptimizeExecMaskingPass(*PR);
369 initializeSIPreAllocateWWMRegsPass(*PR);
370 initializeSIFormMemoryClausesPass(*PR);
371 initializeSIPostRABundlerPass(*PR);
372 initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
373 initializeAMDGPUAAWrapperPassPass(*PR);
374 initializeAMDGPUExternalAAWrapperPass(*PR);
375 initializeAMDGPUUseNativeCallsPass(*PR);
376 initializeAMDGPUSimplifyLibCallsPass(*PR);
377 initializeAMDGPUPrintfRuntimeBindingPass(*PR);
378 initializeAMDGPUResourceUsageAnalysisPass(*PR);
379 initializeGCNNSAReassignPass(*PR);
380 initializeGCNPreRAOptimizationsPass(*PR);
381}
382
383static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
384 return std::make_unique<AMDGPUTargetObjectFile>();
385}
386
387static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
388 return new SIScheduleDAGMI(C);
389}
390
391static ScheduleDAGInstrs *
392createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
393 ScheduleDAGMILive *DAG =
394 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
395 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
396 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
397 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
398 return DAG;
399}
400
401static ScheduleDAGInstrs *
402createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
403 auto DAG = new GCNIterativeScheduler(C,
404 GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
405 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
406 return DAG;
407}
408
409static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
410 return new GCNIterativeScheduler(C,
411 GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
412}
413
414static ScheduleDAGInstrs *
415createIterativeILPMachineScheduler(MachineSchedContext *C) {
416 auto DAG = new GCNIterativeScheduler(C,
417 GCNIterativeScheduler::SCHEDULE_ILP);
418 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
419 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
420 return DAG;
421}
422
423static MachineSchedRegistry
424SISchedRegistry("si", "Run SI's custom scheduler",
425 createSIMachineScheduler);
426
427static MachineSchedRegistry
428GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
429 "Run GCN scheduler to maximize occupancy",
430 createGCNMaxOccupancyMachineScheduler);
431
432static MachineSchedRegistry
433IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
434 "Run GCN scheduler to maximize occupancy (experimental)",
435 createIterativeGCNMaxOccupancyMachineScheduler);
436
437static MachineSchedRegistry
438GCNMinRegSchedRegistry("gcn-minreg",
439 "Run GCN iterative scheduler for minimal register usage (experimental)",
440 createMinRegScheduler);
441
442static MachineSchedRegistry
443GCNILPSchedRegistry("gcn-ilp",
444 "Run GCN iterative scheduler for ILP scheduling (experimental)",
445 createIterativeILPMachineScheduler);
446
447static StringRef computeDataLayout(const Triple &TT) {
448 if (TT.getArch() == Triple::r600) {
449 // 32-bit pointers.
450 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
451 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
452 }
453
454 // 32-bit private, local, and region pointers. 64-bit global, constant and
455 // flat, non-integral buffer fat pointers.
456 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
457 "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
458 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
459 "-ni:7";
460}
461
462LLVM_READNONE
463static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
464 if (!GPU.empty())
465 return GPU;
466
467 // Need to default to a target with flat support for HSA.
468 if (TT.getArch() == Triple::amdgcn)
469 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
470
471 return "r600";
472}
473
474static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
475 // The AMDGPU toolchain only supports generating shared objects, so we
476 // must always use PIC.
477 return Reloc::PIC_;
478}
479
480AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
481 StringRef CPU, StringRef FS,
482 TargetOptions Options,
483 Optional<Reloc::Model> RM,
484 Optional<CodeModel::Model> CM,
485 CodeGenOpt::Level OptLevel)
486 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
487 FS, Options, getEffectiveRelocModel(RM),
488 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
489 TLOF(createTLOF(getTargetTriple())) {
490 initAsmInfo();
491 if (TT.getArch() == Triple::amdgcn) {
492 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
493 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
494 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
495 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
496 }
497}
498
499bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
500bool AMDGPUTargetMachine::EnableFunctionCalls = false;
501bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
502
503AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
504
505StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
506 Attribute GPUAttr = F.getFnAttribute("target-cpu");
507 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
508}
509
510StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
511 Attribute FSAttr = F.getFnAttribute("target-features");
512
513 return FSAttr.isValid() ? FSAttr.getValueAsString()
514 : getTargetFeatureString();
515}
516
517/// Predicate for Internalize pass.
518static bool mustPreserveGV(const GlobalValue &GV) {
519 if (const Function *F = dyn_cast<Function>(&GV))
520 return F->isDeclaration() || F->getName().startswith("__asan_") ||
521 F->getName().startswith("__sanitizer_") ||
522 AMDGPU::isEntryFunctionCC(F->getCallingConv());
523
524 GV.removeDeadConstantUsers();
525 return !GV.use_empty();
526}
527
528void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
529 Builder.DivergentTarget = true;
530
531 bool EnableOpt = getOptLevel() > CodeGenOpt::None;
532 bool Internalize = InternalizeSymbols;
533 bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
534 bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
535 bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;
536 bool PromoteKernelArguments =
537 EnablePromoteKernelArguments && getOptLevel() > CodeGenOpt::Less;
538
539 if (EnableFunctionCalls) {
540 delete Builder.Inliner;
541 Builder.Inliner = createFunctionInliningPass();
542 }
543
544 Builder.addExtension(
545 PassManagerBuilder::EP_ModuleOptimizerEarly,
546 [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
547 legacy::PassManagerBase &PM) {
548 if (AMDGPUAA) {
549 PM.add(createAMDGPUAAWrapperPass());
550 PM.add(createAMDGPUExternalAAWrapperPass());
551 }
552 PM.add(createAMDGPUUnifyMetadataPass());
553 PM.add(createAMDGPUPrintfRuntimeBinding());
554 if (Internalize)
555 PM.add(createInternalizePass(mustPreserveGV));
556 PM.add(createAMDGPUPropagateAttributesLatePass(this));
557 if (Internalize)
558 PM.add(createGlobalDCEPass());
559 if (EarlyInline)
560 PM.add(createAMDGPUAlwaysInlinePass(false));
561 });
562
563 Builder.addExtension(
564 PassManagerBuilder::EP_EarlyAsPossible,
565 [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &,
566 legacy::PassManagerBase &PM) {
567 if (AMDGPUAA) {
568 PM.add(createAMDGPUAAWrapperPass());
569 PM.add(createAMDGPUExternalAAWrapperPass());
570 }
571 PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
572 PM.add(llvm::createAMDGPUUseNativeCallsPass());
573 if (LibCallSimplify)
574 PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this));
575 });
576
577 Builder.addExtension(
578 PassManagerBuilder::EP_CGSCCOptimizerLate,
579 [EnableOpt, PromoteKernelArguments](const PassManagerBuilder &,
580 legacy::PassManagerBase &PM) {
581 // Add promote kernel arguments pass to the opt pipeline right before
582 // infer address spaces which is needed to do actual address space
583 // rewriting.
584 if (PromoteKernelArguments)
585 PM.add(createAMDGPUPromoteKernelArgumentsPass());
586
587 // Add infer address spaces pass to the opt pipeline after inlining
588 // but before SROA to increase SROA opportunities.
589 PM.add(createInferAddressSpacesPass());
590
591 // This should run after inlining to have any chance of doing anything,
592 // and before other cleanup optimizations.
593 PM.add(createAMDGPULowerKernelAttributesPass());
594
595 // Promote alloca to vector before SROA and loop unroll. If we manage
596 // to eliminate allocas before unroll we may choose to unroll less.
597 if (EnableOpt)
598 PM.add(createAMDGPUPromoteAllocaToVector());
599 });
600}
601
602void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
603 AAM.registerFunctionAnalysis<AMDGPUAA>();
604}
605
606void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
607 PB.registerPipelineParsingCallback(
608 [this](StringRef PassName, ModulePassManager &PM,
609 ArrayRef<PassBuilder::PipelineElement>) {
610 if (PassName == "amdgpu-propagate-attributes-late") {
611 PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
612 return true;
613 }
614 if (PassName == "amdgpu-unify-metadata") {
615 PM.addPass(AMDGPUUnifyMetadataPass());
616 return true;
617 }
618 if (PassName == "amdgpu-printf-runtime-binding") {
619 PM.addPass(AMDGPUPrintfRuntimeBindingPass());
620 return true;
621 }
622 if (PassName == "amdgpu-always-inline") {
623 PM.addPass(AMDGPUAlwaysInlinePass());
624 return true;
625 }
626 if (PassName == "amdgpu-replace-lds-use-with-pointer") {
627 PM.addPass(AMDGPUReplaceLDSUseWithPointerPass());
628 return true;
629 }
630 if (PassName == "amdgpu-lower-module-lds") {
631 PM.addPass(AMDGPULowerModuleLDSPass());
632 return true;
633 }
634 return false;
635 });
636 PB.registerPipelineParsingCallback(
637 [this](StringRef PassName, FunctionPassManager &PM,
638 ArrayRef<PassBuilder::PipelineElement>) {
639 if (PassName == "amdgpu-simplifylib") {
640 PM.addPass(AMDGPUSimplifyLibCallsPass(*this));
641 return true;
642 }
643 if (PassName == "amdgpu-usenative") {
644 PM.addPass(AMDGPUUseNativeCallsPass());
645 return true;
646 }
647 if (PassName == "amdgpu-promote-alloca") {
648 PM.addPass(AMDGPUPromoteAllocaPass(*this));
649 return true;
650 }
651 if (PassName == "amdgpu-promote-alloca-to-vector") {
652 PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
653 return true;
654 }
655 if (PassName == "amdgpu-lower-kernel-attributes") {
656 PM.addPass(AMDGPULowerKernelAttributesPass());
657 return true;
658 }
659 if (PassName == "amdgpu-propagate-attributes-early") {
660 PM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
661 return true;
662 }
663 if (PassName == "amdgpu-promote-kernel-arguments") {
664 PM.addPass(AMDGPUPromoteKernelArgumentsPass());
665 return true;
666 }
667 return false;
668 });
669
670 PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
671 FAM.registerPass([&] { return AMDGPUAA(); });
672 });
673
674 PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
675 if (AAName == "amdgpu-aa") {
676 AAM.registerFunctionAnalysis<AMDGPUAA>();
677 return true;
678 }
679 return false;
680 });
681
682 PB.registerPipelineStartEPCallback(
683 [this](ModulePassManager &PM, OptimizationLevel Level) {
684 FunctionPassManager FPM;
685 FPM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
686 FPM.addPass(AMDGPUUseNativeCallsPass());
687 if (EnableLibCallSimplify && Level != OptimizationLevel::O0)
688 FPM.addPass(AMDGPUSimplifyLibCallsPass(*this));
689 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
690 });
691
692 PB.registerPipelineEarlySimplificationEPCallback(
693 [this](ModulePassManager &PM, OptimizationLevel Level) {
694 if (Level == OptimizationLevel::O0)
695 return;
696
697 PM.addPass(AMDGPUUnifyMetadataPass());
698 PM.addPass(AMDGPUPrintfRuntimeBindingPass());
699
700 if (InternalizeSymbols) {
701 PM.addPass(InternalizePass(mustPreserveGV));
702 }
703 PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
704 if (InternalizeSymbols) {
705 PM.addPass(GlobalDCEPass());
706 }
707 if (EarlyInlineAll && !EnableFunctionCalls)
708 PM.addPass(AMDGPUAlwaysInlinePass());
709 });
710
711 PB.registerCGSCCOptimizerLateEPCallback(
712 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
713 if (Level == OptimizationLevel::O0)
714 return;
715
716 FunctionPassManager FPM;
717
718 // Add promote kernel arguments pass to the opt pipeline right before
719 // infer address spaces which is needed to do actual address space
720 // rewriting.
721 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
722 EnablePromoteKernelArguments)
723 FPM.addPass(AMDGPUPromoteKernelArgumentsPass());
724
725 // Add infer address spaces pass to the opt pipeline after inlining
726 // but before SROA to increase SROA opportunities.
727 FPM.addPass(InferAddressSpacesPass());
728
729 // This should run after inlining to have any chance of doing
730 // anything, and before other cleanup optimizations.
731 FPM.addPass(AMDGPULowerKernelAttributesPass());
732
733 if (Level != OptimizationLevel::O0) {
734 // Promote alloca to vector before SROA and loop unroll. If we
735 // manage to eliminate allocas before unroll we may choose to unroll
736 // less.
737 FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
738 }
739
740 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
741 });
742}
743
744int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
745 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
746 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
747 AddrSpace == AMDGPUAS::REGION_ADDRESS)
748 ? -1
749 : 0;
750}
751
752bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
753 unsigned DestAS) const {
754 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
755 AMDGPU::isFlatGlobalAddrSpace(DestAS);
756}
757
758unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
759 const auto *LD = dyn_cast<LoadInst>(V);
760 if (!LD)
761 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
762
763 // It must be a generic pointer loaded.
764 assert(V->getType()->isPointerTy() &&
765        V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
766
767 const auto *Ptr = LD->getPointerOperand();
768 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
769 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
770 // For a generic pointer loaded from the constant memory, it could be assumed
771 // as a global pointer since the constant memory is only populated on the
772 // host side. As implied by the offload programming model, only global
773 // pointers could be referenced on the host side.
774 return AMDGPUAS::GLOBAL_ADDRESS;
775}
776
777std::pair<const Value *, unsigned>
778AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
779 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
780 switch (II->getIntrinsicID()) {
781 case Intrinsic::amdgcn_is_shared:
782 return std::make_pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
783 case Intrinsic::amdgcn_is_private:
784 return std::make_pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
785 default:
786 break;
787 }
788 return std::make_pair(nullptr, -1);
789 }
790 // Check the global pointer predication based on
791 // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
792 // the order of 'is_shared' and 'is_private' is not significant.
793 Value *Ptr;
794 if (match(
795 const_cast<Value *>(V),
796 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
797 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
798 m_Deferred(Ptr))))))
799 return std::make_pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
800
801 return std::make_pair(nullptr, -1);
802}
803
804//===----------------------------------------------------------------------===//
805// GCN Target Machine (SI+)
806//===----------------------------------------------------------------------===//
807
808GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
809 StringRef CPU, StringRef FS,
810 TargetOptions Options,
811 Optional<Reloc::Model> RM,
812 Optional<CodeModel::Model> CM,
813 CodeGenOpt::Level OL, bool JIT)
814 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
815
816const TargetSubtargetInfo *
817GCNTargetMachine::getSubtargetImpl(const Function &F) const {
818 StringRef GPU = getGPUName(F);
819 StringRef FS = getFeatureString(F);
820
821 SmallString<128> SubtargetKey(GPU);
822 SubtargetKey.append(FS);
823
824 auto &I = SubtargetMap[SubtargetKey];
825 if (!I) {
826 // This needs to be done before we create a new subtarget since any
827 // creation will depend on the TM and the code generation flags on the
828 // function that reside in TargetOptions.
829 resetTargetOptions(F);
830 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
831 }
832
833 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
834
835 return I.get();
836}
837
838TargetTransformInfo
839GCNTargetMachine::getTargetTransformInfo(const Function &F) {
840 return TargetTransformInfo(GCNTTIImpl(this, F));
841}
842
843//===----------------------------------------------------------------------===//
844// AMDGPU Pass Setup
845//===----------------------------------------------------------------------===//
846
847std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
848 return getStandardCSEConfigForOpt(TM->getOptLevel());
849}
850
851namespace {
852
853class GCNPassConfig final : public AMDGPUPassConfig {
854public:
855 GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
856 : AMDGPUPassConfig(TM, PM) {
857 // It is necessary to know the register usage of the entire call graph. We
858 // allow calls without EnableAMDGPUFunctionCalls if they are marked
859 // noinline, so this is always required.
860 setRequiresCodeGenSCCOrder(true);
861 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
862 }
863
864 GCNTargetMachine &getGCNTargetMachine() const {
865 return getTM<GCNTargetMachine>();
866 }
867
868 ScheduleDAGInstrs *
869 createMachineScheduler(MachineSchedContext *C) const override;
870
871 ScheduleDAGInstrs *
872 createPostMachineScheduler(MachineSchedContext *C) const override {
873 ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
874 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
875 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
876 DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
877 return DAG;
878 }
879
880 bool addPreISel() override;
881 void addMachineSSAOptimization() override;
882 bool addILPOpts() override;
883 bool addInstSelector() override;
884 bool addIRTranslator() override;
885 void addPreLegalizeMachineIR() override;
886 bool addLegalizeMachineIR() override;
887 void addPreRegBankSelect() override;
888 bool addRegBankSelect() override;
889 void addPreGlobalInstructionSelect() override;
890 bool addGlobalInstructionSelect() override;
891 void addFastRegAlloc() override;
892 void addOptimizedRegAlloc() override;
893
894 FunctionPass *createSGPRAllocPass(bool Optimized);
895 FunctionPass *createVGPRAllocPass(bool Optimized);
896 FunctionPass *createRegAllocPass(bool Optimized) override;
897
898 bool addRegAssignAndRewriteFast() override;
899 bool addRegAssignAndRewriteOptimized() override;
900
901 void addPreRegAlloc() override;
902 bool addPreRewrite() override;
903 void addPostRegAlloc() override;
904 void addPreSched2() override;
905 void addPreEmitPass() override;
906};
907
908} // end anonymous namespace
909
910AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
911 : TargetPassConfig(TM, PM) {
912 // Exceptions and StackMaps are not supported, so these passes will never do
913 // anything.
914 disablePass(&StackMapLivenessID);
915 disablePass(&FuncletLayoutID);
916 // Garbage collection is not supported.
917 disablePass(&GCLoweringID);
918 disablePass(&ShadowStackGCLoweringID);
919}
920
921void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
922 if (getOptLevel() == CodeGenOpt::Aggressive)
923 addPass(createGVNPass());
924 else
925 addPass(createEarlyCSEPass());
926}
927
928void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
929 addPass(createLICMPass());
930 addPass(createSeparateConstOffsetFromGEPPass());
931 addPass(createSpeculativeExecutionPass());
932 // ReassociateGEPs exposes more opportunities for SLSR. See
933 // the example in reassociate-geps-and-slsr.ll.
934 addPass(createStraightLineStrengthReducePass());
935 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
936 // EarlyCSE can reuse.
937 addEarlyCSEOrGVNPass();
938 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
939 addPass(createNaryReassociatePass());
940 // NaryReassociate on GEPs creates redundant common expressions, so run
941 // EarlyCSE after it.
942 addPass(createEarlyCSEPass());
943}
944
945void AMDGPUPassConfig::addIRPasses() {
946 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
947
948 // There is no reason to run these.
949 disablePass(&StackMapLivenessID);
950 disablePass(&FuncletLayoutID);
951 disablePass(&PatchableFunctionID);
952
953 addPass(createAMDGPUPrintfRuntimeBinding());
954 addPass(createAMDGPUCtorDtorLoweringPass());
955
956 // This must occur before inlining, as the inliner will not look through
957 // bitcast calls.
958 addPass(createAMDGPUFixFunctionBitcastsPass());
959
960 // A call to propagate attributes pass in the backend in case opt was not run.
961 addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));
962
963 addPass(createAMDGPULowerIntrinsicsPass());
964
965 // Function calls are not supported, so make sure we inline everything.
966 addPass(createAMDGPUAlwaysInlinePass());
967 addPass(createAlwaysInlinerLegacyPass());
968 // We need to add the barrier noop pass, otherwise adding the function
969 // inlining pass will cause all of the PassConfigs passes to be run
970 // one function at a time, which means if we have a module with two
971 // functions, then we will generate code for the first function
972 // without ever running any passes on the second.
973 addPass(createBarrierNoopPass());
974
975 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
976 if (TM.getTargetTriple().getArch() == Triple::r600)
977 addPass(createR600OpenCLImageTypeLoweringPass());
978
979 // Replace OpenCL enqueued block function pointers with global variables.
980 addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());
981
982 // Can increase LDS used by kernel so runs before PromoteAlloca
983 if (EnableLowerModuleLDS) {
984 // The pass "amdgpu-replace-lds-use-with-pointer" needs to be run before the
985 // pass "amdgpu-lower-module-lds", and it is required to be run only if the
986 // "amdgpu-lower-module-lds" pass is enabled.
987 if (EnableLDSReplaceWithPointer)
988 addPass(createAMDGPUReplaceLDSUseWithPointerPass());
989
990 addPass(createAMDGPULowerModuleLDSPass());
991 }
992
993 if (TM.getOptLevel() > CodeGenOpt::None)
994 addPass(createInferAddressSpacesPass());
995
996 addPass(createAtomicExpandPass());
997
998 if (TM.getOptLevel() > CodeGenOpt::None) {
999 addPass(createAMDGPUPromoteAlloca());
1000
1001 if (EnableSROA)
1002 addPass(createSROAPass());
1003 if (isPassEnabled(EnableScalarIRPasses))
1004 addStraightLineScalarOptimizationPasses();
1005
1006 if (EnableAMDGPUAliasAnalysis) {
1007 addPass(createAMDGPUAAWrapperPass());
1008 addPass(createExternalAAWrapperPass([](Pass &P, Function &,
1009 AAResults &AAR) {
1010 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1011 AAR.addAAResult(WrapperPass->getResult());
1012 }));
1013 }
1014
1015 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
1016 // TODO: May want to move later or split into an early and late one.
1017 addPass(createAMDGPUCodeGenPreparePass());
1018 }
1019 }
1020
1021 TargetPassConfig::addIRPasses();
1022
1023 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1024 // example, GVN can combine
1025 //
1026 // %0 = add %a, %b
1027 // %1 = add %b, %a
1028 //
1029 // and
1030 //
1031 // %0 = shl nsw %a, 2
1032 // %1 = shl %a, 2
1033 //
1034 // but EarlyCSE can do neither of them.
1035 if (isPassEnabled(EnableScalarIRPasses))
1036 addEarlyCSEOrGVNPass();
1037}
1038
1039void AMDGPUPassConfig::addCodeGenPrepare() {
1040 if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
1041 addPass(createAMDGPUAttributorPass());
1042
1043 // FIXME: This pass adds 2 hacky attributes that can be replaced with an
1044 // analysis, and should be removed.
1045 addPass(createAMDGPUAnnotateKernelFeaturesPass());
1046 }
1047
1048 if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
1049 EnableLowerKernelArguments)
1050 addPass(createAMDGPULowerKernelArgumentsPass());
1051
1052 TargetPassConfig::addCodeGenPrepare();
1053
1054 if (isPassEnabled(EnableLoadStoreVectorizer))
1055 addPass(createLoadStoreVectorizerPass());
1056
1057 // LowerSwitch pass may introduce unreachable blocks that can
1058 // cause unexpected behavior for subsequent passes. Placing it
1059 // here seems better that these blocks would get cleaned up by
1060 // UnreachableBlockElim inserted next in the pass flow.
1061 addPass(createLowerSwitchPass());
1062}
1063
1064bool AMDGPUPassConfig::addPreISel() {
1065 if (TM->getOptLevel() > CodeGenOpt::None)
1066 addPass(createFlattenCFGPass());
1067 return false;
1068}
1069
1070bool AMDGPUPassConfig::addInstSelector() {
1071 addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
1072 return false;
1073}
1074
1075bool AMDGPUPassConfig::addGCPasses() {
1076 // Do nothing. GC is not supported.
1077 return false;
1078}
1079
1080llvm::ScheduleDAGInstrs *
1081AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
1082 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
1083 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1084 return DAG;
1085}
1086
1087//===----------------------------------------------------------------------===//
1088// GCN Pass Setup
1089//===----------------------------------------------------------------------===//
1090
1091ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1092 MachineSchedContext *C) const {
1093 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1094 if (ST.enableSIScheduler())
1095 return createSIMachineScheduler(C);
1096 return createGCNMaxOccupancyMachineScheduler(C);
1097}
1098
1099bool GCNPassConfig::addPreISel() {
1100 AMDGPUPassConfig::addPreISel();
1101
1102 if (TM->getOptLevel() > CodeGenOpt::None)
1103 addPass(createAMDGPULateCodeGenPreparePass());
1104
1105 if (isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less)) {
1106 addPass(createAMDGPUAtomicOptimizerPass());
1107 }
1108
1109 if (TM->getOptLevel() > CodeGenOpt::None)
1110 addPass(createSinkingPass());
1111
1112 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1113 // regions formed by them.
1114 addPass(&AMDGPUUnifyDivergentExitNodesID);
1115 if (!LateCFGStructurize) {
1116 if (EnableStructurizerWorkarounds) {
1117 addPass(createFixIrreduciblePass());
1118 addPass(createUnifyLoopExitsPass());
1119 }
1120 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1121 }
1122 addPass(createAMDGPUAnnotateUniformValues());
1123 if (!LateCFGStructurize) {
1124 addPass(createSIAnnotateControlFlowPass());
1125 }
1126 addPass(createLCSSAPass());
1127
1128 if (TM->getOptLevel() > CodeGenOpt::Less)
1129 addPass(&AMDGPUPerfHintAnalysisID);
1130
1131 return false;
1132}
1133
1134void GCNPassConfig::addMachineSSAOptimization() {
1135 TargetPassConfig::addMachineSSAOptimization();
1136
1137 // We want to fold operands after PeepholeOptimizer has run (or as part of
1138 // it), because it will eliminate extra copies making it easier to fold the
1139 // real source operand. We want to eliminate dead instructions after, so that
1140 // we see fewer uses of the copies. We then need to clean up the dead
1141 // instructions leftover after the operands are folded as well.
1142 //
1143 // XXX - Can we get away without running DeadMachineInstructionElim again?
1144 addPass(&SIFoldOperandsID);
1145 if (EnableDPPCombine)
1146 addPass(&GCNDPPCombineID);
1147 addPass(&SILoadStoreOptimizerID);
1148 if (isPassEnabled(EnableSDWAPeephole)) {
1149 addPass(&SIPeepholeSDWAID);
1150 addPass(&EarlyMachineLICMID);
1151 addPass(&MachineCSEID);
1152 addPass(&SIFoldOperandsID);
1153 }
1154 addPass(&DeadMachineInstructionElimID);
1155 addPass(createSIShrinkInstructionsPass());
1156}
1157
1158bool GCNPassConfig::addILPOpts() {
1159 if (EnableEarlyIfConversion)
1160 addPass(&EarlyIfConverterID);
1161
1162 TargetPassConfig::addILPOpts();
1163 return false;
1164}
1165
1166bool GCNPassConfig::addInstSelector() {
1167 AMDGPUPassConfig::addInstSelector();
1168 addPass(&SIFixSGPRCopiesID);
1169 addPass(createSILowerI1CopiesPass());
1170 return false;
1171}
1172
1173bool GCNPassConfig::addIRTranslator() {
1174 addPass(new IRTranslator(getOptLevel()));
1175 return false;
1176}
1177
1178void GCNPassConfig::addPreLegalizeMachineIR() {
1179 bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1180 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1181 addPass(new Localizer());
1182}
1183
1184bool GCNPassConfig::addLegalizeMachineIR() {
1185 addPass(new Legalizer());
1186 return false;
1187}
1188
1189void GCNPassConfig::addPreRegBankSelect() {
1190 bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1191 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1192}
1193
1194bool GCNPassConfig::addRegBankSelect() {
1195 addPass(new RegBankSelect());
1196 return false;
1197}
1198
1199void GCNPassConfig::addPreGlobalInstructionSelect() {
1200 bool IsOptNone = getOptLevel() == CodeGenOpt::None;
1201 addPass(createAMDGPURegBankCombiner(IsOptNone));
1202}
1203
1204bool GCNPassConfig::addGlobalInstructionSelect() {
1205 addPass(new InstructionSelect(getOptLevel()));
1206 return false;
1207}
1208
1209void GCNPassConfig::addPreRegAlloc() {
1210 if (LateCFGStructurize) {
1211 addPass(createAMDGPUMachineCFGStructurizerPass());
1212 }
1213}
1214
1215void GCNPassConfig::addFastRegAlloc() {
1216 // FIXME: We have to disable the verifier here because of PHIElimination +
1217 // TwoAddressInstructions disabling it.
1218
1219 // This must be run immediately after phi elimination and before
1220 // TwoAddressInstructions, otherwise the processing of the tied operand of
1221 // SI_ELSE will introduce a copy of the tied operand source after the else.
1222 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1223
1224 insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1225 insertPass(&TwoAddressInstructionPassID, &SIPreAllocateWWMRegsID);
1226
1227 TargetPassConfig::addFastRegAlloc();
1228}
1229
1230void GCNPassConfig::addOptimizedRegAlloc() {
1231 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1232 // instructions that cause scheduling barriers.
1233 insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
1234 insertPass(&MachineSchedulerID, &SIPreAllocateWWMRegsID);
1235
1236 if (OptExecMaskPreRA)
1237 insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
1238
1239 if (isPassEnabled(EnablePreRAOptimizations))
1240 insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);
1241
1242 // This is not an essential optimization and it has a noticeable impact on
1243 // compilation time, so we only enable it from O2.
1244 if (TM->getOptLevel() > CodeGenOpt::Less)
1245 insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
1246
1247 // FIXME: when an instruction has a Killed operand, and the instruction is
1248 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1249 // the register in LiveVariables, this would trigger a failure in verifier,
1250 // we should fix it and enable the verifier.
1251 if (OptVGPRLiveRange)
1252 insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
1253 // This must be run immediately after phi elimination and before
1254 // TwoAddressInstructions, otherwise the processing of the tied operand of
1255 // SI_ELSE will introduce a copy of the tied operand source after the else.
1256 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1257
1258 if (EnableDCEInRA)
1259 insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1260
1261 TargetPassConfig::addOptimizedRegAlloc();
1262}
1263
1264bool GCNPassConfig::addPreRewrite() {
1265 if (EnableRegReassign)
1266 addPass(&GCNNSAReassignID);
1267 return true;
1268}
1269
1270FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1271 // Initialize the global default.
1272 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1273 initializeDefaultSGPRRegisterAllocatorOnce);
1274
1275 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1276 if (Ctor != useDefaultRegisterAllocator)
1277 return Ctor();
1278
1279 if (Optimized)
1280 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1281
1282 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1283}
1284
1285FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1286 // Initialize the global default.
1287 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1288 initializeDefaultVGPRRegisterAllocatorOnce);
1289
1290 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1291 if (Ctor != useDefaultRegisterAllocator)
1292 return Ctor();
1293
1294 if (Optimized)
1295 return createGreedyVGPRRegisterAllocator();
1296
1297 return createFastVGPRRegisterAllocator();
1298}
1299
1300FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1301 llvm_unreachable("should not be used");
1302}
1303
1304static const char RegAllocOptNotSupportedMessage[] =
1305 "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";
1306
1307bool GCNPassConfig::addRegAssignAndRewriteFast() {
1308 if (!usingDefaultRegAlloc())
1309 report_fatal_error(RegAllocOptNotSupportedMessage);
1310
1311 addPass(createSGPRAllocPass(false));
1312
1313 // Equivalent of PEI for SGPRs.
1314 addPass(&SILowerSGPRSpillsID);
1315
1316 addPass(createVGPRAllocPass(false));
1317 return true;
1318}
1319
1320bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1321 if (!usingDefaultRegAlloc())
1322 report_fatal_error(RegAllocOptNotSupportedMessage);
1323
1324 addPass(createSGPRAllocPass(true));
1325
1326 // Commit allocated register changes. This is mostly necessary because too
1327 // many things rely on the use lists of the physical registers, such as the
1328 // verifier. This is only necessary with allocators which use LiveIntervals,
1329 // since FastRegAlloc does the replacements itself.
1330 addPass(createVirtRegRewriter(false));
1331
1332 // Equivalent of PEI for SGPRs.
1333 addPass(&SILowerSGPRSpillsID);
1334
1335 addPass(createVGPRAllocPass(true));
1336
1337 addPreRewrite();
1338 addPass(&VirtRegRewriterID);
1339
1340 return true;
1341}
1342
1343void GCNPassConfig::addPostRegAlloc() {
1344 addPass(&SIFixVGPRCopiesID);
1345 if (getOptLevel() > CodeGenOpt::None)
1346 addPass(&SIOptimizeExecMaskingID);
1347 TargetPassConfig::addPostRegAlloc();
1348}
1349
1350void GCNPassConfig::addPreSched2() {
1351 if (TM->getOptLevel() > CodeGenOpt::None)
1352 addPass(createSIShrinkInstructionsPass());
1353 addPass(&SIPostRABundlerID);
1354}
1355
1356void GCNPassConfig::addPreEmitPass() {
1357 addPass(createSIMemoryLegalizerPass());
1358 addPass(createSIInsertWaitcntsPass());
1359
1360 addPass(createSIModeRegisterPass());
1361
1362 if (getOptLevel() > CodeGenOpt::None)
1363 addPass(&SIInsertHardClausesID);
1364
1365 addPass(&SILateBranchLoweringPassID);
1366 if (getOptLevel() > CodeGenOpt::None)
1367 addPass(&SIPreEmitPeepholeID);
1368 // The hazard recognizer that runs as part of the post-ra scheduler does not
1369 // guarantee to be able to handle all hazards correctly. This is because if there
1370 // are multiple scheduling regions in a basic block, the regions are scheduled
1371 // bottom up, so when we begin to schedule a region we don't know what
1372 // instructions were emitted directly before it.
1373 //
1374 // Here we add a stand-alone hazard recognizer pass which can handle all
1375 // cases.
1376 addPass(&PostRAHazardRecognizerID);
1377 addPass(&BranchRelaxationPassID);
1378}
1379
1380TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
1381 return new GCNPassConfig(*this, PM);
1382}
1383
1384yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
1385 return new yaml::SIMachineFunctionInfo();
1386}
1387
1388yaml::MachineFunctionInfo *
1389GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
1390 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1391 return new yaml::SIMachineFunctionInfo(
1392 *MFI, *MF.getSubtarget().getRegisterInfo(), MF);
1393}
1394
1395bool GCNTargetMachine::parseMachineFunctionInfo(
1396 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1397 SMDiagnostic &Error, SMRange &SourceRange) const {
1398 const yaml::SIMachineFunctionInfo &YamlMFI =
1399 reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1400 MachineFunction &MF = PFS.MF;
1401 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1402
1403 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1404 return true;
1405
1406 if (MFI->Occupancy == 0) {
1407 // Fixup the subtarget dependent default value.
1408 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1409 MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
1410 }
1411
1412 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1413 Register TempReg;
1414 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1415 SourceRange = RegName.SourceRange;
1416 return true;
1417 }
1418 RegVal = TempReg;
1419
1420 return false;
1421 };
1422
1423 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1424 // Create a diagnostic for the register string literal.
1425 const MemoryBuffer &Buffer =
1426 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1427 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1428 RegName.Value.size(), SourceMgr::DK_Error,
1429 "incorrect register class for field", RegName.Value,
1430 None, None);
1431 SourceRange = RegName.SourceRange;
1432 return true;
1433 };
1434
1435 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1436 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1437 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1438 return true;
1439
1440 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1441 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1442 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1443 }
1444
1445 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1446 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1447 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1448 }
1449
1450 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1451 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1452 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1453 }
1454
1455 auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
1456 const TargetRegisterClass &RC,
1457 ArgDescriptor &Arg, unsigned UserSGPRs,
1458 unsigned SystemSGPRs) {
1459 // Skip parsing if it's not present.
1460 if (!A)
1461 return false;
1462
1463 if (A->IsRegister) {
1464 Register Reg;
1465 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1466 SourceRange = A->RegisterName.SourceRange;
1467 return true;
1468 }
1469 if (!RC.contains(Reg))
1470 return diagnoseRegisterClass(A->RegisterName);
1471 Arg = ArgDescriptor::createRegister(Reg);
1472 } else
1473 Arg = ArgDescriptor::createStack(A->StackOffset);
1474 // Check and apply the optional mask.
1475 if (A->Mask)
1476 Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());
1477
1478 MFI->NumUserSGPRs += UserSGPRs;
1479 MFI->NumSystemSGPRs += SystemSGPRs;
1480 return false;
1481 };
1482
1483 if (YamlMFI.ArgInfo &&
1484 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1485 AMDGPU::SGPR_128RegClass,
1486 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1487 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1488 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1489 2, 0) ||
1490 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1491 MFI->ArgInfo.QueuePtr, 2, 0) ||
1492 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1493 AMDGPU::SReg_64RegClass,
1494 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1495 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1496 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1497 2, 0) ||
1498 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1499 AMDGPU::SReg_64RegClass,
1500 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1501 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1502 AMDGPU::SGPR_32RegClass,
1503 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1504 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1505 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1506 0, 1) ||
1507 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1508 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1509 0, 1) ||
1510 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
1511 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
1512 0, 1) ||
1513 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
1514 AMDGPU::SGPR_32RegClass,
1515 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
1516 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
1517 AMDGPU::SGPR_32RegClass,
1518 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
1519 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
1520 AMDGPU::SReg_64RegClass,
1521 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
1522 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
1523 AMDGPU::SReg_64RegClass,
1524 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
1525 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
1526 AMDGPU::VGPR_32RegClass,
1527 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
1528 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
1529 AMDGPU::VGPR_32RegClass,
1530 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
1531 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
1532 AMDGPU::VGPR_32RegClass,
1533 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
1534 return true;
1535
1536 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
1537 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
1538 MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals;
1539 MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals;
1540 MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals;
1541 MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals;
1542
1543 return false;
1544}