File: llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
Warning: line 119, column 5: Value stored to 'Ctor' is never read
1 | //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// The AMDGPU target machine contains all of the hardware specific |
11 | /// information needed to emit code for SI+ GPUs. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "AMDGPUTargetMachine.h" |
16 | #include "AMDGPU.h" |
17 | #include "AMDGPUAliasAnalysis.h" |
18 | #include "AMDGPUExportClustering.h" |
19 | #include "AMDGPUMacroFusion.h" |
20 | #include "AMDGPUTargetObjectFile.h" |
21 | #include "AMDGPUTargetTransformInfo.h" |
22 | #include "GCNIterativeScheduler.h" |
23 | #include "GCNSchedStrategy.h" |
24 | #include "R600.h" |
25 | #include "R600TargetMachine.h" |
26 | #include "SIMachineFunctionInfo.h" |
27 | #include "SIMachineScheduler.h" |
28 | #include "TargetInfo/AMDGPUTargetInfo.h" |
29 | #include "llvm/Analysis/CGSCCPassManager.h" |
30 | #include "llvm/CodeGen/GlobalISel/IRTranslator.h" |
31 | #include "llvm/CodeGen/GlobalISel/InstructionSelect.h" |
32 | #include "llvm/CodeGen/GlobalISel/Legalizer.h" |
33 | #include "llvm/CodeGen/GlobalISel/Localizer.h" |
34 | #include "llvm/CodeGen/GlobalISel/RegBankSelect.h" |
35 | #include "llvm/CodeGen/MIRParser/MIParser.h" |
36 | #include "llvm/CodeGen/Passes.h" |
37 | #include "llvm/CodeGen/RegAllocRegistry.h" |
38 | #include "llvm/CodeGen/TargetPassConfig.h" |
39 | #include "llvm/IR/LegacyPassManager.h" |
40 | #include "llvm/IR/PassManager.h" |
41 | #include "llvm/InitializePasses.h" |
42 | #include "llvm/Passes/PassBuilder.h" |
43 | #include "llvm/Support/TargetRegistry.h" |
44 | #include "llvm/Transforms/IPO.h" |
45 | #include "llvm/Transforms/IPO/AlwaysInliner.h" |
46 | #include "llvm/Transforms/IPO/GlobalDCE.h" |
47 | #include "llvm/Transforms/IPO/Internalize.h" |
48 | #include "llvm/Transforms/IPO/PassManagerBuilder.h" |
49 | #include "llvm/Transforms/Scalar.h" |
50 | #include "llvm/Transforms/Scalar/GVN.h" |
51 | #include "llvm/Transforms/Scalar/InferAddressSpaces.h" |
52 | #include "llvm/Transforms/Utils.h" |
53 | #include "llvm/Transforms/Utils/SimplifyLibCalls.h" |
54 | #include "llvm/Transforms/Vectorize.h" |
55 | |
56 | using namespace llvm; |
57 | |
58 | namespace { |
59 | class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> { |
60 | public: |
61 | SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C) |
62 | : RegisterRegAllocBase(N, D, C) {} |
63 | }; |
64 | |
65 | class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> { |
66 | public: |
67 | VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C) |
68 | : RegisterRegAllocBase(N, D, C) {} |
69 | }; |
70 | |
71 | static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI, |
72 | const TargetRegisterClass &RC) { |
73 | return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC); |
74 | } |
75 | |
76 | static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI, |
77 | const TargetRegisterClass &RC) { |
78 | return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC); |
79 | } |
80 | |
81 | |
82 | /// A dummy default pass factory used to indicate whether the register |
83 | /// allocator has been overridden via the -{sgpr|vgpr}-regalloc=... command |
84 | /// line options. |
85 | static FunctionPass *useDefaultRegisterAllocator() { return nullptr; } |
86 | |
87 | static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag; |
88 | static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag; |
89 | |
90 | static SGPRRegisterRegAlloc |
91 | defaultSGPRRegAlloc("default", |
92 | "pick SGPR register allocator based on -O option", |
93 | useDefaultRegisterAllocator); |
94 | |
95 | static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false, |
96 | RegisterPassParser<SGPRRegisterRegAlloc>> |
97 | SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator), |
98 | cl::desc("Register allocator to use for SGPRs")); |
99 | |
100 | static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false, |
101 | RegisterPassParser<VGPRRegisterRegAlloc>> |
102 | VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator), |
103 | cl::desc("Register allocator to use for VGPRs")); |
104 | |
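// For illustration: the two options above let the SGPR and VGPR allocators be
// chosen independently on the llc command line, e.g. (hypothetical
// invocation):
//
//   llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 \
//       -sgpr-regalloc=greedy -vgpr-regalloc=fast input.ll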
105 | |
106 | static void initializeDefaultSGPRRegisterAllocatorOnce() { |
107 | RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault(); |
108 | |
109 | if (!Ctor) { |
110 | // Nothing reads 'Ctor' past this check, so just install the default. |
111 | SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc); |
112 | } |
113 | } |
114 | |
115 | static void initializeDefaultVGPRRegisterAllocatorOnce() { |
116 | RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault(); |
117 | |
118 | if (!Ctor) { |
119 | // Nothing reads 'Ctor' after this check, so just install the default. |
120 | VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc); |
121 | } |
122 | } |
123 | |
124 | static FunctionPass *createBasicSGPRRegisterAllocator() { |
125 | return createBasicRegisterAllocator(onlyAllocateSGPRs); |
126 | } |
127 | |
128 | static FunctionPass *createGreedySGPRRegisterAllocator() { |
129 | return createGreedyRegisterAllocator(onlyAllocateSGPRs); |
130 | } |
131 | |
132 | static FunctionPass *createFastSGPRRegisterAllocator() { |
133 | return createFastRegisterAllocator(onlyAllocateSGPRs, false); |
134 | } |
135 | |
136 | static FunctionPass *createBasicVGPRRegisterAllocator() { |
137 | return createBasicRegisterAllocator(onlyAllocateVGPRs); |
138 | } |
139 | |
140 | static FunctionPass *createGreedyVGPRRegisterAllocator() { |
141 | return createGreedyRegisterAllocator(onlyAllocateVGPRs); |
142 | } |
143 | |
144 | static FunctionPass *createFastVGPRRegisterAllocator() { |
145 | return createFastRegisterAllocator(onlyAllocateVGPRs, true); |
146 | } |
147 | |
148 | static SGPRRegisterRegAlloc basicRegAllocSGPR( |
149 | "basic", "basic register allocator", createBasicSGPRRegisterAllocator); |
150 | static SGPRRegisterRegAlloc greedyRegAllocSGPR( |
151 | "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator); |
152 | |
153 | static SGPRRegisterRegAlloc fastRegAllocSGPR( |
154 | "fast", "fast register allocator", createFastSGPRRegisterAllocator); |
155 | |
156 | |
157 | static VGPRRegisterRegAlloc basicRegAllocVGPR( |
158 | "basic", "basic register allocator", createBasicVGPRRegisterAllocator); |
159 | static VGPRRegisterRegAlloc greedyRegAllocVGPR( |
160 | "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator); |
161 | |
162 | static VGPRRegisterRegAlloc fastRegAllocVGPR( |
163 | "fast", "fast register allocator", createFastVGPRRegisterAllocator); |
164 | } |
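// A minimal sketch (hypothetical, not part of this file) of how one more VGPR
// allocator could be hooked into the same registry; the name "example" would
// then become a valid -vgpr-regalloc value:
//
//   static FunctionPass *createExampleVGPRRegisterAllocator() {
//     return createBasicRegisterAllocator(onlyAllocateVGPRs);
//   }
//   static VGPRRegisterRegAlloc exampleRegAllocVGPR(
//       "example", "example register allocator",
//       createExampleVGPRRegisterAllocator);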
165 | |
166 | static cl::opt<bool> EnableSROA( |
167 | "amdgpu-sroa", |
168 | cl::desc("Run SROA after promote alloca pass"), |
169 | cl::ReallyHidden, |
170 | cl::init(true)); |
171 | |
172 | static cl::opt<bool> |
173 | EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, |
174 | cl::desc("Run early if-conversion"), |
175 | cl::init(false)); |
176 | |
177 | static cl::opt<bool> |
178 | OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, |
179 | cl::desc("Run pre-RA exec mask optimizations"), |
180 | cl::init(true)); |
181 | |
182 | // Option to disable vectorizer for tests. |
183 | static cl::opt<bool> EnableLoadStoreVectorizer( |
184 | "amdgpu-load-store-vectorizer", |
185 | cl::desc("Enable load store vectorizer"), |
186 | cl::init(true), |
187 | cl::Hidden); |
188 | |
189 | // Option to control global loads scalarization |
190 | static cl::opt<bool> ScalarizeGlobal( |
191 | "amdgpu-scalarize-global-loads", |
192 | cl::desc("Enable global load scalarization"), |
193 | cl::init(true), |
194 | cl::Hidden); |
195 | |
196 | // Option to run internalize pass. |
197 | static cl::opt<bool> InternalizeSymbols( |
198 | "amdgpu-internalize-symbols", |
199 | cl::desc("Enable elimination of non-kernel functions and unused globals"), |
200 | cl::init(false), |
201 | cl::Hidden); |
202 | |
203 | // Option to inline all early. |
204 | static cl::opt<bool> EarlyInlineAll( |
205 | "amdgpu-early-inline-all", |
206 | cl::desc("Inline all functions early"), |
207 | cl::init(false), |
208 | cl::Hidden); |
209 | |
210 | static cl::opt<bool> EnableSDWAPeephole( |
211 | "amdgpu-sdwa-peephole", |
212 | cl::desc("Enable SDWA peepholer"), |
213 | cl::init(true)); |
214 | |
215 | static cl::opt<bool> EnableDPPCombine( |
216 | "amdgpu-dpp-combine", |
217 | cl::desc("Enable DPP combiner"), |
218 | cl::init(true)); |
219 | |
220 | // Enable address space based alias analysis |
221 | static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, |
222 | cl::desc("Enable AMDGPU Alias Analysis"), |
223 | cl::init(true)); |
224 | |
225 | // Option to run late CFG structurizer |
226 | static cl::opt<bool, true> LateCFGStructurize( |
227 | "amdgpu-late-structurize", |
228 | cl::desc("Enable late CFG structurization"), |
229 | cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), |
230 | cl::Hidden); |
231 | |
232 | static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt( |
233 | "amdgpu-fixed-function-abi", |
234 | cl::desc("Enable all implicit function arguments"), |
235 | cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI), |
236 | cl::init(false), |
237 | cl::Hidden); |
238 | |
239 | // Enable lib calls simplifications |
240 | static cl::opt<bool> EnableLibCallSimplify( |
241 | "amdgpu-simplify-libcall", |
242 | cl::desc("Enable amdgpu library simplifications"), |
243 | cl::init(true), |
244 | cl::Hidden); |
245 | |
246 | static cl::opt<bool> EnableLowerKernelArguments( |
247 | "amdgpu-ir-lower-kernel-arguments", |
248 | cl::desc("Lower kernel argument loads in IR pass"), |
249 | cl::init(true), |
250 | cl::Hidden); |
251 | |
252 | static cl::opt<bool> EnableRegReassign( |
253 | "amdgpu-reassign-regs", |
254 | cl::desc("Enable register reassign optimizations on gfx10+"), |
255 | cl::init(true), |
256 | cl::Hidden); |
257 | |
258 | static cl::opt<bool> OptVGPRLiveRange( |
259 | "amdgpu-opt-vgpr-liverange", |
260 | cl::desc("Enable VGPR liverange optimizations for if-else structure"), |
261 | cl::init(true), cl::Hidden); |
262 | |
263 | // Enable atomic optimization |
264 | static cl::opt<bool> EnableAtomicOptimizations( |
265 | "amdgpu-atomic-optimizations", |
266 | cl::desc("Enable atomic optimizations"), |
267 | cl::init(false), |
268 | cl::Hidden); |
269 | |
270 | // Enable Mode register optimization |
271 | static cl::opt<bool> EnableSIModeRegisterPass( |
272 | "amdgpu-mode-register", |
273 | cl::desc("Enable mode register pass"), |
274 | cl::init(true), |
275 | cl::Hidden); |
276 | |
277 | // This option is used in lit tests to keep the inspected patterns from being dead-coded. |
278 | static cl::opt<bool> |
279 | EnableDCEInRA("amdgpu-dce-in-ra", |
280 | cl::init(true), cl::Hidden, |
281 | cl::desc("Enable machine DCE inside regalloc")); |
282 | |
283 | static cl::opt<bool> EnableScalarIRPasses( |
284 | "amdgpu-scalar-ir-passes", |
285 | cl::desc("Enable scalar IR passes"), |
286 | cl::init(true), |
287 | cl::Hidden); |
288 | |
289 | static cl::opt<bool> EnableStructurizerWorkarounds( |
290 | "amdgpu-enable-structurizer-workarounds", |
291 | cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true), |
292 | cl::Hidden); |
293 | |
294 | static cl::opt<bool> EnableLDSReplaceWithPointer( |
295 | "amdgpu-enable-lds-replace-with-pointer", |
296 | cl::desc("Enable LDS replace with pointer pass"), cl::init(false), |
297 | cl::Hidden); |
298 | |
299 | static cl::opt<bool, true> EnableLowerModuleLDS( |
300 | "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), |
301 | cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), |
302 | cl::Hidden); |
303 | |
304 | static cl::opt<bool> EnablePreRAOptimizations( |
305 | "amdgpu-enable-pre-ra-optimizations", |
306 | cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), |
307 | cl::Hidden); |
308 | |
309 | extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__ ((visibility("default"))) void LLVMInitializeAMDGPUTarget() { |
310 | // Register the target |
311 | RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget()); |
312 | RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget()); |
313 | |
314 | PassRegistry *PR = PassRegistry::getPassRegistry(); |
315 | initializeR600ClauseMergePassPass(*PR); |
316 | initializeR600ControlFlowFinalizerPass(*PR); |
317 | initializeR600PacketizerPass(*PR); |
318 | initializeR600ExpandSpecialInstrsPassPass(*PR); |
319 | initializeR600VectorRegMergerPass(*PR); |
320 | initializeGlobalISel(*PR); |
321 | initializeAMDGPUDAGToDAGISelPass(*PR); |
322 | initializeGCNDPPCombinePass(*PR); |
323 | initializeSILowerI1CopiesPass(*PR); |
324 | initializeSILowerSGPRSpillsPass(*PR); |
325 | initializeSIFixSGPRCopiesPass(*PR); |
326 | initializeSIFixVGPRCopiesPass(*PR); |
327 | initializeSIFoldOperandsPass(*PR); |
328 | initializeSIPeepholeSDWAPass(*PR); |
329 | initializeSIShrinkInstructionsPass(*PR); |
330 | initializeSIOptimizeExecMaskingPreRAPass(*PR); |
331 | initializeSIOptimizeVGPRLiveRangePass(*PR); |
332 | initializeSILoadStoreOptimizerPass(*PR); |
333 | initializeAMDGPUFixFunctionBitcastsPass(*PR); |
334 | initializeAMDGPUCtorDtorLoweringPass(*PR); |
335 | initializeAMDGPUAlwaysInlinePass(*PR); |
336 | initializeAMDGPUAttributorPass(*PR); |
337 | initializeAMDGPUAnnotateKernelFeaturesPass(*PR); |
338 | initializeAMDGPUAnnotateUniformValuesPass(*PR); |
339 | initializeAMDGPUArgumentUsageInfoPass(*PR); |
340 | initializeAMDGPUAtomicOptimizerPass(*PR); |
341 | initializeAMDGPULowerKernelArgumentsPass(*PR); |
342 | initializeAMDGPULowerKernelAttributesPass(*PR); |
343 | initializeAMDGPULowerIntrinsicsPass(*PR); |
344 | initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR); |
345 | initializeAMDGPUPostLegalizerCombinerPass(*PR); |
346 | initializeAMDGPUPreLegalizerCombinerPass(*PR); |
347 | initializeAMDGPURegBankCombinerPass(*PR); |
348 | initializeAMDGPUPromoteAllocaPass(*PR); |
349 | initializeAMDGPUPromoteAllocaToVectorPass(*PR); |
350 | initializeAMDGPUCodeGenPreparePass(*PR); |
351 | initializeAMDGPULateCodeGenPreparePass(*PR); |
352 | initializeAMDGPUPropagateAttributesEarlyPass(*PR); |
353 | initializeAMDGPUPropagateAttributesLatePass(*PR); |
354 | initializeAMDGPUReplaceLDSUseWithPointerPass(*PR); |
355 | initializeAMDGPULowerModuleLDSPass(*PR); |
356 | initializeAMDGPURewriteOutArgumentsPass(*PR); |
357 | initializeAMDGPUUnifyMetadataPass(*PR); |
358 | initializeSIAnnotateControlFlowPass(*PR); |
359 | initializeSIInsertHardClausesPass(*PR); |
360 | initializeSIInsertWaitcntsPass(*PR); |
361 | initializeSIModeRegisterPass(*PR); |
362 | initializeSIWholeQuadModePass(*PR); |
363 | initializeSILowerControlFlowPass(*PR); |
364 | initializeSIPreEmitPeepholePass(*PR); |
365 | initializeSILateBranchLoweringPass(*PR); |
366 | initializeSIMemoryLegalizerPass(*PR); |
367 | initializeSIOptimizeExecMaskingPass(*PR); |
368 | initializeSIPreAllocateWWMRegsPass(*PR); |
369 | initializeSIFormMemoryClausesPass(*PR); |
370 | initializeSIPostRABundlerPass(*PR); |
371 | initializeAMDGPUUnifyDivergentExitNodesPass(*PR); |
372 | initializeAMDGPUAAWrapperPassPass(*PR); |
373 | initializeAMDGPUExternalAAWrapperPass(*PR); |
374 | initializeAMDGPUUseNativeCallsPass(*PR); |
375 | initializeAMDGPUSimplifyLibCallsPass(*PR); |
376 | initializeAMDGPUPrintfRuntimeBindingPass(*PR); |
377 | initializeAMDGPUResourceUsageAnalysisPass(*PR); |
378 | initializeGCNNSAReassignPass(*PR); |
379 | initializeGCNPreRAOptimizationsPass(*PR); |
380 | } |
381 | |
382 | static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) { |
383 | return std::make_unique<AMDGPUTargetObjectFile>(); |
384 | } |
385 | |
386 | static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) { |
387 | return new SIScheduleDAGMI(C); |
388 | } |
389 | |
390 | static ScheduleDAGInstrs * |
391 | createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) { |
392 | ScheduleDAGMILive *DAG = |
393 | new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C)); |
394 | DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
395 | DAG->addMutation(createAMDGPUMacroFusionDAGMutation()); |
396 | DAG->addMutation(createAMDGPUExportClusteringDAGMutation()); |
397 | return DAG; |
398 | } |
399 | |
400 | static ScheduleDAGInstrs * |
401 | createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) { |
402 | auto DAG = new GCNIterativeScheduler(C, |
403 | GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY); |
404 | DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
405 | return DAG; |
406 | } |
407 | |
408 | static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) { |
409 | return new GCNIterativeScheduler(C, |
410 | GCNIterativeScheduler::SCHEDULE_MINREGFORCED); |
411 | } |
412 | |
413 | static ScheduleDAGInstrs * |
414 | createIterativeILPMachineScheduler(MachineSchedContext *C) { |
415 | auto DAG = new GCNIterativeScheduler(C, |
416 | GCNIterativeScheduler::SCHEDULE_ILP); |
417 | DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
418 | DAG->addMutation(createAMDGPUMacroFusionDAGMutation()); |
419 | return DAG; |
420 | } |
421 | |
422 | static MachineSchedRegistry |
423 | SISchedRegistry("si", "Run SI's custom scheduler", |
424 | createSIMachineScheduler); |
425 | |
426 | static MachineSchedRegistry |
427 | GCNMaxOccupancySchedRegistry("gcn-max-occupancy", |
428 | "Run GCN scheduler to maximize occupancy", |
429 | createGCNMaxOccupancyMachineScheduler); |
430 | |
431 | static MachineSchedRegistry |
432 | IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental", |
433 | "Run GCN scheduler to maximize occupancy (experimental)", |
434 | createIterativeGCNMaxOccupancyMachineScheduler); |
435 | |
436 | static MachineSchedRegistry |
437 | GCNMinRegSchedRegistry("gcn-minreg", |
438 | "Run GCN iterative scheduler for minimal register usage (experimental)", |
439 | createMinRegScheduler); |
440 | |
441 | static MachineSchedRegistry |
442 | GCNILPSchedRegistry("gcn-ilp", |
443 | "Run GCN iterative scheduler for ILP scheduling (experimental)", |
444 | createIterativeILPMachineScheduler); |
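// These MachineSchedRegistry entries are selected with -misched; e.g. a
// hypothetical invocation forcing the iterative ILP scheduler:
//
//   llc -mtriple=amdgcn-amd-amdhsa -misched=gcn-ilp input.ll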
445 | |
446 | static StringRef computeDataLayout(const Triple &TT) { |
447 | if (TT.getArch() == Triple::r600) { |
448 | // 32-bit pointers. |
449 | return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" |
450 | "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"; |
451 | } |
452 | |
453 | // 32-bit private, local, and region pointers. 64-bit global, constant and |
454 | // flat pointers. Buffer fat pointers (address space 7) are non-integral. |
455 | return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32" |
456 | "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" |
457 | "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1" |
458 | "-ni:7"; |
459 | } |
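// Reading the GCN string above (standard datalayout syntax): "p5:32:32" gives
// address space 5 (private) 32-bit pointers, "A5" puts allocas in address
// space 5, "G1" makes address space 1 (global) the default for globals, and
// "ni:7" marks address space 7 (buffer fat pointers) as non-integral.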
460 | |
461 | LLVM_READNONE |
462 | static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) { |
463 | if (!GPU.empty()) |
464 | return GPU; |
465 | |
466 | // Need to default to a target with flat support for HSA. |
467 | if (TT.getArch() == Triple::amdgcn) |
468 | return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic"; |
469 | |
470 | return "r600"; |
471 | } |
472 | |
473 | static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) { |
474 | // The AMDGPU toolchain only supports generating shared objects, so we |
475 | // must always use PIC. |
476 | return Reloc::PIC_; |
477 | } |
478 | |
479 | AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT, |
480 | StringRef CPU, StringRef FS, |
481 | TargetOptions Options, |
482 | Optional<Reloc::Model> RM, |
483 | Optional<CodeModel::Model> CM, |
484 | CodeGenOpt::Level OptLevel) |
485 | : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), |
486 | FS, Options, getEffectiveRelocModel(RM), |
487 | getEffectiveCodeModel(CM, CodeModel::Small), OptLevel), |
488 | TLOF(createTLOF(getTargetTriple())) { |
489 | initAsmInfo(); |
490 | if (TT.getArch() == Triple::amdgcn) { |
491 | if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64")) |
492 | MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64)); |
493 | else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32")) |
494 | MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32)); |
495 | } |
496 | } |
497 | |
498 | bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false; |
499 | bool AMDGPUTargetMachine::EnableFunctionCalls = false; |
500 | bool AMDGPUTargetMachine::EnableFixedFunctionABI = false; |
501 | bool AMDGPUTargetMachine::EnableLowerModuleLDS = true; |
502 | |
503 | AMDGPUTargetMachine::~AMDGPUTargetMachine() = default; |
504 | |
505 | StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const { |
506 | Attribute GPUAttr = F.getFnAttribute("target-cpu"); |
507 | return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU(); |
508 | } |
509 | |
510 | StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const { |
511 | Attribute FSAttr = F.getFnAttribute("target-features"); |
512 | |
513 | return FSAttr.isValid() ? FSAttr.getValueAsString() |
514 | : getTargetFeatureString(); |
515 | } |
516 | |
517 | /// Predicate for Internalize pass. |
518 | static bool mustPreserveGV(const GlobalValue &GV) { |
519 | if (const Function *F = dyn_cast<Function>(&GV)) |
520 | return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv()); |
521 | |
522 | GV.removeDeadConstantUsers(); |
523 | return !GV.use_empty(); |
524 | } |
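// For example, under -amdgpu-internalize-symbols this predicate keeps kernels
// while letting device-only helpers be internalized and later dropped:
//
//   define amdgpu_kernel void @k() { ... }  ; preserved (entry function)
//   define void @helper() { ... }           ; internalized; GlobalDCE may
//                                           ; remove it once unused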
525 | |
526 | void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) { |
527 | Builder.DivergentTarget = true; |
528 | |
529 | bool EnableOpt = getOptLevel() > CodeGenOpt::None; |
530 | bool Internalize = InternalizeSymbols; |
531 | bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls; |
532 | bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt; |
533 | bool LibCallSimplify = EnableLibCallSimplify && EnableOpt; |
534 | |
535 | if (EnableFunctionCalls) { |
536 | delete Builder.Inliner; |
537 | Builder.Inliner = createFunctionInliningPass(); |
538 | } |
539 | |
540 | Builder.addExtension( |
541 | PassManagerBuilder::EP_ModuleOptimizerEarly, |
542 | [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &, |
543 | legacy::PassManagerBase &PM) { |
544 | if (AMDGPUAA) { |
545 | PM.add(createAMDGPUAAWrapperPass()); |
546 | PM.add(createAMDGPUExternalAAWrapperPass()); |
547 | } |
548 | PM.add(createAMDGPUUnifyMetadataPass()); |
549 | PM.add(createAMDGPUPrintfRuntimeBinding()); |
550 | if (Internalize) |
551 | PM.add(createInternalizePass(mustPreserveGV)); |
552 | PM.add(createAMDGPUPropagateAttributesLatePass(this)); |
553 | if (Internalize) |
554 | PM.add(createGlobalDCEPass()); |
555 | if (EarlyInline) |
556 | PM.add(createAMDGPUAlwaysInlinePass(false)); |
557 | }); |
558 | |
559 | Builder.addExtension( |
560 | PassManagerBuilder::EP_EarlyAsPossible, |
561 | [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &, |
562 | legacy::PassManagerBase &PM) { |
563 | if (AMDGPUAA) { |
564 | PM.add(createAMDGPUAAWrapperPass()); |
565 | PM.add(createAMDGPUExternalAAWrapperPass()); |
566 | } |
567 | PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this)); |
568 | PM.add(llvm::createAMDGPUUseNativeCallsPass()); |
569 | if (LibCallSimplify) |
570 | PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this)); |
571 | }); |
572 | |
573 | Builder.addExtension( |
574 | PassManagerBuilder::EP_CGSCCOptimizerLate, |
575 | [EnableOpt](const PassManagerBuilder &, legacy::PassManagerBase &PM) { |
576 | // Add infer address spaces pass to the opt pipeline after inlining |
577 | // but before SROA to increase SROA opportunities. |
578 | PM.add(createInferAddressSpacesPass()); |
579 | |
580 | // This should run after inlining to have any chance of doing anything, |
581 | // and before other cleanup optimizations. |
582 | PM.add(createAMDGPULowerKernelAttributesPass()); |
583 | |
584 | // Promote alloca to vector before SROA and loop unroll. If we manage |
585 | // to eliminate allocas before unroll we may choose to unroll less. |
586 | if (EnableOpt) |
587 | PM.add(createAMDGPUPromoteAllocaToVector()); |
588 | }); |
589 | } |
590 | |
591 | void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) { |
592 | AAM.registerFunctionAnalysis<AMDGPUAA>(); |
593 | } |
594 | |
595 | void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { |
596 | PB.registerPipelineParsingCallback( |
597 | [this](StringRef PassName, ModulePassManager &PM, |
598 | ArrayRef<PassBuilder::PipelineElement>) { |
599 | if (PassName == "amdgpu-propagate-attributes-late") { |
600 | PM.addPass(AMDGPUPropagateAttributesLatePass(*this)); |
601 | return true; |
602 | } |
603 | if (PassName == "amdgpu-unify-metadata") { |
604 | PM.addPass(AMDGPUUnifyMetadataPass()); |
605 | return true; |
606 | } |
607 | if (PassName == "amdgpu-printf-runtime-binding") { |
608 | PM.addPass(AMDGPUPrintfRuntimeBindingPass()); |
609 | return true; |
610 | } |
611 | if (PassName == "amdgpu-always-inline") { |
612 | PM.addPass(AMDGPUAlwaysInlinePass()); |
613 | return true; |
614 | } |
615 | if (PassName == "amdgpu-replace-lds-use-with-pointer") { |
616 | PM.addPass(AMDGPUReplaceLDSUseWithPointerPass()); |
617 | return true; |
618 | } |
619 | if (PassName == "amdgpu-lower-module-lds") { |
620 | PM.addPass(AMDGPULowerModuleLDSPass()); |
621 | return true; |
622 | } |
623 | return false; |
624 | }); |
625 | PB.registerPipelineParsingCallback( |
626 | [this](StringRef PassName, FunctionPassManager &PM, |
627 | ArrayRef<PassBuilder::PipelineElement>) { |
628 | if (PassName == "amdgpu-simplifylib") { |
629 | PM.addPass(AMDGPUSimplifyLibCallsPass(*this)); |
630 | return true; |
631 | } |
632 | if (PassName == "amdgpu-usenative") { |
633 | PM.addPass(AMDGPUUseNativeCallsPass()); |
634 | return true; |
635 | } |
636 | if (PassName == "amdgpu-promote-alloca") { |
637 | PM.addPass(AMDGPUPromoteAllocaPass(*this)); |
638 | return true; |
639 | } |
640 | if (PassName == "amdgpu-promote-alloca-to-vector") { |
641 | PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this)); |
642 | return true; |
643 | } |
644 | if (PassName == "amdgpu-lower-kernel-attributes") { |
645 | PM.addPass(AMDGPULowerKernelAttributesPass()); |
646 | return true; |
647 | } |
648 | if (PassName == "amdgpu-propagate-attributes-early") { |
649 | PM.addPass(AMDGPUPropagateAttributesEarlyPass(*this)); |
650 | return true; |
651 | } |
652 | return false; |
653 | }); |
654 | |
655 | PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) { |
656 | FAM.registerPass([&] { return AMDGPUAA(); }); |
657 | }); |
658 | |
659 | PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) { |
660 | if (AAName == "amdgpu-aa") { |
661 | AAM.registerFunctionAnalysis<AMDGPUAA>(); |
662 | return true; |
663 | } |
664 | return false; |
665 | }); |
666 | |
667 | PB.registerPipelineStartEPCallback( |
668 | [this](ModulePassManager &PM, OptimizationLevel Level) { |
669 | FunctionPassManager FPM; |
670 | FPM.addPass(AMDGPUPropagateAttributesEarlyPass(*this)); |
671 | FPM.addPass(AMDGPUUseNativeCallsPass()); |
672 | if (EnableLibCallSimplify && Level != OptimizationLevel::O0) |
673 | FPM.addPass(AMDGPUSimplifyLibCallsPass(*this)); |
674 | PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); |
675 | }); |
676 | |
677 | PB.registerPipelineEarlySimplificationEPCallback( |
678 | [this](ModulePassManager &PM, OptimizationLevel Level) { |
679 | if (Level == OptimizationLevel::O0) |
680 | return; |
681 | |
682 | PM.addPass(AMDGPUUnifyMetadataPass()); |
683 | PM.addPass(AMDGPUPrintfRuntimeBindingPass()); |
684 | |
685 | if (InternalizeSymbols) { |
686 | PM.addPass(InternalizePass(mustPreserveGV)); |
687 | } |
688 | PM.addPass(AMDGPUPropagateAttributesLatePass(*this)); |
689 | if (InternalizeSymbols) { |
690 | PM.addPass(GlobalDCEPass()); |
691 | } |
692 | if (EarlyInlineAll && !EnableFunctionCalls) |
693 | PM.addPass(AMDGPUAlwaysInlinePass()); |
694 | }); |
695 | |
696 | PB.registerCGSCCOptimizerLateEPCallback( |
697 | [this](CGSCCPassManager &PM, OptimizationLevel Level) { |
698 | if (Level == OptimizationLevel::O0) |
699 | return; |
700 | |
701 | FunctionPassManager FPM; |
702 | |
703 | // Add infer address spaces pass to the opt pipeline after inlining |
704 | // but before SROA to increase SROA opportunities. |
705 | FPM.addPass(InferAddressSpacesPass()); |
706 | |
707 | // This should run after inlining to have any chance of doing |
708 | // anything, and before other cleanup optimizations. |
709 | FPM.addPass(AMDGPULowerKernelAttributesPass()); |
710 | |
711 | if (Level != OptimizationLevel::O0) { |
712 | // Promote alloca to vector before SROA and loop unroll. If we |
713 | // manage to eliminate allocas before unroll we may choose to unroll |
714 | // less. |
715 | FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this)); |
716 | } |
717 | |
718 | PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM))); |
719 | }); |
720 | } |
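// With the parsing callbacks above registered, the new pass manager accepts
// these names directly; e.g. a hypothetical opt invocation:
//
//   opt -mtriple=amdgcn-amd-amdhsa \
//       -passes=amdgpu-promote-alloca,amdgpu-lower-kernel-attributes in.ll -S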
721 | |
722 | int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) { |
723 | return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
724 | AddrSpace == AMDGPUAS::PRIVATE_ADDRESS || |
725 | AddrSpace == AMDGPUAS::REGION_ADDRESS) |
726 | ? -1 |
727 | : 0; |
728 | } |
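// That is, null is encoded as all-ones for local, private, and region
// pointers, since offset 0 is a valid, addressable location in those address
// spaces; every other address space keeps the conventional 0:
//
//   i8 addrspace(5)* null  ->  0xFFFFFFFF  (private)
//   i8 addrspace(1)* null  ->  0x0         (global)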
729 | |
730 | bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS, |
731 | unsigned DestAS) const { |
732 | return AMDGPU::isFlatGlobalAddrSpace(SrcAS) && |
733 | AMDGPU::isFlatGlobalAddrSpace(DestAS); |
734 | } |
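// Consequently an addrspacecast between two flat/global-compatible address
// spaces (e.g. flat <-> global) needs no instruction, since they share the
// same 64-bit address encoding:
//
//   %g = addrspacecast i8* %flat to i8 addrspace(1)*  ; folds to a no-op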
735 | |
736 | unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const { |
737 | const auto *LD = dyn_cast<LoadInst>(V); |
738 | if (!LD) |
739 | return AMDGPUAS::UNKNOWN_ADDRESS_SPACE; |
740 | |
741 | // It must be a generic pointer being loaded. |
742 | assert(V->getType()->isPointerTy() && |
743 |        V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS); |
744 | |
745 | const auto *Ptr = LD->getPointerOperand(); |
746 | if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) |
747 | return AMDGPUAS::UNKNOWN_ADDRESS_SPACE; |
748 | // A generic pointer loaded from constant memory can be assumed to be a |
749 | // global pointer, since constant memory is only populated on the host |
750 | // side. As implied by the offload programming model, only global |
751 | // pointers can be referenced on the host side. |
752 | return AMDGPUAS::GLOBAL_ADDRESS; |
753 | } |
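// A sketch of the pattern recognized here (hypothetical IR): a flat pointer
// loaded from constant memory (address space 4) is assumed to address global
// memory, which lets InferAddressSpaces strengthen later accesses:
//
//   %p = load i8*, i8* addrspace(4)* %q  ; generic pointer from constant mem
//   %v = load i8, i8* %p                 ; may be treated as addrspace(1)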
754 | |
755 | //===----------------------------------------------------------------------===// |
756 | // GCN Target Machine (SI+) |
757 | //===----------------------------------------------------------------------===// |
758 | |
759 | GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT, |
760 | StringRef CPU, StringRef FS, |
761 | TargetOptions Options, |
762 | Optional<Reloc::Model> RM, |
763 | Optional<CodeModel::Model> CM, |
764 | CodeGenOpt::Level OL, bool JIT) |
765 | : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {} |
766 | |
767 | const TargetSubtargetInfo * |
768 | GCNTargetMachine::getSubtargetImpl(const Function &F) const { |
769 | StringRef GPU = getGPUName(F); |
770 | StringRef FS = getFeatureString(F); |
771 | |
772 | SmallString<128> SubtargetKey(GPU); |
773 | SubtargetKey.append(FS); |
774 | |
775 | auto &I = SubtargetMap[SubtargetKey]; |
776 | if (!I) { |
777 | // This needs to be done before we create a new subtarget since any |
778 | // creation will depend on the TM and the code generation flags on the |
779 | // function that reside in TargetOptions. |
780 | resetTargetOptions(F); |
781 | I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this); |
782 | } |
783 | |
784 | I->setScalarizeGlobalBehavior(ScalarizeGlobal); |
785 | |
786 | return I.get(); |
787 | } |
788 | |
789 | TargetTransformInfo |
790 | GCNTargetMachine::getTargetTransformInfo(const Function &F) { |
791 | return TargetTransformInfo(GCNTTIImpl(this, F)); |
792 | } |
793 | |
794 | //===----------------------------------------------------------------------===// |
795 | // AMDGPU Pass Setup |
796 | //===----------------------------------------------------------------------===// |
797 | |
798 | std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const { |
799 | return getStandardCSEConfigForOpt(TM->getOptLevel()); |
800 | } |
801 | |
802 | namespace { |
803 | |
804 | class GCNPassConfig final : public AMDGPUPassConfig { |
805 | public: |
806 | GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM) |
807 | : AMDGPUPassConfig(TM, PM) { |
808 | // It is necessary to know the register usage of the entire call graph. We |
809 | // allow calls without EnableAMDGPUFunctionCalls if they are marked |
810 | // noinline, so this is always required. |
811 | setRequiresCodeGenSCCOrder(true); |
812 | } |
813 | |
814 | GCNTargetMachine &getGCNTargetMachine() const { |
815 | return getTM<GCNTargetMachine>(); |
816 | } |
817 | |
818 | ScheduleDAGInstrs * |
819 | createMachineScheduler(MachineSchedContext *C) const override; |
820 | |
821 | bool addPreISel() override; |
822 | void addMachineSSAOptimization() override; |
823 | bool addILPOpts() override; |
824 | bool addInstSelector() override; |
825 | bool addIRTranslator() override; |
826 | void addPreLegalizeMachineIR() override; |
827 | bool addLegalizeMachineIR() override; |
828 | void addPreRegBankSelect() override; |
829 | bool addRegBankSelect() override; |
830 | void addPreGlobalInstructionSelect() override; |
831 | bool addGlobalInstructionSelect() override; |
832 | void addFastRegAlloc() override; |
833 | void addOptimizedRegAlloc() override; |
834 | |
835 | FunctionPass *createSGPRAllocPass(bool Optimized); |
836 | FunctionPass *createVGPRAllocPass(bool Optimized); |
837 | FunctionPass *createRegAllocPass(bool Optimized) override; |
838 | |
839 | bool addRegAssignAndRewriteFast() override; |
840 | bool addRegAssignAndRewriteOptimized() override; |
841 | |
842 | void addPreRegAlloc() override; |
843 | bool addPreRewrite() override; |
844 | void addPostRegAlloc() override; |
845 | void addPreSched2() override; |
846 | void addPreEmitPass() override; |
847 | }; |
848 | |
849 | } // end anonymous namespace |
850 | |
851 | AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM) |
852 | : TargetPassConfig(TM, PM) { |
853 | // Exceptions and StackMaps are not supported, so these passes will never do |
854 | // anything. |
855 | disablePass(&StackMapLivenessID); |
856 | disablePass(&FuncletLayoutID); |
857 | // Garbage collection is not supported. |
858 | disablePass(&GCLoweringID); |
859 | disablePass(&ShadowStackGCLoweringID); |
860 | } |
861 | |
862 | void AMDGPUPassConfig::addEarlyCSEOrGVNPass() { |
863 | if (getOptLevel() == CodeGenOpt::Aggressive) |
864 | addPass(createGVNPass()); |
865 | else |
866 | addPass(createEarlyCSEPass()); |
867 | } |
868 | |
869 | void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() { |
870 | addPass(createLICMPass()); |
871 | addPass(createSeparateConstOffsetFromGEPPass()); |
872 | addPass(createSpeculativeExecutionPass()); |
873 | // ReassociateGEPs exposes more opportunities for SLSR. See |
874 | // the example in reassociate-geps-and-slsr.ll. |
875 | addPass(createStraightLineStrengthReducePass()); |
876 | // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or |
877 | // EarlyCSE can reuse. |
878 | addEarlyCSEOrGVNPass(); |
879 | // Run NaryReassociate after EarlyCSE/GVN to be more effective. |
880 | addPass(createNaryReassociatePass()); |
881 | // NaryReassociate on GEPs creates redundant common expressions, so run |
882 | // EarlyCSE after it. |
883 | addPass(createEarlyCSEPass()); |
884 | } |
885 | |
886 | void AMDGPUPassConfig::addIRPasses() { |
887 | const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine(); |
888 | |
889 | // There is no reason to run these. |
890 | disablePass(&StackMapLivenessID); |
891 | disablePass(&FuncletLayoutID); |
892 | disablePass(&PatchableFunctionID); |
893 | |
894 | addPass(createAMDGPUPrintfRuntimeBinding()); |
895 | addPass(createAMDGPUCtorDtorLoweringPass()); |
896 | |
897 | // This must occur before inlining, as the inliner will not look through |
898 | // bitcast calls. |
899 | addPass(createAMDGPUFixFunctionBitcastsPass()); |
900 | |
901 | // Run the propagate-attributes pass in the backend in case opt was not run. |
902 | addPass(createAMDGPUPropagateAttributesEarlyPass(&TM)); |
903 | |
904 | addPass(createAMDGPULowerIntrinsicsPass()); |
905 | |
906 | // Function calls are not supported, so make sure we inline everything. |
907 | addPass(createAMDGPUAlwaysInlinePass()); |
908 | addPass(createAlwaysInlinerLegacyPass()); |
909 | // We need to add the barrier noop pass, otherwise adding the function |
910 | // inlining pass will cause all of the PassConfig's passes to be run |
911 | // one function at a time, which means if we have a module with two |
912 | // functions, then we will generate code for the first function |
913 | // without ever running any passes on the second. |
914 | addPass(createBarrierNoopPass()); |
915 | |
916 | // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments. |
917 | if (TM.getTargetTriple().getArch() == Triple::r600) |
918 | addPass(createR600OpenCLImageTypeLoweringPass()); |
919 | |
920 | // Replace OpenCL enqueued block function pointers with global variables. |
921 | addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass()); |
922 | |
923 | // This can increase the LDS used by the kernel, so it runs before PromoteAlloca. |
924 | if (EnableLowerModuleLDS) { |
925 | // The pass "amdgpu-replace-lds-use-with-pointer" need to be run before the |
926 | // pass "amdgpu-lower-module-lds", and also it required to be run only if |
927 | // "amdgpu-lower-module-lds" pass is enabled. |
928 | if (EnableLDSReplaceWithPointer) |
929 | addPass(createAMDGPUReplaceLDSUseWithPointerPass()); |
930 | |
931 | addPass(createAMDGPULowerModuleLDSPass()); |
932 | } |
933 | |
934 | if (TM.getOptLevel() > CodeGenOpt::None) |
935 | addPass(createInferAddressSpacesPass()); |
936 | |
937 | addPass(createAtomicExpandPass()); |
938 | |
939 | if (TM.getOptLevel() > CodeGenOpt::None) { |
940 | addPass(createAMDGPUPromoteAlloca()); |
941 | |
942 | if (EnableSROA) |
943 | addPass(createSROAPass()); |
944 | if (isPassEnabled(EnableScalarIRPasses)) |
945 | addStraightLineScalarOptimizationPasses(); |
946 | |
947 | if (EnableAMDGPUAliasAnalysis) { |
948 | addPass(createAMDGPUAAWrapperPass()); |
949 | addPass(createExternalAAWrapperPass([](Pass &P, Function &, |
950 | AAResults &AAR) { |
951 | if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>()) |
952 | AAR.addAAResult(WrapperPass->getResult()); |
953 | })); |
954 | } |
955 | |
956 | if (TM.getTargetTriple().getArch() == Triple::amdgcn) { |
957 | // TODO: May want to move later or split into an early and late one. |
958 | addPass(createAMDGPUCodeGenPreparePass()); |
959 | } |
960 | } |
961 | |
962 | TargetPassConfig::addIRPasses(); |
963 | |
964 | // EarlyCSE is not always strong enough to clean up what LSR produces. For |
965 | // example, GVN can combine |
966 | // |
967 | // %0 = add %a, %b |
968 | // %1 = add %b, %a |
969 | // |
970 | // and |
971 | // |
972 | // %0 = shl nsw %a, 2 |
973 | // %1 = shl %a, 2 |
974 | // |
975 | // but EarlyCSE can do neither of them. |
976 | if (isPassEnabled(EnableScalarIRPasses)) |
977 | addEarlyCSEOrGVNPass(); |
978 | } |
979 | |
980 | void AMDGPUPassConfig::addCodeGenPrepare() { |
981 | if (TM->getTargetTriple().getArch() == Triple::amdgcn) |
982 | addPass(createAMDGPUAnnotateKernelFeaturesPass()); |
983 | |
984 | if (TM->getTargetTriple().getArch() == Triple::amdgcn && |
985 | EnableLowerKernelArguments) |
986 | addPass(createAMDGPULowerKernelArgumentsPass()); |
987 | |
988 | TargetPassConfig::addCodeGenPrepare(); |
989 | |
990 | if (isPassEnabled(EnableLoadStoreVectorizer)) |
991 | addPass(createLoadStoreVectorizerPass()); |
992 | |
993 | // The LowerSwitch pass may introduce unreachable blocks that can |
994 | // cause unexpected behavior for subsequent passes. Placing it |
995 | // here means these blocks get cleaned up by |
996 | // UnreachableBlockElim, inserted next in the pass flow. |
997 | addPass(createLowerSwitchPass()); |
998 | } |
999 | |
1000 | bool AMDGPUPassConfig::addPreISel() { |
1001 | if (TM->getOptLevel() > CodeGenOpt::None) |
1002 | addPass(createFlattenCFGPass()); |
1003 | return false; |
1004 | } |
1005 | |
1006 | bool AMDGPUPassConfig::addInstSelector() { |
1007 | // Defer the verifier until FinalizeISel. |
1008 | addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false); |
1009 | return false; |
1010 | } |
1011 | |
1012 | bool AMDGPUPassConfig::addGCPasses() { |
1013 | // Do nothing. GC is not supported. |
1014 | return false; |
1015 | } |
1016 | |
1017 | llvm::ScheduleDAGInstrs * |
1018 | AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const { |
1019 | ScheduleDAGMILive *DAG = createGenericSchedLive(C); |
1020 | DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); |
1021 | return DAG; |
1022 | } |
1023 | |
1024 | //===----------------------------------------------------------------------===// |
1025 | // GCN Pass Setup |
1026 | //===----------------------------------------------------------------------===// |
1027 | |
1028 | ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler( |
1029 | MachineSchedContext *C) const { |
1030 | const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>(); |
1031 | if (ST.enableSIScheduler()) |
1032 | return createSIMachineScheduler(C); |
1033 | return createGCNMaxOccupancyMachineScheduler(C); |
1034 | } |
1035 | |
1036 | bool GCNPassConfig::addPreISel() { |
1037 | AMDGPUPassConfig::addPreISel(); |
1038 | |
1039 | if (TM->getOptLevel() > CodeGenOpt::None) |
1040 | addPass(createAMDGPULateCodeGenPreparePass()); |
1041 | |
1042 | if (isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less)) { |
1043 | addPass(createAMDGPUAtomicOptimizerPass()); |
1044 | } |
1045 | |
1046 | if (TM->getOptLevel() > CodeGenOpt::None) |
1047 | addPass(createSinkingPass()); |
1048 | |
1049 | // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit |
1050 | // regions formed by them. |
1051 | addPass(&AMDGPUUnifyDivergentExitNodesID); |
1052 | if (!LateCFGStructurize) { |
1053 | if (EnableStructurizerWorkarounds) { |
1054 | addPass(createFixIrreduciblePass()); |
1055 | addPass(createUnifyLoopExitsPass()); |
1056 | } |
1057 | addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions |
1058 | } |
1059 | addPass(createAMDGPUAnnotateUniformValues()); |
1060 | if (!LateCFGStructurize) { |
1061 | addPass(createSIAnnotateControlFlowPass()); |
1062 | } |
1063 | addPass(createLCSSAPass()); |
1064 | |
1065 | if (TM->getOptLevel() > CodeGenOpt::Less) |
1066 | addPass(&AMDGPUPerfHintAnalysisID); |
1067 | |
1068 | return false; |
1069 | } |
1070 | |
1071 | void GCNPassConfig::addMachineSSAOptimization() { |
1072 | TargetPassConfig::addMachineSSAOptimization(); |
1073 | |
1074 | // We want to fold operands after PeepholeOptimizer has run (or as part of |
1075 | // it), because it will eliminate extra copies making it easier to fold the |
1076 | // real source operand. We want to eliminate dead instructions after, so that |
1077 | // we see fewer uses of the copies. We then need to clean up the dead |
1078 | // instructions leftover after the operands are folded as well. |
1079 | // |
1080 | // XXX - Can we get away without running DeadMachineInstructionElim again? |
1081 | addPass(&SIFoldOperandsID); |
1082 | if (EnableDPPCombine) |
1083 | addPass(&GCNDPPCombineID); |
1084 | addPass(&SILoadStoreOptimizerID); |
1085 | if (isPassEnabled(EnableSDWAPeephole)) { |
1086 | addPass(&SIPeepholeSDWAID); |
1087 | addPass(&EarlyMachineLICMID); |
1088 | addPass(&MachineCSEID); |
1089 | addPass(&SIFoldOperandsID); |
1090 | } |
1091 | addPass(&DeadMachineInstructionElimID); |
1092 | addPass(createSIShrinkInstructionsPass()); |
1093 | } |
1094 | |
1095 | bool GCNPassConfig::addILPOpts() { |
1096 | if (EnableEarlyIfConversion) |
1097 | addPass(&EarlyIfConverterID); |
1098 | |
1099 | TargetPassConfig::addILPOpts(); |
1100 | return false; |
1101 | } |
1102 | |
1103 | bool GCNPassConfig::addInstSelector() { |
1104 | AMDGPUPassConfig::addInstSelector(); |
1105 | addPass(&SIFixSGPRCopiesID); |
1106 | addPass(createSILowerI1CopiesPass()); |
1107 | return false; |
1108 | } |
1109 | |
1110 | bool GCNPassConfig::addIRTranslator() { |
1111 | addPass(new IRTranslator(getOptLevel())); |
1112 | return false; |
1113 | } |
1114 | |
1115 | void GCNPassConfig::addPreLegalizeMachineIR() { |
1116 | bool IsOptNone = getOptLevel() == CodeGenOpt::None; |
1117 | addPass(createAMDGPUPreLegalizeCombiner(IsOptNone)); |
1118 | addPass(new Localizer()); |
1119 | } |
1120 | |
1121 | bool GCNPassConfig::addLegalizeMachineIR() { |
1122 | addPass(new Legalizer()); |
1123 | return false; |
1124 | } |
1125 | |
1126 | void GCNPassConfig::addPreRegBankSelect() { |
1127 | bool IsOptNone = getOptLevel() == CodeGenOpt::None; |
1128 | addPass(createAMDGPUPostLegalizeCombiner(IsOptNone)); |
1129 | } |
1130 | |
1131 | bool GCNPassConfig::addRegBankSelect() { |
1132 | addPass(new RegBankSelect()); |
1133 | return false; |
1134 | } |
1135 | |
1136 | void GCNPassConfig::addPreGlobalInstructionSelect() { |
1137 | bool IsOptNone = getOptLevel() == CodeGenOpt::None; |
1138 | addPass(createAMDGPURegBankCombiner(IsOptNone)); |
1139 | } |
1140 | |
1141 | bool GCNPassConfig::addGlobalInstructionSelect() { |
1142 | addPass(new InstructionSelect(getOptLevel())); |
1143 | return false; |
1144 | } |
1145 | |
1146 | void GCNPassConfig::addPreRegAlloc() { |
1147 | if (LateCFGStructurize) { |
1148 | addPass(createAMDGPUMachineCFGStructurizerPass()); |
1149 | } |
1150 | } |
1151 | |
1152 | void GCNPassConfig::addFastRegAlloc() { |
1153 | // FIXME: We have to disable the verifier here because of PHIElimination + |
1154 | // TwoAddressInstructions disabling it. |
1155 | |
1156 | // This must be run immediately after phi elimination and before |
1157 | // TwoAddressInstructions, otherwise the processing of the tied operand of |
1158 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
1159 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
1160 | |
1161 | insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID); |
1162 | insertPass(&TwoAddressInstructionPassID, &SIPreAllocateWWMRegsID); |
1163 | |
1164 | TargetPassConfig::addFastRegAlloc(); |
1165 | } |
1166 | |
1167 | void GCNPassConfig::addOptimizedRegAlloc() { |
1168 | // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation |
1169 | // instructions that cause scheduling barriers. |
1170 | insertPass(&MachineSchedulerID, &SIWholeQuadModeID); |
1171 | insertPass(&MachineSchedulerID, &SIPreAllocateWWMRegsID); |
1172 | |
1173 | if (OptExecMaskPreRA) |
1174 | insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID); |
1175 | |
1176 | if (isPassEnabled(EnablePreRAOptimizations)) |
1177 | insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID); |
1178 | |
1179 | // This is not an essential optimization and it has a noticeable impact on |
1180 | // compilation time, so we only enable it from O2. |
1181 | if (TM->getOptLevel() > CodeGenOpt::Less) |
1182 | insertPass(&MachineSchedulerID, &SIFormMemoryClausesID); |
1183 | |
1184 | // FIXME: When an instruction has a killed operand and is inside a bundle, |
1185 | // only the BUNDLE instruction seems to appear as the kill of the register |
1186 | // in LiveVariables; this triggers a verifier failure. We should fix that |
1187 | // and enable the verifier. |
1188 | if (OptVGPRLiveRange) |
1189 | insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID, false); |
1190 | // This must be run immediately after phi elimination and before |
1191 | // TwoAddressInstructions, otherwise the processing of the tied operand of |
1192 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
1193 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
1194 | |
1195 | if (EnableDCEInRA) |
1196 | insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID); |
1197 | |
1198 | TargetPassConfig::addOptimizedRegAlloc(); |
1199 | } |
1200 | |
1201 | bool GCNPassConfig::addPreRewrite() { |
1202 | if (EnableRegReassign) |
1203 | addPass(&GCNNSAReassignID); |
1204 | return true; |
1205 | } |
1206 | |
1207 | FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) { |
1208 | // Initialize the global default. |
1209 | llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag, |
1210 | initializeDefaultSGPRRegisterAllocatorOnce); |
1211 | |
1212 | RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault(); |
1213 | if (Ctor != useDefaultRegisterAllocator) |
1214 | return Ctor(); |
1215 | |
1216 | if (Optimized) |
1217 | return createGreedyRegisterAllocator(onlyAllocateSGPRs); |
1218 | |
1219 | return createFastRegisterAllocator(onlyAllocateSGPRs, false); |
1220 | } |
1221 | |
1222 | FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) { |
1223 | // Initialize the global default. |
1224 | llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag, |
1225 | initializeDefaultVGPRRegisterAllocatorOnce); |
1226 | |
1227 | RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault(); |
1228 | if (Ctor != useDefaultRegisterAllocator) |
1229 | return Ctor(); |
1230 | |
1231 | if (Optimized) |
1232 | return createGreedyVGPRRegisterAllocator(); |
1233 | |
1234 | return createFastVGPRRegisterAllocator(); |
1235 | } |
1236 | |
1237 | FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) { |
1238 | llvm_unreachable("should not be used")::llvm::llvm_unreachable_internal("should not be used", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp" , 1238); |
1239 | } |
1240 | |
1241 | static const char RegAllocOptNotSupportedMessage[] = |
1242 | "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc"; |
1243 | |
1244 | bool GCNPassConfig::addRegAssignAndRewriteFast() { |
1245 | if (!usingDefaultRegAlloc()) |
1246 | report_fatal_error(RegAllocOptNotSupportedMessage); |
1247 | |
1248 | addPass(createSGPRAllocPass(false)); |
1249 | |
1250 | // Equivalent of PEI for SGPRs. |
1251 | addPass(&SILowerSGPRSpillsID); |
1252 | |
1253 | addPass(createVGPRAllocPass(false)); |
1254 | return true; |
1255 | } |
1256 | |
1257 | bool GCNPassConfig::addRegAssignAndRewriteOptimized() { |
1258 | if (!usingDefaultRegAlloc()) |
1259 | report_fatal_error(RegAllocOptNotSupportedMessage); |
1260 | |
1261 | addPass(createSGPRAllocPass(true)); |
1262 | |
1263 | // Commit allocated register changes. This is mostly necessary because too |
1264 | // many things rely on the use lists of the physical registers, such as the |
1265 | // verifier. This is only necessary with allocators which use LiveIntervals, |
1266 | // since FastRegAlloc does the replacements itself. |
1267 | addPass(createVirtRegRewriter(false)); |
1268 | |
1269 | // Equivalent of PEI for SGPRs. |
1270 | addPass(&SILowerSGPRSpillsID); |
1271 | |
1272 | addPass(createVGPRAllocPass(true)); |
1273 | |
1274 | addPreRewrite(); |
1275 | addPass(&VirtRegRewriterID); |
1276 | |
1277 | return true; |
1278 | } |
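// Rough summary of the two hooks above (not an exhaustive pass list): SGPRs
// are assigned first, SILowerSGPRSpills then acts as PEI for SGPR spills, and
// VGPRs are assigned last, so any VGPRs introduced while spilling SGPRs are
// still seen by the VGPR allocator.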
1279 | |
1280 | void GCNPassConfig::addPostRegAlloc() { |
1281 | addPass(&SIFixVGPRCopiesID); |
1282 | if (getOptLevel() > CodeGenOpt::None) |
1283 | addPass(&SIOptimizeExecMaskingID); |
1284 | TargetPassConfig::addPostRegAlloc(); |
1285 | } |
1286 | |
1287 | void GCNPassConfig::addPreSched2() { |
1288 | addPass(&SIPostRABundlerID); |
1289 | } |
1290 | |
1291 | void GCNPassConfig::addPreEmitPass() { |
1292 | addPass(createSIMemoryLegalizerPass()); |
1293 | addPass(createSIInsertWaitcntsPass()); |
1294 | |
1295 | if (TM->getOptLevel() > CodeGenOpt::None) |
1296 | addPass(createSIShrinkInstructionsPass()); |
1297 | |
1298 | addPass(createSIModeRegisterPass()); |
1299 | |
1300 | if (getOptLevel() > CodeGenOpt::None) |
1301 | addPass(&SIInsertHardClausesID); |
1302 | |
1303 | addPass(&SILateBranchLoweringPassID); |
1304 | if (getOptLevel() > CodeGenOpt::None) |
1305 | addPass(&SIPreEmitPeepholeID); |
1306 | // The hazard recognizer that runs as part of the post-ra scheduler does not |
1307 | // guarantee to be able to handle all hazards correctly. This is because if there |
1308 | // are multiple scheduling regions in a basic block, the regions are scheduled |
1309 | // bottom up, so when we begin to schedule a region we don't know what |
1310 | // instructions were emitted directly before it. |
1311 | // |
1312 | // Here we add a stand-alone hazard recognizer pass which can handle all |
1313 | // cases. |
1314 | addPass(&PostRAHazardRecognizerID); |
1315 | addPass(&BranchRelaxationPassID); |
1316 | } |
1317 | |
1318 | TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) { |
1319 | return new GCNPassConfig(*this, PM); |
1320 | } |
1321 | |
1322 | yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const { |
1323 | return new yaml::SIMachineFunctionInfo(); |
1324 | } |
1325 | |
1326 | yaml::MachineFunctionInfo * |
1327 | GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const { |
1328 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1329 | return new yaml::SIMachineFunctionInfo( |
1330 | *MFI, *MF.getSubtarget().getRegisterInfo(), MF); |
1331 | } |
1332 | |
1333 | bool GCNTargetMachine::parseMachineFunctionInfo( |
1334 | const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS, |
1335 | SMDiagnostic &Error, SMRange &SourceRange) const { |
1336 | const yaml::SIMachineFunctionInfo &YamlMFI = |
1337 | reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_); |
1338 | MachineFunction &MF = PFS.MF; |
1339 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1340 | |
1341 | if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange)) |
1342 | return true; |
1343 | |
1344 | if (MFI->Occupancy == 0) { |
1345 | // Fixup the subtarget dependent default value. |
1346 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
1347 | MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize()); |
1348 | } |
1349 | |
1350 | auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) { |
1351 | Register TempReg; |
1352 | if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) { |
1353 | SourceRange = RegName.SourceRange; |
1354 | return true; |
1355 | } |
1356 | RegVal = TempReg; |
1357 | |
1358 | return false; |
1359 | }; |
1360 | |
1361 | auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) { |
1362 | // Create a diagnostic for the register string literal. |
1363 | const MemoryBuffer &Buffer = |
1364 | *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID()); |
1365 | Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, |
1366 | RegName.Value.size(), SourceMgr::DK_Error, |
1367 | "incorrect register class for field", RegName.Value, |
1368 | None, None); |
1369 | SourceRange = RegName.SourceRange; |
1370 | return true; |
1371 | }; |
1372 | |
1373 | if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) || |
1374 | parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) || |
1375 | parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg)) |
1376 | return true; |
1377 | |
1378 | if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG && |
1379 | !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) { |
1380 | return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg); |
1381 | } |
1382 | |
1383 | if (MFI->FrameOffsetReg != AMDGPU::FP_REG && |
1384 | !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) { |
1385 | return diagnoseRegisterClass(YamlMFI.FrameOffsetReg); |
1386 | } |
1387 | |
1388 | if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG && |
1389 | !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) { |
1390 | return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg); |
1391 | } |
1392 | |
1393 | auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A, |
1394 | const TargetRegisterClass &RC, |
1395 | ArgDescriptor &Arg, unsigned UserSGPRs, |
1396 | unsigned SystemSGPRs) { |
1397 | // Skip parsing if it's not present. |
1398 | if (!A) |
1399 | return false; |
1400 | |
1401 | if (A->IsRegister) { |
1402 | Register Reg; |
1403 | if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) { |
1404 | SourceRange = A->RegisterName.SourceRange; |
1405 | return true; |
1406 | } |
1407 | if (!RC.contains(Reg)) |
1408 | return diagnoseRegisterClass(A->RegisterName); |
1409 | Arg = ArgDescriptor::createRegister(Reg); |
1410 | } else |
1411 | Arg = ArgDescriptor::createStack(A->StackOffset); |
1412 | // Check and apply the optional mask. |
1413 | if (A->Mask) |
1414 | Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue()); |
1415 | |
1416 | MFI->NumUserSGPRs += UserSGPRs; |
1417 | MFI->NumSystemSGPRs += SystemSGPRs; |
1418 | return false; |
1419 | }; |
1420 | |
1421 | if (YamlMFI.ArgInfo && |
1422 | (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer, |
1423 | AMDGPU::SGPR_128RegClass, |
1424 | MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) || |
1425 | parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr, |
1426 | AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr, |
1427 | 2, 0) || |
1428 | parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass, |
1429 | MFI->ArgInfo.QueuePtr, 2, 0) || |
1430 | parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr, |
1431 | AMDGPU::SReg_64RegClass, |
1432 | MFI->ArgInfo.KernargSegmentPtr, 2, 0) || |
1433 | parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID, |
1434 | AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID, |
1435 | 2, 0) || |
1436 | parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit, |
1437 | AMDGPU::SReg_64RegClass, |
1438 | MFI->ArgInfo.FlatScratchInit, 2, 0) || |
1439 | parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize, |
1440 | AMDGPU::SGPR_32RegClass, |
1441 | MFI->ArgInfo.PrivateSegmentSize, 0, 0) || |
1442 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX, |
1443 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX, |
1444 | 0, 1) || |
1445 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY, |
1446 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY, |
1447 | 0, 1) || |
1448 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ, |
1449 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ, |
1450 | 0, 1) || |
1451 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo, |
1452 | AMDGPU::SGPR_32RegClass, |
1453 | MFI->ArgInfo.WorkGroupInfo, 0, 1) || |
1454 | parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset, |
1455 | AMDGPU::SGPR_32RegClass, |
1456 | MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) || |
1457 | parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr, |
1458 | AMDGPU::SReg_64RegClass, |
1459 | MFI->ArgInfo.ImplicitArgPtr, 0, 0) || |
1460 | parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr, |
1461 | AMDGPU::SReg_64RegClass, |
1462 | MFI->ArgInfo.ImplicitBufferPtr, 2, 0) || |
1463 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX, |
1464 | AMDGPU::VGPR_32RegClass, |
1465 | MFI->ArgInfo.WorkItemIDX, 0, 0) || |
1466 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY, |
1467 | AMDGPU::VGPR_32RegClass, |
1468 | MFI->ArgInfo.WorkItemIDY, 0, 0) || |
1469 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ, |
1470 | AMDGPU::VGPR_32RegClass, |
1471 | MFI->ArgInfo.WorkItemIDZ, 0, 0))) |
1472 | return true; |
1473 | |
1474 | MFI->Mode.IEEE = YamlMFI.Mode.IEEE; |
1475 | MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp; |
1476 | MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals; |
1477 | MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals; |
1478 | MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals; |
1479 | MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals; |
1480 | |
1481 | return false; |
1482 | } |
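// A hedged sketch of the MIR block this parser consumes (field names assumed
// from yaml::SIMachineFunctionInfo; see SIMachineFunctionInfo.h for the full
// set):
//
//   machineFunctionInfo:
//     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
//     frameOffsetReg:    '$sgpr33'
//     stackPtrOffsetReg: '$sgpr32'
//     argumentInfo:
//       privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
//       workItemIDX:          { reg: '$vgpr0' }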