//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds instructions to enable whole quad mode (strict or non-strict)
/// for pixel shaders, and strict whole wavefront mode for all programs.
///
/// The "strict" prefix indicates that inactive lanes do not take part in
/// control flow, specifically an inactive lane enabled by a strict WQM/WWM will
/// always be enabled irrespective of control flow decisions. Conversely, in
/// non-strict WQM inactive lanes may take part in control flow decisions.
///
/// Whole quad mode is required for derivative computations, but it interferes
/// with shader side effects (stores and atomics). This pass ensures that WQM is
/// enabled when necessary, but disabled around stores and atomics.
///
/// When necessary, this pass creates a function prolog
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///
/// to enter WQM at the top of the function and surrounds blocks of Exact
/// instructions by
///
///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// We also compute when a sequence of instructions requires strict whole
/// wavefront mode (StrictWWM) and insert instructions to save and restore it:
///
///   S_OR_SAVEEXEC_B64 Tmp, -1
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// When a sequence of instructions requires strict whole quad mode (StrictWQM)
/// we use a similar save and restore mechanism and force whole quad mode for
/// those instructions:
///
///   S_MOV_B64 Tmp, EXEC
///   S_WQM_B64 EXEC, EXEC
///   ...
///   S_MOV_B64 EXEC, Tmp
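///
/// On wave32 targets, the analogous *_B32 opcodes and EXEC_LO are used
/// throughout (see the opcode selection in runOnMachineFunction below).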
///
/// In order to avoid excessive switching during sequences of Exact
/// instructions, the pass first analyzes which instructions must be run in WQM
/// (aka which instructions produce values that lead to derivative
/// computations).
///
/// Basic blocks are always exited in WQM as long as some successor needs WQM.
///
/// There is room for improvement given better control flow analysis:
///
///  (1) at the top level (outside of control flow statements, and as long as
///      kill hasn't been used), one SGPR can be saved by recovering WQM from
///      the LiveMask (this is implemented for the entry block).
///
///  (2) when entire regions (e.g. if-else blocks or entire loops) only
///      consist of exact and don't-care instructions, the switch only has to
///      be done at the entry and exit points rather than potentially in each
///      block of the region.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "si-wqm"

namespace {

enum {
  StateWQM = 0x1,
  StateStrictWWM = 0x2,
  StateStrictWQM = 0x4,
  StateExact = 0x8,
  StateStrict = StateStrictWWM | StateStrictWQM,
};
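
// The State* values form a bitmask and may combine: for example, an
// instruction inside a strict-WWM region whose result also feeds a derivative
// computation can carry Needs == (StateStrictWWM | StateWQM).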

struct PrintState {
public:
  int State;

  explicit PrintState(int State) : State(State) {}
};

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {

  static const std::pair<char, const char *> Mapping[] = {
      std::pair(StateWQM, "WQM"), std::pair(StateStrictWWM, "StrictWWM"),
      std::pair(StateStrictWQM, "StrictWQM"), std::pair(StateExact, "Exact")};
  char State = PS.State;
  for (auto M : Mapping) {
    if (State & M.first) {
      OS << M.second;
      State &= ~M.first;

      if (State)
        OS << '|';
    }
  }
  assert(State == 0);
  return OS;
}
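// For example, PrintState(StateWQM | StateExact) prints "WQM|Exact".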
#endif

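// Analysis results for a single instruction. All fields are bitmasks of the
// State* values above: Needs holds the states the instruction requires,
// Disabled the states it must not execute in, and OutNeeds the states
// required immediately after it.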
struct InstrInfo {
  char Needs = 0;
  char Disabled = 0;
  char OutNeeds = 0;
};

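// Analysis results for a basic block: InNeeds/OutNeeds are the states
// required at block entry/exit, InitialState records the state chosen for
// the start of the block, and NeedsLowering marks blocks containing kill or
// demote instructions that still need lowering.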
struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
  char InitialState = 0;
  bool NeedsLowering = false;
};

struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};

class SIWholeQuadMode : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;
  MachineDominatorTree *MDT;
  MachinePostDominatorTree *PDT;

  unsigned AndOpc;
  unsigned AndTermOpc;
  unsigned AndN2Opc;
  unsigned XorOpc;
  unsigned AndSaveExecOpc;
  unsigned AndSaveExecTermOpc;
  unsigned WQMOpc;
  Register Exec;
  Register LiveMaskReg;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  MapVector<MachineBasicBlock *, BlockInfo> Blocks;

  // Tracks state (WQM/StrictWWM/StrictWQM/Exact) after a given instruction
  DenseMap<const MachineInstr *, char> StateTransition;

  SmallVector<MachineInstr *, 2> LiveMaskQueries;
  SmallVector<MachineInstr *, 4> LowerToMovInstrs;
  SmallVector<MachineInstr *, 4> LowerToCopyInstrs;
  SmallVector<MachineInstr *, 4> KillInstrs;
  SmallVector<MachineInstr *, 4> InitExecInstrs;

  void printInfo();

  void markInstruction(MachineInstr &MI, char Flag,
                       std::vector<WorkItem> &Worklist);
  void markDefs(const MachineInstr &UseMI, LiveRange &LR, Register Reg,
                unsigned SubReg, char Flag, std::vector<WorkItem> &Worklist);
  void markOperand(const MachineInstr &MI, const MachineOperand &Op, char Flag,
                   std::vector<WorkItem> &Worklist);
  void markInstructionUses(const MachineInstr &MI, char Flag,
                           std::vector<WorkItem> &Worklist);
  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
  void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
  char analyzeFunction(MachineFunction &MF);

  MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Before);
  MachineBasicBlock::iterator
  prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
                   MachineBasicBlock::iterator Last, bool PreferLast,
                   bool SaveSCC);
  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               Register SaveWQM);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             Register SavedWQM);
  void toStrictMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
                    Register SaveOrig, char StrictStateNeeded);
  void fromStrictMode(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator Before, Register SavedOrig,
                      char NonStrictState, char CurrentStrictState);

  MachineBasicBlock *splitBlock(MachineBasicBlock *BB, MachineInstr *TermMI);

  MachineInstr *lowerKillI1(MachineBasicBlock &MBB, MachineInstr &MI,
                            bool IsWQM);
  MachineInstr *lowerKillF32(MachineBasicBlock &MBB, MachineInstr &MI);

  void lowerBlock(MachineBasicBlock &MBB);
  void processBlock(MachineBasicBlock &MBB, bool IsEntry);

  bool lowerLiveMaskQueries();
  bool lowerCopyInstrs();
  bool lowerKillInstrs(bool IsWQM);
  void lowerInitExec(MachineInstr &MI);
  MachineBasicBlock::iterator lowerInitExecInstrs(MachineBasicBlock &Entry,
                                                  bool &Changed);

public:
  static char ID;

  SIWholeQuadMode() :
    MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Whole Quad Mode"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervalsWrapperPass>();
    AU.addPreserved<SlotIndexesWrapperPass>();
    AU.addPreserved<LiveIntervalsWrapperPass>();
    AU.addPreserved<MachineDominatorTreeWrapperPass>();
    AU.addPreserved<MachinePostDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }
};

} // end anonymous namespace

char SIWholeQuadMode::ID = 0;

INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                    false)

char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}

#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << "\n"
           << printMBBReference(*BII.first) << ":\n"
           << "  InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III != Instructions.end()) {
        dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
               << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
      }
    }
  }
}
#endif

void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  assert(!(Flag & StateExact) && Flag != 0);

  // Remove any disabled states from the flag. The user that required it gets
  // an undefined value in the helper lanes. For example, this can happen if
  // the result of an atomic is used by an instruction that requires WQM, where
  // ignoring the request for WQM is correct as per the relevant specs.
  Flag &= ~II.Disabled;

  // Ignore if the flag is already encompassed by the existing needs, or we
  // just disabled everything.
  if ((II.Needs & Flag) == Flag)
    return;

  LLVM_DEBUG(dbgs() << "markInstruction " << PrintState(Flag) << ": " << MI);
  II.Needs |= Flag;
  Worklist.emplace_back(&MI);
}

/// Mark all relevant definitions of register \p Reg in usage \p UseMI.
void SIWholeQuadMode::markDefs(const MachineInstr &UseMI, LiveRange &LR,
                               Register Reg, unsigned SubReg, char Flag,
                               std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markDefs " << PrintState(Flag) << ": " << UseMI);

  LiveQueryResult UseLRQ = LR.Query(LIS->getInstructionIndex(UseMI));
  const VNInfo *Value = UseLRQ.valueIn();
  if (!Value)
    return;

  // Note: this code assumes that lane masks on AMDGPU completely
  // cover registers.
  const LaneBitmask UseLanes =
      SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
             : (Reg.isVirtual() ? MRI->getMaxLaneMaskForVReg(Reg)
                                : LaneBitmask::getNone());

  // Perform a depth-first iteration of the LiveRange graph marking defs.
  // Stop processing of a given branch when all use lanes have been defined.
  // The first definition stops processing for a physical register.
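  // For example, if %v:sub0 is defined in the "then" block and %v:sub1 in the
  // "else" block, a use of the full %v after the join sees a phi value; the
  // walk visits both predecessors and marks both defining instructions
  // before it terminates.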
  struct PhiEntry {
    const VNInfo *Phi;
    unsigned PredIdx;
    LaneBitmask DefinedLanes;

    PhiEntry(const VNInfo *Phi, unsigned PredIdx, LaneBitmask DefinedLanes)
        : Phi(Phi), PredIdx(PredIdx), DefinedLanes(DefinedLanes) {}
  };
  using VisitKey = std::pair<const VNInfo *, LaneBitmask>;
  SmallVector<PhiEntry, 2> PhiStack;
  SmallSet<VisitKey, 4> Visited;
  LaneBitmask DefinedLanes;
  unsigned NextPredIdx = 0; // Only used for processing phi nodes
  do {
    const VNInfo *NextValue = nullptr;
    const VisitKey Key(Value, DefinedLanes);

    if (Visited.insert(Key).second) {
      // On the first visit to a phi, start processing at the first
      // predecessor.
      NextPredIdx = 0;
    }

    if (Value->isPHIDef()) {
      // Each predecessor node in the phi must be processed as a subgraph
      const MachineBasicBlock *MBB = LIS->getMBBFromIndex(Value->def);
      assert(MBB && "Phi-def has no defining MBB");

      // Find next predecessor to process
      unsigned Idx = NextPredIdx;
      auto PI = MBB->pred_begin() + Idx;
      auto PE = MBB->pred_end();
      for (; PI != PE && !NextValue; ++PI, ++Idx) {
        if (const VNInfo *VN = LR.getVNInfoBefore(LIS->getMBBEndIdx(*PI))) {
          if (!Visited.count(VisitKey(VN, DefinedLanes)))
            NextValue = VN;
        }
      }

      // If there are more predecessors to process, add the phi to the stack.
      if (PI != PE)
        PhiStack.emplace_back(Value, Idx, DefinedLanes);
    } else {
      MachineInstr *MI = LIS->getInstructionFromIndex(Value->def);
      assert(MI && "Def has no defining instruction");

      if (Reg.isVirtual()) {
        // Iterate over all operands to find relevant definitions
        bool HasDef = false;
        for (const MachineOperand &Op : MI->all_defs()) {
          if (Op.getReg() != Reg)
            continue;

          // Compute lanes defined and overlap with use
          LaneBitmask OpLanes =
              Op.isUndef() ? LaneBitmask::getAll()
                           : TRI->getSubRegIndexLaneMask(Op.getSubReg());
          LaneBitmask Overlap = (UseLanes & OpLanes);

          // Record if this instruction defined any lanes of the use.
          HasDef |= Overlap.any();

          // Mark any lanes defined
          DefinedLanes |= OpLanes;
        }

        // Check if all lanes of use have been defined
        if ((DefinedLanes & UseLanes) != UseLanes) {
          // Definition not complete; need to process input value
          LiveQueryResult LRQ = LR.Query(LIS->getInstructionIndex(*MI));
          if (const VNInfo *VN = LRQ.valueIn()) {
            if (!Visited.count(VisitKey(VN, DefinedLanes)))
              NextValue = VN;
          }
        }

        // Only mark the instruction if it defines some part of the use
        if (HasDef)
          markInstruction(*MI, Flag, Worklist);
      } else {
        // For physical registers simply mark the defining instruction
        markInstruction(*MI, Flag, Worklist);
      }
    }

    if (!NextValue && !PhiStack.empty()) {
      // Reached the end of a chain; revert to processing the last phi.
      PhiEntry &Entry = PhiStack.back();
      NextValue = Entry.Phi;
      NextPredIdx = Entry.PredIdx;
      DefinedLanes = Entry.DefinedLanes;
      PhiStack.pop_back();
    }

    Value = NextValue;
  } while (Value);
}

void SIWholeQuadMode::markOperand(const MachineInstr &MI,
                                  const MachineOperand &Op, char Flag,
                                  std::vector<WorkItem> &Worklist) {
  assert(Op.isReg());
  Register Reg = Op.getReg();

  // Ignore some hardware registers
  switch (Reg) {
  case AMDGPU::EXEC:
  case AMDGPU::EXEC_LO:
    return;
  default:
    break;
  }

  LLVM_DEBUG(dbgs() << "markOperand " << PrintState(Flag) << ": " << Op
                    << " for " << MI);
  if (Reg.isVirtual()) {
    LiveRange &LR = LIS->getInterval(Reg);
    markDefs(MI, LR, Reg, Op.getSubReg(), Flag, Worklist);
  } else {
    // Handle physical registers that we need to track; this is mostly relevant
    // for VCC, which can appear as the (implicit) input of a uniform branch,
    // e.g. when a loop counter is stored in a VGPR.
    for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
      LiveRange &LR = LIS->getRegUnit(Unit);
      const VNInfo *Value = LR.Query(LIS->getInstructionIndex(MI)).valueIn();
      if (Value)
        markDefs(MI, LR, Unit, AMDGPU::NoSubRegister, Flag, Worklist);
    }
  }
}

/// Mark all instructions defining the uses in \p MI with \p Flag.
void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
                                          std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markInstructionUses " << PrintState(Flag) << ": "
                    << MI);

  for (const MachineOperand &Use : MI.all_uses())
    markOperand(MI, Use, Flag, Worklist);
}

// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
  SmallVector<MachineInstr *, 4> SetInactiveInstrs;
  SmallVector<MachineInstr *, 4> SoftWQMInstrs;
  bool HasImplicitDerivatives =
      MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;

  // We need to visit the basic blocks in reverse post-order so that we visit
  // defs before uses, in particular so that we don't accidentally mark an
  // instruction as needing e.g. WQM before visiting it and realizing it needs
  // WQM disabled.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT) {
    BlockInfo &BBI = Blocks[MBB];

    for (MachineInstr &MI : *MBB) {
      InstrInfo &III = Instructions[&MI];
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode)) {
        // If LOD is not supported WQM is not needed.
        // Only generate implicit WQM if implicit derivatives are required.
        // This avoids inserting unintended WQM if a shader type without
        // implicit derivatives uses an image sampling instruction.
        if (ST->hasExtendedImageInsts() && HasImplicitDerivatives) {
          // Sampling instructions don't need to produce results for all pixels
          // in a quad, they just require all inputs of a quad to have been
          // computed for derivatives.
          markInstructionUses(MI, StateWQM, Worklist);
          GlobalFlags |= StateWQM;
        }
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::SOFT_WQM) {
        LowerToCopyInstrs.push_back(&MI);
        SoftWQMInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::STRICT_WWM) {
        // The STRICT_WWM intrinsic doesn't make the same guarantee, and in
        // addition it needs to be executed in WQM or Exact so that its copy
        // doesn't clobber inactive lanes.
        markInstructionUses(MI, StateStrictWWM, Worklist);
        GlobalFlags |= StateStrictWWM;
        LowerToMovInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::STRICT_WQM ||
                 TII->isDualSourceBlendEXP(MI)) {
        // STRICT_WQM is similar to STRICT_WWM, but instead of enabling all
        // threads of the wave like STRICT_WWM, STRICT_WQM enables all threads
        // in quads that have at least one active thread.
        markInstructionUses(MI, StateStrictWQM, Worklist);
        GlobalFlags |= StateStrictWQM;

        if (Opcode == AMDGPU::STRICT_WQM) {
          LowerToMovInstrs.push_back(&MI);
        } else {
          // Dual source blend export acts as implicit strict-wqm, its sources
          // need to be shuffled in strict wqm, but the export itself needs to
          // run in exact mode.
          BBI.Needs |= StateExact;
          if (!(BBI.InNeeds & StateExact)) {
            BBI.InNeeds |= StateExact;
            Worklist.emplace_back(MBB);
          }
          GlobalFlags |= StateExact;
          III.Disabled = StateWQM | StateStrict;
        }
      } else if (Opcode == AMDGPU::LDS_PARAM_LOAD ||
                 Opcode == AMDGPU::DS_PARAM_LOAD ||
                 Opcode == AMDGPU::LDS_DIRECT_LOAD ||
                 Opcode == AMDGPU::DS_DIRECT_LOAD) {
        // Mark these STRICT_WQM, but only for the instruction, not its
        // operands. This avoids unnecessarily marking M0 as requiring WQM.
        III.Needs |= StateStrictWQM;
        GlobalFlags |= StateStrictWQM;
      } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
                 Opcode == AMDGPU::V_SET_INACTIVE_B64) {
        III.Disabled = StateStrict;
        MachineOperand &Inactive = MI.getOperand(2);
        if (Inactive.isReg()) {
          if (Inactive.isUndef()) {
            LowerToCopyInstrs.push_back(&MI);
          } else {
            markOperand(MI, Inactive, StateStrictWWM, Worklist);
          }
        }
        SetInactiveInstrs.push_back(&MI);
      } else if (TII->isDisableWQM(MI)) {
        BBI.Needs |= StateExact;
        if (!(BBI.InNeeds & StateExact)) {
          BBI.InNeeds |= StateExact;
          Worklist.emplace_back(MBB);
        }
        GlobalFlags |= StateExact;
        III.Disabled = StateWQM | StateStrict;
      } else if (Opcode == AMDGPU::SI_PS_LIVE ||
                 Opcode == AMDGPU::SI_LIVE_MASK) {
        LiveMaskQueries.push_back(&MI);
      } else if (Opcode == AMDGPU::SI_KILL_I1_TERMINATOR ||
                 Opcode == AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR ||
                 Opcode == AMDGPU::SI_DEMOTE_I1) {
        KillInstrs.push_back(&MI);
        BBI.NeedsLowering = true;
      } else if (Opcode == AMDGPU::SI_INIT_EXEC ||
                 Opcode == AMDGPU::SI_INIT_EXEC_FROM_INPUT) {
        InitExecInstrs.push_back(&MI);
      } else if (WQMOutputs) {
        // The function is in machine SSA form, which means that physical
        // VGPRs correspond to shader inputs and outputs. Inputs are
        // only used, outputs are only defined.
        // FIXME: is this still valid?
        for (const MachineOperand &MO : MI.defs()) {
          Register Reg = MO.getReg();
          if (Reg.isPhysical() &&
              TRI->hasVectorRegisters(TRI->getPhysRegBaseClass(Reg))) {
            Flags = StateWQM;
            break;
          }
        }
      }

      if (Flags) {
        markInstruction(MI, Flags, Worklist);
        GlobalFlags |= Flags;
      }
    }
  }

  // Make sure that any SET_INACTIVE instructions are computed in WQM if WQM is
  // ever used anywhere in the function. This implements the corresponding
  // semantics of @llvm.amdgcn.set.inactive.
  // Similarly for SOFT_WQM instructions, implementing @llvm.amdgcn.softwqm.
  if (GlobalFlags & StateWQM) {
    for (MachineInstr *MI : SetInactiveInstrs)
      markInstruction(*MI, StateWQM, Worklist);
    for (MachineInstr *MI : SoftWQMInstrs)
      markInstruction(*MI, StateWQM, Worklist);
  }

  return GlobalFlags;
}

void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem> &Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level
  if (II.Needs & StateWQM) {
    BI.Needs |= StateWQM;
    if (!(BI.InNeeds & StateWQM)) {
      BI.InNeeds |= StateWQM;
      Worklist.emplace_back(MBB);
    }
  }

  // Propagate backwards within block
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = (II.Needs & ~StateStrict) | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.emplace_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(!(II.Needs & StateExact));

  if (II.Needs != 0)
    markInstructionUses(MI, II.Needs, Worklist);

  // Ensure we process a block containing StrictWWM/StrictWQM, even if it does
  // not require any WQM transitions.
  if (II.Needs & StateStrictWWM)
    BI.Needs |= StateStrictWWM;
  if (II.Needs & StateStrictWQM)
    BI.Needs |= StateStrictWQM;
}

void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem> &Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.

  // Propagate through instructions
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.emplace_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.emplace_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.emplace_back(Succ);
  }
}

char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}

MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}

MachineBasicBlock *SIWholeQuadMode::splitBlock(MachineBasicBlock *BB,
                                               MachineInstr *TermMI) {
  LLVM_DEBUG(dbgs() << "Split block " << printMBBReference(*BB) << " @ "
                    << *TermMI << "\n");

  MachineBasicBlock *SplitBB =
      BB->splitAt(*TermMI, /*UpdateLiveIns*/ true, LIS);

  // Convert last instruction in block to a terminator.
  // Note: this only covers the expected patterns
  unsigned NewOpcode = 0;
  switch (TermMI->getOpcode()) {
  case AMDGPU::S_AND_B32:
    NewOpcode = AMDGPU::S_AND_B32_term;
    break;
  case AMDGPU::S_AND_B64:
    NewOpcode = AMDGPU::S_AND_B64_term;
    break;
  case AMDGPU::S_MOV_B32:
    NewOpcode = AMDGPU::S_MOV_B32_term;
    break;
  case AMDGPU::S_MOV_B64:
    NewOpcode = AMDGPU::S_MOV_B64_term;
    break;
  default:
    break;
  }
  if (NewOpcode)
    TermMI->setDesc(TII->get(NewOpcode));

  if (SplitBB != BB) {
    // Update dominator trees
    using DomTreeT = DomTreeBase<MachineBasicBlock>;
    SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
    for (MachineBasicBlock *Succ : SplitBB->successors()) {
      DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
      DTUpdates.push_back({DomTreeT::Delete, BB, Succ});
    }
    DTUpdates.push_back({DomTreeT::Insert, BB, SplitBB});
    if (MDT)
      MDT->getBase().applyUpdates(DTUpdates);
    if (PDT)
      PDT->applyUpdates(DTUpdates);

    // Link blocks
    MachineInstr *MI =
        BuildMI(*BB, BB->end(), DebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(SplitBB);
    LIS->InsertMachineInstrInMaps(*MI);
  }

  return SplitBB;
}

MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
                                            MachineInstr &MI) {
  assert(LiveMaskReg.isVirtual());

  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Opcode = 0;

  assert(MI.getOperand(0).isReg());

  // Comparison is for live lanes; however here we compute the inverse
  // (killed lanes). This is because VCMP will always generate 0 bits
  // for inactive lanes so a mask of live lanes would not be correct
  // inside control flow.
  // Invert the comparison by swapping the operands and adjusting
  // the comparison codes.
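  // For example, lanes are live for SETOLT when "Op0 < Op1" holds; the killed
  // lanes are then !(Op0 < Op1), which the mapping below computes as
  // V_CMP_NGT with the two source operands swapped.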

  switch (MI.getOperand(2).getImm()) {
  case ISD::SETUEQ:
    Opcode = AMDGPU::V_CMP_LG_F32_e64;
    break;
  case ISD::SETUGT:
    Opcode = AMDGPU::V_CMP_GE_F32_e64;
    break;
  case ISD::SETUGE:
    Opcode = AMDGPU::V_CMP_GT_F32_e64;
    break;
  case ISD::SETULT:
    Opcode = AMDGPU::V_CMP_LE_F32_e64;
    break;
  case ISD::SETULE:
    Opcode = AMDGPU::V_CMP_LT_F32_e64;
    break;
  case ISD::SETUNE:
    Opcode = AMDGPU::V_CMP_EQ_F32_e64;
    break;
  case ISD::SETO:
    Opcode = AMDGPU::V_CMP_O_F32_e64;
    break;
  case ISD::SETUO:
    Opcode = AMDGPU::V_CMP_U_F32_e64;
    break;
  case ISD::SETOEQ:
  case ISD::SETEQ:
    Opcode = AMDGPU::V_CMP_NEQ_F32_e64;
    break;
  case ISD::SETOGT:
  case ISD::SETGT:
    Opcode = AMDGPU::V_CMP_NLT_F32_e64;
    break;
  case ISD::SETOGE:
  case ISD::SETGE:
    Opcode = AMDGPU::V_CMP_NLE_F32_e64;
    break;
  case ISD::SETOLT:
  case ISD::SETLT:
    Opcode = AMDGPU::V_CMP_NGT_F32_e64;
    break;
  case ISD::SETOLE:
  case ISD::SETLE:
    Opcode = AMDGPU::V_CMP_NGE_F32_e64;
    break;
  case ISD::SETONE:
  case ISD::SETNE:
    Opcode = AMDGPU::V_CMP_NLG_F32_e64;
    break;
  default:
    llvm_unreachable("invalid ISD:SET cond code");
  }

  // Pick opcode based on comparison type.
  MachineInstr *VcmpMI;
  const MachineOperand &Op0 = MI.getOperand(0);
  const MachineOperand &Op1 = MI.getOperand(1);

  // VCC represents lanes killed.
  Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  if (TRI->isVGPR(*MRI, Op0.getReg())) {
    Opcode = AMDGPU::getVOPe32(Opcode);
    VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode)).add(Op1).add(Op0);
  } else {
    VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode))
                 .addReg(VCC, RegState::Define)
                 .addImm(0) // src0 modifiers
                 .add(Op1)
                 .addImm(0) // src1 modifiers
                 .add(Op0)
                 .addImm(0); // omod
  }

  MachineInstr *MaskUpdateMI =
      BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
          .addReg(LiveMaskReg)
          .addReg(VCC);

  // The state of SCC represents whether any lanes are live in the mask;
  // if SCC is 0, then no lanes will be alive anymore.
  MachineInstr *EarlyTermMI =
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));

  MachineInstr *ExecMaskMI =
      BuildMI(MBB, MI, DL, TII->get(AndN2Opc), Exec).addReg(Exec).addReg(VCC);

  assert(MBB.succ_size() == 1);
  MachineInstr *NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
                              .addMBB(*MBB.succ_begin());

  // Update live intervals
  LIS->ReplaceMachineInstrInMaps(MI, *VcmpMI);
  MBB.remove(&MI);

  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*ExecMaskMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  return NewTerm;
}

MachineInstr *SIWholeQuadMode::lowerKillI1(MachineBasicBlock &MBB,
                                           MachineInstr &MI, bool IsWQM) {
  assert(LiveMaskReg.isVirtual());

  const DebugLoc &DL = MI.getDebugLoc();
  MachineInstr *MaskUpdateMI = nullptr;

  const bool IsDemote = IsWQM && (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1);
  const MachineOperand &Op = MI.getOperand(0);
  int64_t KillVal = MI.getOperand(1).getImm();
  MachineInstr *ComputeKilledMaskMI = nullptr;
  Register CndReg = !Op.isImm() ? Op.getReg() : Register();
  Register TmpReg;

  // Is this a static or dynamic kill?
  if (Op.isImm()) {
    if (Op.getImm() == KillVal) {
      // Static: all active lanes are killed
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .addReg(Exec);
    } else {
      // Static: kill does nothing
      MachineInstr *NewTerm = nullptr;
      if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
        LIS->RemoveMachineInstrFromMaps(MI);
      } else {
        assert(MBB.succ_size() == 1);
        NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
                      .addMBB(*MBB.succ_begin());
        LIS->ReplaceMachineInstrInMaps(MI, *NewTerm);
      }
      MBB.remove(&MI);
      return NewTerm;
    }
  } else {
    if (!KillVal) {
      // Op represents live lanes after kill,
      // so exec mask needs to be factored in.
      TmpReg = MRI->createVirtualRegister(TRI->getBoolRC());
      ComputeKilledMaskMI =
          BuildMI(MBB, MI, DL, TII->get(XorOpc), TmpReg).add(Op).addReg(Exec);
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .addReg(TmpReg);
    } else {
      // Op represents lanes to kill
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .add(Op);
    }
  }

  // The state of SCC represents whether any lanes are live in the mask;
  // if SCC is 0, then no lanes will be alive anymore.
  MachineInstr *EarlyTermMI =
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));

  // If we got this far, some lanes are still live;
  // update EXEC to deactivate lanes as appropriate.
  MachineInstr *NewTerm;
  MachineInstr *WQMMaskMI = nullptr;
  Register LiveMaskWQM;
  if (IsDemote) {
    // Demote - deactivate quads with only helper lanes
    LiveMaskWQM = MRI->createVirtualRegister(TRI->getBoolRC());
    WQMMaskMI =
        BuildMI(MBB, MI, DL, TII->get(WQMOpc), LiveMaskWQM).addReg(LiveMaskReg);
    NewTerm = BuildMI(MBB, MI, DL, TII->get(AndOpc), Exec)
                  .addReg(Exec)
                  .addReg(LiveMaskWQM);
  } else {
    // Kill - deactivate lanes no longer in the live mask
    if (Op.isImm()) {
      unsigned MovOpc = ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(MovOpc), Exec).addImm(0);
    } else if (!IsWQM) {
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Exec)
                    .addReg(Exec)
                    .addReg(LiveMaskReg);
    } else {
      unsigned Opcode = KillVal ? AndN2Opc : AndOpc;
      NewTerm =
          BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec).addReg(Exec).add(Op);
    }
  }

  // Update live intervals
  LIS->RemoveMachineInstrFromMaps(MI);
  MBB.remove(&MI);
  assert(EarlyTermMI);
  assert(MaskUpdateMI);
  assert(NewTerm);
  if (ComputeKilledMaskMI)
    LIS->InsertMachineInstrInMaps(*ComputeKilledMaskMI);
  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  if (WQMMaskMI)
    LIS->InsertMachineInstrInMaps(*WQMMaskMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  if (CndReg) {
    LIS->removeInterval(CndReg);
    LIS->createAndComputeVirtRegInterval(CndReg);
  }
  if (TmpReg)
    LIS->createAndComputeVirtRegInterval(TmpReg);
  if (LiveMaskWQM)
    LIS->createAndComputeVirtRegInterval(LiveMaskWQM);

  return NewTerm;
}

// Replace (or supplement) instructions accessing live mask.
// This can only happen once all the live mask registers have been created
// and the execution state (WQM/StrictWWM/Exact) of instructions is known.
void SIWholeQuadMode::lowerBlock(MachineBasicBlock &MBB) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;
  if (!BI.NeedsLowering)
    return;

  LLVM_DEBUG(dbgs() << "\nLowering block " << printMBBReference(MBB) << ":\n");

  SmallVector<MachineInstr *, 4> SplitPoints;
  char State = BI.InitialState;

  for (MachineInstr &MI : llvm::make_early_inc_range(
           llvm::make_range(MBB.getFirstNonPHI(), MBB.end()))) {
    if (StateTransition.count(&MI))
      State = StateTransition[&MI];

    MachineInstr *SplitPoint = nullptr;
    switch (MI.getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(MBB, MI, State == StateWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(MBB, MI);
      break;
    default:
      break;
    }
    if (SplitPoint)
      SplitPoints.push_back(SplitPoint);
  }

  // Perform splitting after instruction scan to simplify iteration.
  if (!SplitPoints.empty()) {
    MachineBasicBlock *BB = &MBB;
    for (MachineInstr *MI : SplitPoints) {
      BB = splitBlock(BB, MI);
    }
  }
}

// Return an iterator in the (inclusive) range [First, Last] at which
// instructions can be safely inserted, keeping in mind that some of the
// instructions we want to add necessarily clobber SCC.
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR =
      LIS->getRegUnit(*TRI->regunits(MCRegister::from(AMDGPU::SCC)).begin());
  auto MBBE = MBB.end();
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      MachineInstr *EndMI = LIS->getInstructionFromIndex(S->end.getBaseIndex());
      assert(EndMI && "Segment does not end on valid instruction");
      auto NextI = std::next(EndMI->getIterator());
      if (NextI == MBB.end())
        break;
      SlotIndex Next = LIS->getInstructionIndex(*NextI);
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;

  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  // Move insertion point past any operations modifying EXEC.
  // This assumes that the value of SCC defined by any of these operations
  // does not need to be preserved.
  while (MBBI != Last) {
    bool IsExecDef = false;
    for (const MachineOperand &MO : MBBI->all_defs()) {
      IsExecDef |=
          MO.getReg() == AMDGPU::EXEC_LO || MO.getReg() == AMDGPU::EXEC;
    }
    if (!IsExecDef)
      break;
    MBBI++;
    S = nullptr;
  }

  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}

void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              Register SaveWQM) {
  assert(LiveMaskReg.isVirtual());

  bool IsTerminator = Before == MBB.end();
  if (!IsTerminator) {
    auto FirstTerm = MBB.getFirstTerminator();
    if (FirstTerm != MBB.end()) {
      SlotIndex FirstTermIdx = LIS->getInstructionIndex(*FirstTerm);
      SlotIndex BeforeIdx = LIS->getInstructionIndex(*Before);
      IsTerminator = BeforeIdx > FirstTermIdx;
    }
  }

  MachineInstr *MI;

  if (SaveWQM) {
    unsigned Opcode = IsTerminator ? AndSaveExecTermOpc : AndSaveExecOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), SaveWQM)
             .addReg(LiveMaskReg);
  } else {
    unsigned Opcode = IsTerminator ? AndTermOpc : AndOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), Exec)
             .addReg(Exec)
             .addReg(LiveMaskReg);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateExact;
}

void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            Register SavedWQM) {
  MachineInstr *MI;

  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
             .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(WQMOpc), Exec).addReg(Exec);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateWQM;
}

void SIWholeQuadMode::toStrictMode(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator Before,
                                   Register SaveOrig, char StrictStateNeeded) {
  MachineInstr *MI;
  assert(SaveOrig);
  assert(StrictStateNeeded == StateStrictWWM ||
         StrictStateNeeded == StateStrictWQM);

  if (StrictStateNeeded == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WWM),
                 SaveOrig)
             .addImm(-1);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WQM),
                 SaveOrig)
             .addImm(-1);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StrictStateNeeded;
}

void SIWholeQuadMode::fromStrictMode(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator Before,
                                     Register SavedOrig, char NonStrictState,
                                     char CurrentStrictState) {
  MachineInstr *MI;

  assert(SavedOrig);
  assert(CurrentStrictState == StateStrictWWM ||
         CurrentStrictState == StateStrictWQM);

  if (CurrentStrictState == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WWM),
                 Exec)
             .addReg(SavedOrig);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WQM),
                 Exec)
             .addReg(SavedOrig);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = NonStrictState;
}

void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, bool IsEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  BlockInfo &BI = BII->second;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!IsEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) {
    BI.InitialState = StateWQM;
    return;
  }

  LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
                    << ":\n");

  Register SavedWQMReg;
  Register SavedNonStrictReg;
  bool WQMFromExec = IsEntry;
  char State = (IsEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
  char NonStrictState = 0;
  const TargetRegisterClass *BoolRC = TRI->getBoolRC();

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (IsEntry) {
    // Skip the instruction that saves LiveMask
    if (II != IE && II->getOpcode() == AMDGPU::COPY &&
        II->getOperand(1).getReg() == TRI->getExec())
      ++II;
  }

  // This stores the first instruction where it's safe to switch from WQM to
  // Exact or vice versa.
  MachineBasicBlock::iterator FirstWQM = IE;

  // This stores the first instruction where it's safe to switch from Strict
  // mode to Exact/WQM or to switch to Strict mode. It must always be the same
  // as, or after, FirstWQM since if it's safe to switch to/from Strict, it must
  // be safe to switch to/from WQM as well.
  MachineBasicBlock::iterator FirstStrict = IE;

  // Record the initial state in the block information.
  BI.InitialState = State;

  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = StateExact | StateWQM; // Strict mode is disabled by default.
    char OutNeeds = 0;

    if (FirstWQM == IE)
      FirstWQM = II;

    if (FirstStrict == IE)
      FirstStrict = II;

    // First, figure out the allowed states (Needs) based on the propagated
    // flags.
    if (II != IE) {
      MachineInstr &MI = *II;

      if (MI.isTerminator() || TII->mayReadEXEC(*MRI, MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          if (III->second.Needs & StateStrictWWM)
            Needs = StateStrictWWM;
          else if (III->second.Needs & StateStrictWQM)
            Needs = StateStrictWQM;
          else if (III->second.Needs & StateWQM)
            Needs = StateWQM;
          else
            Needs &= ~III->second.Disabled;
          OutNeeds = III->second.OutNeeds;
        }
      } else {
        // If the instruction doesn't actually need a correct EXEC, then we can
        // safely leave Strict mode enabled.
        Needs = StateExact | StateWQM | StateStrict;
      }

      // Exact mode exit can occur in terminators, but must be before branches.
      if (MI.isBranch() && OutNeeds == StateExact)
        Needs = StateExact;

      ++Next;
    } else {
      // End of basic block
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
      else
        Needs = StateWQM | StateExact;
    }

    // Now, transition if necessary.
    if (!(Needs & State)) {
      MachineBasicBlock::iterator First;
      if (State == StateStrictWWM || Needs == StateStrictWWM ||
          State == StateStrictWQM || Needs == StateStrictWQM) {
        // We must switch to or from Strict mode.
        First = FirstStrict;
      } else {
        // We only need to switch to/from WQM, so we can use FirstWQM.
        First = FirstWQM;
      }

      // Whether we need to save SCC depends on start and end states.
      bool SaveSCC = false;
      switch (State) {
      case StateExact:
      case StateStrictWWM:
      case StateStrictWQM:
        // Exact/Strict -> Strict: save SCC
        // Exact/Strict -> WQM: save SCC if WQM mask is generated from exec
        // Exact/Strict -> Exact: no save
        SaveSCC = (Needs & StateStrict) || ((Needs & StateWQM) && WQMFromExec);
        break;
      case StateWQM:
        // WQM -> Exact/Strict: save SCC
        SaveSCC = !(Needs & StateWQM);
        break;
      default:
        llvm_unreachable("Unknown state");
        break;
      }
      char StartState = State & StateStrict ? NonStrictState : State;
      bool WQMToExact =
          StartState == StateWQM && (Needs & StateExact) && !(Needs & StateWQM);
      bool ExactToWQM = StartState == StateExact && (Needs & StateWQM) &&
                        !(Needs & StateExact);
      bool PreferLast = Needs == StateWQM;
      // Exact regions in divergent control flow may run at EXEC=0, so try to
      // exclude instructions with unexpected effects from them.
      // FIXME: ideally we would branch over these when EXEC=0,
      // but this requires updating implicit values, live intervals and CFG.
      if ((WQMToExact && (OutNeeds & StateWQM)) || ExactToWQM) {
        for (MachineBasicBlock::iterator I = First; I != II; ++I) {
          if (TII->hasUnwantedEffectsWhenEXECEmpty(*I)) {
            PreferLast = WQMToExact;
            break;
          }
        }
      }
      MachineBasicBlock::iterator Before =
          prepareInsertion(MBB, First, II, PreferLast, SaveSCC);

      if (State & StateStrict) {
        assert(State == StateStrictWWM || State == StateStrictWQM);
        assert(SavedNonStrictReg);
        fromStrictMode(MBB, Before, SavedNonStrictReg, NonStrictState, State);

        LIS->createAndComputeVirtRegInterval(SavedNonStrictReg);
        SavedNonStrictReg = 0;
        State = NonStrictState;
      }

      if (Needs & StateStrict) {
        NonStrictState = State;
        assert(Needs == StateStrictWWM || Needs == StateStrictWQM);
        assert(!SavedNonStrictReg);
        SavedNonStrictReg = MRI->createVirtualRegister(BoolRC);

        toStrictMode(MBB, Before, SavedNonStrictReg, Needs);
        State = Needs;
      } else {
        if (WQMToExact) {
          if (!WQMFromExec && (OutNeeds & StateWQM)) {
            assert(!SavedWQMReg);
            SavedWQMReg = MRI->createVirtualRegister(BoolRC);
          }

          toExact(MBB, Before, SavedWQMReg);
          State = StateExact;
        } else if (ExactToWQM) {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
          State = StateWQM;
        } else {
          // We can get here if we transitioned from StrictWWM to a
          // non-StrictWWM state that already matches our needs, in which
          // case nothing needs to be done.
          assert(Needs & State);
        }
      }
    }

    if (Needs != (StateExact | StateWQM | StateStrict)) {
      if (Needs != (StateExact | StateWQM))
        FirstWQM = IE;
      FirstStrict = IE;
    }

    if (II == IE)
      break;

    II = Next;
  }
  assert(!SavedWQMReg);
  assert(!SavedNonStrictReg);
}

bool SIWholeQuadMode::lowerLiveMaskQueries() {
  for (MachineInstr *MI : LiveMaskQueries) {
    const DebugLoc &DL = MI->getDebugLoc();
    Register Dest = MI->getOperand(0).getReg();

    MachineInstr *Copy =
        BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
            .addReg(LiveMaskReg);

    LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
    MI->eraseFromParent();
  }
  return !LiveMaskQueries.empty();
}

bool SIWholeQuadMode::lowerCopyInstrs() {
  for (MachineInstr *MI : LowerToMovInstrs) {
    assert(MI->getNumExplicitOperands() == 2);

    const Register Reg = MI->getOperand(0).getReg();

    const TargetRegisterClass *regClass =
        TRI->getRegClassForOperandReg(*MRI, MI->getOperand(0));
    if (TRI->isVGPRClass(regClass)) {
      const unsigned MovOp = TII->getMovOpcode(regClass);
      MI->setDesc(TII->get(MovOp));

      // Check that it already implicitly depends on exec (like all VALU movs
      // should do).
      assert(any_of(MI->implicit_operands(), [](const MachineOperand &MO) {
        return MO.isUse() && MO.getReg() == AMDGPU::EXEC;
      }));
    } else {
      // Remove early-clobber and exec dependency from simple SGPR copies.
      // This allows some to be eliminated during/post RA.
      LLVM_DEBUG(dbgs() << "simplify SGPR copy: " << *MI);
      if (MI->getOperand(0).isEarlyClobber()) {
        LIS->removeInterval(Reg);
        MI->getOperand(0).setIsEarlyClobber(false);
        LIS->createAndComputeVirtRegInterval(Reg);
      }
      int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, /*TRI=*/nullptr);
      while (Index >= 0) {
        MI->removeOperand(Index);
        Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, /*TRI=*/nullptr);
      }
      MI->setDesc(TII->get(AMDGPU::COPY));
      LLVM_DEBUG(dbgs() << "  -> " << *MI);
    }
  }
  for (MachineInstr *MI : LowerToCopyInstrs) {
    if (MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B32 ||
        MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B64) {
      assert(MI->getNumExplicitOperands() == 3);
      // The only reason we should be here is that V_SET_INACTIVE has
      // an undef input, so it is being replaced by a simple copy.
      // There should be a second undef source that we should remove.
      assert(MI->getOperand(2).isUndef());
      MI->removeOperand(2);
      MI->untieRegOperand(1);
    } else {
      assert(MI->getNumExplicitOperands() == 2);
    }

    unsigned CopyOp = MI->getOperand(1).isReg()
                          ? (unsigned)AMDGPU::COPY
                          : TII->getMovOpcode(TRI->getRegClassForOperandReg(
                                *MRI, MI->getOperand(0)));
    MI->setDesc(TII->get(CopyOp));
  }
  return !LowerToCopyInstrs.empty() || !LowerToMovInstrs.empty();
}

bool SIWholeQuadMode::lowerKillInstrs(bool IsWQM) {
  for (MachineInstr *MI : KillInstrs) {
    MachineBasicBlock *MBB = MI->getParent();
    MachineInstr *SplitPoint = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(*MBB, *MI, IsWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(*MBB, *MI);
      break;
    }
    if (SplitPoint)
      splitBlock(MBB, SplitPoint);
  }
  return !KillInstrs.empty();
}

void SIWholeQuadMode::lowerInitExec(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getParent();
  bool IsWave32 = ST->isWave32();

  if (MI.getOpcode() == AMDGPU::SI_INIT_EXEC) {
    // This should be before all vector instructions.
    MachineInstr *InitMI =
        BuildMI(*MBB, MBB->begin(), MI.getDebugLoc(),
                TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
                Exec)
            .addImm(MI.getOperand(0).getImm());
    if (LIS) {
      LIS->RemoveMachineInstrFromMaps(MI);
      LIS->InsertMachineInstrInMaps(*InitMI);
    }
    MI.eraseFromParent();
    return;
  }

  // Extract the thread count from an SGPR input and set EXEC accordingly.
  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
  //
  //   S_BFE_U32 count, input, {shift, 7}
  //   S_BFM_B64 exec, count, 0
  //   S_CMP_EQ_U32 count, 64
  //   S_CMOV_B64 exec, -1
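  //
  // For example, count == 10 yields EXEC = 0x3FF (ten low lanes active);
  // only count == WavefrontSize takes the CMOV path and enables all lanes.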
  Register InputReg = MI.getOperand(0).getReg();
  MachineInstr *FirstMI = &*MBB->begin();
  if (InputReg.isVirtual()) {
    MachineInstr *DefInstr = MRI->getVRegDef(InputReg);
    assert(DefInstr && DefInstr->isCopy());
    if (DefInstr->getParent() == MBB) {
      if (DefInstr != FirstMI) {
        // If the `InputReg` is defined in the current block, we also need to
        // move that instruction to the beginning of the block.
        DefInstr->removeFromParent();
        MBB->insert(FirstMI, DefInstr);
        if (LIS)
          LIS->handleMove(*DefInstr);
      } else {
        // If the first instruction is the definition, move the pointer past it.
        FirstMI = &*std::next(FirstMI->getIterator());
      }
    }
  }

  // Insert instruction sequence at block beginning (before vector operations).
  const DebugLoc DL = MI.getDebugLoc();
  const unsigned WavefrontSize = ST->getWavefrontSize();
  const unsigned Mask = (WavefrontSize << 1) - 1;
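  // Mask bounds the BFE offset immediate; ORing with 0x70000 places a width
  // of 7 bits in bits [22:16] of the S_BFE_U32 source encoding.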
  Register CountReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
                   .addReg(InputReg)
                   .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
  auto BfmMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), Exec)
          .addReg(CountReg)
          .addImm(0);
  auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
                   .addReg(CountReg, RegState::Kill)
                   .addImm(WavefrontSize);
  auto CmovMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
              Exec)
          .addImm(-1);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*BfeMI);
  LIS->InsertMachineInstrInMaps(*BfmMI);
  LIS->InsertMachineInstrInMaps(*CmpMI);
  LIS->InsertMachineInstrInMaps(*CmovMI);

  LIS->removeInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(CountReg);
}

/// Lower INIT_EXEC instructions. Return a suitable insert point in \p Entry
/// for instructions that depend on EXEC.
MachineBasicBlock::iterator
SIWholeQuadMode::lowerInitExecInstrs(MachineBasicBlock &Entry, bool &Changed) {
  MachineBasicBlock::iterator InsertPt = Entry.getFirstNonPHI();

  for (MachineInstr *MI : InitExecInstrs) {
    // Try to handle undefined cases gracefully:
    // - multiple INIT_EXEC instructions
    // - INIT_EXEC instructions not in the entry block
    if (MI->getParent() == &Entry)
      InsertPt = std::next(MI->getIterator());

    lowerInitExec(*MI);
    Changed = true;
  }

  return InsertPt;
}

bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "SI Whole Quad Mode on " << MF.getName()
                    << " ------------- \n");
  LLVM_DEBUG(MF.dump(););

  Instructions.clear();
  Blocks.clear();
  LiveMaskQueries.clear();
  LowerToCopyInstrs.clear();
  LowerToMovInstrs.clear();
  KillInstrs.clear();
  InitExecInstrs.clear();
  StateTransition.clear();

  ST = &MF.getSubtarget<GCNSubtarget>();

  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
  auto *MDTWrapper = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>();
  MDT = MDTWrapper ? &MDTWrapper->getDomTree() : nullptr;
  auto *PDTWrapper =
      getAnalysisIfAvailable<MachinePostDominatorTreeWrapperPass>();
  PDT = PDTWrapper ? &PDTWrapper->getPostDomTree() : nullptr;

  if (ST->isWave32()) {
    AndOpc = AMDGPU::S_AND_B32;
    AndTermOpc = AMDGPU::S_AND_B32_term;
    AndN2Opc = AMDGPU::S_ANDN2_B32;
    XorOpc = AMDGPU::S_XOR_B32;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B32;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B32_term;
    WQMOpc = AMDGPU::S_WQM_B32;
    Exec = AMDGPU::EXEC_LO;
  } else {
    AndOpc = AMDGPU::S_AND_B64;
    AndTermOpc = AMDGPU::S_AND_B64_term;
    AndN2Opc = AMDGPU::S_ANDN2_B64;
    XorOpc = AMDGPU::S_XOR_B64;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B64;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B64_term;
    WQMOpc = AMDGPU::S_WQM_B64;
    Exec = AMDGPU::EXEC;
  }

  const char GlobalFlags = analyzeFunction(MF);
  bool Changed = false;

  LiveMaskReg = Exec;

  MachineBasicBlock &Entry = MF.front();
  MachineBasicBlock::iterator EntryMI = lowerInitExecInstrs(Entry, Changed);

  // Store a copy of the original live mask when required
  const bool HasLiveMaskQueries = !LiveMaskQueries.empty();
  const bool HasWaveModes = GlobalFlags & ~StateExact;
  const bool HasKills = !KillInstrs.empty();
  const bool UsesWQM = GlobalFlags & StateWQM;
  if (HasKills || UsesWQM || (HasWaveModes && HasLiveMaskQueries)) {
    LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
    MachineInstr *MI =
        BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
            .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
    Changed = true;
  }

  LLVM_DEBUG(printInfo());

  Changed |= lowerLiveMaskQueries();
  Changed |= lowerCopyInstrs();

  if (!HasWaveModes) {
    // No wave mode execution
    Changed |= lowerKillInstrs(false);
  } else if (GlobalFlags == StateWQM) {
    // Shader only needs WQM
    auto MI = BuildMI(Entry, EntryMI, DebugLoc(), TII->get(WQMOpc), Exec)
                  .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
    lowerKillInstrs(true);
    Changed = true;
  } else {
    // Wave mode switching requires a full lowering pass.
    for (auto BII : Blocks)
      processBlock(*BII.first, BII.first == &Entry);
    // Lowering blocks causes block splitting, so perform as a second pass.
    for (auto BII : Blocks)
      lowerBlock(*BII.first);
    Changed = true;
  }

  // Compute live range for live mask
  if (LiveMaskReg != Exec)
    LIS->createAndComputeVirtRegInterval(LiveMaskReg);

  // Physical registers like SCC aren't tracked by default anyway, so just
  // removing the ranges we computed is the simplest option for maintaining
  // the analysis results.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::SCC);

  // If we performed any kills then recompute EXEC
  if (!KillInstrs.empty() || !InitExecInstrs.empty())
    LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);

  return Changed;
}
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineBasicBlock & front() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
bool isCopy() const
MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
Definition: SlotIndexes.h:224
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
LLVM Value Representation.
Definition: Value.h:74
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char WavefrontSize[]
Key for Kernel::CodeProps::Metadata::mWavefrontSize.
Key
PAL metadata keys.
LLVM_READONLY int getVOPe32(uint16_t Opcode)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Entry
Definition: COFF.h:826
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ Define
Register definition.
@ Kill
The last use of a register.
Reg
All possible values of the reg field in the ModR/M byte.
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
FunctionPass * createSIWholeQuadModePass()
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
char & SIWholeQuadModeID
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:292
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162