Bug Summary

File: llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
Warning: line 628, column 7
Forming reference to null pointer
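
The analyzer's path (the numbered steps in the listing below) enters SILowerI1Copies::lowerPhis(), assumes the pointer returned by MI->getParent() is null when the reference MBB is bound at line 561, and then reports the null reference when MBB is passed to PIA.analyze() at line 628. In the pass itself, every PHI in Vreg1Phis was collected by iterating over a basic block, so its parent should never actually be null; the checker simply cannot see that invariant along this path. A minimal, self-contained sketch of the flagged shape, using hypothetical names rather than the real LLVM classes:

  // Sketch only: a reference is bound through a pointer the analyzer treats as
  // possibly null, and that reference is later handed to a callee.
  struct Block {};

  struct Instr {
    Block *Parent = nullptr;        // analyzer assumption: "pointer value is null"
    Block *getParent() { return Parent; }
  };

  void analyze(Block &B) {}         // expects a valid reference

  void lower(Instr *MI) {
    Block &MBB = *MI->getParent();  // "'MBB' initialized here"
    analyze(MBB);                   // "Forming reference to null pointer"
  }

  int main() {
    Instr I;
    Block B;
    I.Parent = &B;                  // with a non-null parent the path is well-defined
    lower(&I);
    return 0;
  }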

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SILowerI1Copies.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
1//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
10// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
11// form and a wave-level control flow graph.
12//
13// Before this pass, values that are semantically i1 and are defined and used
14// within the same basic block are already represented as lane masks in scalar
15// registers. However, values that cross basic blocks are always transferred
16// between basic blocks in vreg_1 virtual registers and are lowered by this
17// pass.
18//
19// The only instructions that use or define vreg_1 virtual registers are COPY,
20// PHI, and IMPLICIT_DEF.
21//
22//===----------------------------------------------------------------------===//
23
24#include "AMDGPU.h"
25#include "AMDGPUSubtarget.h"
26#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
27#include "SIInstrInfo.h"
28#include "llvm/CodeGen/MachineDominators.h"
29#include "llvm/CodeGen/MachineFunctionPass.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachinePostDominators.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/CodeGen/MachineSSAUpdater.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/LLVMContext.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Target/TargetMachine.h"
38
39#define DEBUG_TYPE "si-i1-copies"
40
41using namespace llvm;
42
43static unsigned createLaneMaskReg(MachineFunction &MF);
44static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);
45
46namespace {
47
48class SILowerI1Copies : public MachineFunctionPass {
49public:
50 static char ID;
51
52private:
53 bool IsWave32 = false;
54 MachineFunction *MF = nullptr;
55 MachineDominatorTree *DT = nullptr;
56 MachinePostDominatorTree *PDT = nullptr;
57 MachineRegisterInfo *MRI = nullptr;
58 const GCNSubtarget *ST = nullptr;
59 const SIInstrInfo *TII = nullptr;
60
61 unsigned ExecReg;
62 unsigned MovOp;
63 unsigned AndOp;
64 unsigned OrOp;
65 unsigned XorOp;
66 unsigned AndN2Op;
67 unsigned OrN2Op;
68
69 DenseSet<unsigned> ConstrainRegs;
70
71public:
72 SILowerI1Copies() : MachineFunctionPass(ID) {
73 initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
74 }
75
76 bool runOnMachineFunction(MachineFunction &MF) override;
77
78 StringRef getPassName() const override { return "SI Lower i1 Copies"; }
79
80 void getAnalysisUsage(AnalysisUsage &AU) const override {
81 AU.setPreservesCFG();
82 AU.addRequired<MachineDominatorTree>();
83 AU.addRequired<MachinePostDominatorTree>();
84 MachineFunctionPass::getAnalysisUsage(AU);
85 }
86
87private:
88 void lowerCopiesFromI1();
89 void lowerPhis();
90 void lowerCopiesToI1();
91 bool isConstantLaneMask(unsigned Reg, bool &Val) const;
92 void buildMergeLaneMasks(MachineBasicBlock &MBB,
93 MachineBasicBlock::iterator I, const DebugLoc &DL,
94 unsigned DstReg, unsigned PrevReg, unsigned CurReg);
95 MachineBasicBlock::iterator
96 getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;
97
98 bool isVreg1(unsigned Reg) const {
99 return Register::isVirtualRegister(Reg) &&
100 MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
101 }
102
103 bool isLaneMaskReg(unsigned Reg) const {
104 return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
105 TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
106 ST->getWavefrontSize();
107 }
108};
109
110/// Helper class that determines the relationship between incoming values of a
111/// phi in the control flow graph to determine where an incoming value can
112/// simply be taken as a scalar lane mask as-is, and where it needs to be
113/// merged with another, previously defined lane mask.
114///
115/// The approach is as follows:
116/// - Determine all basic blocks which, starting from the incoming blocks,
117/// a wave may reach before entering the def block (the block containing the
118/// phi).
119/// - If an incoming block has no predecessors in this set, we can take the
120/// incoming value as a scalar lane mask as-is.
121/// -- A special case of this is when the def block has a self-loop.
122/// - Otherwise, the incoming value needs to be merged with a previously
123/// defined lane mask.
124/// - If there is a path into the set of reachable blocks that does _not_ go
125/// through an incoming block where we can take the scalar lane mask as-is,
126/// we need to invent an available value for the SSAUpdater. Choices are
127/// 0 and undef, with differing consequences for how to merge values etc.
128///
129/// TODO: We could use region analysis to quickly skip over SESE regions during
130/// the traversal.
131///
132class PhiIncomingAnalysis {
133 MachinePostDominatorTree &PDT;
134
135 // For each reachable basic block, whether it is a source in the induced
136 // subgraph of the CFG.
137 DenseMap<MachineBasicBlock *, bool> ReachableMap;
138 SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
139 SmallVector<MachineBasicBlock *, 4> Stack;
140 SmallVector<MachineBasicBlock *, 4> Predecessors;
141
142public:
143 PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}
144
145 /// Returns whether \p MBB is a source in the induced subgraph of reachable
146 /// blocks.
147 bool isSource(MachineBasicBlock &MBB) const {
148 return ReachableMap.find(&MBB)->second;
149 }
150
151 ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }
152
153 void analyze(MachineBasicBlock &DefBlock,
154 ArrayRef<MachineBasicBlock *> IncomingBlocks) {
155 assert(Stack.empty());
156 ReachableMap.clear();
157 ReachableOrdered.clear();
158 Predecessors.clear();
159
160 // Insert the def block first, so that it acts as an end point for the
161 // traversal.
162 ReachableMap.try_emplace(&DefBlock, false);
163 ReachableOrdered.push_back(&DefBlock);
164
165 for (MachineBasicBlock *MBB : IncomingBlocks) {
166 if (MBB == &DefBlock) {
167 ReachableMap[&DefBlock] = true; // self-loop on DefBlock
168 continue;
169 }
170
171 ReachableMap.try_emplace(MBB, false);
172 ReachableOrdered.push_back(MBB);
173
174 // If this block has a divergent terminator and the def block is its
175 // post-dominator, the wave may first visit the other successors.
176 bool Divergent = false;
177 for (MachineInstr &MI : MBB->terminators()) {
178 if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
179 MI.getOpcode() == AMDGPU::SI_IF ||
180 MI.getOpcode() == AMDGPU::SI_ELSE ||
181 MI.getOpcode() == AMDGPU::SI_LOOP) {
182 Divergent = true;
183 break;
184 }
185 }
186
187 if (Divergent && PDT.dominates(&DefBlock, MBB)) {
188 for (MachineBasicBlock *Succ : MBB->successors())
189 Stack.push_back(Succ);
190 }
191 }
192
193 while (!Stack.empty()) {
194 MachineBasicBlock *MBB = Stack.pop_back_val();
195 if (!ReachableMap.try_emplace(MBB, false).second)
196 continue;
197 ReachableOrdered.push_back(MBB);
198
199 for (MachineBasicBlock *Succ : MBB->successors())
200 Stack.push_back(Succ);
201 }
202
203 for (MachineBasicBlock *MBB : ReachableOrdered) {
204 bool HaveReachablePred = false;
205 for (MachineBasicBlock *Pred : MBB->predecessors()) {
206 if (ReachableMap.count(Pred)) {
207 HaveReachablePred = true;
208 } else {
209 Stack.push_back(Pred);
210 }
211 }
212 if (!HaveReachablePred)
213 ReachableMap[MBB] = true;
214 if (HaveReachablePred) {
215 for (MachineBasicBlock *UnreachablePred : Stack) {
216 if (llvm::find(Predecessors, UnreachablePred) == Predecessors.end())
217 Predecessors.push_back(UnreachablePred);
218 }
219 }
220 Stack.clear();
221 }
222 }
223};
224
225/// Helper class that detects loops which require us to lower an i1 COPY into
226/// bitwise manipulation.
227///
228/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
229/// between loops with the same header. Consider this example:
230///
231/// A-+-+
232/// | | |
233/// B-+ |
234/// | |
235/// C---+
236///
237/// A is the header of a loop containing A, B, and C as far as LoopInfo is
238/// concerned. However, an i1 COPY in B that is used in C must be lowered to
239/// bitwise operations to combine results from different loop iterations when
240/// B has a divergent branch (since by default we will compile this code such
241/// that threads in a wave are merged at the entry of C).
242///
243/// The following rule is implemented to determine whether bitwise operations
244/// are required: use the bitwise lowering for a def in block B if a backward
245/// edge to B is reachable without going through the nearest common
246/// post-dominator of B and all uses of the def.
247///
248/// TODO: This rule is conservative because it does not check whether the
249/// relevant branches are actually divergent.
250///
251/// The class is designed to cache the CFG traversal so that it can be re-used
252/// for multiple defs within the same basic block.
253///
254/// TODO: We could use region analysis to quickly skip over SESE regions during
255/// the traversal.
256///
257class LoopFinder {
258 MachineDominatorTree &DT;
259 MachinePostDominatorTree &PDT;
260
261 // All visited / reachable block, tagged by level (level 0 is the def block,
262 // level 1 are all blocks reachable including but not going through the def
263 // block's IPDOM, etc.).
264 DenseMap<MachineBasicBlock *, unsigned> Visited;
265
266 // Nearest common dominator of all visited blocks by level (level 0 is the
267 // def block). Used for seeding the SSAUpdater.
268 SmallVector<MachineBasicBlock *, 4> CommonDominators;
269
270 // Post-dominator of all visited blocks.
271 MachineBasicBlock *VisitedPostDom = nullptr;
272
273 // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
274 // reachable without going through the IPDOM of the def block (if the IPDOM
275 // itself has an edge to the def block, the loop level is 2), etc.
276 unsigned FoundLoopLevel = ~0u;
277
278 MachineBasicBlock *DefBlock = nullptr;
279 SmallVector<MachineBasicBlock *, 4> Stack;
280 SmallVector<MachineBasicBlock *, 4> NextLevel;
281
282public:
283 LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
284 : DT(DT), PDT(PDT) {}
285
286 void initialize(MachineBasicBlock &MBB) {
287 Visited.clear();
288 CommonDominators.clear();
289 Stack.clear();
290 NextLevel.clear();
291 VisitedPostDom = nullptr;
292 FoundLoopLevel = ~0u;
293
294 DefBlock = &MBB;
295 }
296
297 /// Check whether a backward edge can be reached without going through the
298 /// given \p PostDom of the def block.
299 ///
300 /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
301 unsigned findLoop(MachineBasicBlock *PostDom) {
302 MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);
303
304 if (!VisitedPostDom)
13. Assuming field 'VisitedPostDom' is non-null
14. Taking false branch
305 advanceLevel();
306
307 unsigned Level = 0;
308 while (PDNode->getBlock() != PostDom) {
15. Assuming the condition is false
16. Loop condition is false. Execution continues on line 317
309 if (PDNode->getBlock() == VisitedPostDom)
310 advanceLevel();
311 PDNode = PDNode->getIDom();
312 Level++;
313 if (FoundLoopLevel == Level)
314 return Level;
315 }
316
317 return 0;
17. Returning zero, which participates in a condition later
318 }
319
320 /// Add undef values dominating the loop and the optionally given additional
321 /// blocks, so that the SSA updater doesn't have to search all the way to the
322 /// function entry.
323 void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
324 ArrayRef<MachineBasicBlock *> Blocks = {}) {
325 assert(LoopLevel < CommonDominators.size());
326
327 MachineBasicBlock *Dom = CommonDominators[LoopLevel];
328 for (MachineBasicBlock *MBB : Blocks)
329 Dom = DT.findNearestCommonDominator(Dom, MBB);
330
331 if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
332 SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
333 } else {
334 // The dominator is part of the loop or the given blocks, so add the
335 // undef value to unreachable predecessors instead.
336 for (MachineBasicBlock *Pred : Dom->predecessors()) {
337 if (!inLoopLevel(*Pred, LoopLevel, Blocks))
338 SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
339 }
340 }
341 }
342
343private:
344 bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
345 ArrayRef<MachineBasicBlock *> Blocks) const {
346 auto DomIt = Visited.find(&MBB);
347 if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
348 return true;
349
350 if (llvm::find(Blocks, &MBB) != Blocks.end())
351 return true;
352
353 return false;
354 }
355
356 void advanceLevel() {
357 MachineBasicBlock *VisitedDom;
358
359 if (!VisitedPostDom) {
360 VisitedPostDom = DefBlock;
361 VisitedDom = DefBlock;
362 Stack.push_back(DefBlock);
363 } else {
364 VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
365 VisitedDom = CommonDominators.back();
366
367 for (unsigned i = 0; i < NextLevel.size();) {
368 if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
369 Stack.push_back(NextLevel[i]);
370
371 NextLevel[i] = NextLevel.back();
372 NextLevel.pop_back();
373 } else {
374 i++;
375 }
376 }
377 }
378
379 unsigned Level = CommonDominators.size();
380 while (!Stack.empty()) {
381 MachineBasicBlock *MBB = Stack.pop_back_val();
382 if (!PDT.dominates(VisitedPostDom, MBB))
383 NextLevel.push_back(MBB);
384
385 Visited[MBB] = Level;
386 VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);
387
388 for (MachineBasicBlock *Succ : MBB->successors()) {
389 if (Succ == DefBlock) {
390 if (MBB == VisitedPostDom)
391 FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
392 else
393 FoundLoopLevel = std::min(FoundLoopLevel, Level);
394 continue;
395 }
396
397 if (Visited.try_emplace(Succ, ~0u).second) {
398 if (MBB == VisitedPostDom)
399 NextLevel.push_back(Succ);
400 else
401 Stack.push_back(Succ);
402 }
403 }
404 }
405
406 CommonDominators.push_back(VisitedDom);
407 }
408};
409
410} // End anonymous namespace.
411
412INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
413 false)
414INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
415INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
416INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
417 false)
418
419char SILowerI1Copies::ID = 0;
420
421char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;
422
423FunctionPass *llvm::createSILowerI1CopiesPass() {
424 return new SILowerI1Copies();
425}
426
427static unsigned createLaneMaskReg(MachineFunction &MF) {
428 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
429 MachineRegisterInfo &MRI = MF.getRegInfo();
430 return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
431 : &AMDGPU::SReg_64RegClass);
432}
433
434static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
435 MachineFunction &MF = *MBB.getParent();
436 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
437 const SIInstrInfo *TII = ST.getInstrInfo();
438 unsigned UndefReg = createLaneMaskReg(MF);
439 BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
440 UndefReg);
441 return UndefReg;
442}
443
444/// Lower all instructions that def or use vreg_1 registers.
445///
446/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
447/// occur around inline assembly. We do this first, before vreg_1 registers
448/// are changed to scalar mask registers.
449///
450/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
451/// all others, because phi lowering looks through copies and can therefore
452/// often make copy lowering unnecessary.
453bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
454 MF = &TheMF;
455 MRI = &MF->getRegInfo();
456 DT = &getAnalysis<MachineDominatorTree>();
457 PDT = &getAnalysis<MachinePostDominatorTree>();
458
459 ST = &MF->getSubtarget<GCNSubtarget>();
460 TII = ST->getInstrInfo();
461 IsWave32 = ST->isWave32();
462
463 if (IsWave32) {
0.1. Field 'IsWave32' is false
1. Taking false branch
464 ExecReg = AMDGPU::EXEC_LO;
465 MovOp = AMDGPU::S_MOV_B32;
466 AndOp = AMDGPU::S_AND_B32;
467 OrOp = AMDGPU::S_OR_B32;
468 XorOp = AMDGPU::S_XOR_B32;
469 AndN2Op = AMDGPU::S_ANDN2_B32;
470 OrN2Op = AMDGPU::S_ORN2_B32;
471 } else {
472 ExecReg = AMDGPU::EXEC;
473 MovOp = AMDGPU::S_MOV_B64;
474 AndOp = AMDGPU::S_AND_B64;
475 OrOp = AMDGPU::S_OR_B64;
476 XorOp = AMDGPU::S_XOR_B64;
477 AndN2Op = AMDGPU::S_ANDN2_B64;
478 OrN2Op = AMDGPU::S_ORN2_B64;
479 }
480
481 lowerCopiesFromI1();
482 lowerPhis();
2. Calling 'SILowerI1Copies::lowerPhis'
483 lowerCopiesToI1();
484
485 for (unsigned Reg : ConstrainRegs)
486 MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
487 ConstrainRegs.clear();
488
489 return true;
490}
491
492#ifndef NDEBUG
493static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
494 const MachineRegisterInfo &MRI,
495 Register Reg) {
496 unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
497 return Size == 1 || Size == 32;
498}
499#endif
500
501void SILowerI1Copies::lowerCopiesFromI1() {
502 SmallVector<MachineInstr *, 4> DeadCopies;
503
504 for (MachineBasicBlock &MBB : *MF) {
505 for (MachineInstr &MI : MBB) {
506 if (MI.getOpcode() != AMDGPU::COPY)
507 continue;
508
509 Register DstReg = MI.getOperand(0).getReg();
510 Register SrcReg = MI.getOperand(1).getReg();
511 if (!isVreg1(SrcReg))
512 continue;
513
514 if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
515 continue;
516
517 // Copy into a 32-bit vector register.
518 LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
519 DebugLoc DL = MI.getDebugLoc();
520
521 assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
522 assert(!MI.getOperand(0).getSubReg());
523
524 ConstrainRegs.insert(SrcReg);
525 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
526 .addImm(0)
527 .addImm(0)
528 .addImm(0)
529 .addImm(-1)
530 .addReg(SrcReg);
531 DeadCopies.push_back(&MI);
532 }
533
534 for (MachineInstr *MI : DeadCopies)
535 MI->eraseFromParent();
536 DeadCopies.clear();
537 }
538}
539
540void SILowerI1Copies::lowerPhis() {
541 MachineSSAUpdater SSAUpdater(*MF);
542 LoopFinder LF(*DT, *PDT);
543 PhiIncomingAnalysis PIA(*PDT);
544 SmallVector<MachineInstr *, 4> Vreg1Phis;
545 SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
546 SmallVector<unsigned, 4> IncomingRegs;
547 SmallVector<unsigned, 4> IncomingUpdated;
548#ifndef NDEBUG
549 DenseSet<unsigned> PhiRegisters;
550#endif
551
552 for (MachineBasicBlock &MBB : *MF) {
553 for (MachineInstr &MI : MBB.phis()) {
554 if (isVreg1(MI.getOperand(0).getReg()))
555 Vreg1Phis.push_back(&MI);
556 }
557 }
558
559 MachineBasicBlock *PrevMBB = nullptr;
560 for (MachineInstr *MI : Vreg1Phis) {
3. Assuming '__begin1' is not equal to '__end1'
561 MachineBasicBlock &MBB = *MI->getParent();
4. 'MBB' initialized here
562 if (&MBB != PrevMBB) {
5. Assuming pointer value is null
6. Taking false branch
563 LF.initialize(MBB);
564 PrevMBB = &MBB;
565 }
566
567 LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);
7. Assuming 'DebugFlag' is false
8. Loop condition is false. Exiting loop
568
569 Register DstReg = MI->getOperand(0).getReg();
570 MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
571 : &AMDGPU::SReg_64RegClass);
8.1. Field 'IsWave32' is false
9. '?' condition is false
572
573 // Collect incoming values.
574 for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
10. Assuming the condition is false
11. Loop condition is false. Execution continues on line 595
575 assert(i + 1 < MI->getNumOperands());
576 Register IncomingReg = MI->getOperand(i).getReg();
577 MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
578 MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);
579
580 if (IncomingDef->getOpcode() == AMDGPU::COPY) {
581 IncomingReg = IncomingDef->getOperand(1).getReg();
582 assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
583 assert(!IncomingDef->getOperand(1).getSubReg());
584 } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
585 continue;
586 } else {
587 assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
588 }
589
590 IncomingBlocks.push_back(IncomingMBB);
591 IncomingRegs.push_back(IncomingReg);
592 }
593
594#ifndef NDEBUG
595 PhiRegisters.insert(DstReg);
596#endif
597
598 // Phis in a loop that are observed outside the loop receive a simple but
599 // conservatively correct treatment.
600 std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
601 for (MachineInstr &Use : MRI->use_instructions(DstReg))
602 DomBlocks.push_back(Use.getParent());
603
604 MachineBasicBlock *PostDomBound =
605 PDT->findNearestCommonDominator(DomBlocks);
606 unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
12. Calling 'LoopFinder::findLoop'
18. Returning from 'LoopFinder::findLoop'
607
608 SSAUpdater.Initialize(DstReg);
609
610 if (FoundLoopLevel) {
18.1. 'FoundLoopLevel' is 0
19. Taking false branch
611 LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);
612
613 for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
614 IncomingUpdated.push_back(createLaneMaskReg(*MF));
615 SSAUpdater.AddAvailableValue(IncomingBlocks[i],
616 IncomingUpdated.back());
617 }
618
619 for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
620 MachineBasicBlock &IMBB = *IncomingBlocks[i];
621 buildMergeLaneMasks(
622 IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
623 SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
624 }
625 } else {
626 // The phi is not observed from outside a loop. Use a more accurate
627 // lowering.
628 PIA.analyze(MBB, IncomingBlocks);
20. Forming reference to null pointer
629
630 for (MachineBasicBlock *MBB : PIA.predecessors())
631 SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));
632
633 for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
634 MachineBasicBlock &IMBB = *IncomingBlocks[i];
635 if (PIA.isSource(IMBB)) {
636 IncomingUpdated.push_back(0);
637 SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
638 } else {
639 IncomingUpdated.push_back(createLaneMaskReg(*MF));
640 SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
641 }
642 }
643
644 for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
645 if (!IncomingUpdated[i])
646 continue;
647
648 MachineBasicBlock &IMBB = *IncomingBlocks[i];
649 buildMergeLaneMasks(
650 IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
651 SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
652 }
653 }
654
655 unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
656 if (NewReg != DstReg) {
657 MRI->replaceRegWith(NewReg, DstReg);
658 MI->eraseFromParent();
659 }
660
661 IncomingBlocks.clear();
662 IncomingRegs.clear();
663 IncomingUpdated.clear();
664 }
665}
666
667void SILowerI1Copies::lowerCopiesToI1() {
668 MachineSSAUpdater SSAUpdater(*MF);
669 LoopFinder LF(*DT, *PDT);
670 SmallVector<MachineInstr *, 4> DeadCopies;
671
672 for (MachineBasicBlock &MBB : *MF) {
673 LF.initialize(MBB);
674
675 for (MachineInstr &MI : MBB) {
676 if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
677 MI.getOpcode() != AMDGPU::COPY)
678 continue;
679
680 Register DstReg = MI.getOperand(0).getReg();
681 if (!isVreg1(DstReg))
682 continue;
683
684 if (MRI->use_empty(DstReg)) {
685 DeadCopies.push_back(&MI);
686 continue;
687 }
688
689 LLVM_DEBUG(dbgs() << "Lower Other: " << MI);
690
691 MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
692 : &AMDGPU::SReg_64RegClass);
693 if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
694 continue;
695
696 DebugLoc DL = MI.getDebugLoc();
697 Register SrcReg = MI.getOperand(1).getReg();
698 assert(!MI.getOperand(1).getSubReg());
699
700 if (!Register::isVirtualRegister(SrcReg) ||
701 (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
702 assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
703 unsigned TmpReg = createLaneMaskReg(*MF);
704 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
705 .addReg(SrcReg)
706 .addImm(0);
707 MI.getOperand(1).setReg(TmpReg);
708 SrcReg = TmpReg;
709 }
710
711 // Defs in a loop that are observed outside the loop must be transformed
712 // into appropriate bit manipulation.
713 std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
714 for (MachineInstr &Use : MRI->use_instructions(DstReg))
715 DomBlocks.push_back(Use.getParent());
716
717 MachineBasicBlock *PostDomBound =
718 PDT->findNearestCommonDominator(DomBlocks);
719 unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
720 if (FoundLoopLevel) {
721 SSAUpdater.Initialize(DstReg);
722 SSAUpdater.AddAvailableValue(&MBB, DstReg);
723 LF.addLoopEntries(FoundLoopLevel, SSAUpdater);
724
725 buildMergeLaneMasks(MBB, MI, DL, DstReg,
726 SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
727 DeadCopies.push_back(&MI);
728 }
729 }
730
731 for (MachineInstr *MI : DeadCopies)
732 MI->eraseFromParent();
733 DeadCopies.clear();
734 }
735}
736
737bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
738 const MachineInstr *MI;
739 for (;;) {
740 MI = MRI->getUniqueVRegDef(Reg);
741 if (MI->getOpcode() != AMDGPU::COPY)
742 break;
743
744 Reg = MI->getOperand(1).getReg();
745 if (!Register::isVirtualRegister(Reg))
746 return false;
747 if (!isLaneMaskReg(Reg))
748 return false;
749 }
750
751 if (MI->getOpcode() != MovOp)
752 return false;
753
754 if (!MI->getOperand(1).isImm())
755 return false;
756
757 int64_t Imm = MI->getOperand(1).getImm();
758 if (Imm == 0) {
759 Val = false;
760 return true;
761 }
762 if (Imm == -1) {
763 Val = true;
764 return true;
765 }
766
767 return false;
768}
769
770static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
771 Def = false;
772 Use = false;
773
774 for (const MachineOperand &MO : MI.operands()) {
775 if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
776 if (MO.isUse())
777 Use = true;
778 else
779 Def = true;
780 }
781 }
782}
783
784/// Return a point at the end of the given \p MBB to insert SALU instructions
785/// for lane mask calculation. Take terminators and SCC into account.
786MachineBasicBlock::iterator
787SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
788 auto InsertionPt = MBB.getFirstTerminator();
789 bool TerminatorsUseSCC = false;
790 for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
791 bool DefsSCC;
792 instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
793 if (TerminatorsUseSCC || DefsSCC)
794 break;
795 }
796
797 if (!TerminatorsUseSCC)
798 return InsertionPt;
799
800 while (InsertionPt != MBB.begin()) {
801 InsertionPt--;
802
803 bool DefSCC, UseSCC;
804 instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
805 if (DefSCC)
806 return InsertionPt;
807 }
808
809 // We should have at least seen an IMPLICIT_DEF or COPY
810 llvm_unreachable("SCC used by terminator but no def in block");
811}
812
813void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
814 MachineBasicBlock::iterator I,
815 const DebugLoc &DL, unsigned DstReg,
816 unsigned PrevReg, unsigned CurReg) {
817 bool PrevVal;
818 bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
819 bool CurVal;
820 bool CurConstant = isConstantLaneMask(CurReg, CurVal);
821
822 if (PrevConstant && CurConstant) {
823 if (PrevVal == CurVal) {
824 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
825 } else if (CurVal) {
826 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
827 } else {
828 BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
829 .addReg(ExecReg)
830 .addImm(-1);
831 }
832 return;
833 }
834
835 unsigned PrevMaskedReg = 0;
836 unsigned CurMaskedReg = 0;
837 if (!PrevConstant) {
838 if (CurConstant && CurVal) {
839 PrevMaskedReg = PrevReg;
840 } else {
841 PrevMaskedReg = createLaneMaskReg(*MF);
842 BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
843 .addReg(PrevReg)
844 .addReg(ExecReg);
845 }
846 }
847 if (!CurConstant) {
848 // TODO: check whether CurReg is already masked by EXEC
849 if (PrevConstant && PrevVal) {
850 CurMaskedReg = CurReg;
851 } else {
852 CurMaskedReg = createLaneMaskReg(*MF);
853 BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
854 .addReg(CurReg)
855 .addReg(ExecReg);
856 }
857 }
858
859 if (PrevConstant && !PrevVal) {
860 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
861 .addReg(CurMaskedReg);
862 } else if (CurConstant && !CurVal) {
863 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
864 .addReg(PrevMaskedReg);
865 } else if (PrevConstant && PrevVal) {
866 BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
867 .addReg(CurMaskedReg)
868 .addReg(ExecReg);
869 } else {
870 BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
871 .addReg(PrevMaskedReg)
872 .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
873 }
874}