Bug Summary

File: lib/CodeGen/RegisterCoalescer.cpp
Warning: line 3216, column 18
The left operand of '==' is a garbage value
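The flagged line (3216) lies beyond the end of this excerpt, but the checker behind the diagnostic is generic: it reports a comparison whose left operand was never initialized on the path the analyzer followed. Below is a minimal, self-contained C++ sketch of that pattern; every name in it is invented for illustration, and none of it is the LLVM code under report.

    // Minimal sketch of the "garbage value" pattern: an out-parameter is left
    // unwritten on one path, the caller ignores the return value, and then
    // compares the uninitialized variable.
    static bool tryGetValue(bool HasValue, unsigned &Out) {
      if (HasValue) {
        Out = 42;
        return true;
      }
      return false; // Out is never written on this path.
    }

    bool demo() {
      unsigned V;            // Deliberately uninitialized.
      tryGetValue(false, V); // Result ignored...
      return V == 42;        // ...so the left operand of '==' is garbage.
    }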

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn318601/lib/CodeGen/RegisterCoalescer.cpp

1//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface ------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the generic RegisterCoalescer interface which
11// is used as the common interface by all clients and
12// implementations of register coalescing.
13//
14//===----------------------------------------------------------------------===//
15
16#include "RegisterCoalescer.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/CodeGen/LiveInterval.h"
25#include "llvm/CodeGen/LiveIntervalAnalysis.h"
26#include "llvm/CodeGen/LiveRangeEdit.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineFunctionPass.h"
30#include "llvm/CodeGen/MachineInstr.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineLoopInfo.h"
33#include "llvm/CodeGen/MachineOperand.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/Passes.h"
36#include "llvm/CodeGen/RegisterClassInfo.h"
37#include "llvm/CodeGen/SlotIndexes.h"
38#include "llvm/CodeGen/TargetInstrInfo.h"
39#include "llvm/CodeGen/TargetOpcodes.h"
40#include "llvm/CodeGen/TargetRegisterInfo.h"
41#include "llvm/CodeGen/TargetSubtargetInfo.h"
42#include "llvm/IR/DebugLoc.h"
43#include "llvm/MC/LaneBitmask.h"
44#include "llvm/MC/MCInstrDesc.h"
45#include "llvm/MC/MCRegisterInfo.h"
46#include "llvm/Pass.h"
47#include "llvm/Support/CommandLine.h"
48#include "llvm/Support/Compiler.h"
49#include "llvm/Support/Debug.h"
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/raw_ostream.h"
52#include <algorithm>
53#include <cassert>
54#include <iterator>
55#include <limits>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62#define DEBUG_TYPE "regalloc"
63
64STATISTIC(numJoins , "Number of interval joins performed");
65STATISTIC(numCrossRCs , "Number of cross class joins performed");
66STATISTIC(numCommutes , "Number of instruction commuting performed");
67STATISTIC(numExtends , "Number of copies extended");
68STATISTIC(NumReMats , "Number of instructions re-materialized");
69STATISTIC(NumInflated , "Number of register classes inflated");
70STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
71STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
72
73static cl::opt<bool>
74EnableJoining("join-liveintervals",
75 cl::desc("Coalesce copies (default=true)"),
76 cl::init(true));
77
78static cl::opt<bool> UseTerminalRule("terminal-rule",
79 cl::desc("Apply the terminal rule"),
80 cl::init(false), cl::Hidden);
81
82/// Temporary flag to test critical edge unsplitting.
83static cl::opt<bool>
84EnableJoinSplits("join-splitedges",
85 cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
86
87/// Temporary flag to test global copy optimization.
88static cl::opt<cl::boolOrDefault>
89EnableGlobalCopies("join-globalcopies",
90 cl::desc("Coalesce copies that span blocks (default=subtarget)"),
91 cl::init(cl::BOU_UNSET), cl::Hidden);
92
93static cl::opt<bool>
94VerifyCoalescing("verify-coalescing",
95 cl::desc("Verify machine instrs before and after register coalescing"),
96 cl::Hidden);
97
98namespace {
99
100 class RegisterCoalescer : public MachineFunctionPass,
101 private LiveRangeEdit::Delegate {
102 MachineFunction* MF;
103 MachineRegisterInfo* MRI;
104 const TargetRegisterInfo* TRI;
105 const TargetInstrInfo* TII;
106 LiveIntervals *LIS;
107 const MachineLoopInfo* Loops;
108 AliasAnalysis *AA;
109 RegisterClassInfo RegClassInfo;
110
111 /// A LaneMask to remember on which subregister live ranges we need to call
112 /// shrinkToUses() later.
113 LaneBitmask ShrinkMask;
114
115 /// True if the main range of the currently coalesced intervals should be
116 /// checked for smaller live intervals.
117 bool ShrinkMainRange;
118
119 /// \brief True if the coalescer should aggressively coalesce global copies
120 /// in favor of keeping local copies.
121 bool JoinGlobalCopies;
122
123 /// \brief True if the coalescer should aggressively coalesce fall-thru
124 /// blocks exclusively containing copies.
125 bool JoinSplitEdges;
126
127 /// Copy instructions yet to be coalesced.
128 SmallVector<MachineInstr*, 8> WorkList;
129 SmallVector<MachineInstr*, 8> LocalWorkList;
130
131 /// Set of instruction pointers that have been erased, and
132 /// that may be present in WorkList.
133 SmallPtrSet<MachineInstr*, 8> ErasedInstrs;
134
135 /// Dead instructions that are about to be deleted.
136 SmallVector<MachineInstr*, 8> DeadDefs;
137
138 /// Virtual registers to be considered for register class inflation.
139 SmallVector<unsigned, 8> InflateRegs;
140
141 /// Recursively eliminate dead defs in DeadDefs.
142 void eliminateDeadDefs();
143
144 /// LiveRangeEdit callback for eliminateDeadDefs().
145 void LRE_WillEraseInstruction(MachineInstr *MI) override;
146
147 /// Coalesce the LocalWorkList.
148 void coalesceLocals();
149
150 /// Join compatible live intervals
151 void joinAllIntervals();
152
153 /// Coalesce copies in the specified MBB, putting
154 /// copies that cannot yet be coalesced into WorkList.
155 void copyCoalesceInMBB(MachineBasicBlock *MBB);
156
157 /// Tries to coalesce all copies in CurrList. Returns true if any progress
158 /// was made.
159 bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList);
160
161 /// Attempt to join intervals corresponding to SrcReg/DstReg, which are the
162 /// src/dst of the copy instruction CopyMI. This returns true if the copy
163 /// was successfully coalesced away. If it is not currently possible to
164 /// coalesce this interval, but it may be possible if other things get
165 /// coalesced, then it returns true by reference in 'Again'.
166 bool joinCopy(MachineInstr *TheCopy, bool &Again);
167
168 /// Attempt to join these two intervals. On failure, this
169 /// returns false. The output "SrcInt" will not have been modified, so we
170 /// can use this information below to update aliases.
171 bool joinIntervals(CoalescerPair &CP);
172
173 /// Attempt joining two virtual registers. Return true on success.
174 bool joinVirtRegs(CoalescerPair &CP);
175
176 /// Attempt joining with a reserved physreg.
177 bool joinReservedPhysReg(CoalescerPair &CP);
178
179 /// Add the LiveRange @p ToMerge as a subregister liverange of @p LI.
180 /// Subranges in @p LI which only partially interfere with the desired
181 /// LaneMask are split as necessary. @p LaneMask are the lanes that
182 /// @p ToMerge will occupy in the coalesced register. @p LI has its subrange
183 /// lanemasks already adjusted to the coalesced register.
184 void mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge,
185 LaneBitmask LaneMask, CoalescerPair &CP);
186
187 /// Join the liveranges of two subregisters. Joins @p RRange into
188 /// @p LRange, @p RRange may be invalid afterwards.
189 void joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
190 LaneBitmask LaneMask, const CoalescerPair &CP);
191
192 /// We found a non-trivially-coalescable copy. If the source value number is
193 /// defined by a copy from the destination reg, see if we can merge these two
194 /// destination reg valno# into a single value number, eliminating a copy.
195 /// This returns true if an interval was modified.
196 bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
197
198 /// Return true if there are definitions of IntB
199 /// other than BValNo val# that can reach uses of AValno val# of IntA.
200 bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
201 VNInfo *AValNo, VNInfo *BValNo);
202
203 /// We found a non-trivially-coalescable copy.
204 /// If the source value number is defined by a commutable instruction and
205 /// its other operand is coalesced to the copy dest register, see if we
206 /// can transform the copy into a noop by commuting the definition.
207 /// This returns true if an interval was modified.
208 bool removeCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
209
210 /// We found a copy which can be moved to its less frequent predecessor.
211 bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI);
212
213 /// If the source of a copy is defined by a
214 /// trivial computation, replace the copy by rematerializing the definition.
215 bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI,
216 bool &IsDefCopy);
217
218 /// Return true if a copy involving a physreg should be joined.
219 bool canJoinPhys(const CoalescerPair &CP);
220
221 /// Replace all defs and uses of SrcReg to DstReg and update the subregister
222 /// number if it is not zero. If DstReg is a physical register and the
223 /// existing subregister number of the def / use being updated is not zero,
224 /// make sure to set it to the correct physical subregister.
225 void updateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
226
227 /// If the given machine operand reads only undefined lanes, add an undef
228 /// flag.
229 /// This can happen when undef uses were previously concealed by a copy
230 /// which we coalesced. Example:
231 /// %vreg0:sub0<def,read-undef> = ...
232 /// %vreg1 = COPY %vreg0 <-- Coalescing COPY reveals undef
233 /// = use %vreg1:sub1 <-- hidden undef use
234 void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
235 MachineOperand &MO, unsigned SubRegIdx);
236
237 /// Handle copies of undef values.
238 /// Returns true if @p CopyMI was a copy of an undef value and eliminated.
239 bool eliminateUndefCopy(MachineInstr *CopyMI);
240
241 /// Check whether or not we should apply the terminal rule on the
242 /// destination (Dst) of \p Copy.
243 /// When the terminal rule applies, Copy is not profitable to
244 /// coalesce.
245 /// Dst is terminal if it has exactly one affinity (Dst, Src) and
246 /// at least one interference (Dst, Dst2). If Dst is terminal, the
247 /// terminal rule consists in checking that at least one of the
248 /// interfering nodes, say Dst2, has an affinity of equal or greater
249 /// weight with Src.
250 /// In that case, Dst2 and Dst will not be able to be both coalesced
251 /// with Src. Since Dst2 exposes more coalescing opportunities than
252 /// Dst, we can drop \p Copy.
253 bool applyTerminalRule(const MachineInstr &Copy) const;
254
255 /// Wrapper method for \see LiveIntervals::shrinkToUses.
256 /// This method does the proper fixing of the live-ranges when the
257 /// aforementioned method returns true.
258 void shrinkToUses(LiveInterval *LI,
259 SmallVectorImpl<MachineInstr * > *Dead = nullptr) {
260 if (LIS->shrinkToUses(LI, Dead)) {
261 /// Check whether or not \p LI is composed by multiple connected
262 /// components and if that is the case, fix that.
263 SmallVector<LiveInterval*, 8> SplitLIs;
264 LIS->splitSeparateComponents(*LI, SplitLIs);
265 }
266 }
267
268 /// Wrapper Method to do all the necessary work when an Instruction is
269 /// deleted.
270 /// Optimizations should use this to make sure that deleted instructions
271 /// are always accounted for.
272 void deleteInstr(MachineInstr* MI) {
273 ErasedInstrs.insert(MI);
274 LIS->RemoveMachineInstrFromMaps(*MI);
275 MI->eraseFromParent();
276 }
277
278 public:
279 static char ID; ///< Class identification, replacement for typeinfo
280
281 RegisterCoalescer() : MachineFunctionPass(ID) {
282 initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
283 }
284
285 void getAnalysisUsage(AnalysisUsage &AU) const override;
286
287 void releaseMemory() override;
288
289 /// This is the pass entry point.
290 bool runOnMachineFunction(MachineFunction&) override;
291
292 /// Implement the dump method.
293 void print(raw_ostream &O, const Module* = nullptr) const override;
294 };
295
296} // end anonymous namespace
297
298char RegisterCoalescer::ID = 0;
299
300char &llvm::RegisterCoalescerID = RegisterCoalescer::ID;
301
302INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
303                      "Simple Register Coalescing", false, false)
304INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
305INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
306INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
307INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
308INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
309                    "Simple Register Coalescing", false, false)
310
311static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
312 unsigned &Src, unsigned &Dst,
313 unsigned &SrcSub, unsigned &DstSub) {
314 if (MI->isCopy()) {
 28: Calling 'MachineInstr::isCopy'
 32: Returning from 'MachineInstr::isCopy'
 33: Taking false branch
315 Dst = MI->getOperand(0).getReg();
316 DstSub = MI->getOperand(0).getSubReg();
317 Src = MI->getOperand(1).getReg();
318 SrcSub = MI->getOperand(1).getSubReg();
319 } else if (MI->isSubregToReg()) {
 34: Calling 'MachineInstr::isSubregToReg'
 38: Returning from 'MachineInstr::isSubregToReg'
 39: Taking false branch
320 Dst = MI->getOperand(0).getReg();
321 DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
322 MI->getOperand(3).getImm());
323 Src = MI->getOperand(2).getReg();
324 SrcSub = MI->getOperand(2).getSubReg();
325 } else
326 return false;
327 return true;
328}
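The path annotations above (steps 28 through 39) record that, on the reported path, both MI->isCopy() and MI->isSubregToReg() are false, so isMoveInstr() returns false without writing any of its four out-parameters. The hypothetical model below reduces that shape to standalone C++; the actual read of the garbage value happens at line 3216, outside this excerpt, and the callers shown in this file do check the return value.

    // Toy model (invented types) of the out-parameter shape the analyzer
    // tracks through isMoveInstr(): the 'return false' path leaves
    // Src/Dst/SrcSub/DstSub uninitialized in the caller.
    struct InstrModel { bool Copy; bool SubregToReg; };

    static bool isMoveInstrModel(const InstrModel &MI, unsigned &Src,
                                 unsigned &Dst, unsigned &SrcSub,
                                 unsigned &DstSub) {
      if (MI.Copy) {
        Dst = 1; DstSub = 0; Src = 2; SrcSub = 0;
      } else if (MI.SubregToReg) {
        Dst = 1; DstSub = 3; Src = 2; SrcSub = 0;
      } else {
        return false; // Out-parameters untouched on this path.
      }
      return true;
    }

    bool unsafeCaller(const InstrModel &MI) {
      unsigned Src, Dst, SrcSub, DstSub; // Uninitialized at declaration.
      (void)isMoveInstrModel(MI, Src, Dst, SrcSub, DstSub); // Result ignored.
      return DstSub == 0; // Left operand of '==' may be a garbage value.
    }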
329
330/// Return true if this block should be vacated by the coalescer to eliminate
331/// branches. The important cases to handle in the coalescer are critical edges
332/// split during phi elimination which contain only copies. Simple blocks that
333/// contain non-branches should also be vacated, but this can be handled by an
334/// earlier pass similar to early if-conversion.
335static bool isSplitEdge(const MachineBasicBlock *MBB) {
336 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
337 return false;
338
339 for (const auto &MI : *MBB) {
340 if (!MI.isCopyLike() && !MI.isUnconditionalBranch())
341 return false;
342 }
343 return true;
344}
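As a sanity check of the predicate above, here is a reduced standalone model with invented types: a block is vacatable only when it is a pure pass-through (exactly one predecessor and one successor) and every instruction in it is copy-like or an unconditional branch.

    #include <vector>

    struct InstrModel { bool CopyLike; bool UncondBranch; };

    struct BlockModel {
      unsigned Preds;
      unsigned Succs;
      std::vector<InstrModel> Instrs;
    };

    static bool isSplitEdgeModel(const BlockModel &MBB) {
      if (MBB.Preds != 1 || MBB.Succs != 1)
        return false; // Not a pure pass-through block.
      for (const InstrModel &MI : MBB.Instrs)
        if (!MI.CopyLike && !MI.UncondBranch)
          return false; // Any other instruction disqualifies the block.
      return true;
    }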
345
346bool CoalescerPair::setRegisters(const MachineInstr *MI) {
347 SrcReg = DstReg = 0;
348 SrcIdx = DstIdx = 0;
349 NewRC = nullptr;
350 Flipped = CrossClass = false;
351
352 unsigned Src, Dst, SrcSub, DstSub;
353 if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
354 return false;
355 Partial = SrcSub || DstSub;
356
357 // If one register is a physreg, it must be Dst.
358 if (TargetRegisterInfo::isPhysicalRegister(Src)) {
359 if (TargetRegisterInfo::isPhysicalRegister(Dst))
360 return false;
361 std::swap(Src, Dst);
362 std::swap(SrcSub, DstSub);
363 Flipped = true;
364 }
365
366 const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
367
368 if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
369 // Eliminate DstSub on a physreg.
370 if (DstSub) {
371 Dst = TRI.getSubReg(Dst, DstSub);
372 if (!Dst) return false;
373 DstSub = 0;
374 }
375
376 // Eliminate SrcSub by picking a corresponding Dst superregister.
377 if (SrcSub) {
378 Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
379 if (!Dst) return false;
380 } else if (!MRI.getRegClass(Src)->contains(Dst)) {
381 return false;
382 }
383 } else {
384 // Both registers are virtual.
385 const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
386 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
387
388 // Both registers have subreg indices.
389 if (SrcSub && DstSub) {
390 // Copies between different sub-registers are never coalescable.
391 if (Src == Dst && SrcSub != DstSub)
392 return false;
393
394 NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
395 SrcIdx, DstIdx);
396 if (!NewRC)
397 return false;
398 } else if (DstSub) {
399 // SrcReg will be merged with a sub-register of DstReg.
400 SrcIdx = DstSub;
401 NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
402 } else if (SrcSub) {
403 // DstReg will be merged with a sub-register of SrcReg.
404 DstIdx = SrcSub;
405 NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
406 } else {
407 // This is a straight copy without sub-registers.
408 NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
409 }
410
411 // The combined constraint may be impossible to satisfy.
412 if (!NewRC)
413 return false;
414
415 // Prefer SrcReg to be a sub-register of DstReg.
416 // FIXME: Coalescer should support subregs symmetrically.
417 if (DstIdx && !SrcIdx) {
418 std::swap(Src, Dst);
419 std::swap(SrcIdx, DstIdx);
420 Flipped = !Flipped;
421 }
422
423 CrossClass = NewRC != DstRC || NewRC != SrcRC;
424 }
425 // Check our invariants
426 assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
427 assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
428          "Cannot have a physical SubIdx");
429 SrcReg = Src;
430 DstReg = Dst;
431 return true;
432}
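The normalization at the top of setRegisters() deserves isolating: when exactly one side of the copy is a physical register, the pair is flipped so that the physreg always ends up as Dst, and Flipped records the exchange so callers can recover the original orientation. A reduced sketch under invented names follows, with isPhysical() standing in for TargetRegisterInfo::isPhysicalRegister and all register-class and subregister handling omitted.

    #include <utility>

    struct PairModel {
      unsigned SrcReg = 0;
      unsigned DstReg = 0;
      bool Flipped = false;
    };

    static bool isPhysical(unsigned Reg) { return Reg < 1024; } // Stand-in only.

    static bool normalizeCopy(unsigned Src, unsigned Dst, PairModel &CP) {
      if (isPhysical(Src)) {
        if (isPhysical(Dst))
          return false;    // Physreg-to-physreg copies are never coalesced.
        std::swap(Src, Dst);
        CP.Flipped = true; // Remember that the operands were exchanged.
      }
      CP.SrcReg = Src;     // Invariant from here on: Src is virtual.
      CP.DstReg = Dst;
      return true;
    }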
433
434bool CoalescerPair::flip() {
435 if (TargetRegisterInfo::isPhysicalRegister(DstReg))
436 return false;
437 std::swap(SrcReg, DstReg);
438 std::swap(SrcIdx, DstIdx);
439 Flipped = !Flipped;
440 return true;
441}
442
443bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
444 if (!MI)
445 return false;
446 unsigned Src, Dst, SrcSub, DstSub;
447 if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
448 return false;
449
450 // Find the virtual register that is SrcReg.
451 if (Dst == SrcReg) {
452 std::swap(Src, Dst);
453 std::swap(SrcSub, DstSub);
454 } else if (Src != SrcReg) {
455 return false;
456 }
457
458 // Now check that Dst matches DstReg.
459 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
460 if (!TargetRegisterInfo::isPhysicalRegister(Dst))
461 return false;
462 assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
463 // DstSub could be set for a physreg from INSERT_SUBREG.
464 if (DstSub)
465 Dst = TRI.getSubReg(Dst, DstSub);
466 // Full copy of Src.
467 if (!SrcSub)
468 return DstReg == Dst;
469 // This is a partial register copy. Check that the parts match.
470 return TRI.getSubReg(DstReg, SrcSub) == Dst;
471 } else {
472 // DstReg is virtual.
473 if (DstReg != Dst)
474 return false;
475 // Registers match, do the subregisters line up?
476 return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
477 TRI.composeSubRegIndices(DstIdx, DstSub);
478 }
479}
480
481void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
482 AU.setPreservesCFG();
483 AU.addRequired<AAResultsWrapperPass>();
484 AU.addRequired<LiveIntervals>();
485 AU.addPreserved<LiveIntervals>();
486 AU.addPreserved<SlotIndexes>();
487 AU.addRequired<MachineLoopInfo>();
488 AU.addPreserved<MachineLoopInfo>();
489 AU.addPreservedID(MachineDominatorsID);
490 MachineFunctionPass::getAnalysisUsage(AU);
491}
492
493void RegisterCoalescer::eliminateDeadDefs() {
494 SmallVector<unsigned, 8> NewRegs;
495 LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
496 nullptr, this).eliminateDeadDefs(DeadDefs);
497}
498
499void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
500 // MI may be in WorkList. Make sure we don't visit it.
501 ErasedInstrs.insert(MI);
502}
503
504bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
505 MachineInstr *CopyMI) {
506 assert(!CP.isPartial() && "This doesn't work for partial copies.");
507 assert(!CP.isPhys() && "This doesn't work for physreg copies.");
508
509 LiveInterval &IntA =
510 LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
511 LiveInterval &IntB =
512 LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
513 SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
514
515 // We have a non-trivially-coalescable copy with IntA being the source and
516 // IntB being the dest, thus this defines a value number in IntB. If the
517 // source value number (in IntA) is defined by a copy from B, see if we can
518 // merge these two pieces of B into a single value number, eliminating a copy.
519 // For example:
520 //
521 // A3 = B0
522 // ...
523 // B1 = A3 <- this copy
524 //
525 // In this case, B0 can be extended to where the B1 copy lives, allowing the
526 // B1 value number to be replaced with B0 (which simplifies the B
527 // liveinterval).
528
529 // BValNo is a value number in B that is defined by a copy from A. 'B1' in
530 // the example above.
531 LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx);
532 if (BS == IntB.end()) return false;
533 VNInfo *BValNo = BS->valno;
534
535 // Get the location that B is defined at. Two options: either this value has
536 // an unknown definition point or it is defined at CopyIdx. If unknown, we
537 // can't process it.
538 if (BValNo->def != CopyIdx) return false;
539
540 // AValNo is the value number in A that defines the copy, A3 in the example.
541 SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
542 LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx);
543 // The live segment might not exist after fun with physreg coalescing.
544 if (AS == IntA.end()) return false;
545 VNInfo *AValNo = AS->valno;
546
547 // If AValNo is defined as a copy from IntB, we can potentially process this.
548 // Get the instruction that defines this value number.
549 MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
550 // Don't allow any partial copies, even if isCoalescable() allows them.
551 if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
552 return false;
553
554 // Get the Segment in IntB that this value number starts with.
555 LiveInterval::iterator ValS =
556 IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
557 if (ValS == IntB.end())
558 return false;
559
560 // Make sure that the end of the live segment is inside the same block as
561 // CopyMI.
562 MachineInstr *ValSEndInst =
563 LIS->getInstructionFromIndex(ValS->end.getPrevSlot());
564 if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent())
565 return false;
566
567 // Okay, we now know that ValS ends in the same block where the CopyMI
568 // live-range starts. If there are no intervening live segments between them
569 // in IntB, we can merge them.
570 if (ValS+1 != BS) return false;
571
572 DEBUG(dbgs() << "Extending: " << PrintReg(IntB.reg, TRI));
573
574 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
575 // We are about to delete CopyMI, so need to remove it as the 'instruction
576 // that defines this value #'. Update the valnum with the new defining
577 // instruction #.
578 BValNo->def = FillerStart;
579
580 // Okay, we can merge them. We need to insert a new liverange:
581 // [ValS.end, BS.begin) of either value number, then we merge the
582 // two value numbers.
583 IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo));
584
585 // Okay, merge "B1" into the same value number as "B0".
586 if (BValNo != ValS->valno)
587 IntB.MergeValueNumberInto(BValNo, ValS->valno);
588
589 // Do the same for the subregister segments.
590 for (LiveInterval::SubRange &S : IntB.subranges()) {
591 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
592 S.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, SubBValNo));
593 VNInfo *SubValSNo = S.getVNInfoAt(AValNo->def.getPrevSlot());
594 if (SubBValNo != SubValSNo)
595 S.MergeValueNumberInto(SubBValNo, SubValSNo);
596 }
597
598 DEBUG(dbgs() << " result = " << IntB << '\n');
599
600 // If the source instruction was killing the source register before the
601 // merge, unset the isKill marker given the live range has been extended.
602 int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg, true);
603 if (UIdx != -1) {
604 ValSEndInst->getOperand(UIdx).setIsKill(false);
605 }
606
607 // Rewrite the copy. If the copy instruction was killing the destination
608 // register before the merge, find the last use and trim the live range. That
609 // will also add the isKill marker.
610 CopyMI->substituteRegister(IntA.reg, IntB.reg, 0, *TRI);
611 if (AS->end == CopyIdx)
612 shrinkToUses(&IntA);
613
614 ++numExtends;
615 return true;
616}
617
618bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
619 LiveInterval &IntB,
620 VNInfo *AValNo,
621 VNInfo *BValNo) {
622 // If AValNo has PHI kills, conservatively assume that IntB defs can reach
623 // the PHI values.
624 if (LIS->hasPHIKill(IntA, AValNo))
625 return true;
626
627 for (LiveRange::Segment &ASeg : IntA.segments) {
628 if (ASeg.valno != AValNo) continue;
629 LiveInterval::iterator BI =
630 std::upper_bound(IntB.begin(), IntB.end(), ASeg.start);
631 if (BI != IntB.begin())
632 --BI;
633 for (; BI != IntB.end() && ASeg.end >= BI->start; ++BI) {
634 if (BI->valno == BValNo)
635 continue;
636 if (BI->start <= ASeg.start && BI->end > ASeg.start)
637 return true;
638 if (BI->start > ASeg.start && BI->start < ASeg.end)
639 return true;
640 }
641 }
642 return false;
643}
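The two range tests inside the loop above jointly implement the standard overlap predicate for half-open segments, split into two cases around ASeg.start. For nonempty segments [a1, a2) and [b1, b2) the disjunction is equivalent to b1 < a2 && b2 > a1, as this reduced integer model (SlotIndex ordering replaced by plain <) illustrates:

    #include <cassert>

    // Mirrors the pair of checks in hasOtherReachingDefs().
    static bool overlapsHalfOpen(int a1, int a2, int b1, int b2) {
      bool coversStart  = b1 <= a1 && b2 > a1; // BI covers ASeg.start.
      bool startsInside = b1 > a1 && b1 < a2;  // BI starts inside ASeg.
      return coversStart || startsInside;
    }

    int main() {
      assert(overlapsHalfOpen(10, 20, 5, 11));   // B covers A's start.
      assert(overlapsHalfOpen(10, 20, 15, 30));  // B starts inside A.
      assert(!overlapsHalfOpen(10, 20, 20, 25)); // Touching ends: no overlap.
      assert(!overlapsHalfOpen(10, 20, 0, 10));  // B ends exactly at A's start.
      return 0;
    }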
644
645/// Copy segments with value number @p SrcValNo from live range @p Src to live
646/// range @p Dst and use value number @p DstValNo there.
647static void addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo,
648 const LiveRange &Src, const VNInfo *SrcValNo) {
649 for (const LiveRange::Segment &S : Src.segments) {
650 if (S.valno != SrcValNo)
651 continue;
652 Dst.addSegment(LiveRange::Segment(S.start, S.end, DstValNo));
653 }
654}
655
656bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
657 MachineInstr *CopyMI) {
658 assert(!CP.isPhys());
659
660 LiveInterval &IntA =
661 LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
662 LiveInterval &IntB =
663 LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
664
665 // We found a non-trivially-coalescable copy with IntA being the source and
666 // IntB being the dest, thus this defines a value number in IntB. If the
667 // source value number (in IntA) is defined by a commutable instruction and
668 // its other operand is coalesced to the copy dest register, see if we can
669 // transform the copy into a noop by commuting the definition. For example,
670 //
671 // A3 = op A2 B0<kill>
672 // ...
673 // B1 = A3 <- this copy
674 // ...
675 // = op A3 <- more uses
676 //
677 // ==>
678 //
679 // B2 = op B0 A2<kill>
680 // ...
681 // B1 = B2 <- now an identity copy
682 // ...
683 // = op B2 <- more uses
684
685 // BValNo is a value number in B that is defined by a copy from A. 'B1' in
686 // the example above.
687 SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
688 VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
689 assert(BValNo != nullptr && BValNo->def == CopyIdx);
690
691 // AValNo is the value number in A that defines the copy, A3 in the example.
692 VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
693 assert(AValNo && !AValNo->isUnused() && "COPY source not live");
694 if (AValNo->isPHIDef())
695 return false;
696 MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
697 if (!DefMI)
698 return false;
699 if (!DefMI->isCommutable())
700 return false;
701 // If DefMI is a two-address instruction then commuting it will change the
702 // destination register.
703 int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
704 assert(DefIdx != -1);
705 unsigned UseOpIdx;
706 if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
707 return false;
708
709 // FIXME: The code below tries to commute 'UseOpIdx' operand with some other
710 // commutable operand which is expressed by the 'CommuteAnyOperandIndex' value
711 // passed to the method. That _other_ operand is chosen by
712 // the findCommutedOpIndices() method.
713 //
714 // That is obviously an area for improvement in case of instructions having
715 // more than 2 operands. For example, if some instruction has 3 commutable
716 // operands then all possible variants (i.e. op#1<->op#2, op#1<->op#3,
717 // op#2<->op#3) of commute transformation should be considered/tried here.
718 unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex;
719 if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))
720 return false;
721
722 MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
723 unsigned NewReg = NewDstMO.getReg();
724 if (NewReg != IntB.reg || !IntB.Query(AValNo->def).isKill())
725 return false;
726
727 // Make sure there are no other definitions of IntB that would reach the
728 // uses which the new definition can reach.
729 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
730 return false;
731
732 // If some of the uses of IntA.reg are already coalesced away, return false.
733 // It's not possible to determine whether it's safe to perform the coalescing.
734 for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg)) {
735 MachineInstr *UseMI = MO.getParent();
736 unsigned OpNo = &MO - &UseMI->getOperand(0);
737 SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);
738 LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
739 if (US == IntA.end() || US->valno != AValNo)
740 continue;
741 // If this use is tied to a def, we can't rewrite the register.
742 if (UseMI->isRegTiedToDefOperand(OpNo))
743 return false;
744 }
745
746 DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
747              << *DefMI);
748
749 // At this point we have decided that it is legal to do this
750 // transformation. Start by commuting the instruction.
751 MachineBasicBlock *MBB = DefMI->getParent();
752 MachineInstr *NewMI =
753 TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
754 if (!NewMI)
755 return false;
756 if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
757 TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
758 !MRI->constrainRegClass(IntB.reg, MRI->getRegClass(IntA.reg)))
759 return false;
760 if (NewMI != DefMI) {
761 LIS->ReplaceMachineInstrInMaps(*DefMI, *NewMI);
762 MachineBasicBlock::iterator Pos = DefMI;
763 MBB->insert(Pos, NewMI);
764 MBB->erase(DefMI);
765 }
766
767 // If ALR and BLR overlap and the end of BLR extends beyond the end of ALR, e.g.
768 // A = or A, B
769 // ...
770 // B = A
771 // ...
772 // C = A<kill>
773 // ...
774 // = B
775
776 // Update uses of IntA of the specific Val# with IntB.
777 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg),
778 UE = MRI->use_end();
779 UI != UE; /* ++UI is below because of possible MI removal */) {
780 MachineOperand &UseMO = *UI;
781 ++UI;
782 if (UseMO.isUndef())
783 continue;
784 MachineInstr *UseMI = UseMO.getParent();
785 if (UseMI->isDebugValue()) {
786 // FIXME These don't have an instruction index. Not clear we have enough
787 // info to decide whether to do this replacement or not. For now do it.
788 UseMO.setReg(NewReg);
789 continue;
790 }
791 SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(true);
792 LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
793 assert(US != IntA.end() && "Use must be live");
794 if (US->valno != AValNo)
795 continue;
796 // Kill flags are no longer accurate. They are recomputed after RA.
797 UseMO.setIsKill(false);
798 if (TargetRegisterInfo::isPhysicalRegister(NewReg))
799 UseMO.substPhysReg(NewReg, *TRI);
800 else
801 UseMO.setReg(NewReg);
802 if (UseMI == CopyMI)
803 continue;
804 if (!UseMI->isCopy())
805 continue;
806 if (UseMI->getOperand(0).getReg() != IntB.reg ||
807 UseMI->getOperand(0).getSubReg())
808 continue;
809
810 // This copy will become a noop. If it's defining a new val#, merge it into
811 // BValNo.
812 SlotIndex DefIdx = UseIdx.getRegSlot();
813 VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
814 if (!DVNI)
815 continue;
816 DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
817 assert(DVNI->def == DefIdx);
818 BValNo = IntB.MergeValueNumberInto(DVNI, BValNo);
819 for (LiveInterval::SubRange &S : IntB.subranges()) {
820 VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
821 if (!SubDVNI)
822 continue;
823 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
824 assert(SubBValNo->def == CopyIdx);
825 S.MergeValueNumberInto(SubDVNI, SubBValNo);
826 }
827
828 deleteInstr(UseMI);
829 }
830
831 // Extend BValNo by merging in IntA live segments of AValNo. Val# definition
832 // is updated.
833 BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
834 if (IntB.hasSubRanges()) {
835 if (!IntA.hasSubRanges()) {
836 LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(IntA.reg);
837 IntA.createSubRangeFrom(Allocator, Mask, IntA);
838 }
839 SlotIndex AIdx = CopyIdx.getRegSlot(true);
840 for (LiveInterval::SubRange &SA : IntA.subranges()) {
841 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
842 assert(ASubValNo != nullptr);
843
844 IntB.refineSubRanges(Allocator, SA.LaneMask,
845 [&Allocator,&SA,CopyIdx,ASubValNo](LiveInterval::SubRange &SR) {
846 VNInfo *BSubValNo = SR.empty()
847 ? SR.getNextValue(CopyIdx, Allocator)
848 : SR.getVNInfoAt(CopyIdx);
849 assert(BSubValNo != nullptr);
850 addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
851 });
852 }
853 }
854
855 BValNo->def = AValNo->def;
856 addSegmentsWithValNo(IntB, BValNo, IntA, AValNo);
857 DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
858
859 LIS->removeVRegDefAt(IntA, AValNo->def);
860
861 DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
862 ++numCommutes;
863 return true;
864}
865
866/// For copy B = A in BB2, if A is defined by A = B in BB0 which is a
867/// predecessor of BB2, and if B is not redefined on the way from A = B
868/// in BB0 to B = A in BB2, B = A in BB2 is partially redundant if the
869/// execution goes through the path from BB0 to BB2. We may move B = A
870/// to the predecessor without such reversed copy.
871/// So we will transform the program from:
872/// BB0:
873///      A = B;    BB1:
874///       ...         ...
875///        /     \      /
876///         BB2:
877///         ...
878///         B = A;
879///
880/// to:
881///
882/// BB0:          BB1:
883///      A = B;    ...
884///       ...      B = A;
885///        /     \      /
886///         BB2:
887///         ...
888///
889/// A special case is when BB0 and BB2 are the same BB which is the only
890/// BB in a loop:
891/// BB1:
892///      ...
893/// BB0/BB2:  ----
894///      B = A;   |
895///      ...      |
896///      A = B;   |
897///       |-------
898///       |
899/// We may hoist B = A from BB0/BB2 to BB1.
900///
901/// The major preconditions for correctness to remove such partial
902/// redundancy include:
903/// 1. A in B = A in BB2 is defined by a PHI in BB2, and one operand of
904/// the PHI is defined by the reversed copy A = B in BB0.
905/// 2. No B is referenced from the start of BB2 to B = A.
906/// 3. No B is defined from A = B to the end of BB0.
907/// 4. BB1 has only one successor.
908///
909/// 2 and 4 implicitly ensure B is not live at the end of BB1.
910/// 4 guarantees BB2 is hotter than BB1, so we can only move a copy to a
911/// colder place, which not only prevents endless loops but also makes sure
912/// the movement of the copy is beneficial.
913bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
914 MachineInstr &CopyMI) {
915 assert(!CP.isPhys());
916 if (!CopyMI.isFullCopy())
917 return false;
918
919 MachineBasicBlock &MBB = *CopyMI.getParent();
920 if (MBB.isEHPad())
921 return false;
922
923 if (MBB.pred_size() != 2)
924 return false;
925
926 LiveInterval &IntA =
927 LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
928 LiveInterval &IntB =
929 LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
930
931 // A is defined by PHI at the entry of MBB.
932 SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true);
933 VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx);
934 assert(AValNo && !AValNo->isUnused() && "COPY source not live");
935 if (!AValNo->isPHIDef())
936 return false;
937
938 // No B is referenced before CopyMI in MBB.
939 if (IntB.overlaps(LIS->getMBBStartIdx(&MBB), CopyIdx))
940 return false;
941
942 // MBB has two predecessors: one contains A = B so no copy will be inserted
943 // for it. The other one will have a copy moved from MBB.
944 bool FoundReverseCopy = false;
945 MachineBasicBlock *CopyLeftBB = nullptr;
946 for (MachineBasicBlock *Pred : MBB.predecessors()) {
947 VNInfo *PVal = IntA.getVNInfoBefore(LIS->getMBBEndIdx(Pred));
948 MachineInstr *DefMI = LIS->getInstructionFromIndex(PVal->def);
949 if (!DefMI || !DefMI->isFullCopy()) {
950 CopyLeftBB = Pred;
951 continue;
952 }
953 // Check DefMI is a reverse copy and it is in BB Pred.
954 if (DefMI->getOperand(0).getReg() != IntA.reg ||
955 DefMI->getOperand(1).getReg() != IntB.reg ||
956 DefMI->getParent() != Pred) {
957 CopyLeftBB = Pred;
958 continue;
959 }
960 // If there is any other def of B after DefMI and before the end of Pred,
961 // we need to keep the copy of B = A at the end of Pred if we remove
962 // B = A from MBB.
963 bool ValB_Changed = false;
964 for (auto VNI : IntB.valnos) {
965 if (VNI->isUnused())
966 continue;
967 if (PVal->def < VNI->def && VNI->def < LIS->getMBBEndIdx(Pred)) {
968 ValB_Changed = true;
969 break;
970 }
971 }
972 if (ValB_Changed) {
973 CopyLeftBB = Pred;
974 continue;
975 }
976 FoundReverseCopy = true;
977 }
978
979 // If no reverse copy is found in predecessors, nothing to do.
980 if (!FoundReverseCopy)
981 return false;
982
983 // If CopyLeftBB is nullptr, it means every predecessor of MBB contains a
984 // reverse copy, so CopyMI can be removed trivially once IntA/IntB are updated.
985 // If CopyLeftBB is not nullptr, move CopyMI from MBB to CopyLeftBB and
986 // update IntA/IntB.
987 //
988 // If CopyLeftBB is not nullptr, ensure CopyLeftBB has a single succ so
989 // MBB is hotter than CopyLeftBB.
990 if (CopyLeftBB && CopyLeftBB->succ_size() > 1)
991 return false;
992
993 // Now ok to move copy.
994 if (CopyLeftBB) {
995 DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to BB#"
996              << CopyLeftBB->getNumber() << '\t' << CopyMI);
997
998 // Insert new copy to CopyLeftBB.
999 auto InsPos = CopyLeftBB->getFirstTerminator();
1000 MachineInstr *NewCopyMI = BuildMI(*CopyLeftBB, InsPos, CopyMI.getDebugLoc(),
1001 TII->get(TargetOpcode::COPY), IntB.reg)
1002 .addReg(IntA.reg);
1003 SlotIndex NewCopyIdx =
1004 LIS->InsertMachineInstrInMaps(*NewCopyMI).getRegSlot();
1005 IntB.createDeadDef(NewCopyIdx, LIS->getVNInfoAllocator());
1006 for (LiveInterval::SubRange &SR : IntB.subranges())
1007 SR.createDeadDef(NewCopyIdx, LIS->getVNInfoAllocator());
1008
1009 // If the newly created Instruction reuses the address of an instruction that
1010 // was deleted before (the object was recycled by the allocator), it needs to
1011 // be removed from the deleted list.
1012 ErasedInstrs.erase(NewCopyMI);
1013 } else {
1014 DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from BB#"
1015              << MBB.getNumber() << '\t' << CopyMI);
1016 }
1017
1018 // Remove CopyMI.
1019 // Note: It is fine to remove the copy before updating the live-ranges.
1020 // While updating the live-ranges, we only look at slot indices and
1021 // never go back to the instruction.
1022 // Mark instructions as deleted.
1023 deleteInstr(&CopyMI);
1024
1025 // Update the liveness.
1026 SmallVector<SlotIndex, 8> EndPoints;
1027 VNInfo *BValNo = IntB.Query(CopyIdx).valueOutOrDead();
1028 LIS->pruneValue(*static_cast<LiveRange *>(&IntB), CopyIdx.getRegSlot(),
1029 &EndPoints);
1030 BValNo->markUnused();
1031 // Extend IntB to the EndPoints of its original live interval.
1032 LIS->extendToIndices(IntB, EndPoints);
1033
1034 // Now, do the same for its subranges.
1035 for (LiveInterval::SubRange &SR : IntB.subranges()) {
1036 EndPoints.clear();
1037 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1038 assert(BValNo && "All sublanes should be live");
1039 LIS->pruneValue(SR, CopyIdx.getRegSlot(), &EndPoints);
1040 BValNo->markUnused();
1041 LIS->extendToIndices(SR, EndPoints);
1042 }
1043
1044 // Finally, update the live-range of IntA.
1045 shrinkToUses(&IntA);
1046 return true;
1047}
1048
1049/// Returns true if @p MI defines the full vreg @p Reg, as opposed to just
1050/// defining a subregister.
1051static bool definesFullReg(const MachineInstr &MI, unsigned Reg) {
1052 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) &&
1053        "This code cannot handle physreg aliasing");
1054 for (const MachineOperand &Op : MI.operands()) {
1055 if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg)
1056 continue;
1057 // Return true if we define the full register or don't care about the value
1058 // inside other subregisters.
1059 if (Op.getSubReg() == 0 || Op.isUndef())
1060 return true;
1061 }
1062 return false;
1063}
1064
1065bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
1066 MachineInstr *CopyMI,
1067 bool &IsDefCopy) {
1068 IsDefCopy = false;
1069 unsigned SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg();
1070 unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx();
1071 unsigned DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg();
1072 unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx();
1073 if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
1074 return false;
1075
1076 LiveInterval &SrcInt = LIS->getInterval(SrcReg);
1077 SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI);
1078 VNInfo *ValNo = SrcInt.Query(CopyIdx).valueIn();
1079 assert(ValNo && "CopyMI input register not live");
1080 if (ValNo->isPHIDef() || ValNo->isUnused())
1081 return false;
1082 MachineInstr *DefMI = LIS->getInstructionFromIndex(ValNo->def);
1083 if (!DefMI)
1084 return false;
1085 if (DefMI->isCopyLike()) {
1086 IsDefCopy = true;
1087 return false;
1088 }
1089 if (!TII->isAsCheapAsAMove(*DefMI))
1090 return false;
1091 if (!TII->isTriviallyReMaterializable(*DefMI, AA))
1092 return false;
1093 if (!definesFullReg(*DefMI, SrcReg))
1094 return false;
1095 bool SawStore = false;
1096 if (!DefMI->isSafeToMove(AA, SawStore))
1097 return false;
1098 const MCInstrDesc &MCID = DefMI->getDesc();
1099 if (MCID.getNumDefs() != 1)
1100 return false;
1101 // Only support subregister destinations when the def is read-undef.
1102 MachineOperand &DstOperand = CopyMI->getOperand(0);
1103 unsigned CopyDstReg = DstOperand.getReg();
1104 if (DstOperand.getSubReg() && !DstOperand.isUndef())
1105 return false;
1106
1107 // If both SrcIdx and DstIdx are set, correct rematerialization would widen
1108 // the register substantially (beyond both source and dest size). This is bad
1109 // for performance since it can cascade through a function, introducing many
1110 // extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers
1111 // around after a few subreg copies).
1112 if (SrcIdx && DstIdx)
1113 return false;
1114
1115 const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF);
1116 if (!DefMI->isImplicitDef()) {
1117 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
1118 unsigned NewDstReg = DstReg;
1119
1120 unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(),
1121 DefMI->getOperand(0).getSubReg());
1122 if (NewDstIdx)
1123 NewDstReg = TRI->getSubReg(DstReg, NewDstIdx);
1124
1125 // Finally, make sure that the physical subregister that will be
1126 // constructed later is permitted for the instruction.
1127 if (!DefRC->contains(NewDstReg))
1128 return false;
1129 } else {
1130 // Theoretically, some stack frame reference could exist. Just make sure
1131 // it hasn't actually happened.
1132 assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
1133        "Only expect to deal with virtual or physical registers");
1134 }
1135 }
1136
1137 DebugLoc DL = CopyMI->getDebugLoc();
1138 MachineBasicBlock *MBB = CopyMI->getParent();
1139 MachineBasicBlock::iterator MII =
1140 std::next(MachineBasicBlock::iterator(CopyMI));
1141 TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, *DefMI, *TRI);
1142 MachineInstr &NewMI = *std::prev(MII);
1143 NewMI.setDebugLoc(DL);
1144
1145 // In a situation like the following:
1146 // %vreg0:subreg = instr ; DefMI, subreg = DstIdx
1147 // %vreg1 = copy %vreg0:subreg ; CopyMI, SrcIdx = 0
1148 // instead of widening %vreg1 to the register class of %vreg0 simply do:
1149 // %vreg1 = instr
1150 const TargetRegisterClass *NewRC = CP.getNewRC();
1151 if (DstIdx != 0) {
1152 MachineOperand &DefMO = NewMI.getOperand(0);
1153 if (DefMO.getSubReg() == DstIdx) {
1154 assert(SrcIdx == 0 && CP.isFlipped()
1155        && "Shouldn't have SrcIdx+DstIdx at this point");
1156 const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
1157 const TargetRegisterClass *CommonRC =
1158 TRI->getCommonSubClass(DefRC, DstRC);
1159 if (CommonRC != nullptr) {
1160 NewRC = CommonRC;
1161 DstIdx = 0;
1162 DefMO.setSubReg(0);
1163 DefMO.setIsUndef(false); // Only subregs can have def+undef.
1164 }
1165 }
1166 }
1167
1168 // CopyMI may have implicit operands, save them so that we can transfer them
1169 // over to the newly materialized instruction after CopyMI is removed.
1170 SmallVector<MachineOperand, 4> ImplicitOps;
1171 ImplicitOps.reserve(CopyMI->getNumOperands() -
1172 CopyMI->getDesc().getNumOperands());
1173 for (unsigned I = CopyMI->getDesc().getNumOperands(),
1174 E = CopyMI->getNumOperands();
1175 I != E; ++I) {
1176 MachineOperand &MO = CopyMI->getOperand(I);
1177 if (MO.isReg()) {
1178 assert(MO.isImplicit() && "No explicit operands after implicit operands.");
1179 // Discard VReg implicit defs.
1180 if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
1181 ImplicitOps.push_back(MO);
1182 }
1183 }
1184
1185 LIS->ReplaceMachineInstrInMaps(*CopyMI, NewMI);
1186 CopyMI->eraseFromParent();
1187 ErasedInstrs.insert(CopyMI);
1188
1189 // NewMI may have dead implicit defs (E.g. EFLAGS for MOV<bits>r0 on X86).
1190 // We need to remember these so we can add intervals once we insert
1191 // NewMI into SlotIndexes.
1192 SmallVector<unsigned, 4> NewMIImplDefs;
1193 for (unsigned i = NewMI.getDesc().getNumOperands(),
1194 e = NewMI.getNumOperands();
1195 i != e; ++i) {
1196 MachineOperand &MO = NewMI.getOperand(i);
1197 if (MO.isReg() && MO.isDef()) {
1198      assert(MO.isImplicit() && MO.isDead() &&
1199             TargetRegisterInfo::isPhysicalRegister(MO.getReg()));
1200 NewMIImplDefs.push_back(MO.getReg());
1201 }
1202 }
1203
1204 if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
1205 unsigned NewIdx = NewMI.getOperand(0).getSubReg();
1206
1207 if (DefRC != nullptr) {
1208 if (NewIdx)
1209 NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
1210 else
1211 NewRC = TRI->getCommonSubClass(NewRC, DefRC);
1212      assert(NewRC && "subreg chosen for remat incompatible with instruction");
1213 }
1214 // Remap subranges to new lanemask and change register class.
1215 LiveInterval &DstInt = LIS->getInterval(DstReg);
1216 for (LiveInterval::SubRange &SR : DstInt.subranges()) {
1217 SR.LaneMask = TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask);
1218 }
1219 MRI->setRegClass(DstReg, NewRC);
1220
1221 // Update machine operands and add flags.
1222 updateRegDefsUses(DstReg, DstReg, DstIdx);
1223 NewMI.getOperand(0).setSubReg(NewIdx);
1224 // Add dead subregister definitions if we are defining the whole register
1225 // but only part of it is live.
1226    // This can happen if the rematerialized instruction defines more of the
1227    // register than is actually used.
1228 // An example would be:
1229 // vreg1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
1230 // ; Copying only part of the register here, but the rest is undef.
1231 // vreg2:sub_16bit<def, read-undef> = COPY vreg1:sub_16bit
1232 // ==>
1233 // ; Materialize all the constants but only using one
1234 // vreg2 = LOAD_CONSTANTS 5, 8
1235 //
1236    // at this point the subranges covering the previously undefined lanes
1237    // may be missing a definition.
1238 if (NewIdx == 0 && DstInt.hasSubRanges()) {
1239 SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI);
1240 SlotIndex DefIndex =
1241 CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber());
1242 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(DstReg);
1243 VNInfo::Allocator& Alloc = LIS->getVNInfoAllocator();
1244 for (LiveInterval::SubRange &SR : DstInt.subranges()) {
1245 if (!SR.liveAt(DefIndex))
1246 SR.createDeadDef(DefIndex, Alloc);
1247 MaxMask &= ~SR.LaneMask;
1248 }
1249 if (MaxMask.any()) {
1250 LiveInterval::SubRange *SR = DstInt.createSubRange(Alloc, MaxMask);
1251 SR->createDeadDef(DefIndex, Alloc);
1252 }
1253 }
1254
1255    // Make sure that any subrange that has become undef as a result is removed.
1256 // For example:
1257 // vreg1:sub1<def,read-undef> = LOAD CONSTANT 1
1258 // vreg2<def> = COPY vreg1
1259 // ==>
1260 // vreg2:sub1<def, read-undef> = LOAD CONSTANT 1
1261 // ; Correct but need to remove the subrange for vreg2:sub0
1262 // ; as it is now undef
1263 if (NewIdx != 0 && DstInt.hasSubRanges()) {
1264 // The affected subregister segments can be removed.
1265 SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI);
1266 LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(NewIdx);
1267 bool UpdatedSubRanges = false;
1268 for (LiveInterval::SubRange &SR : DstInt.subranges()) {
1269 if ((SR.LaneMask & DstMask).none()) {
1270          DEBUG(dbgs() << "Removing undefined SubRange "
1271                << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n");
1272          // A value number is live here - remove it and any of this SubRange's segments carrying it.
1273 if (VNInfo *RmValNo = SR.getVNInfoAt(CurrIdx.getRegSlot())) {
1274 SR.removeValNo(RmValNo);
1275 UpdatedSubRanges = true;
1276 }
1277 }
1278 }
1279 if (UpdatedSubRanges)
1280 DstInt.removeEmptySubRanges();
1281 }
1282 } else if (NewMI.getOperand(0).getReg() != CopyDstReg) {
1283    // The new instruction may be defining a sub-register of what's actually
1284    // been asked for. If so it must implicitly define the whole thing.
1285    assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
1286           "Only expect virtual or physical registers in remat");
1287 NewMI.getOperand(0).setIsDead(true);
1288 NewMI.addOperand(MachineOperand::CreateReg(
1289 CopyDstReg, true /*IsDef*/, true /*IsImp*/, false /*IsKill*/));
1290 // Record small dead def live-ranges for all the subregisters
1291 // of the destination register.
1292    // Otherwise, variables that live through may miss some
1293    // interferences, resulting in an invalid allocation.
1294 // E.g., i386 code:
1295 // vreg1 = somedef ; vreg1 GR8
1296 // vreg2 = remat ; vreg2 GR32
1297 // CL = COPY vreg2.sub_8bit
1298 // = somedef vreg1 ; vreg1 GR8
1299 // =>
1300 // vreg1 = somedef ; vreg1 GR8
1301 // ECX<def, dead> = remat ; CL<imp-def>
1302 // = somedef vreg1 ; vreg1 GR8
1303    // vreg1 will see the interference with CL but not with CH since
1304 // no live-ranges would have been created for ECX.
1305 // Fix that!
1306 SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
1307 for (MCRegUnitIterator Units(NewMI.getOperand(0).getReg(), TRI);
1308 Units.isValid(); ++Units)
1309 if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
1310 LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
1311 }
1312
1313 if (NewMI.getOperand(0).getSubReg())
1314 NewMI.getOperand(0).setIsUndef();
1315
1316 // Transfer over implicit operands to the rematerialized instruction.
1317 for (MachineOperand &MO : ImplicitOps)
1318 NewMI.addOperand(MO);
1319
1320 SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
1321 for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
1322 unsigned Reg = NewMIImplDefs[i];
1323 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
1324 if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
1325 LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
1326 }
1327
1328  DEBUG(dbgs() << "Remat: " << NewMI);
1329 ++NumReMats;
1330
1331 // The source interval can become smaller because we removed a use.
1332 shrinkToUses(&SrcInt, &DeadDefs);
1333 if (!DeadDefs.empty()) {
1334 // If the virtual SrcReg is completely eliminated, update all DBG_VALUEs
1335 // to describe DstReg instead.
1336 for (MachineOperand &UseMO : MRI->use_operands(SrcReg)) {
1337 MachineInstr *UseMI = UseMO.getParent();
1338 if (UseMI->isDebugValue()) {
1339 UseMO.setReg(DstReg);
1340 // Move the debug value directly after the def of the rematerialized
1341 // value in DstReg.
1342 MBB->splice(std::next(NewMI.getIterator()), UseMI->getParent(), UseMI);
1343        DEBUG(dbgs() << "\t\tupdated: " << *UseMI);
1344 }
1345 }
1346 eliminateDeadDefs();
1347 }
1348
1349 return true;
1350}
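// --- Editor's sketch (not part of the annotated source) ---
// The "trivial def" test that gates this remat path bottoms out in a
// TargetInstrInfo query. A minimal sketch assuming the file's existing
// includes; DefMI and AA are placeholders for values the caller computes.
static bool defIsTriviallyRematable(const TargetInstrInfo &TII,
                                    const MachineInstr &DefMI,
                                    AliasAnalysis *AA) {
  // True only when the target marks the opcode rematerializable and the
  // instruction's inputs cannot change between the def and the copy.
  return TII.isTriviallyReMaterializable(DefMI, AA);
}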
1351
1352bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
1353  // ProcessImplicitDefs may leave some copies of <undef> values; it only
1354  // removes local variables. When we have a copy like:
1355 //
1356 // %vreg1 = COPY %vreg2<undef>
1357 //
1358 // We delete the copy and remove the corresponding value number from %vreg1.
1359 // Any uses of that value number are marked as <undef>.
1360
1361 // Note that we do not query CoalescerPair here but redo isMoveInstr as the
1362 // CoalescerPair may have a new register class with adjusted subreg indices
1363 // at this point.
1364 unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
1365 isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
1366
1367 SlotIndex Idx = LIS->getInstructionIndex(*CopyMI);
1368 const LiveInterval &SrcLI = LIS->getInterval(SrcReg);
1369 // CopyMI is undef iff SrcReg is not live before the instruction.
1370 if (SrcSubIdx != 0 && SrcLI.hasSubRanges()) {
1371 LaneBitmask SrcMask = TRI->getSubRegIndexLaneMask(SrcSubIdx);
1372 for (const LiveInterval::SubRange &SR : SrcLI.subranges()) {
1373 if ((SR.LaneMask & SrcMask).none())
1374 continue;
1375 if (SR.liveAt(Idx))
1376 return false;
1377 }
1378 } else if (SrcLI.liveAt(Idx))
1379 return false;
1380
1381  DEBUG(dbgs() << "\tEliminating copy of <undef> value\n");
1382
1383 // Remove any DstReg segments starting at the instruction.
1384 LiveInterval &DstLI = LIS->getInterval(DstReg);
1385 SlotIndex RegIndex = Idx.getRegSlot();
1386 // Remove value or merge with previous one in case of a subregister def.
1387 if (VNInfo *PrevVNI = DstLI.getVNInfoAt(Idx)) {
1388 VNInfo *VNI = DstLI.getVNInfoAt(RegIndex);
1389 DstLI.MergeValueNumberInto(VNI, PrevVNI);
1390
1391 // The affected subregister segments can be removed.
1392 LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(DstSubIdx);
1393 for (LiveInterval::SubRange &SR : DstLI.subranges()) {
1394 if ((SR.LaneMask & DstMask).none())
1395 continue;
1396
1397 VNInfo *SVNI = SR.getVNInfoAt(RegIndex);
1398      assert(SVNI != nullptr && SlotIndex::isSameInstr(SVNI->def, RegIndex));
1399 SR.removeValNo(SVNI);
1400 }
1401 DstLI.removeEmptySubRanges();
1402 } else
1403 LIS->removeVRegDefAt(DstLI, RegIndex);
1404
1405 // Mark uses as undef.
1406 for (MachineOperand &MO : MRI->reg_nodbg_operands(DstReg)) {
1407 if (MO.isDef() /*|| MO.isUndef()*/)
1408 continue;
1409 const MachineInstr &MI = *MO.getParent();
1410 SlotIndex UseIdx = LIS->getInstructionIndex(MI);
1411 LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
1412 bool isLive;
1413 if (!UseMask.all() && DstLI.hasSubRanges()) {
1414 isLive = false;
1415 for (const LiveInterval::SubRange &SR : DstLI.subranges()) {
1416 if ((SR.LaneMask & UseMask).none())
1417 continue;
1418 if (SR.liveAt(UseIdx)) {
1419 isLive = true;
1420 break;
1421 }
1422 }
1423 } else
1424 isLive = DstLI.liveAt(UseIdx);
1425 if (isLive)
1426 continue;
1427 MO.setIsUndef(true);
1428    DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI);
1429 }
1430
1431 // A def of a subregister may be a use of the other subregisters, so
1432 // deleting a def of a subregister may also remove uses. Since CopyMI
1433 // is still part of the function (but about to be erased), mark all
1434 // defs of DstReg in it as <undef>, so that shrinkToUses would
1435 // ignore them.
1436 for (MachineOperand &MO : CopyMI->operands())
1437 if (MO.isReg() && MO.isDef() && MO.getReg() == DstReg)
1438 MO.setIsUndef(true);
1439 LIS->shrinkToUses(&DstLI);
1440
1441 return true;
1442}
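// --- Editor's sketch (not part of the annotated source) ---
// The liveness test at the top of eliminateUndefCopy, pulled out as a
// standalone predicate: the copy reads an undefined value iff no subrange
// covering the read lanes (or the main range, when there is no subrange
// info) is live at the copy's index. Names here are hypothetical.
static bool copyReadsUndef(const LiveInterval &SrcLI, LaneBitmask SrcMask,
                           SlotIndex Idx) {
  if (SrcLI.hasSubRanges()) {
    for (const LiveInterval::SubRange &SR : SrcLI.subranges())
      if ((SR.LaneMask & SrcMask).any() && SR.liveAt(Idx))
        return false; // some lane being read carries a defined value
    return true;
  }
  return !SrcLI.liveAt(Idx);
}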
1443
1444void RegisterCoalescer::addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
1445 MachineOperand &MO, unsigned SubRegIdx) {
1446 LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx);
1447 if (MO.isDef())
1448 Mask = ~Mask;
1449 bool IsUndef = true;
1450 for (const LiveInterval::SubRange &S : Int.subranges()) {
1451 if ((S.LaneMask & Mask).none())
1452 continue;
1453 if (S.liveAt(UseIdx)) {
1454 IsUndef = false;
1455 break;
1456 }
1457 }
1458 if (IsUndef) {
1459 MO.setIsUndef(true);
1460    // We found that a subregister use is actually reading an undefined
1461    // value. In some cases the whole vreg has become undefined at this
1462    // point, so we may have to shrink the main range if the use was
1463    // ending a live segment there.
1464 LiveQueryResult Q = Int.Query(UseIdx);
1465 if (Q.valueOut() == nullptr)
1466 ShrinkMainRange = true;
1467 }
1468}
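// --- Editor's sketch (not part of the annotated source) ---
// The Mask flip in addUndefFlag is the subtle step: a use must find a live
// subrange among the lanes it reads, while a def can only rely on the lanes
// it does NOT write to prove the old value is still live. As a tiny helper
// (illustrative, not code from this file):
static LaneBitmask lanesToCheckForLiveness(LaneBitmask OpMask, bool IsDef) {
  // For a def, only the untouched lanes can carry a previous value.
  return IsDef ? ~OpMask : OpMask;
}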
1469
1470void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
1471 unsigned DstReg,
1472 unsigned SubIdx) {
1473 bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
1474 LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
1475
1476 if (DstInt && DstInt->hasSubRanges() && DstReg != SrcReg) {
1477 for (MachineOperand &MO : MRI->reg_operands(DstReg)) {
1478 unsigned SubReg = MO.getSubReg();
1479 if (SubReg == 0 || MO.isUndef())
1480 continue;
1481 MachineInstr &MI = *MO.getParent();
1482 if (MI.isDebugValue())
1483 continue;
1484 SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot(true);
1485 addUndefFlag(*DstInt, UseIdx, MO, SubReg);
1486 }
1487 }
1488
1489 SmallPtrSet<MachineInstr*, 8> Visited;
1490 for (MachineRegisterInfo::reg_instr_iterator
1491 I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end();
1492 I != E; ) {
1493 MachineInstr *UseMI = &*(I++);
1494
1495 // Each instruction can only be rewritten once because sub-register
1496 // composition is not always idempotent. When SrcReg != DstReg, rewriting
1497 // the UseMI operands removes them from the SrcReg use-def chain, but when
1498 // SrcReg is DstReg we could encounter UseMI twice if it has multiple
1499 // operands mentioning the virtual register.
1500 if (SrcReg == DstReg && !Visited.insert(UseMI).second)
1501 continue;
1502
1503 SmallVector<unsigned,8> Ops;
1504 bool Reads, Writes;
1505 std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
1506
1507 // If SrcReg wasn't read, it may still be the case that DstReg is live-in
1508 // because SrcReg is a sub-register.
1509 if (DstInt && !Reads && SubIdx && !UseMI->isDebugValue())
1510 Reads = DstInt->liveAt(LIS->getInstructionIndex(*UseMI));
1511
1512 // Replace SrcReg with DstReg in all UseMI operands.
1513 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1514 MachineOperand &MO = UseMI->getOperand(Ops[i]);
1515
1516 // Adjust <undef> flags in case of sub-register joins. We don't want to
1517 // turn a full def into a read-modify-write sub-register def and vice
1518 // versa.
1519 if (SubIdx && MO.isDef())
1520 MO.setIsUndef(!Reads);
1521
1522      // A subreg use of a partially undef (super) register may now be a
1523      // completely undef use, and then it has to be marked that way.
1524 if (SubIdx != 0 && MO.isUse() && MRI->shouldTrackSubRegLiveness(DstReg)) {
1525 if (!DstInt->hasSubRanges()) {
1526 BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
1527 LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(DstInt->reg);
1528 DstInt->createSubRangeFrom(Allocator, Mask, *DstInt);
1529 }
1530 SlotIndex MIIdx = UseMI->isDebugValue()
1531 ? LIS->getSlotIndexes()->getIndexBefore(*UseMI)
1532 : LIS->getInstructionIndex(*UseMI);
1533 SlotIndex UseIdx = MIIdx.getRegSlot(true);
1534 addUndefFlag(*DstInt, UseIdx, MO, SubIdx);
1535 }
1536
1537 if (DstIsPhys)
1538 MO.substPhysReg(DstReg, *TRI);
1539 else
1540 MO.substVirtReg(DstReg, SubIdx, *TRI);
1541 }
1542
1543    DEBUG({
1544      dbgs() << "\t\tupdated: ";
1545      if (!UseMI->isDebugValue())
1546        dbgs() << LIS->getInstructionIndex(*UseMI) << "\t";
1547      dbgs() << *UseMI;
1548    });
1549 }
1550}
1551
1552bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {
1553 // Always join simple intervals that are defined by a single copy from a
1554 // reserved register. This doesn't increase register pressure, so it is
1555 // always beneficial.
1556 if (!MRI->isReserved(CP.getDstReg())) {
1557    DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
1558 return false;
1559 }
1560
1561 LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg());
1562 if (JoinVInt.containsOneValue())
1563 return true;
1564
1565  DEBUG(dbgs() << "\tCannot join complex intervals into reserved register.\n");
1566 return false;
1567}
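// --- Editor's sketch (not part of the annotated source) ---
// "Simple interval" above means a single value number: the interval's one
// def is the copy under consideration, so joining cannot stretch the
// reserved register's live range. A hypothetical restatement:
static bool isSimpleCopyInterval(const LiveInterval &JoinVInt) {
  // containsOneValue() is true iff the interval has exactly one VNInfo.
  return JoinVInt.containsOneValue();
}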
1568
1569bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
1570 Again = false;
1571  DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI);
1572
1573 CoalescerPair CP(*TRI);
1574 if (!CP.setRegisters(CopyMI)) {
1575    DEBUG(dbgs() << "\tNot coalescable.\n");
1576 return false;
1577 }
1578
1579 if (CP.getNewRC()) {
1580 auto SrcRC = MRI->getRegClass(CP.getSrcReg());
1581 auto DstRC = MRI->getRegClass(CP.getDstReg());
1582 unsigned SrcIdx = CP.getSrcIdx();
1583 unsigned DstIdx = CP.getDstIdx();
1584 if (CP.isFlipped()) {
1585 std::swap(SrcIdx, DstIdx);
1586 std::swap(SrcRC, DstRC);
1587 }
1588 if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
1589 CP.getNewRC(), *LIS)) {
1590      DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n");
1591 return false;
1592 }
1593 }
1594
1595 // Dead code elimination. This really should be handled by MachineDCE, but
1596 // sometimes dead copies slip through, and we can't generate invalid live
1597 // ranges.
1598 if (!CP.isPhys() && CopyMI->allDefsAreDead()) {
1599    DEBUG(dbgs() << "\tCopy is dead.\n");
1600 DeadDefs.push_back(CopyMI);
1601 eliminateDeadDefs();
1602 return true;
1603 }
1604
1605 // Eliminate undefs.
1606 if (!CP.isPhys() && eliminateUndefCopy(CopyMI)) {
1607 deleteInstr(CopyMI);
1608 return false; // Not coalescable.
1609 }
1610
1611 // Coalesced copies are normally removed immediately, but transformations
1612 // like removeCopyByCommutingDef() can inadvertently create identity copies.
1613 // When that happens, just join the values and remove the copy.
1614 if (CP.getSrcReg() == CP.getDstReg()) {
1615 LiveInterval &LI = LIS->getInterval(CP.getSrcReg());
1616    DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');
1617 const SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI);
1618 LiveQueryResult LRQ = LI.Query(CopyIdx);
1619 if (VNInfo *DefVNI = LRQ.valueDefined()) {
1620 VNInfo *ReadVNI = LRQ.valueIn();
1621      assert(ReadVNI && "No value before copy and no <undef> flag.");
1622      assert(ReadVNI != DefVNI && "Cannot read and define the same value.");
1623 LI.MergeValueNumberInto(DefVNI, ReadVNI);
1624
1625 // Process subregister liveranges.
1626 for (LiveInterval::SubRange &S : LI.subranges()) {
1627 LiveQueryResult SLRQ = S.Query(CopyIdx);
1628 if (VNInfo *SDefVNI = SLRQ.valueDefined()) {
1629 VNInfo *SReadVNI = SLRQ.valueIn();
1630 S.MergeValueNumberInto(SDefVNI, SReadVNI);
1631 }
1632 }
1633      DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
1634 }
1635 deleteInstr(CopyMI);
1636 return true;
1637 }
1638
1639 // Enforce policies.
1640 if (CP.isPhys()) {
1641    DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), TRI)
1642                 << " with " << PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx())
1643                 << '\n');
1644 if (!canJoinPhys(CP)) {
1645 // Before giving up coalescing, if definition of source is defined by
1646 // trivial computation, try rematerializing it.
1647 bool IsDefCopy;
1648 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
1649 return true;
1650 if (IsDefCopy)
1651 Again = true; // May be possible to coalesce later.
1652 return false;
1653 }
1654 } else {
1655 // When possible, let DstReg be the larger interval.
1656 if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() >
1657 LIS->getInterval(CP.getDstReg()).size())
1658 CP.flip();
1659
1660    DEBUG({
1661      dbgs() << "\tConsidering merging to "
1662             << TRI->getRegClassName(CP.getNewRC()) << " with ";
1663      if (CP.getDstIdx() && CP.getSrcIdx())
1664        dbgs() << PrintReg(CP.getDstReg()) << " in "
1665               << TRI->getSubRegIndexName(CP.getDstIdx()) << " and "
1666               << PrintReg(CP.getSrcReg()) << " in "
1667               << TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n';
1668      else
1669        dbgs() << PrintReg(CP.getSrcReg(), TRI) << " in "
1670               << PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n';
1671    });
1672 }
1673
1674 ShrinkMask = LaneBitmask::getNone();
1675 ShrinkMainRange = false;
1676
1677 // Okay, attempt to join these two intervals. On failure, this returns false.
1678 // Otherwise, if one of the intervals being joined is a physreg, this method
1679 // always canonicalizes DstInt to be it. The output "SrcInt" will not have
1680 // been modified, so we can use this information below to update aliases.
1681 if (!joinIntervals(CP)) {
1682 // Coalescing failed.
1683
1684 // If definition of source is defined by trivial computation, try
1685 // rematerializing it.
1686 bool IsDefCopy;
1687 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
1688 return true;
1689
1690 // If we can eliminate the copy without merging the live segments, do so
1691 // now.
1692 if (!CP.isPartial() && !CP.isPhys()) {
1693 if (adjustCopiesBackFrom(CP, CopyMI) ||
1694 removeCopyByCommutingDef(CP, CopyMI)) {
1695 deleteInstr(CopyMI);
1696        DEBUG(dbgs() << "\tTrivial!\n");
1697 return true;
1698 }
1699 }
1700
1701 // Try and see if we can partially eliminate the copy by moving the copy to
1702 // its predecessor.
1703 if (!CP.isPartial() && !CP.isPhys())
1704 if (removePartialRedundancy(CP, *CopyMI))
1705 return true;
1706
1707 // Otherwise, we are unable to join the intervals.
1708    DEBUG(dbgs() << "\tInterference!\n");
1709 Again = true; // May be possible to coalesce later.
1710 return false;
1711 }
1712
1713 // Coalescing to a virtual register that is of a sub-register class of the
1714 // other. Make sure the resulting register is set to the right register class.
1715 if (CP.isCrossClass()) {
1716 ++numCrossRCs;
1717 MRI->setRegClass(CP.getDstReg(), CP.getNewRC());
1718 }
1719
1720 // Removing sub-register copies can ease the register class constraints.
1721 // Make sure we attempt to inflate the register class of DstReg.
1722 if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC()))
1723 InflateRegs.push_back(CP.getDstReg());
1724
1725 // CopyMI has been erased by joinIntervals at this point. Remove it from
1726 // ErasedInstrs since copyCoalesceWorkList() won't add a successful join back
1727 // to the work list. This keeps ErasedInstrs from growing needlessly.
1728 ErasedInstrs.erase(CopyMI);
1729
1730 // Rewrite all SrcReg operands to DstReg.
1731 // Also update DstReg operands to include DstIdx if it is set.
1732 if (CP.getDstIdx())
1733 updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx());
1734 updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx());
1735
1736 // Shrink subregister ranges if necessary.
1737 if (ShrinkMask.any()) {
1738 LiveInterval &LI = LIS->getInterval(CP.getDstReg());
1739 for (LiveInterval::SubRange &S : LI.subranges()) {
1740 if ((S.LaneMask & ShrinkMask).none())
1741 continue;
1742      DEBUG(dbgs() << "Shrink LaneUses (Lane " << PrintLaneMask(S.LaneMask)
1743                   << ")\n");
1744 LIS->shrinkToUses(S, LI.reg);
1745 }
1746 LI.removeEmptySubRanges();
1747 }
1748 if (ShrinkMainRange) {
1749 LiveInterval &LI = LIS->getInterval(CP.getDstReg());
1750 shrinkToUses(&LI);
1751 }
1752
1753 // SrcReg is guaranteed to be the register whose live interval that is
1754 // being merged.
1755 LIS->removeInterval(CP.getSrcReg());
1756
1757 // Update regalloc hint.
1758 TRI->updateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF);
1759
1760  DEBUG({
1761    dbgs() << "\tSuccess: " << PrintReg(CP.getSrcReg(), TRI, CP.getSrcIdx())
1762           << " -> " << PrintReg(CP.getDstReg(), TRI, CP.getDstIdx()) << '\n';
1763    dbgs() << "\tResult = ";
1764    if (CP.isPhys())
1765      dbgs() << PrintReg(CP.getDstReg(), TRI);
1766    else
1767      dbgs() << LIS->getInterval(CP.getDstReg());
1768    dbgs() << '\n';
1769  });
1770
1771 ++numJoins;
1772 return true;
1773}
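// --- Editor's sketch (not part of the annotated source) ---
// How a work-list driver consumes the Again flag (modeled on this pass's
// copyCoalesceWorkList; the helper, its parameters, and the access to the
// private joinCopy are hypothetical): a failed join with Again == true is
// kept around, because a later join may remove the interference and make
// this copy coalescable.
static void coalesceOneRound(RegisterCoalescer &RC,
                             ArrayRef<MachineInstr *> WorkList,
                             SmallVectorImpl<MachineInstr *> &Deferred) {
  for (MachineInstr *MI : WorkList) {
    bool Again = false;
    if (!RC.joinCopy(MI, Again) && Again)
      Deferred.push_back(MI); // retry after other joins have run
  }
}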
1774
1775bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
1776 unsigned DstReg = CP.getDstReg();
1777 unsigned SrcReg = CP.getSrcReg();
1778  assert(CP.isPhys() && "Must be a physreg copy");
1779  assert(MRI->isReserved(DstReg) && "Not a reserved register");
1780 LiveInterval &RHS = LIS->getInterval(SrcReg);
1781  DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');
1782
1783  assert(RHS.containsOneValue() && "Invalid join with reserved register");
1784
1785 // Optimization for reserved registers like ESP. We can only merge with a
1786 // reserved physreg if RHS has a single value that is a copy of DstReg.
1787 // The live range of the reserved register will look like a set of dead defs
1788 // - we don't properly track the live range of reserved registers.
1789
1790  // Deny any overlapping intervals. This relies on all the reserved
1791  // register live ranges looking like dead defs.
1792 if (!MRI->isConstantPhysReg(DstReg)) {
1793 for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI) {
1794 // Abort if not all the regunits are reserved.
1795 for (MCRegUnitRootIterator RI(*UI, TRI); RI.isValid(); ++RI) {
1796 if (!MRI->isReserved(*RI))
1797 return false;
1798 }
1799 if (RHS.overlaps(LIS->getRegUnit(*UI))) {
1800        DEBUG(dbgs() << "\t\tInterference: " << PrintRegUnit(*UI, TRI) << '\n');
1801 return false;
1802 }
1803 }
1804
1805 // We must also check for overlaps with regmask clobbers.
1806 BitVector RegMaskUsable;
1807 if (LIS->checkRegMaskInterference(RHS, RegMaskUsable) &&
1808 !RegMaskUsable.test(DstReg)) {
1809      DEBUG(dbgs() << "\t\tRegMask interference\n");
1810 return false;
1811 }
1812 }
1813
1814 // Skip any value computations, we are not adding new values to the
1815 // reserved register. Also skip merging the live ranges, the reserved
1816 // register live range doesn't need to be accurate as long as all the
1817 // defs are there.
1818
1819 // Delete the identity copy.
1820 MachineInstr *CopyMI;
1821 if (CP.isFlipped()) {
1822 // Physreg is copied into vreg
1823 // %vregY = COPY %X
1824 // ... //< no other def of %X here
1825 // use %vregY
1826 // =>
1827 // ...
1828 // use %X
1829 CopyMI = MRI->getVRegDef(SrcReg);
1830 } else {
1831 // VReg is copied into physreg:
1832 // %vregX = def
1833 // ... //< no other def or use of %Y here
1834 // %Y = COPY %vregX
1835 // =>
1836 // %Y = def
1837 // ...
1838 if (!MRI->hasOneNonDBGUse(SrcReg)) {
1839      DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
1840 return false;
1841 }
1842
1843 if (!LIS->intervalIsInOneMBB(RHS)) {
1844      DEBUG(dbgs() << "\t\tComplex control flow!\n");
1845 return false;
1846 }
1847
1848 MachineInstr &DestMI = *MRI->getVRegDef(SrcReg);
1849 CopyMI = &*MRI->use_instr_nodbg_begin(SrcReg);
1850 SlotIndex CopyRegIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
1851 SlotIndex DestRegIdx = LIS->getInstructionIndex(DestMI).getRegSlot();
1852
1853 if (!MRI->isConstantPhysReg(DstReg)) {
1854 // We checked above that there are no interfering defs of the physical
1855 // register. However, for this case, where we intend to move up the def of
1856 // the physical register, we also need to check for interfering uses.
1857 SlotIndexes *Indexes = LIS->getSlotIndexes();
1858 for (SlotIndex SI = Indexes->getNextNonNullIndex(DestRegIdx);
1859 SI != CopyRegIdx; SI = Indexes->getNextNonNullIndex(SI)) {
1860 MachineInstr *MI = LIS->getInstructionFromIndex(SI);
1861 if (MI->readsRegister(DstReg, TRI)) {
1862          DEBUG(dbgs() << "\t\tInterference (read): " << *MI);
1863 return false;
1864 }
1865 }
1866 }
1867
1868 // We're going to remove the copy which defines a physical reserved
1869 // register, so remove its valno, etc.
1870    DEBUG(dbgs() << "\t\tRemoving phys reg def of " << PrintReg(DstReg, TRI)
1871                 << " at " << CopyRegIdx << "\n");
1872
1873 LIS->removePhysRegDefAt(DstReg, CopyRegIdx);
1874 // Create a new dead def at the new def location.
1875 for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI) {
1876 LiveRange &LR = LIS->getRegUnit(*UI);
1877 LR.createDeadDef(DestRegIdx, LIS->getVNInfoAllocator());
1878 }
1879 }
1880
1881 deleteInstr(CopyMI);
1882
1883 // We don't track kills for reserved registers.
1884 MRI->clearKillFlags(CP.getSrcReg());
1885
1886 return true;
1887}
1888
1889//===----------------------------------------------------------------------===//
1890// Interference checking and interval joining
1891//===----------------------------------------------------------------------===//
1892//
1893// In the easiest case, the two live ranges being joined are disjoint, and
1894// there is no interference to consider. It is quite common, though, to have
1895// overlapping live ranges, and we need to check if the interference can be
1896// resolved.
1897//
1898// The live range of a single SSA value forms a sub-tree of the dominator tree.
1899// This means that two SSA values overlap if and only if the def of one value
1900// is contained in the live range of the other value. As a special case, the
1901// overlapping values can be defined at the same index.
1902//
1903// The interference from an overlapping def can be resolved in these cases:
1904//
1905// 1. Coalescable copies. The value is defined by a copy that would become an
1906// identity copy after joining SrcReg and DstReg. The copy instruction will
1907// be removed, and the value will be merged with the source value.
1908//
1909// There can be several copies back and forth, causing many values to be
1910// merged into one. We compute a list of ultimate values in the joined live
1911 // range as well as a mapping from the old value numbers.
1912//
1913// 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI
1914// predecessors have a live out value. It doesn't cause real interference,
1915// and can be merged into the value it overlaps. Like a coalescable copy, it
1916// can be erased after joining.
1917//
1918// 3. Copy of external value. The overlapping def may be a copy of a value that
1919// is already in the other register. This is like a coalescable copy, but
1920// the live range of the source register must be trimmed after erasing the
1921// copy instruction:
1922//
1923// %src = COPY %ext
1924// %dst = COPY %ext <-- Remove this COPY, trim the live range of %ext.
1925//
1926// 4. Clobbering undefined lanes. Vector registers are sometimes built by
1927// defining one lane at a time:
1928//
1929// %dst:ssub0<def,read-undef> = FOO
1930// %src = BAR
1931// %dst:ssub1<def> = COPY %src
1932//
1933// The live range of %src overlaps the %dst value defined by FOO, but
1934// merging %src into %dst:ssub1 is only going to clobber the ssub1 lane
1935// which was undef anyway.
1936//
1937// The value mapping is more complicated in this case. The final live range
1938// will have different value numbers for both FOO and BAR, but there is no
1939// simple mapping from old to new values. It may even be necessary to add
1940// new PHI values.
1941//
1942// 5. Clobbering dead lanes. A def may clobber a lane of a vector register that
1943// is live, but never read. This can happen because we don't compute
1944// individual live ranges per lane.
1945//
1946// %dst<def> = FOO
1947// %src = BAR
1948// %dst:ssub1<def> = COPY %src
1949//
1950// This kind of interference is only resolved locally. If the clobbered
1951// lane value escapes the block, the join is aborted.
1952
1953namespace {
1954
1955/// Track information about values in a single virtual register about to be
1956/// joined. Objects of this class are always created in pairs - one for each
1957/// side of the CoalescerPair (or one for each lane of a side of the coalescer
1958 /// pair).
1959class JoinVals {
1960 /// Live range we work on.
1961 LiveRange &LR;
1962
1963 /// (Main) register we work on.
1964 const unsigned Reg;
1965
1966 /// Reg (and therefore the values in this liverange) will end up as
1967 /// subregister SubIdx in the coalesced register. Either CP.DstIdx or
1968 /// CP.SrcIdx.
1969 const unsigned SubIdx;
1970
1971  /// The lanes that this liverange will occupy in the coalesced register. May
1972  /// be smaller than the lanemask produced by SubIdx when merging subranges.
1973 const LaneBitmask LaneMask;
1974
1975 /// This is true when joining sub register ranges, false when joining main
1976 /// ranges.
1977 const bool SubRangeJoin;
1978
1979 /// Whether the current LiveInterval tracks subregister liveness.
1980 const bool TrackSubRegLiveness;
1981
1982 /// Values that will be present in the final live range.
1983 SmallVectorImpl<VNInfo*> &NewVNInfo;
1984
1985 const CoalescerPair &CP;
1986 LiveIntervals *LIS;
1987 SlotIndexes *Indexes;
1988 const TargetRegisterInfo *TRI;
1989
1990 /// Value number assignments. Maps value numbers in LI to entries in
1991 /// NewVNInfo. This is suitable for passing to LiveInterval::join().
1992 SmallVector<int, 8> Assignments;
1993
1994 /// Conflict resolution for overlapping values.
1995 enum ConflictResolution {
1996 /// No overlap, simply keep this value.
1997 CR_Keep,
1998
1999 /// Merge this value into OtherVNI and erase the defining instruction.
2000 /// Used for IMPLICIT_DEF, coalescable copies, and copies from external
2001 /// values.
2002 CR_Erase,
2003
2004 /// Merge this value into OtherVNI but keep the defining instruction.
2005 /// This is for the special case where OtherVNI is defined by the same
2006 /// instruction.
2007 CR_Merge,
2008
2009 /// Keep this value, and have it replace OtherVNI where possible. This
2010 /// complicates value mapping since OtherVNI maps to two different values
2011 /// before and after this def.
2012 /// Used when clobbering undefined or dead lanes.
2013 CR_Replace,
2014
2015 /// Unresolved conflict. Visit later when all values have been mapped.
2016 CR_Unresolved,
2017
2018 /// Unresolvable conflict. Abort the join.
2019 CR_Impossible
2020 };
2021
2022 /// Per-value info for LI. The lane bit masks are all relative to the final
2023 /// joined register, so they can be compared directly between SrcReg and
2024 /// DstReg.
2025 struct Val {
2026 ConflictResolution Resolution = CR_Keep;
2027
2028 /// Lanes written by this def, 0 for unanalyzed values.
2029 LaneBitmask WriteLanes;
2030
2031 /// Lanes with defined values in this register. Other lanes are undef and
2032 /// safe to clobber.
2033 LaneBitmask ValidLanes;
2034
2035 /// Value in LI being redefined by this def.
2036 VNInfo *RedefVNI = nullptr;
2037
2038 /// Value in the other live range that overlaps this def, if any.
2039 VNInfo *OtherVNI = nullptr;
2040
2041 /// Is this value an IMPLICIT_DEF that can be erased?
2042 ///
2043 /// IMPLICIT_DEF values should only exist at the end of a basic block that
2044 /// is a predecessor to a phi-value. These IMPLICIT_DEF instructions can be
2045 /// safely erased if they are overlapping a live value in the other live
2046 /// interval.
2047 ///
2048 /// Weird control flow graphs and incomplete PHI handling in
2049 /// ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with
2050 /// longer live ranges. Such IMPLICIT_DEF values should be treated like
2051 /// normal values.
2052 bool ErasableImplicitDef = false;
2053
2054 /// True when the live range of this value will be pruned because of an
2055 /// overlapping CR_Replace value in the other live range.
2056 bool Pruned = false;
2057
2058 /// True once Pruned above has been computed.
2059 bool PrunedComputed = false;
2060
2061 Val() = default;
2062
2063 bool isAnalyzed() const { return WriteLanes.any(); }
2064 };
2065
2066 /// One entry per value number in LI.
2067 SmallVector<Val, 8> Vals;
2068
2069 /// Compute the bitmask of lanes actually written by DefMI.
2070 /// Set Redef if there are any partial register definitions that depend on the
2071 /// previous value of the register.
2072 LaneBitmask computeWriteLanes(const MachineInstr *DefMI, bool &Redef) const;
2073
2074 /// Find the ultimate value that VNI was copied from.
2075 std::pair<const VNInfo*,unsigned> followCopyChain(const VNInfo *VNI) const;
2076
2077 bool valuesIdentical(VNInfo *Val0, VNInfo *Val1, const JoinVals &Other) const;
2078
2079 /// Analyze ValNo in this live range, and set all fields of Vals[ValNo].
2080 /// Return a conflict resolution when possible, but leave the hard cases as
2081 /// CR_Unresolved.
2082 /// Recursively calls computeAssignment() on this and Other, guaranteeing that
2083 /// both OtherVNI and RedefVNI have been analyzed and mapped before returning.
2084 /// The recursion always goes upwards in the dominator tree, making loops
2085 /// impossible.
2086 ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other);
2087
2088 /// Compute the value assignment for ValNo in RI.
2089 /// This may be called recursively by analyzeValue(), but never for a ValNo on
2090 /// the stack.
2091 void computeAssignment(unsigned ValNo, JoinVals &Other);
2092
2093 /// Assuming ValNo is going to clobber some valid lanes in Other.LR, compute
2094 /// the extent of the tainted lanes in the block.
2095 ///
2096 /// Multiple values in Other.LR can be affected since partial redefinitions
2097 /// can preserve previously tainted lanes.
2098 ///
2099 /// 1 %dst = VLOAD <-- Define all lanes in %dst
2100 /// 2 %src = FOO <-- ValNo to be joined with %dst:ssub0
2101 /// 3 %dst:ssub1 = BAR <-- Partial redef doesn't clear taint in ssub0
2102 /// 4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read
2103 ///
2104 /// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes)
2105 /// entry to TaintedVals.
2106 ///
2107 /// Returns false if the tainted lanes extend beyond the basic block.
2108 bool
2109 taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
2110 SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent);
2111
2112 /// Return true if MI uses any of the given Lanes from Reg.
2113 /// This does not include partial redefinitions of Reg.
2114 bool usesLanes(const MachineInstr &MI, unsigned, unsigned, LaneBitmask) const;
2115
2116 /// Determine if ValNo is a copy of a value number in LR or Other.LR that will
2117 /// be pruned:
2118 ///
2119 /// %dst = COPY %src
2120 /// %src = COPY %dst <-- This value to be pruned.
2121 /// %dst = COPY %src <-- This value is a copy of a pruned value.
2122 bool isPrunedValue(unsigned ValNo, JoinVals &Other);
2123
2124public:
2125 JoinVals(LiveRange &LR, unsigned Reg, unsigned SubIdx, LaneBitmask LaneMask,
2126 SmallVectorImpl<VNInfo*> &newVNInfo, const CoalescerPair &cp,
2127 LiveIntervals *lis, const TargetRegisterInfo *TRI, bool SubRangeJoin,
2128 bool TrackSubRegLiveness)
2129 : LR(LR), Reg(Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2130 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2131 NewVNInfo(newVNInfo), CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2132 TRI(TRI), Assignments(LR.getNumValNums(), -1), Vals(LR.getNumValNums()) {}
2133
2134 /// Analyze defs in LR and compute a value mapping in NewVNInfo.
2135 /// Returns false if any conflicts were impossible to resolve.
2136 bool mapValues(JoinVals &Other);
2137
2138 /// Try to resolve conflicts that require all values to be mapped.
2139 /// Returns false if any conflicts were impossible to resolve.
2140 bool resolveConflicts(JoinVals &Other);
2141
2142 /// Prune the live range of values in Other.LR where they would conflict with
2143 /// CR_Replace values in LR. Collect end points for restoring the live range
2144 /// after joining.
2145 void pruneValues(JoinVals &Other, SmallVectorImpl<SlotIndex> &EndPoints,
2146 bool changeInstrs);
2147
2148 /// Removes subranges starting at copies that get removed. This sometimes
2149 /// happens when undefined subranges are copied around. These ranges contain
2150 /// no useful information and can be removed.
2151 void pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask);
2152
2153 /// Pruning values in subranges can lead to removing segments in these
2154 /// subranges started by IMPLICIT_DEFs. The corresponding segments in
2155 /// the main range also need to be removed. This function will mark
2156 /// the corresponding values in the main range as pruned, so that
2157 /// eraseInstrs can do the final cleanup.
2158 /// The parameter @p LI must be the interval whose main range is the
2159 /// live range LR.
2160 void pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange);
2161
2162 /// Erase any machine instructions that have been coalesced away.
2163 /// Add erased instructions to ErasedInstrs.
2164 /// Add foreign virtual registers to ShrinkRegs if their live range ended at
2165 /// the erased instrs.
2166 void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
2167 SmallVectorImpl<unsigned> &ShrinkRegs,
2168 LiveInterval *LI = nullptr);
2169
2170 /// Remove liverange defs at places where implicit defs will be removed.
2171 void removeImplicitDefs();
2172
2173 /// Get the value assignments suitable for passing to LiveInterval::join.
2174 const int *getAssignments() const { return Assignments.data(); }
2175};
2176
2177} // end anonymous namespace
2178
2179LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef)
2180 const {
2181 LaneBitmask L;
2182 for (const MachineOperand &MO : DefMI->operands()) {
2183 if (!MO.isReg() || MO.getReg() != Reg || !MO.isDef())
2184 continue;
2185 L |= TRI->getSubRegIndexLaneMask(
2186 TRI->composeSubRegIndices(SubIdx, MO.getSubReg()));
2187 if (MO.readsReg())
2188 Redef = true;
2189 }
2190 return L;
2191}
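
// A standalone sketch of the lane accumulation above, using a plain 64-bit
// mask in place of LaneBitmask and hypothetical names (ToyOperand,
// toyComputeWriteLanes); it is illustrative only, not LLVM API.
#include <cstdint>

struct ToyOperand {
  bool IsDef;        // operand writes the register
  bool ReadsReg;     // a partial redef also reads the previous value
  uint64_t LaneMask; // lanes covered by the (sub)register operand
};

// OR together the lanes written by every def operand; flag read-modify-write.
uint64_t toyComputeWriteLanes(const ToyOperand *Ops, unsigned N, bool &Redef) {
  uint64_t L = 0;
  for (unsigned I = 0; I != N; ++I) {
    if (!Ops[I].IsDef)
      continue;
    L |= Ops[I].LaneMask;
    if (Ops[I].ReadsReg)
      Redef = true;
  }
  return L;
}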
2192
2193std::pair<const VNInfo*, unsigned> JoinVals::followCopyChain(
2194 const VNInfo *VNI) const {
2195 unsigned Reg = this->Reg;
2196
2197 while (!VNI->isPHIDef()) {
2198 SlotIndex Def = VNI->def;
2199 MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
2200 assert(MI && "No defining instruction");
2201 if (!MI->isFullCopy())
2202 return std::make_pair(VNI, Reg);
2203 unsigned SrcReg = MI->getOperand(1).getReg();
2204 if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
2205 return std::make_pair(VNI, Reg);
2206
2207 const LiveInterval &LI = LIS->getInterval(SrcReg);
2208 const VNInfo *ValueIn;
2209 // No subrange involved.
2210 if (!SubRangeJoin || !LI.hasSubRanges()) {
2211 LiveQueryResult LRQ = LI.Query(Def);
2212 ValueIn = LRQ.valueIn();
2213 } else {
2214 // Query subranges. Pick the first matching one.
2215 ValueIn = nullptr;
2216 for (const LiveInterval::SubRange &S : LI.subranges()) {
2217 // Transform lanemask to a mask in the joined live interval.
2218 LaneBitmask SMask = TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
2219 if ((SMask & LaneMask).none())
2220 continue;
2221 LiveQueryResult LRQ = S.Query(Def);
2222 ValueIn = LRQ.valueIn();
2223 break;
2224 }
2225 }
2226 if (ValueIn == nullptr)
2227 break;
2228 VNI = ValueIn;
2229 Reg = SrcReg;
2230 }
2231 return std::make_pair(VNI, Reg);
2232}
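
// The loop above reduces to the following sketch: step to the source of each
// full copy until a non-copy definition is reached. ToyValue and
// toyFollowCopyChain are hypothetical names used only for illustration.
#include <utility>

struct ToyValue {
  const ToyValue *CopySource; // nullptr when not defined by a full copy
  unsigned Reg;               // register that carries this value
};

std::pair<const ToyValue *, unsigned> toyFollowCopyChain(const ToyValue *V) {
  while (V->CopySource != nullptr)
    V = V->CopySource; // walk up the copy chain, as followCopyChain() does
  return {V, V->Reg};
}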
2233
2234bool JoinVals::valuesIdentical(VNInfo *Value0, VNInfo *Value1,
2235 const JoinVals &Other) const {
2236 const VNInfo *Orig0;
2237 unsigned Reg0;
2238 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2239 if (Orig0 == Value1)
2240 return true;
2241
2242 const VNInfo *Orig1;
2243 unsigned Reg1;
2244 std::tie(Orig1, Reg1) = Other.followCopyChain(Value1);
2245
2246 // The values are equal if they are defined at the same place and use the
2247 // same register. Note that we cannot compare VNInfos directly as some of
2248 // them might be from a copy created in mergeSubRangeInto() while the other
2249 // is from the original LiveInterval.
2250 return Orig0->def == Orig1->def && Reg0 == Reg1;
2251}
2252
2253JoinVals::ConflictResolution
2254JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
2255 Val &V = Vals[ValNo];
2256 assert(!V.isAnalyzed() && "Value has already been analyzed!");
2257 VNInfo *VNI = LR.getValNumInfo(ValNo);
2258 if (VNI->isUnused()) {
2259 V.WriteLanes = LaneBitmask::getAll();
2260 return CR_Keep;
2261 }
2262
2263 // Get the instruction defining this value, compute the lanes written.
2264 const MachineInstr *DefMI = nullptr;
2265 if (VNI->isPHIDef()) {
2266 // Conservatively assume that all lanes in a PHI are valid.
2267 LaneBitmask Lanes = SubRangeJoin ? LaneBitmask::getLane(0)
2268 : TRI->getSubRegIndexLaneMask(SubIdx);
2269 V.ValidLanes = V.WriteLanes = Lanes;
2270 } else {
2271 DefMI = Indexes->getInstructionFromIndex(VNI->def);
2272 assert(DefMI != nullptr);
2273 if (SubRangeJoin) {
2274 // We don't care about the lanes when joining subregister ranges.
2275 V.WriteLanes = V.ValidLanes = LaneBitmask::getLane(0);
2276 if (DefMI->isImplicitDef()) {
2277 V.ValidLanes = LaneBitmask::getNone();
2278 V.ErasableImplicitDef = true;
2279 }
2280 } else {
2281 bool Redef = false;
2282 V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef);
2283
2284 // If this is a read-modify-write instruction, there may be more valid
2285 // lanes than the ones written by this instruction.
2286 // This only covers partial redef operands. DefMI may have normal use
2287 // operands reading the register. They don't contribute valid lanes.
2288 //
2289 // This adds ssub1 to the set of valid lanes in %src:
2290 //
2291 // %src:ssub1<def> = FOO
2292 //
2293 // This leaves only ssub1 valid, making any other lanes undef:
2294 //
2295 // %src:ssub1<def,read-undef> = FOO %src:ssub2
2296 //
2297 // The <read-undef> flag on the def operand means that old lane values are
2298 // not important.
2299 if (Redef) {
2300 V.RedefVNI = LR.Query(VNI->def).valueIn();
2301 assert((TrackSubRegLiveness || V.RedefVNI) &&
2302 "Instruction is reading nonexistent value");
2303 if (V.RedefVNI != nullptr) {
2304 computeAssignment(V.RedefVNI->id, Other);
2305 V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
2306 }
2307 }
2308
2309 // An IMPLICIT_DEF writes undef values.
2310 if (DefMI->isImplicitDef()) {
2311 // We normally expect IMPLICIT_DEF values to be live only until the end
2312 // of their block. If the value is really live longer and gets pruned in
2313 // another block, this flag is cleared again.
2314 V.ErasableImplicitDef = true;
2315 V.ValidLanes &= ~V.WriteLanes;
2316 }
2317 }
2318 }
2319
2320 // Find the value in Other that overlaps VNI->def, if any.
2321 LiveQueryResult OtherLRQ = Other.LR.Query(VNI->def);
2322
2323 // It is possible that both values are defined by the same instruction, or
2324 // the values are PHIs defined in the same block. When that happens, the two
2325 // values should be merged into one, but not into any preceding value.
2326 // The first value defined or visited gets CR_Keep, the other gets CR_Merge.
2327 if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) {
2328 assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ");
2329
2330 // One value stays, the other is merged. Keep the earlier one, or the first
2331 // one we see.
2332 if (OtherVNI->def < VNI->def)
2333 Other.computeAssignment(OtherVNI->id, *this);
2334 else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) {
2335 // This is an early-clobber def overlapping a live-in value in the other
2336 // register. Not mergeable.
2337 V.OtherVNI = OtherLRQ.valueIn();
2338 return CR_Impossible;
2339 }
2340 V.OtherVNI = OtherVNI;
2341 Val &OtherV = Other.Vals[OtherVNI->id];
2342 // Keep this value, check for conflicts when analyzing OtherVNI.
2343 if (!OtherV.isAnalyzed())
2344 return CR_Keep;
2345 // Both sides have been analyzed now.
2346 // Allow overlapping PHI values. Any real interference would show up in a
2347 // predecessor, the PHI itself can't introduce any conflicts.
2348 if (VNI->isPHIDef())
2349 return CR_Merge;
2350 if ((V.ValidLanes & OtherV.ValidLanes).any())
2351 // Overlapping lanes can't be resolved.
2352 return CR_Impossible;
2353 else
2354 return CR_Merge;
2355 }
2356
2357 // No simultaneous def. Is Other live at the def?
2358 V.OtherVNI = OtherLRQ.valueIn();
2359 if (!V.OtherVNI)
2360 // No overlap, no conflict.
2361 return CR_Keep;
2362
2363 assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ");
2364
2365 // We have overlapping values, or possibly a kill of Other.
2366 // Recursively compute assignments up the dominator tree.
2367 Other.computeAssignment(V.OtherVNI->id, *this);
2368 Val &OtherV = Other.Vals[V.OtherVNI->id];
2369
2370 // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
2371 // This shouldn't normally happen, but ProcessImplicitDefs can leave such
2372 // IMPLICIT_DEF instructions behind, and there is nothing wrong with it
2373 // technically.
2374 //
2375 // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
2376 // to erase the IMPLICIT_DEF instruction.
2377 if (OtherV.ErasableImplicitDef && DefMI &&
2378 DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
2379 DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
2380 << " extends into BB#" << DefMI->getParent()->getNumber()
2381 << ", keeping it.\n");
2382 OtherV.ErasableImplicitDef = false;
2383 }
2384
2385 // Allow overlapping PHI values. Any real interference would show up in a
2386 // predecessor, the PHI itself can't introduce any conflicts.
2387 if (VNI->isPHIDef())
2388 return CR_Replace;
2389
2390 // Check for simple erasable conflicts.
2391 if (DefMI->isImplicitDef()) {
2392 // We need the def for the subregister if there is nothing else live at the
2393 // subrange at this point.
2394 if (TrackSubRegLiveness
2395 && (V.WriteLanes & (OtherV.ValidLanes | OtherV.WriteLanes)).none())
2396 return CR_Replace;
2397 return CR_Erase;
2398 }
2399
2400 // Include the non-conflict where DefMI is a coalescable copy that kills
2401 // OtherVNI. We still want the copy erased and value numbers merged.
2402 if (CP.isCoalescable(DefMI)) {
2403 // Some of the lanes copied from OtherVNI may be undef, making them undef
2404 // here too.
2405 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
2406 return CR_Erase;
2407 }
2408
2409 // This may not be a real conflict if DefMI simply kills Other and defines
2410 // VNI.
2411 if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def)
2412 return CR_Keep;
2413
2414 // Handle the case where VNI and OtherVNI can be proven to be identical:
2415 //
2416 // %other = COPY %ext
2417 // %this = COPY %ext <-- Erase this copy
2418 //
2419 if (DefMI->isFullCopy() && !CP.isPartial()
2420 && valuesIdentical(VNI, V.OtherVNI, Other))
2421 return CR_Erase;
2422
2423 // If the lanes written by this instruction were all undef in OtherVNI, it is
2424 // still safe to join the live ranges. This can't be done with a simple value
2425 // mapping, though - OtherVNI will map to multiple values:
2426 //
2427 // 1 %dst:ssub0 = FOO <-- OtherVNI
2428 // 2 %src = BAR <-- VNI
2429 // 3 %dst:ssub1 = COPY %src<kill> <-- Eliminate this copy.
2430 // 4 BAZ %dst<kill>
2431 // 5 QUUX %src<kill>
2432 //
2433 // Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace
2434 // handles this complex value mapping.
2435 if ((V.WriteLanes & OtherV.ValidLanes).none())
2436 return CR_Replace;
2437
2438 // If the other live range is killed by DefMI and the live ranges are still
2439 // overlapping, it must be because we're looking at an early clobber def:
2440 //
2441 // %dst<def,early-clobber> = ASM %src<kill>
2442 //
2443 // In this case, it is illegal to merge the two live ranges since the early
2444 // clobber def would clobber %src before it was read.
2445 if (OtherLRQ.isKill()) {
2446 // This case where the def doesn't overlap the kill is handled above.
2447 assert(VNI->def.isEarlyClobber() &&
2448 "Only early clobber defs can overlap a kill");
2449 return CR_Impossible;
2450 }
2451
2452 // VNI is clobbering live lanes in OtherVNI, but there is still the
2453 // possibility that no instructions actually read the clobbered lanes.
2454 // If we're clobbering all the lanes in OtherVNI, at least one must be read.
2455 // Otherwise Other.RI wouldn't be live here.
2456 if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes).none())
2457 return CR_Impossible;
2458
2459 // We need to verify that no instructions are reading the clobbered lanes. To
2460 // save compile time, we'll only check that locally. Don't allow the tainted
2461 // value to escape the basic block.
2462 MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
2463 if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB))
2464 return CR_Impossible;
2465
2466 // There are still some things that could go wrong besides clobbered lanes
2467 // being read, for example OtherVNI may be only partially redefined in MBB,
2468 // and some clobbered lanes could escape the block. Save this analysis for
2469 // resolveConflicts() when all values have been mapped. We need to know
2470 // RedefVNI and WriteLanes for any later defs in MBB, and we can't compute
2471 // that now - the recursive analyzeValue() calls must go upwards in the
2472 // dominator tree.
2473 return CR_Unresolved;
2474}
2475
2476void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
2477 Val &V = Vals[ValNo];
2478 if (V.isAnalyzed()) {
2479 // Recursion should always move up the dominator tree, so ValNo is not
2480 // supposed to reappear before it has been assigned.
2481 assert(Assignments[ValNo] != -1 && "Bad recursion?");
2482 return;
2483 }
2484 switch ((V.Resolution = analyzeValue(ValNo, Other))) {
2485 case CR_Erase:
2486 case CR_Merge:
2487 // Merge this ValNo into OtherVNI.
2488 assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
2489 assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
2490 Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];
2491 DEBUG(dbgs() << "\t\tmerge " << PrintReg(Reg) << ':' << ValNo << '@'
2492 << LR.getValNumInfo(ValNo)->def << " into "
2493 << PrintReg(Other.Reg) << ':' << V.OtherVNI->id << '@'
2494 << V.OtherVNI->def << " --> @"
2495 << NewVNInfo[Assignments[ValNo]]->def << '\n');
2496 break;
2497 case CR_Replace:
2498 case CR_Unresolved: {
2499 // The other value is going to be pruned if this join is successful.
2500 assert(V.OtherVNI && "OtherVNI not assigned, can't prune");
2501 Val &OtherV = Other.Vals[V.OtherVNI->id];
2502 // We cannot erase an IMPLICIT_DEF if we don't have valid values for all
2503 // its lanes.
2504 if ((OtherV.WriteLanes & ~V.ValidLanes).any() && TrackSubRegLiveness)
2505 OtherV.ErasableImplicitDef = false;
2506 OtherV.Pruned = true;
2507 LLVM_FALLTHROUGH;
2508 }
2509 default:
2510 // This value number needs to go in the final joined live range.
2511 Assignments[ValNo] = NewVNInfo.size();
2512 NewVNInfo.push_back(LR.getValNumInfo(ValNo));
2513 break;
2514 }
2515}
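
// The assignment policy above in isolation: merged values (CR_Erase/CR_Merge)
// reuse the slot already assigned to their partner; every other value gets a
// fresh slot in the joined value list. All names below are hypothetical.
#include <vector>

int toyAssign(std::vector<int> &Assignments, std::vector<unsigned> &NewVals,
              unsigned ValNo, int PartnerSlot /* partner's slot, or -1 */) {
  if (PartnerSlot >= 0)
    return Assignments[ValNo] = PartnerSlot; // share the partner's slot
  Assignments[ValNo] = static_cast<int>(NewVals.size()); // fresh slot
  NewVals.push_back(ValNo);
  return Assignments[ValNo];
}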
2516
2517bool JoinVals::mapValues(JoinVals &Other) {
2518 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2519 computeAssignment(i, Other);
2520 if (Vals[i].Resolution == CR_Impossible) {
2521 DEBUG(dbgs() << "\t\tinterference at " << PrintReg(Reg) << ':' << i
2522 << '@' << LR.getValNumInfo(i)->def << '\n');
2523 return false;
2524 }
2525 }
2526 return true;
2527}
2528
2529bool JoinVals::
2530taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
2531 SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) {
2532 VNInfo *VNI = LR.getValNumInfo(ValNo);
2533 MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
2534 SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);
2535
2536 // Scan Other.LR from VNI.def to MBBEnd.
2537 LiveInterval::iterator OtherI = Other.LR.find(VNI->def);
2538 assert(OtherI != Other.LR.end() && "No conflict?");
2539 do {
2540 // OtherI is pointing to a tainted value. Abort the join if the tainted
2541 // lanes escape the block.
2542 SlotIndex End = OtherI->end;
2543 if (End >= MBBEnd) {
2544 DEBUG(dbgs() << "\t\ttaints global " << PrintReg(Other.Reg) << ':'
2545 << OtherI->valno->id << '@' << OtherI->start << '\n');
2546 return false;
2547 }
2548 DEBUG(dbgs() << "\t\ttaints local " << PrintReg(Other.Reg) << ':'
2549 << OtherI->valno->id << '@' << OtherI->start
2550 << " to " << End << '\n');
2551 // A dead def is not a problem.
2552 if (End.isDead())
2553 break;
2554 TaintExtent.push_back(std::make_pair(End, TaintedLanes));
2555
2556 // Check for another def in the MBB.
2557 if (++OtherI == Other.LR.end() || OtherI->start >= MBBEnd)
2558 break;
2559
2560 // Lanes written by the new def are no longer tainted.
2561 const Val &OV = Other.Vals[OtherI->valno->id];
2562 TaintedLanes &= ~OV.WriteLanes;
2563 if (!OV.RedefVNI)
2564 break;
2565 } while (TaintedLanes.any());
2566 return true;
2567}
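
// The taint-narrowing step above, reduced to plain masks: each later def in
// the block cleans the lanes it writes, and scanning stops once no tainted
// lanes remain. toyRemainingTaint is a hypothetical helper.
#include <cstdint>

uint64_t toyRemainingTaint(uint64_t Tainted, const uint64_t *DefWriteLanes,
                           unsigned NumDefs) {
  for (unsigned I = 0; I != NumDefs && Tainted != 0; ++I)
    Tainted &= ~DefWriteLanes[I]; // lanes redefined by def I are clean again
  return Tainted;
}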
2568
2569bool JoinVals::usesLanes(const MachineInstr &MI, unsigned Reg, unsigned SubIdx,
2570 LaneBitmask Lanes) const {
2571 if (MI.isDebugValue())
2572 return false;
2573 for (const MachineOperand &MO : MI.operands()) {
2574 if (!MO.isReg() || MO.isDef() || MO.getReg() != Reg)
2575 continue;
2576 if (!MO.readsReg())
2577 continue;
2578 unsigned S = TRI->composeSubRegIndices(SubIdx, MO.getSubReg());
2579 if ((Lanes & TRI->getSubRegIndexLaneMask(S)).any())
2580 return true;
2581 }
2582 return false;
2583}
2584
2585bool JoinVals::resolveConflicts(JoinVals &Other) {
2586 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2587 Val &V = Vals[i];
2588 assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
2589 if (V.Resolution != CR_Unresolved)
2590 continue;
2591 DEBUG(dbgs() << "\t\tconflict at " << PrintReg(Reg) << ':' << i
2592 << '@' << LR.getValNumInfo(i)->def << '\n');
2593 if (SubRangeJoin)
2594 return false;
2595
2596 ++NumLaneConflicts;
2597 assert(V.OtherVNI && "Inconsistent conflict resolution.");
2598 VNInfo *VNI = LR.getValNumInfo(i);
2599 const Val &OtherV = Other.Vals[V.OtherVNI->id];
2600
2601 // VNI is known to clobber some lanes in OtherVNI. If we go ahead with the
2602 // join, those lanes will be tainted with a wrong value. Get the extent of
2603 // the tainted lanes.
2604 LaneBitmask TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
2605 SmallVector<std::pair<SlotIndex, LaneBitmask>, 8> TaintExtent;
2606 if (!taintExtent(i, TaintedLanes, Other, TaintExtent))
2607 // Tainted lanes would extend beyond the basic block.
2608 return false;
2609
2610 assert(!TaintExtent.empty() && "There should be at least one conflict.");
2611
2612 // Now look at the instructions from VNI->def to TaintExtent (inclusive).
2613 MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
2614 MachineBasicBlock::iterator MI = MBB->begin();
2615 if (!VNI->isPHIDef()) {
2616 MI = Indexes->getInstructionFromIndex(VNI->def);
2617 // No need to check the instruction defining VNI for reads.
2618 ++MI;
2619 }
2620 assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) &&
2621 "Interference ends on VNI->def. Should have been handled earlier");
2622 MachineInstr *LastMI =
2623 Indexes->getInstructionFromIndex(TaintExtent.front().first);
2624 assert(LastMI && "Range must end at a proper instruction");
2625 unsigned TaintNum = 0;
2626 while (true) {
2627 assert(MI != MBB->end() && "Bad LastMI");
2628 if (usesLanes(*MI, Other.Reg, Other.SubIdx, TaintedLanes)) {
2629 DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
2630 return false;
2631 }
2632 // LastMI is the last instruction to use the current value.
2633 if (&*MI == LastMI) {
2634 if (++TaintNum == TaintExtent.size())
2635 break;
2636 LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first);
2637 assert(LastMI && "Range must end at a proper instruction");
2638 TaintedLanes = TaintExtent[TaintNum].second;
2639 }
2640 ++MI;
2641 }
2642
2643 // The tainted lanes are unused.
2644 V.Resolution = CR_Replace;
2645 ++NumLaneResolves;
2646 }
2647 return true;
2648}
2649
2650bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) {
2651 Val &V = Vals[ValNo];
2652 if (V.Pruned || V.PrunedComputed)
2653 return V.Pruned;
2654
2655 if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
2656 return V.Pruned;
2657
2658 // Follow copies up the dominator tree and check if any intermediate value
2659 // has been pruned.
2660 V.PrunedComputed = true;
2661 V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this);
2662 return V.Pruned;
2663}
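
// isPrunedValue() memoizes a recursive query with a separate "computed" flag
// so negative answers are cached too and cyclic copy chains terminate. The
// same pattern in isolation (ToyVal is a hypothetical stand-in for Val):
struct ToyVal {
  ToyVal *CopyOf = nullptr; // value this one was copied from, if any
  bool Pruned = false;
  bool PrunedComputed = false;
};

bool toyIsPruned(ToyVal &V) {
  if (V.Pruned || V.PrunedComputed)
    return V.Pruned;       // cached result, whether true or false
  V.PrunedComputed = true; // mark before recursing so revisits stop here
  if (V.CopyOf != nullptr)
    V.Pruned = toyIsPruned(*V.CopyOf);
  return V.Pruned;
}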
2664
2665void JoinVals::pruneValues(JoinVals &Other,
2666 SmallVectorImpl<SlotIndex> &EndPoints,
2667 bool changeInstrs) {
2668 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2669 SlotIndex Def = LR.getValNumInfo(i)->def;
2670 switch (Vals[i].Resolution) {
2671 case CR_Keep:
2672 break;
2673 case CR_Replace: {
2674 // This value takes precedence over the value in Other.LR.
2675 LIS->pruneValue(Other.LR, Def, &EndPoints);
2676 // Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF
2677 // instructions are only inserted to provide a live-out value for PHI
2678 // predecessors, so the instruction should simply go away once its value
2679 // has been replaced.
2680 Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
2681 bool EraseImpDef = OtherV.ErasableImplicitDef &&
2682 OtherV.Resolution == CR_Keep;
2683 if (!Def.isBlock()) {
2684 if (changeInstrs) {
2685 // Remove <def,read-undef> flags. This def is now a partial redef.
2686 // Also remove <def,dead> flags since the joined live range will
2687 // continue past this instruction.
2688 for (MachineOperand &MO :
2689 Indexes->getInstructionFromIndex(Def)->operands()) {
2690 if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) {
2691 if (MO.getSubReg() != 0 && MO.isUndef() && !EraseImpDef)
2692 MO.setIsUndef(false);
2693 MO.setIsDead(false);
2694 }
2695 }
2696 }
2697 // This value will reach instructions below, but we need to make sure
2698 // the live range also reaches the instruction at Def.
2699 if (!EraseImpDef)
2700 EndPoints.push_back(Def);
2701 }
2702 DEBUG(dbgs() << "\t\tpruned " << PrintReg(Other.Reg) << " at " << Def
2703 << ": " << Other.LR << '\n');
2704 break;
2705 }
2706 case CR_Erase:
2707 case CR_Merge:
2708 if (isPrunedValue(i, Other)) {
2709 // This value is ultimately a copy of a pruned value in LR or Other.LR.
2710 // We can no longer trust the value mapping computed by
2711 // computeAssignment(), the value that was originally copied could have
2712 // been replaced.
2713 LIS->pruneValue(LR, Def, &EndPoints);
2714 DEBUG(dbgs() << "\t\tpruned all of " << PrintReg(Reg) << " at "
2715 << Def << ": " << LR << '\n');
2716 }
2717 break;
2718 case CR_Unresolved:
2719 case CR_Impossible:
2720 llvm_unreachable("Unresolved conflicts");
2721 }
2722 }
2723}
2724
2725void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
2726 // Look for values being erased.
2727 bool DidPrune = false;
2728 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2729 // We should trigger in all cases in which eraseInstrs() does something;
2730 // a message is printed below so mismatches with eraseInstrs() can be diagnosed.
2731 if (Vals[i].Resolution != CR_Erase &&
2732 (Vals[i].Resolution != CR_Keep || !Vals[i].ErasableImplicitDef ||
2733 !Vals[i].Pruned))
2734 continue;
2735
2736 // Check subranges at the point where the copy will be removed.
2737 SlotIndex Def = LR.getValNumInfo(i)->def;
2738 // Print message so mismatches with eraseInstrs() can be diagnosed.
2739 DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def << '\n');
2740 for (LiveInterval::SubRange &S : LI.subranges()) {
2741 LiveQueryResult Q = S.Query(Def);
2742
2743 // If a subrange starts at the copy then an undefined value has been
2744 // copied and we must remove that subrange value as well.
2745 VNInfo *ValueOut = Q.valueOutOrDead();
2746 if (ValueOut != nullptr && Q.valueIn() == nullptr) {
2747 DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask)
2748 << " at " << Def << "\n");
2749 LIS->pruneValue(S, Def, nullptr);
2750 DidPrune = true;
2751 // Mark value number as unused.
2752 ValueOut->markUnused();
2753 continue;
2754 }
2755 // If a subrange ends at the copy, then a value was copied but only
2756 // partially used later. Shrink the subregister range appropriately.
2757 if (Q.valueIn() != nullptr && Q.valueOut() == nullptr) {
2758 DEBUG(dbgs() << "\t\tDead uses at sublane " << PrintLaneMask(S.LaneMask)
2759 << " at " << Def << "\n");
2760 ShrinkMask |= S.LaneMask;
2761 }
2762 }
2763 }
2764 if (DidPrune)
2765 LI.removeEmptySubRanges();
2766}
2767
2768/// Check if any of the subranges of @p LI contain a definition at @p Def.
2769static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def) {
2770 for (LiveInterval::SubRange &SR : LI.subranges()) {
2771 if (VNInfo *VNI = SR.Query(Def).valueOutOrDead())
2772 if (VNI->def == Def)
2773 return true;
2774 }
2775 return false;
2776}
2777
2778void JoinVals::pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange) {
2779 assert(&static_cast<LiveRange&>(LI) == &LR);
2780
2781 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2782 if (Vals[i].Resolution != CR_Keep)
2783 continue;
2784 VNInfo *VNI = LR.getValNumInfo(i);
2785 if (VNI->isUnused() || VNI->isPHIDef() || isDefInSubRange(LI, VNI->def))
2786 continue;
2787 Vals[i].Pruned = true;
2788 ShrinkMainRange = true;
2789 }
2790}
2791
2792void JoinVals::removeImplicitDefs() {
2793 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2794 Val &V = Vals[i];
2795 if (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned)
2796 continue;
2797
2798 VNInfo *VNI = LR.getValNumInfo(i);
2799 VNI->markUnused();
2800 LR.removeValNo(VNI);
2801 }
2802}
2803
2804void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
2805 SmallVectorImpl<unsigned> &ShrinkRegs,
2806 LiveInterval *LI) {
2807 for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
2808 // Get the def location before markUnused() below invalidates it.
2809 SlotIndex Def = LR.getValNumInfo(i)->def;
2810 switch (Vals[i].Resolution) {
2811 case CR_Keep: {
2812 // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any
2813 // longer. The IMPLICIT_DEF instructions are only inserted by
2814 // PHIElimination to guarantee that all PHI predecessors have a value.
2815 if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
2816 break;
2817 // Remove value number i from LR.
2818 // For intervals with subranges, removing a segment from the main range
2819 // may require extending the previous segment: for each definition of
2820 // a subregister, there will be a corresponding def in the main range.
2821 // That def may fall in the middle of a segment from another subrange.
2822 // In such cases, removing this def from the main range must be
2823 // complemented by extending the main range to account for the liveness
2824 // of the other subrange.
2825 VNInfo *VNI = LR.getValNumInfo(i);
2826 SlotIndex Def = VNI->def;
2827 // The new end point of the main range segment to be extended.
2828 SlotIndex NewEnd;
2829 if (LI != nullptr) {
2830 LiveRange::iterator I = LR.FindSegmentContaining(Def);
2831 assert(I != LR.end());
2832 // Do not extend beyond the end of the segment being removed.
2833 // The segment may have been pruned in preparation for joining
2834 // live ranges.
2835 NewEnd = I->end;
2836 }
2837
2838 LR.removeValNo(VNI);
2839 // Note that this VNInfo is reused and still referenced in NewVNInfo,
2840 // make it appear like an unused value number.
2841 VNI->markUnused();
2842
2843 if (LI != nullptr && LI->hasSubRanges()) {
2844 assert(static_cast<LiveRange*>(LI) == &LR);
2845 // Determine the end point based on the subrange information:
2846 // minimum of (earliest def of next segment,
2847 // latest end point of containing segment)
2848 SlotIndex ED, LE;
2849 for (LiveInterval::SubRange &SR : LI->subranges()) {
2850 LiveRange::iterator I = SR.find(Def);
2851 if (I == SR.end())
2852 continue;
2853 if (I->start > Def)
2854 ED = ED.isValid() ? std::min(ED, I->start) : I->start;
2855 else
2856 LE = LE.isValid() ? std::max(LE, I->end) : I->end;
2857 }
2858 if (LE.isValid())
2859 NewEnd = std::min(NewEnd, LE);
2860 if (ED.isValid())
2861 NewEnd = std::min(NewEnd, ED);
2862
2863 // We only want to do the extension if there was a subrange that
2864 // was live across Def.
2865 if (LE.isValid()) {
2866 LiveRange::iterator S = LR.find(Def);
2867 if (S != LR.begin())
2868 std::prev(S)->end = NewEnd;
2869 }
2870 }
2871 DEBUG({
2872 dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LR << '\n';
2873 if (LI != nullptr)
2874 dbgs() << "\t\t LHS = " << *LI << '\n';
2875 });
2876 LLVM_FALLTHROUGH;
2877 }
2878
2879 case CR_Erase: {
2880 MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
2881 assert(MI && "No instruction to erase");
2882 if (MI->isCopy()) {
2883 unsigned Reg = MI->getOperand(1).getReg();
2884 if (TargetRegisterInfo::isVirtualRegister(Reg) &&
2885 Reg != CP.getSrcReg() && Reg != CP.getDstReg())
2886 ShrinkRegs.push_back(Reg);
2887 }
2888 ErasedInstrs.insert(MI);
2889 DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
2890 LIS->RemoveMachineInstrFromMaps(*MI);
2891 MI->eraseFromParent();
2892 break;
2893 }
2894 default:
2895 break;
2896 }
2897 }
2898}
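
// A sketch of the CR_Keep end-point computation above: the previous main-range
// segment is extended to the minimum of the earliest start of a following
// subrange segment and the latest end of a subrange segment overlapping Def.
// The explicit Have* flags stand in for SlotIndex::isValid() and avoid reading
// an uninitialized value, the kind of defect this report flags. All names
// below are hypothetical.
#include <algorithm>
#include <cstdint>

struct ToySeg { uint64_t Start, End; };

uint64_t toyNewEnd(uint64_t Def, uint64_t FallbackEnd,
                   const ToySeg *Segs, unsigned N) {
  uint64_t NewEnd = FallbackEnd;
  uint64_t ED = 0, LE = 0; // earliest next def / latest overlapping end
  bool HaveED = false, HaveLE = false;
  for (unsigned I = 0; I != N; ++I) {
    if (Segs[I].Start > Def) {
      ED = HaveED ? std::min(ED, Segs[I].Start) : Segs[I].Start;
      HaveED = true;
    } else if (Segs[I].End > Def) {
      LE = HaveLE ? std::max(LE, Segs[I].End) : Segs[I].End;
      HaveLE = true;
    }
  }
  if (HaveLE)
    NewEnd = std::min(NewEnd, LE);
  if (HaveED)
    NewEnd = std::min(NewEnd, ED);
  return NewEnd;
}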
2899
2900void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
2901 LaneBitmask LaneMask,
2902 const CoalescerPair &CP) {
2903 SmallVector<VNInfo*, 16> NewVNInfo;
2904 JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask,
2905 NewVNInfo, CP, LIS, TRI, true, true);
2906 JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask,
2907 NewVNInfo, CP, LIS, TRI, true, true);
2908
2909 // Compute NewVNInfo and resolve conflicts (see also joinVirtRegs()).
2910 // We should be able to resolve all conflicts here, as we already resolved
2911 // them successfully on the main range. There is, however, a problem when
2912 // multiple subranges get mapped to the "overflow" lane mask bit, which
2913 // creates unexpected interferences.
2914 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
2915 // We already determined that it is legal to merge the intervals, so this
2916 // should never fail.
2917 llvm_unreachable("*** Couldn't join subrange!\n");
2918 }
2919 if (!LHSVals.resolveConflicts(RHSVals) ||
2920 !RHSVals.resolveConflicts(LHSVals)) {
2921 // We already determined that it is legal to merge the intervals, so this
2922 // should never fail.
2923 llvm_unreachable("*** Couldn't join subrange!\n");
2924 }
2925
2926 // The merging algorithm in LiveInterval::join() can't handle conflicting
2927 // value mappings, so we need to remove any live ranges that overlap a
2928 // CR_Replace resolution. Collect a set of end points that can be used to
2929 // restore the live range after joining.
2930 SmallVector<SlotIndex, 8> EndPoints;
2931 LHSVals.pruneValues(RHSVals, EndPoints, false);
2932 RHSVals.pruneValues(LHSVals, EndPoints, false);
2933
2934 LHSVals.removeImplicitDefs();
2935 RHSVals.removeImplicitDefs();
2936
2937 LRange.verify();
2938 RRange.verify();
2939
2940 // Join RRange into LHS.
2941 LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
2942 NewVNInfo);
2943
2944 DEBUG(dbgs() << "\t\tjoined lanes: " << LRange << "\n");
2945 if (EndPoints.empty())
2946 return;
2947
2948 // Recompute the parts of the live range we had to remove because of
2949 // CR_Replace conflicts.
2950 DEBUG({
2951 dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
2952 for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
2953 dbgs() << EndPoints[i];
2954 if (i != n-1)
2955 dbgs() << ',';
2956 }
2957 dbgs() << ": " << LRange << '\n';
2958 });
2959 LIS->extendToIndices(LRange, EndPoints);
2960}
2961
2962void RegisterCoalescer::mergeSubRangeInto(LiveInterval &LI,
2963 const LiveRange &ToMerge,
2964 LaneBitmask LaneMask,
2965 CoalescerPair &CP) {
2966 BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
2967 LI.refineSubRanges(Allocator, LaneMask,
2968 [this,&Allocator,&ToMerge,&CP](LiveInterval::SubRange &SR) {
2969 if (SR.empty()) {
2970 SR.assign(ToMerge, Allocator);
2971 } else {
2972 // joinSubRegRange() destroys the merged range, so we need a copy.
2973 LiveRange RangeCopy(ToMerge, Allocator);
2974 joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP);
2975 }
2976 });
2977}
2978
2979bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
2980 SmallVector<VNInfo*, 16> NewVNInfo;
2981 LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
2982 LiveInterval &LHS = LIS->getInterval(CP.getDstReg());
2983 bool TrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(*CP.getNewRC());
2984 JoinVals RHSVals(RHS, CP.getSrcReg(), CP.getSrcIdx(), LaneBitmask::getNone(),
2985 NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness);
2986 JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), LaneBitmask::getNone(),
2987 NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness);
2988
2989 DEBUG(dbgs() << "\t\tRHS = " << RHS
2990 << "\n\t\tLHS = " << LHS
2991 << '\n');
2992
2993 // First compute NewVNInfo and the simple value mappings.
2994 // Detect impossible conflicts early.
2995 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
2996 return false;
2997
2998 // Some conflicts can only be resolved after all values have been mapped.
2999 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3000 return false;
3001
3002 // All clear, the live ranges can be merged.
3003 if (RHS.hasSubRanges() || LHS.hasSubRanges()) {
3004 BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
3005
3006 // Transform lanemasks from the LHS to masks in the coalesced register and
3007 // create initial subranges if necessary.
3008 unsigned DstIdx = CP.getDstIdx();
3009 if (!LHS.hasSubRanges()) {
3010 LaneBitmask Mask = DstIdx == 0 ? CP.getNewRC()->getLaneMask()
3011 : TRI->getSubRegIndexLaneMask(DstIdx);
3012 // LHS must support subregs or we wouldn't be in this codepath.
3013 assert(Mask.any());
3014 LHS.createSubRangeFrom(Allocator, Mask, LHS);
3015 } else if (DstIdx != 0) {
3016 // Transform LHS lanemasks to new register class if necessary.
3017 for (LiveInterval::SubRange &R : LHS.subranges()) {
3018 LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(DstIdx, R.LaneMask);
3019 R.LaneMask = Mask;
3020 }
3021 }
3022 DEBUG(dbgs() << "\t\tLHST = " << PrintReg(CP.getDstReg())
3023 << ' ' << LHS << '\n');
3024
3025 // Determine lanemasks of RHS in the coalesced register and merge subranges.
3026 unsigned SrcIdx = CP.getSrcIdx();
3027 if (!RHS.hasSubRanges()) {
3028 LaneBitmask Mask = SrcIdx == 0 ? CP.getNewRC()->getLaneMask()
3029 : TRI->getSubRegIndexLaneMask(SrcIdx);
3030 mergeSubRangeInto(LHS, RHS, Mask, CP);
3031 } else {
3032 // Pair up subranges and merge.
3033 for (LiveInterval::SubRange &R : RHS.subranges()) {
3034 LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(SrcIdx, R.LaneMask);
3035 mergeSubRangeInto(LHS, R, Mask, CP);
3036 }
3037 }
3038 DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n");
3039
3040 // Pruning implicit defs from subranges may result in the main range
3041 // having stale segments.
3042 LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
3043
3044 LHSVals.pruneSubRegValues(LHS, ShrinkMask);
3045 RHSVals.pruneSubRegValues(LHS, ShrinkMask);
3046 }
3047
3048 // The merging algorithm in LiveInterval::join() can't handle conflicting
3049 // value mappings, so we need to remove any live ranges that overlap a
3050 // CR_Replace resolution. Collect a set of end points that can be used to
3051 // restore the live range after joining.
3052 SmallVector<SlotIndex, 8> EndPoints;
3053 LHSVals.pruneValues(RHSVals, EndPoints, true);
3054 RHSVals.pruneValues(LHSVals, EndPoints, true);
3055
3056 // Erase COPY and IMPLICIT_DEF instructions. This may cause some external
3057 // registers to require trimming.
3058 SmallVector<unsigned, 8> ShrinkRegs;
3059 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS);
3060 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3061 while (!ShrinkRegs.empty())
3062 shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));
3063
3064 // Join RHS into LHS.
3065 LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3066
3067 // Kill flags are going to be wrong if the live ranges were overlapping.
3068 // Eventually, we should simply clear all kill flags when computing live
3069 // ranges. They are reinserted after register allocation.
3070 MRI->clearKillFlags(LHS.reg);
3071 MRI->clearKillFlags(RHS.reg);
3072
3073 if (!EndPoints.empty()) {
3074 // Recompute the parts of the live range we had to remove because of
3075 // CR_Replace conflicts.
3076 DEBUG({
3077 dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
3078 for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
3079 dbgs() << EndPoints[i];
3080 if (i != n-1)
3081 dbgs() << ',';
3082 }
3083 dbgs() << ": " << LHS << '\n';
3084 });
3085 LIS->extendToIndices((LiveRange&)LHS, EndPoints);
3086 }
3087
3088 return true;
3089}
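The prune-then-restore shape above (pruneValues collects EndPoints while cutting conflicting segments, and extendToIndices re-grows liveness afterwards) can be pictured with a self-contained toy model. This is a sketch over plain integer ranges, not LIS/SlotIndex code; Range and prune are hypothetical names invented for illustration.

// Toy model of the prune/restore pattern; not LLVM API.
#include <cstdio>
#include <utility>
#include <vector>

struct Range { int Begin, End; };  // half-open [Begin, End)

// Cut any part of Ranges overlapping [B, E), remembering the removed
// endpoints so the caller can re-extend liveness later.
static void prune(std::vector<Range> &Ranges, int B, int E,
                  std::vector<int> &EndPoints) {
  std::vector<Range> Out;
  for (Range R : Ranges) {
    if (R.End <= B || R.Begin >= E) { Out.push_back(R); continue; }
    EndPoints.push_back(R.End);                 // what we must restore to
    if (R.Begin < B) Out.push_back({R.Begin, B});
    if (R.End > E)   Out.push_back({E, R.End});
  }
  Ranges = std::move(Out);
}

int main() {
  std::vector<Range> Live = {{0, 10}, {20, 30}};
  std::vector<int> EndPoints;
  prune(Live, 5, 25, EndPoints);                // conflict zone [5, 25)
  for (Range R : Live) std::printf("[%d,%d) ", R.Begin, R.End);
  std::printf("| endpoints:");
  for (int P : EndPoints) std::printf(" %d", P);
  std::printf("\n");   // prints: [0,5) [25,30) | endpoints: 10 30
}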
3090
3091bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
3092 return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP);
3093}
3094
3095namespace {
3096
3097/// Information concerning MBB coalescing priority.
3098struct MBBPriorityInfo {
3099 MachineBasicBlock *MBB;
3100 unsigned Depth;
3101 bool IsSplit;
3102
3103 MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
3104 : MBB(mbb), Depth(depth), IsSplit(issplit) {}
3105};
3106
3107} // end anonymous namespace
3108
3109/// C-style comparator that sorts first on the loop depth of the basic block
3110/// (the unsigned), then on IsSplit and CFG connectivity, and last on the MBB number.
3111///
3112/// EnableGlobalCopies assumes that the primary sort key is loop depth.
3113static int compareMBBPriority(const MBBPriorityInfo *LHS,
3114 const MBBPriorityInfo *RHS) {
3115 // Deeper loops first
3116 if (LHS->Depth != RHS->Depth)
3117 return LHS->Depth > RHS->Depth ? -1 : 1;
3118
3119 // Try to unsplit critical edges next.
3120 if (LHS->IsSplit != RHS->IsSplit)
3121 return LHS->IsSplit ? -1 : 1;
3122
3123 // Prefer blocks that are more connected in the CFG. This takes care of
3124 // the most difficult copies first while intervals are short.
3125 unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size();
3126 unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size();
3127 if (cl != cr)
3128 return cl > cr ? -1 : 1;
3129
3130 // As a last resort, sort by block number.
3131 return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1;
3132}
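As a self-contained analog of how such a comparator is consumed (using std::qsort instead of LLVM's array_pod_sort, and a made-up Item struct standing in for MBBPriorityInfo):

#include <cstdio>
#include <cstdlib>

struct Item { unsigned Depth; int Number; };

// Same contract as compareMBBPriority: negative means "left sorts first".
// Deeper Depth first; Number ascending as the last tie-breaker.
static int compareItems(const void *L, const void *R) {
  const Item *A = static_cast<const Item *>(L);
  const Item *B = static_cast<const Item *>(R);
  if (A->Depth != B->Depth)
    return A->Depth > B->Depth ? -1 : 1;
  return A->Number < B->Number ? -1 : 1;  // Numbers assumed unique
}

int main() {
  Item Items[] = {{0, 2}, {2, 1}, {2, 0}, {1, 3}};
  std::qsort(Items, 4, sizeof(Item), compareItems);
  for (const Item &I : Items)
    std::printf("(depth=%u, num=%d) ", I.Depth, I.Number);
  std::printf("\n");  // (depth=2, num=0) (depth=2, num=1) (depth=1, num=3) (depth=0, num=2)
}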
3133
3134/// \returns true if the given copy uses or defines a local live range.
3135static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
3136 if (!Copy->isCopy())
3137 return false;
3138
3139 if (Copy->getOperand(1).isUndef())
3140 return false;
3141
3142 unsigned SrcReg = Copy->getOperand(1).getReg();
3143 unsigned DstReg = Copy->getOperand(0).getReg();
3144 if (TargetRegisterInfo::isPhysicalRegister(SrcReg)
3145 || TargetRegisterInfo::isPhysicalRegister(DstReg))
3146 return false;
3147
3148 return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
3149 || LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
3150}
3151
3152bool RegisterCoalescer::
3153copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
3154 bool Progress = false;
3155 for (unsigned i = 0, e = CurrList.size(); i != e; ++i) {
3156 if (!CurrList[i])
3157 continue;
3158 // Skip instruction pointers that have already been erased, for example by
3159 // dead code elimination.
3160 if (ErasedInstrs.count(CurrList[i])) {
3161 CurrList[i] = nullptr;
3162 continue;
3163 }
3164 bool Again = false;
3165 bool Success = joinCopy(CurrList[i], Again);
3166 Progress |= Success;
3167 if (Success || !Again)
3168 CurrList[i] = nullptr;
3169 }
3170 return Progress;
3171}
3172
3173/// Check if DstReg is a terminal node.
3174/// I.e., it does not have any affinity other than \p Copy.
3175static bool isTerminalReg(unsigned DstReg, const MachineInstr &Copy,
3176 const MachineRegisterInfo *MRI) {
3177 assert(Copy.isCopyLike());
3178 // Check if the destination of this copy has any other affinity.
3179 for (const MachineInstr &MI : MRI->reg_nodbg_instructions(DstReg))
3180 if (&MI != &Copy && MI.isCopyLike())
3181 return false;
3182 return true;
3183}
3184
3185bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
3186 assert(Copy.isCopyLike());
3187 if (!UseTerminalRule)
20
Assuming the condition is false
21
Taking false branch
3188 return false;
3189 unsigned DstReg, DstSubReg, SrcReg, SrcSubReg;
3190 isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg);
3191 // Check if the destination of this copy has any other affinity.
3192 if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
22
Taking false branch
3193 // If SrcReg is a physical register, the copy won't be coalesced.
3194 // Ignoring it may have other side effects (like missing
3195 // rematerialization). So keep it.
3196 TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
3197 !isTerminalReg(DstReg, Copy, MRI))
3198 return false;
3199
3200 // DstReg is a terminal node. Check if it interferes with any other
3201 // copy involving SrcReg.
3202 const MachineBasicBlock *OrigBB = Copy.getParent();
3203 const LiveInterval &DstLI = LIS->getInterval(DstReg);
3204 for (const MachineInstr &MI : MRI->reg_nodbg_instructions(SrcReg)) {
3205 // Technically we should check if the weight of the new copy is
3206 // interesting compared to the other one and update the weight
3207 // of the copies accordingly. However, this would only work if
3208 // we gathered all the copies first and then coalesced, whereas
3209 // right now we interleave both actions.
3210 // For now, just consider the copies that are in the same block.
3211 if (&MI == &Copy || !MI.isCopyLike() || MI.getParent() != OrigBB)
23
Assuming the condition is false
24
Assuming the condition is false
25
Taking false branch
3212 continue;
3213 unsigned OtherReg, OtherSubReg, OtherSrcReg, OtherSrcSubReg;
26
'OtherReg' declared without an initial value
3214 isMoveInstr(*TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
27
Calling 'isMoveInstr'
40
Returning from 'isMoveInstr'
3215 OtherSubReg);
3216 if (OtherReg == SrcReg)
41
The left operand of '==' is a garbage value
3217 OtherReg = OtherSrcReg;
3218 // Check if OtherReg is a non-terminal.
3219 if (TargetRegisterInfo::isPhysicalRegister(OtherReg) ||
3220 isTerminalReg(OtherReg, MI, MRI))
3221 continue;
3222 // Check that OtherReg interferes with DstReg.
3223 if (LIS->getInterval(OtherReg).overlaps(DstLI)) {
3224 DEBUG(dbgs() << "Apply terminal rule for: " << PrintReg(DstReg) << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc")) { dbgs() << "Apply terminal rule for: " <<
PrintReg(DstReg) << '\n'; } } while (false)
;
3225 return true;
3226 }
3227 }
3228 return false;
3229}
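The garbage value flagged at line 3216 comes from the usual out-parameter pattern: a helper like isMoveInstr fills its outputs only on the paths where it succeeds, so callers must not read them without consulting the return value. A minimal sketch of the safe usage, with a hypothetical tryDecode standing in for isMoveInstr:

#include <cstdio>

// Hypothetical helper mirroring the isMoveInstr contract:
// outputs are written only when the function returns true.
static bool tryDecode(int Kind, unsigned &Dst, unsigned &Src) {
  if (Kind != 0)
    return false;   // failure path: Dst and Src left untouched
  Dst = 1;
  Src = 2;
  return true;
}

int main() {
  unsigned Dst, Src;  // deliberately uninitialized, like OtherReg above
  // Safe: read the outputs only after the call reports success.
  if (tryDecode(0, Dst, Src))
    std::printf("dst=%u src=%u\n", Dst, Src);
  else
    std::printf("not a move\n");
}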
3230
3231void
3232RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
3233 DEBUG(dbgs() << MBB->getName() << ":\n");
3234
3235 // Collect all copy-like instructions in MBB. Don't start coalescing anything
3236 // yet; it might invalidate the iterator.
3237 const unsigned PrevSize = WorkList.size();
3238 if (JoinGlobalCopies) {
14
Taking false branch
3239 SmallVector<MachineInstr*, 2> LocalTerminals;
3240 SmallVector<MachineInstr*, 2> GlobalTerminals;
3241 // Coalesce copies bottom-up to coalesce local defs before local uses. They
3242 // are not inherently easier to resolve, but slightly preferable until we
3243 // have local live range splitting. In particular this is required by
3244 // cmp+jmp macro fusion.
3245 for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
3246 MII != E; ++MII) {
3247 if (!MII->isCopyLike())
3248 continue;
3249 bool ApplyTerminalRule = applyTerminalRule(*MII);
3250 if (isLocalCopy(&(*MII), LIS)) {
3251 if (ApplyTerminalRule)
3252 LocalTerminals.push_back(&(*MII));
3253 else
3254 LocalWorkList.push_back(&(*MII));
3255 } else {
3256 if (ApplyTerminalRule)
3257 GlobalTerminals.push_back(&(*MII));
3258 else
3259 WorkList.push_back(&(*MII));
3260 }
3261 }
3262 // Append the copies evicted by the terminal rule at the end of the list.
3263 LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end());
3264 WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end());
3265 }
3266 else {
3267 SmallVector<MachineInstr*, 2> Terminals;
3268 for (MachineInstr &MII : *MBB)
3269 if (MII.isCopyLike()) {
15
Taking false branch
16
Taking false branch
17
Taking false branch
18
Taking true branch
3270 if (applyTerminalRule(MII))
19
Calling 'RegisterCoalescer::applyTerminalRule'
3271 Terminals.push_back(&MII);
3272 else
3273 WorkList.push_back(&MII);
3274 }
3275 // Append the copies evicted by the terminal rule at the end of the list.
3276 WorkList.append(Terminals.begin(), Terminals.end());
3277 }
3278 // Try coalescing the collected copies immediately, and remove the nulls.
3279 // This prevents the WorkList from getting too large since most copies are
3280 // joinable on the first attempt.
3281 MutableArrayRef<MachineInstr*>
3282 CurrList(WorkList.begin() + PrevSize, WorkList.end());
3283 if (copyCoalesceWorkList(CurrList))
3284 WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
3285 nullptr), WorkList.end());
3286}
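The collect-first shape above (queue copies into WorkList while only reading the block, coalesce afterwards) is the standard way to avoid mutating a container mid-iteration. A tiny self-contained sketch of the two-phase idiom, with a made-up shouldProcess filter:

#include <algorithm>
#include <cstdio>
#include <vector>

static bool shouldProcess(int V) { return V % 2 == 0; }  // made-up filter

int main() {
  std::vector<int> Block = {1, 2, 3, 4, 5, 6};
  // Phase 1: only read Block while walking it; queue work for later.
  std::vector<int> Work;
  for (int V : Block)
    if (shouldProcess(V))
      Work.push_back(V);
  // Phase 2: the walk is finished, so erasing from Block is now safe.
  for (int V : Work) {
    Block.erase(std::remove(Block.begin(), Block.end(), V), Block.end());
    std::printf("processed %d\n", V);
  }
  std::printf("remaining: %zu\n", Block.size());  // remaining: 3
}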
3287
3288void RegisterCoalescer::coalesceLocals() {
3289 copyCoalesceWorkList(LocalWorkList);
3290 for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) {
3291 if (LocalWorkList[j])
3292 WorkList.push_back(LocalWorkList[j]);
3293 }
3294 LocalWorkList.clear();
3295}
3296
3297void RegisterCoalescer::joinAllIntervals() {
3298 DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc")) { dbgs() << "********** JOINING INTERVALS ***********\n"
; } } while (false)
;
3299 assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");
3300
3301 std::vector<MBBPriorityInfo> MBBs;
3302 MBBs.reserve(MF->size());
3303 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
9
Loop condition is false. Execution continues on line 3308
3304 MachineBasicBlock *MBB = &*I;
3305 MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
3306 JoinSplitEdges && isSplitEdge(MBB)));
3307 }
3308 array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
3309
3310 // Coalesce intervals in MBB priority order.
3311 unsigned CurrDepth = std::numeric_limits<unsigned>::max();
3312 for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
10
Assuming 'i' is not equal to 'e'
11
Loop condition is true. Entering loop body
3313 // Try coalescing the collected local copies for deeper loops.
3314 if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
12
Assuming the condition is false
3315 coalesceLocals();
3316 CurrDepth = MBBs[i].Depth;
3317 }
3318 copyCoalesceInMBB(MBBs[i].MBB);
13
Calling 'RegisterCoalescer::copyCoalesceInMBB'
3319 }
3320 coalesceLocals();
3321
3322 // Joining intervals can allow other intervals to be joined. Iteratively join
3323 // until we make no progress.
3324 while (copyCoalesceWorkList(WorkList))
3325 /* empty */ ;
3326}
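The while (copyCoalesceWorkList(WorkList)) loop at line 3324 is a fixpoint iteration: each pass may unlock further joins, so the list is reprocessed until a full pass makes no progress. A self-contained sketch of that shape (tryJoin and the retire-by-nulling convention are made up to mirror joinCopy's Success/Again protocol):

#include <cstdio>
#include <vector>

// Made-up "join": an item can be joined once its value drops below 3;
// otherwise it makes partial progress and asks to be retried.
static bool tryJoin(int &Item, bool &Again) {
  if (Item < 3)
    return true;        // joined
  --Item;
  Again = true;         // not joined yet; retry on a later pass
  return false;
}

// One pass over the list; returns true if anything was joined.
static bool processWorkList(std::vector<int> &WorkList) {
  bool Progress = false;
  for (int &Item : WorkList) {
    if (Item == -1)
      continue;                         // already retired
    bool Again = false;
    bool Success = tryJoin(Item, Again);
    Progress |= Success;
    if (Success || !Again)
      Item = -1;                        // retire, like nulling CurrList[i]
  }
  return Progress;
}

int main() {
  std::vector<int> WorkList = {1, 4, 2, 5};
  while (processWorkList(WorkList))     // iterate until no progress
    /* empty */;
  std::printf("fixpoint reached\n");
}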
3327
3328void RegisterCoalescer::releaseMemory() {
3329 ErasedInstrs.clear();
3330 WorkList.clear();
3331 DeadDefs.clear();
3332 InflateRegs.clear();
3333}
3334
3335bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
3336 MF = &fn;
3337 MRI = &fn.getRegInfo();
3338 const TargetSubtargetInfo &STI = fn.getSubtarget();
3339 TRI = STI.getRegisterInfo();
3340 TII = STI.getInstrInfo();
3341 LIS = &getAnalysis<LiveIntervals>();
3342 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3343 Loops = &getAnalysis<MachineLoopInfo>();
3344 if (EnableGlobalCopies == cl::BOU_UNSET)
1
Assuming the condition is false
2
Taking false branch
3345 JoinGlobalCopies = STI.enableJoinGlobalCopies();
3346 else
3347 JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
3
Assuming the condition is false
3348
3349 // The MachineScheduler does not currently require JoinSplitEdges. This will
3350 // either be enabled unconditionally or replaced by a more general live range
3351 // splitting optimization.
3352 JoinSplitEdges = EnableJoinSplits;
3353
3354 DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc")) { dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
<< "********** Function: " << MF->getName() <<
'\n'; } } while (false)
3355 << "********** Function: " << MF->getName() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc")) { dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
<< "********** Function: " << MF->getName() <<
'\n'; } } while (false)
;
3356
3357 if (VerifyCoalescing)
4
Assuming the condition is false
5
Taking false branch
3358 MF->verify(this, "Before register coalescing");
3359
3360 RegClassInfo.runOnMachineFunction(fn);
3361
3362 // Join (coalesce) intervals if requested.
3363 if (EnableJoining)
6
Assuming the condition is true
7
Taking true branch
3364 joinAllIntervals();
8
Calling 'RegisterCoalescer::joinAllIntervals'
3365
3366 // After deleting a lot of copies, register classes may be less constrained.
3367 // Removing sub-register operands may allow GR32_ABCD -> GR32 and DPR_VFP2 ->
3368 // DPR inflation.
3369 array_pod_sort(InflateRegs.begin(), InflateRegs.end());
3370 InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()),
3371 InflateRegs.end());
3372 DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size() << " regs.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc")) { dbgs() << "Trying to inflate " <<
InflateRegs.size() << " regs.\n"; } } while (false)
;
3373 for (unsigned i = 0, e = InflateRegs.size(); i != e; ++i) {
3374 unsigned Reg = InflateRegs[i];
3375 if (MRI->reg_nodbg_empty(Reg))
3376 continue;
3377 if (MRI->recomputeRegClass(Reg)) {
3378 DEBUG(dbgs() << PrintReg(Reg) << " inflated to "
3379 << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n');
3380 ++NumInflated;
3381
3382 LiveInterval &LI = LIS->getInterval(Reg);
3383 if (LI.hasSubRanges()) {
3384 // If the inflated register class does not support subregisters anymore
3385 // remove the subranges.
3386 if (!MRI->shouldTrackSubRegLiveness(Reg)) {
3387 LI.clearSubRanges();
3388 } else {
3389#ifndef NDEBUG
3390 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3391 // If subranges are still supported, then the same subregs
3392 // should still be supported.
3393 for (LiveInterval::SubRange &S : LI.subranges()) {
3394 assert((S.LaneMask & ~MaxMask).none());
3395 }
3396#endif
3397 }
3398 }
3399 }
3400 }
3401
3402 DEBUG(dump());
3403 if (VerifyCoalescing)
3404 MF->verify(this, "After register coalescing");
3405 return true;
3406}
3407
3408void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
3409 LIS->print(O, m);
3410}

/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the declaration of the MachineInstr class, which is the
11// basic representation for all target dependent machine instructions used by
12// the back end.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_MACHINEINSTR_H
17#define LLVM_CODEGEN_MACHINEINSTR_H
18
19#include "llvm/ADT/DenseMapInfo.h"
20#include "llvm/ADT/ilist.h"
21#include "llvm/ADT/ilist_node.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/CodeGen/MachineOperand.h"
25#include "llvm/CodeGen/TargetOpcodes.h"
26#include "llvm/IR/DebugLoc.h"
27#include "llvm/IR/InlineAsm.h"
28#include "llvm/MC/MCInstrDesc.h"
29#include "llvm/Support/ArrayRecycler.h"
30#include <algorithm>
31#include <cassert>
32#include <cstdint>
33#include <utility>
34
35namespace llvm {
36
37template <typename T> class ArrayRef;
38class DIExpression;
39class DILocalVariable;
40class MachineBasicBlock;
41class MachineFunction;
42class MachineMemOperand;
43class MachineRegisterInfo;
44class ModuleSlotTracker;
45class raw_ostream;
46template <typename T> class SmallVectorImpl;
47class StringRef;
48class TargetInstrInfo;
49class TargetRegisterClass;
50class TargetRegisterInfo;
51
52//===----------------------------------------------------------------------===//
53/// Representation of each machine instruction.
54///
55/// This class isn't a POD type, but it must have a trivial destructor. When a
56/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
57/// without having their destructor called.
58///
59class MachineInstr
60 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
61 ilist_sentinel_tracking<true>> {
62public:
63 using mmo_iterator = MachineMemOperand **;
64
65 /// Flags to specify different kinds of comments to output in
66 /// assembly code. These flags carry semantic information not
67 /// otherwise easily derivable from the IR text.
68 ///
69 enum CommentFlag {
70 ReloadReuse = 0x1 // higher bits are reserved for target dep comments.
71 };
72
73 enum MIFlag {
74 NoFlags = 0,
75 FrameSetup = 1 << 0, // Instruction is used as a part of
76 // function frame setup code.
77 FrameDestroy = 1 << 1, // Instruction is used as a part of
78 // function frame destruction code.
79 BundledPred = 1 << 2, // Instruction has bundled predecessors.
80 BundledSucc = 1 << 3 // Instruction has bundled successors.
81 };
82
83private:
84 const MCInstrDesc *MCID; // Instruction descriptor.
85 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
86
87 // Operands are allocated by an ArrayRecycler.
88 MachineOperand *Operands = nullptr; // Pointer to the first operand.
89 unsigned NumOperands = 0; // Number of operands on instruction.
90 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
91 OperandCapacity CapOperands; // Capacity of the Operands array.
92
93 uint8_t Flags = 0; // Various bits of additional
94 // information about machine
95 // instruction.
96
97 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
98 // the AsmPrinter to emit helpful
99 // comments. This is *not* semantic
100 // information. Do not use this for
101 // anything other than to convey comment
102 // information to AsmPrinter.
103
104 uint8_t NumMemRefs = 0; // Information on memory references.
105 // Note that MemRefs == nullptr means 'don't know', not 'no memory access'.
106 // Calling code must treat missing information conservatively. If the number
107 // of memory operands required to be precise exceeds the maximum value of
108 // NumMemRefs - currently 256 - we remove the operands entirely. Note also
109 // that this is a non-owning reference to a shared copy on write buffer owned
110 // by the MachineFunction and created via MF.allocateMemRefsArray.
111 mmo_iterator MemRefs = nullptr;
112
113 DebugLoc debugLoc; // Source line information.
114
115 // Intrusive list support
116 friend struct ilist_traits<MachineInstr>;
117 friend struct ilist_callback_traits<MachineBasicBlock>;
118 void setParent(MachineBasicBlock *P) { Parent = P; }
119
120 /// This constructor creates a copy of the given
121 /// MachineInstr in the given MachineFunction.
122 MachineInstr(MachineFunction &, const MachineInstr &);
123
124 /// This constructor creates a MachineInstr and adds the implicit operands.
125 /// It reserves space for number of operands specified by
126 /// MCInstrDesc. An explicit DebugLoc is supplied.
127 MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
128 bool NoImp = false);
129
130 // MachineInstrs are pool-allocated and owned by MachineFunction.
131 friend class MachineFunction;
132
133public:
134 MachineInstr(const MachineInstr &) = delete;
135 MachineInstr &operator=(const MachineInstr &) = delete;
136 // Use MachineFunction::DeleteMachineInstr() instead.
137 ~MachineInstr() = delete;
138
139 const MachineBasicBlock* getParent() const { return Parent; }
140 MachineBasicBlock* getParent() { return Parent; }
141
142 /// Return the function that contains the basic block that this instruction
143 /// belongs to.
144 ///
145 /// Note: this is undefined behaviour if the instruction does not have a
146 /// parent.
147 const MachineFunction *getMF() const;
148 MachineFunction *getMF() {
149 return const_cast<MachineFunction *>(
150 static_cast<const MachineInstr *>(this)->getMF());
151 }
152
153 /// Return the asm printer flags bitvector.
154 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
155
156 /// Clear the AsmPrinter bitvector.
157 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
158
159 /// Return whether an AsmPrinter flag is set.
160 bool getAsmPrinterFlag(CommentFlag Flag) const {
161 return AsmPrinterFlags & Flag;
162 }
163
164 /// Set a flag for the AsmPrinter.
165 void setAsmPrinterFlag(uint8_t Flag) {
166 AsmPrinterFlags |= Flag;
167 }
168
169 /// Clear specific AsmPrinter flags.
170 void clearAsmPrinterFlag(CommentFlag Flag) {
171 AsmPrinterFlags &= ~Flag;
172 }
173
174 /// Return the MI flags bitvector.
175 uint8_t getFlags() const {
176 return Flags;
177 }
178
179 /// Return whether an MI flag is set.
180 bool getFlag(MIFlag Flag) const {
181 return Flags & Flag;
182 }
183
184 /// Set a MI flag.
185 void setFlag(MIFlag Flag) {
186 Flags |= (uint8_t)Flag;
187 }
188
189 void setFlags(unsigned flags) {
190 // Filter out the automatically maintained flags.
191 unsigned Mask = BundledPred | BundledSucc;
192 Flags = (Flags & Mask) | (flags & ~Mask);
193 }
194
195 /// clearFlag - Clear a MI flag.
196 void clearFlag(MIFlag Flag) {
197 Flags &= ~((uint8_t)Flag);
198 }
199
200 /// Return true if MI is in a bundle (but not the first MI in a bundle).
201 ///
202 /// A bundle looks like this before it's finalized:
203 /// ----------------
204 /// | MI |
205 /// ----------------
206 /// |
207 /// ----------------
208 /// | MI * |
209 /// ----------------
210 /// |
211 /// ----------------
212 /// | MI * |
213 /// ----------------
214 /// In this case, the first MI starts a bundle but is not inside a bundle; the
215 /// next 2 MIs are considered "inside" the bundle.
216 ///
217 /// After a bundle is finalized, it looks like this:
218 /// ----------------
219 /// | Bundle |
220 /// ----------------
221 /// |
222 /// ----------------
223 /// | MI * |
224 /// ----------------
225 /// |
226 /// ----------------
227 /// | MI * |
228 /// ----------------
229 /// |
230 /// ----------------
231 /// | MI * |
232 /// ----------------
233 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
234 /// a bundle, but the next three MIs are.
235 bool isInsideBundle() const {
236 return getFlag(BundledPred);
237 }
238
239 /// Return true if this instruction is part of a bundle. This is true
240 /// if either itself or its following instruction is marked "InsideBundle".
241 bool isBundled() const {
242 return isBundledWithPred() || isBundledWithSucc();
243 }
244
245 /// Return true if this instruction is part of a bundle, and it is not the
246 /// first instruction in the bundle.
247 bool isBundledWithPred() const { return getFlag(BundledPred); }
248
249 /// Return true if this instruction is part of a bundle, and it is not the
250 /// last instruction in the bundle.
251 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
252
253 /// Bundle this instruction with its predecessor. This can be an unbundled
254 /// instruction, or it can be the first instruction in a bundle.
255 void bundleWithPred();
256
257 /// Bundle this instruction with its successor. This can be an unbundled
258 /// instruction, or it can be the last instruction in a bundle.
259 void bundleWithSucc();
260
261 /// Break bundle above this instruction.
262 void unbundleFromPred();
263
264 /// Break bundle below this instruction.
265 void unbundleFromSucc();
266
267 /// Returns the debug location id of this MachineInstr.
268 const DebugLoc &getDebugLoc() const { return debugLoc; }
269
270 /// Return the debug variable referenced by
271 /// this DBG_VALUE instruction.
272 const DILocalVariable *getDebugVariable() const;
273
274 /// Return the complex address expression referenced by
275 /// this DBG_VALUE instruction.
276 const DIExpression *getDebugExpression() const;
277
278 /// Emit an error referring to the source location of this instruction.
279 /// This should only be used for inline assembly that is somehow
280 /// impossible to compile. Other errors should have been handled much
281 /// earlier.
282 ///
283 /// If this method returns, the caller should try to recover from the error.
284 void emitError(StringRef Msg) const;
285
286 /// Returns the target instruction descriptor of this MachineInstr.
287 const MCInstrDesc &getDesc() const { return *MCID; }
288
289 /// Returns the opcode of this MachineInstr.
290 unsigned getOpcode() const { return MCID->Opcode; }
291
292 /// Access to explicit operands of the instruction.
293 unsigned getNumOperands() const { return NumOperands; }
294
295 const MachineOperand& getOperand(unsigned i) const {
296 assert(i < getNumOperands() && "getOperand() out of range!");
297 return Operands[i];
298 }
299 MachineOperand& getOperand(unsigned i) {
300 assert(i < getNumOperands() && "getOperand() out of range!");
301 return Operands[i];
302 }
303
304 /// Return true if operand \p OpIdx is a subregister index.
305 bool isOperandSubregIdx(unsigned OpIdx) const {
306 assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
307 "Expected MO_Immediate operand type.");
308 if (isExtractSubreg() && OpIdx == 2)
309 return true;
310 if (isInsertSubreg() && OpIdx == 3)
311 return true;
312 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
313 return true;
314 if (isSubregToReg() && OpIdx == 3)
315 return true;
316 return false;
317 }
318
319 /// Returns the number of non-implicit operands.
320 unsigned getNumExplicitOperands() const;
321
322 /// iterator/begin/end - Iterate over all operands of a machine instruction.
323 using mop_iterator = MachineOperand *;
324 using const_mop_iterator = const MachineOperand *;
325
326 mop_iterator operands_begin() { return Operands; }
327 mop_iterator operands_end() { return Operands + NumOperands; }
328
329 const_mop_iterator operands_begin() const { return Operands; }
330 const_mop_iterator operands_end() const { return Operands + NumOperands; }
331
332 iterator_range<mop_iterator> operands() {
333 return make_range(operands_begin(), operands_end());
334 }
335 iterator_range<const_mop_iterator> operands() const {
336 return make_range(operands_begin(), operands_end());
337 }
338 iterator_range<mop_iterator> explicit_operands() {
339 return make_range(operands_begin(),
340 operands_begin() + getNumExplicitOperands());
341 }
342 iterator_range<const_mop_iterator> explicit_operands() const {
343 return make_range(operands_begin(),
344 operands_begin() + getNumExplicitOperands());
345 }
346 iterator_range<mop_iterator> implicit_operands() {
347 return make_range(explicit_operands().end(), operands_end());
348 }
349 iterator_range<const_mop_iterator> implicit_operands() const {
350 return make_range(explicit_operands().end(), operands_end());
351 }
352 /// Returns a range over all explicit operands that are register definitions.
353 /// Implicit definitions are not included!
354 iterator_range<mop_iterator> defs() {
355 return make_range(operands_begin(),
356 operands_begin() + getDesc().getNumDefs());
357 }
358 /// \copydoc defs()
359 iterator_range<const_mop_iterator> defs() const {
360 return make_range(operands_begin(),
361 operands_begin() + getDesc().getNumDefs());
362 }
363 /// Returns a range that includes all operands that are register uses.
364 /// This may include unrelated operands which are not register uses.
365 iterator_range<mop_iterator> uses() {
366 return make_range(operands_begin() + getDesc().getNumDefs(),
367 operands_end());
368 }
369 /// \copydoc uses()
370 iterator_range<const_mop_iterator> uses() const {
371 return make_range(operands_begin() + getDesc().getNumDefs(),
372 operands_end());
373 }
374 iterator_range<mop_iterator> explicit_uses() {
375 return make_range(operands_begin() + getDesc().getNumDefs(),
376 operands_begin() + getNumExplicitOperands() );
377 }
378 iterator_range<const_mop_iterator> explicit_uses() const {
379 return make_range(operands_begin() + getDesc().getNumDefs(),
380 operands_begin() + getNumExplicitOperands() );
381 }
382
383 /// Returns the number of the operand iterator \p I points to.
384 unsigned getOperandNo(const_mop_iterator I) const {
385 return I - operands_begin();
386 }
387
388 /// Access to memory operands of the instruction
389 mmo_iterator memoperands_begin() const { return MemRefs; }
390 mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
391 /// Return true if we don't have any memory operands which describe the
392 /// memory access done by this instruction. If this is true, calling code
393 /// must be conservative.
394 bool memoperands_empty() const { return NumMemRefs == 0; }
395
396 iterator_range<mmo_iterator> memoperands() {
397 return make_range(memoperands_begin(), memoperands_end());
398 }
399 iterator_range<mmo_iterator> memoperands() const {
400 return make_range(memoperands_begin(), memoperands_end());
401 }
402
403 /// Return true if this instruction has exactly one MachineMemOperand.
404 bool hasOneMemOperand() const {
405 return NumMemRefs == 1;
406 }
407
408 /// Return the number of memory operands.
409 unsigned getNumMemOperands() const { return NumMemRefs; }
410
411 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
412 /// queries but they are bundle aware.
413
414 enum QueryType {
415 IgnoreBundle, // Ignore bundles
416 AnyInBundle, // Return true if any instruction in bundle has property
417 AllInBundle // Return true if all instructions in bundle have property
418 };
419
420 /// Return true if the instruction (or in the case of a bundle,
421 /// the instructions inside the bundle) has the specified property.
422 /// The first argument is the property being queried.
423 /// The second argument indicates whether the query should look inside
424 /// instruction bundles.
425 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
426 // Inline the fast path for unbundled or bundle-internal instructions.
427 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
428 return getDesc().getFlags() & (1ULL << MCFlag);
429
430 // If this is the first instruction in a bundle, take the slow path.
431 return hasPropertyInBundle(1ULL << MCFlag, Type);
432 }
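The fast path here is a plain bitmask lookup: getDesc().getFlags() & (1ULL << MCFlag). The same idiom in a self-contained sketch with invented property bits:

#include <cstdint>
#include <cstdio>

// Made-up property bits, packed one per bit position like MCID flags.
enum Property : unsigned { IsCall = 0, IsBranch = 1, MayLoad = 2 };

struct Desc {
  uint64_t Flags;
  // True when the requested property bit is set in the packed flag word.
  bool hasProperty(unsigned Bit) const { return Flags & (1ULL << Bit); }
};

int main() {
  Desc D{(1ULL << IsBranch) | (1ULL << MayLoad)};
  std::printf("call=%d branch=%d load=%d\n",
              D.hasProperty(IsCall), D.hasProperty(IsBranch),
              D.hasProperty(MayLoad));   // call=0 branch=1 load=1
}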
433
434 /// Return true if this instruction can have a variable number of operands.
435 /// In this case, the variable operands will be after the normal
436 /// operands but before the implicit definitions and uses (if any are
437 /// present).
438 bool isVariadic(QueryType Type = IgnoreBundle) const {
439 return hasProperty(MCID::Variadic, Type);
440 }
441
442 /// Set if this instruction has an optional definition, e.g.
443 /// ARM instructions which can set condition code if 's' bit is set.
444 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
445 return hasProperty(MCID::HasOptionalDef, Type);
446 }
447
448 /// Return true if this is a pseudo instruction that doesn't
449 /// correspond to a real machine instruction.
450 bool isPseudo(QueryType Type = IgnoreBundle) const {
451 return hasProperty(MCID::Pseudo, Type);
452 }
453
454 bool isReturn(QueryType Type = AnyInBundle) const {
455 return hasProperty(MCID::Return, Type);
456 }
457
458 bool isCall(QueryType Type = AnyInBundle) const {
459 return hasProperty(MCID::Call, Type);
460 }
461
462 /// Returns true if the specified instruction stops control flow
463 /// from executing the instruction immediately following it. Examples include
464 /// unconditional branches and return instructions.
465 bool isBarrier(QueryType Type = AnyInBundle) const {
466 return hasProperty(MCID::Barrier, Type);
467 }
468
469 /// Returns true if this instruction is part of the terminator for a basic block.
470 /// Typically this is things like return and branch instructions.
471 ///
472 /// Various passes use this to insert code into the bottom of a basic block,
473 /// but before control flow occurs.
474 bool isTerminator(QueryType Type = AnyInBundle) const {
475 return hasProperty(MCID::Terminator, Type);
476 }
477
478 /// Returns true if this is a conditional, unconditional, or indirect branch.
479 /// Predicates below can be used to discriminate between
480 /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
481 /// get more information.
482 bool isBranch(QueryType Type = AnyInBundle) const {
483 return hasProperty(MCID::Branch, Type);
484 }
485
486 /// Return true if this is an indirect branch, such as a
487 /// branch through a register.
488 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
489 return hasProperty(MCID::IndirectBranch, Type);
490 }
491
492 /// Return true if this is a branch which may fall
493 /// through to the next instruction or may transfer control flow to some other
494 /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
495 /// information about this branch.
496 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
497 return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type);
498 }
499
500 /// Return true if this is a branch which always
501 /// transfers control flow to some other block. The
502 /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
503 /// about this branch.
504 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
505 return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type);
506 }
507
508 /// Return true if this instruction has a predicate operand that
509 /// controls execution. It may be set to 'always', or may be set to other
510 /// values. There are various methods in TargetInstrInfo that can be used to
511 /// control and modify the predicate in this instruction.
512 bool isPredicable(QueryType Type = AllInBundle) const {
513 // If it's a bundle then all bundled instructions must be predicable for this
514 // to return true.
515 return hasProperty(MCID::Predicable, Type);
516 }
517
518 /// Return true if this instruction is a comparison.
519 bool isCompare(QueryType Type = IgnoreBundle) const {
520 return hasProperty(MCID::Compare, Type);
521 }
522
523 /// Return true if this instruction is a move immediate
524 /// (including conditional moves) instruction.
525 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
526 return hasProperty(MCID::MoveImm, Type);
527 }
528
529 /// Return true if this instruction is a bitcast instruction.
530 bool isBitcast(QueryType Type = IgnoreBundle) const {
531 return hasProperty(MCID::Bitcast, Type);
532 }
533
534 /// Return true if this instruction is a select instruction.
535 bool isSelect(QueryType Type = IgnoreBundle) const {
536 return hasProperty(MCID::Select, Type);
537 }
538
539 /// Return true if this instruction cannot be safely duplicated.
540 /// For example, if the instruction has a unique labels attached
541 /// to it, duplicating it would cause multiple definition errors.
542 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
543 return hasProperty(MCID::NotDuplicable, Type);
544 }
545
546 /// Return true if this instruction is convergent.
547 /// Convergent instructions can not be made control-dependent on any
548 /// additional values.
549 bool isConvergent(QueryType Type = AnyInBundle) const {
550 if (isInlineAsm()) {
551 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
552 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
553 return true;
554 }
555 return hasProperty(MCID::Convergent, Type);
556 }
557
558 /// Returns true if the specified instruction has a delay slot
559 /// which must be filled by the code generator.
560 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
561 return hasProperty(MCID::DelaySlot, Type);
562 }
563
564 /// Return true for instructions that can be folded as
565 /// memory operands in other instructions. The most common use for this
566 /// is instructions that are simple loads from memory that don't modify
567 /// the loaded value in any way, but it can also be used for instructions
568 /// that can be expressed as constant-pool loads, such as V_SETALLONES
569 /// on x86, to allow them to be folded when it is beneficial.
570 /// This should only be set on instructions that return a value in their
571 /// only virtual register definition.
572 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
573 return hasProperty(MCID::FoldableAsLoad, Type);
574 }
575
576 /// \brief Return true if this instruction behaves
577 /// the same way as the generic REG_SEQUENCE instructions.
578 /// E.g., on ARM,
579 /// dX VMOVDRR rY, rZ
580 /// is equivalent to
581 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
582 ///
583 /// Note that for the optimizers to be able to take advantage of
584 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
585 /// override accordingly.
586 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
587 return hasProperty(MCID::RegSequence, Type);
588 }
589
590 /// \brief Return true if this instruction behaves
591 /// the same way as the generic EXTRACT_SUBREG instructions.
592 /// E.g., on ARM,
593 /// rX, rY VMOVRRD dZ
594 /// is equivalent to two EXTRACT_SUBREG:
595 /// rX = EXTRACT_SUBREG dZ, ssub_0
596 /// rY = EXTRACT_SUBREG dZ, ssub_1
597 ///
598 /// Note that for the optimizers to be able to take advantage of
599 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
600 /// overridden accordingly.
601 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
602 return hasProperty(MCID::ExtractSubreg, Type);
603 }
604
605 /// \brief Return true if this instruction behaves
606 /// the same way as the generic INSERT_SUBREG instructions.
607 /// E.g., on ARM,
608 /// dX = VSETLNi32 dY, rZ, Imm
609 /// is equivalent to a INSERT_SUBREG:
610 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
611 ///
612 /// Note that for the optimizers to be able to take advantage of
613 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
614 /// overridden accordingly.
615 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
616 return hasProperty(MCID::InsertSubreg, Type);
617 }
618
619 //===--------------------------------------------------------------------===//
620 // Side Effect Analysis
621 //===--------------------------------------------------------------------===//
622
623 /// Return true if this instruction could possibly read memory.
624 /// Instructions with this flag set are not necessarily simple load
625 /// instructions, they may load a value and modify it, for example.
626 bool mayLoad(QueryType Type = AnyInBundle) const {
627 if (isInlineAsm()) {
628 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
629 if (ExtraInfo & InlineAsm::Extra_MayLoad)
630 return true;
631 }
632 return hasProperty(MCID::MayLoad, Type);
633 }
634
635 /// Return true if this instruction could possibly modify memory.
636 /// Instructions with this flag set are not necessarily simple store
637 /// instructions, they may store a modified value based on their operands, or
638 /// may not actually modify anything, for example.
639 bool mayStore(QueryType Type = AnyInBundle) const {
640 if (isInlineAsm()) {
641 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
642 if (ExtraInfo & InlineAsm::Extra_MayStore)
643 return true;
644 }
645 return hasProperty(MCID::MayStore, Type);
646 }
647
648 /// Return true if this instruction could possibly read or modify memory.
649 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
650 return mayLoad(Type) || mayStore(Type);
651 }
652
653 //===--------------------------------------------------------------------===//
654 // Flags that indicate whether an instruction can be modified by a method.
655 //===--------------------------------------------------------------------===//
656
657 /// Return true if this may be a 2- or 3-address
658 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
659 /// result if Y and Z are exchanged. If this flag is set, then the
660 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
661 /// instruction.
662 ///
663 /// Note that this flag may be set on instructions that are only commutable
664 /// sometimes. In these cases, the call to commuteInstruction will fail.
665 /// Also note that some instructions require non-trivial modification to
666 /// commute them.
667 bool isCommutable(QueryType Type = IgnoreBundle) const {
668 return hasProperty(MCID::Commutable, Type);
669 }
670
671 /// Return true if this is a 2-address instruction
672 /// which can be changed into a 3-address instruction if needed. Doing this
673 /// transformation can be profitable in the register allocator, because it
674 /// means that the instruction can use a 2-address form if possible, but
675 /// degrade into a less efficient form if the source and dest register cannot
676 /// be assigned to the same register. For example, this allows the x86
677 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
678 /// is the same speed as the shift but has bigger code size.
679 ///
680 /// If this returns true, then the target must implement the
681 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
682 /// is allowed to fail if the transformation isn't valid for this specific
683 /// instruction (e.g. shl reg, 4 on x86).
684 ///
685 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
686 return hasProperty(MCID::ConvertibleTo3Addr, Type);
687 }
688
689 /// Return true if this instruction requires
690 /// custom insertion support when the DAG scheduler is inserting it into a
691 /// machine basic block. If this is true for the instruction, it basically
692 /// means that it is a pseudo instruction used at SelectionDAG time that is
693 /// expanded out into magic code by the target when MachineInstrs are formed.
694 ///
695 /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
696 /// is used to insert this into the MachineBasicBlock.
697 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
698 return hasProperty(MCID::UsesCustomInserter, Type);
699 }
700
701 /// Return true if this instruction requires *adjustment*
702 /// after instruction selection by calling a target hook. For example, this
703 /// can be used to fill in ARM 's' optional operand depending on whether
704 /// the conditional flag register is used.
705 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
706 return hasProperty(MCID::HasPostISelHook, Type);
707 }
708
709 /// Returns true if this instruction is a candidate for remat.
710 /// This flag is deprecated; please don't use it anymore. If this
711 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
712 /// verify the instruction is really rematable.
713 bool isRematerializable(QueryType Type = AllInBundle) const {
714 // It's only possible to re-mat a bundle if all bundled instructions are
715 // re-materializable.
716 return hasProperty(MCID::Rematerializable, Type);
717 }
718
719 /// Returns true if this instruction has the same cost (or less) than a move
720 /// instruction. This is useful during certain types of optimizations
721 /// (e.g., remat during two-address conversion or machine licm)
722 /// where we would like to remat or hoist the instruction, but not if it costs
723 /// more than moving the instruction into the appropriate register. Note, we
724 /// are not marking copies from and to the same register class with this flag.
725 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
726 // Only returns true for a bundle if all bundled instructions are cheap.
727 return hasProperty(MCID::CheapAsAMove, Type);
728 }
729
730 /// Returns true if this instruction's source operands
731 /// have special register allocation requirements that are not captured by the
732 /// operand register classes. e.g. ARM::STRD's two source registers must be an
733 /// even / odd pair, ARM::STM registers have to be in ascending order.
734 /// Post-register allocation passes should not attempt to change allocations
735 /// for sources of instructions with this flag.
736 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
737 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
738 }
739
740 /// Returns true if this instruction's def operands
741 /// have special register allocation requirements that are not captured by the
742 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
743 /// even / odd pair, ARM::LDM registers have to be in ascending order.
744 /// Post-register allocation passes should not attempt to change allocations
745 /// for definitions of instructions with this flag.
746 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
747 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
748 }
749
750 enum MICheckType {
751 CheckDefs, // Check all operands for equality
752 CheckKillDead, // Check all operands including kill / dead markers
753 IgnoreDefs, // Ignore all definitions
754 IgnoreVRegDefs // Ignore virtual register definitions
755 };
756
757 /// Return true if this instruction is identical to \p Other.
758 /// Two instructions are identical if they have the same opcode and all their
759 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
760 /// Note that this means liveness related flags (dead, undef, kill) do not
761 /// affect the notion of identical.
762 bool isIdenticalTo(const MachineInstr &Other,
763 MICheckType Check = CheckDefs) const;
764
765 /// Unlink 'this' from the containing basic block, and return it without
766 /// deleting it.
767 ///
768 /// This function can not be used on bundled instructions, use
769 /// removeFromBundle() to remove individual instructions from a bundle.
770 MachineInstr *removeFromParent();
771
772 /// Unlink this instruction from its basic block and return it without
773 /// deleting it.
774 ///
775 /// If the instruction is part of a bundle, the other instructions in the
776 /// bundle remain bundled.
777 MachineInstr *removeFromBundle();
778
779 /// Unlink 'this' from the containing basic block and delete it.
780 ///
781 /// If this instruction is the header of a bundle, the whole bundle is erased.
782 /// This function can not be used for instructions inside a bundle, use
783 /// eraseFromBundle() to erase individual bundled instructions.
784 void eraseFromParent();
785
786 /// Unlink 'this' from the containing basic block and delete it.
787 ///
788 /// For all definitions mark their uses in DBG_VALUE nodes
789 /// as undefined. Otherwise like eraseFromParent().
790 void eraseFromParentAndMarkDBGValuesForRemoval();
791
792 /// Unlink 'this' from its basic block and delete it.
793 ///
794 /// If the instruction is part of a bundle, the other instructions in the
795 /// bundle remain bundled.
796 void eraseFromBundle();
797
798 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
799 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
800 bool isAnnotationLabel() const {
801 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
802 }
803
804 /// Returns true if the MachineInstr represents a label.
805 bool isLabel() const {
806 return isEHLabel() || isGCLabel() || isAnnotationLabel();
807 }
808
809 bool isCFIInstruction() const {
810 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
811 }
812
813 // True if the instruction represents a position in the function.
814 bool isPosition() const { return isLabel() || isCFIInstruction(); }
815
816 bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
817
818 /// A DBG_VALUE is indirect iff the first operand is a register and
819 /// the second operand is an immediate.
820 bool isIndirectDebugValue() const {
821 return isDebugValue()
822 && getOperand(0).isReg()
823 && getOperand(1).isImm();
824 }
825
826 bool isPHI() const {
827 return getOpcode() == TargetOpcode::PHI ||
828 getOpcode() == TargetOpcode::G_PHI;
829 }
830 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
831 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
832 bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
833
834 bool isMSInlineAsm() const {
835 return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
836 }
837
838 bool isStackAligningInlineAsm() const;
839 InlineAsm::AsmDialect getInlineAsmDialect() const;
840
841 bool isInsertSubreg() const {
842 return getOpcode() == TargetOpcode::INSERT_SUBREG;
843 }
844
845 bool isSubregToReg() const {
846 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
35
Calling 'MachineInstr::getOpcode'
36
Returning from 'MachineInstr::getOpcode'
37
Assuming the condition is false
847 }
848
849 bool isRegSequence() const {
850 return getOpcode() == TargetOpcode::REG_SEQUENCE;
851 }
852
853 bool isBundle() const {
854 return getOpcode() == TargetOpcode::BUNDLE;
855 }
856
857 bool isCopy() const {
858 return getOpcode() == TargetOpcode::COPY;
29
Calling 'MachineInstr::getOpcode'
30
Returning from 'MachineInstr::getOpcode'
31
Assuming the condition is false
859 }
860
861 bool isFullCopy() const {
862 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
863 }
864
865 bool isExtractSubreg() const {
866 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
867 }
868
869 /// Return true if the instruction behaves like a copy.
870 /// This does not include native copy instructions.
871 bool isCopyLike() const {
872 return isCopy() || isSubregToReg();
873 }
874
875 /// Return true if the instruction is an identity copy.
876 bool isIdentityCopy() const {
877 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
878 getOperand(0).getSubReg() == getOperand(1).getSubReg();
879 }
880
881 /// Return true if this instruction doesn't produce any output in the form of
882 /// executable instructions.
883 bool isMetaInstruction() const {
884 switch (getOpcode()) {
885 default:
886 return false;
887 case TargetOpcode::IMPLICIT_DEF:
888 case TargetOpcode::KILL:
889 case TargetOpcode::CFI_INSTRUCTION:
890 case TargetOpcode::EH_LABEL:
891 case TargetOpcode::GC_LABEL:
892 case TargetOpcode::DBG_VALUE:
893 return true;
894 }
895 }
896
897 /// Return true if this is a transient instruction that is either very likely
898 /// to be eliminated during register allocation (such as copy-like
899 /// instructions), or if this instruction doesn't have an execution-time cost.
900 bool isTransient() const {
901 switch (getOpcode()) {
902 default:
903 return isMetaInstruction();
904 // Copy-like instructions are usually eliminated during register allocation.
905 case TargetOpcode::PHI:
906 case TargetOpcode::G_PHI:
907 case TargetOpcode::COPY:
908 case TargetOpcode::INSERT_SUBREG:
909 case TargetOpcode::SUBREG_TO_REG:
910 case TargetOpcode::REG_SEQUENCE:
911 return true;
912 }
913 }
914
915 /// Return the number of instructions inside the MI bundle, excluding the
916 /// bundle header.
917 ///
918 /// This is the number of instructions that MachineBasicBlock::iterator
919 /// skips, 0 for unbundled instructions.
920 unsigned getBundleSize() const;
921
922 /// Return true if the MachineInstr reads the specified register.
923 /// If TargetRegisterInfo is passed, then it also checks if there
924 /// is a read of a super-register.
925 /// This does not count partial redefines of virtual registers as reads:
926 /// %reg1024:6 = OP.
927 bool readsRegister(unsigned Reg,
928 const TargetRegisterInfo *TRI = nullptr) const {
929 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
930 }
931
932 /// Return true if the MachineInstr reads the specified virtual register.
933 /// Take into account that a partial define is a
934 /// read-modify-write operation.
935 bool readsVirtualRegister(unsigned Reg) const {
936 return readsWritesVirtualRegister(Reg).first;
937 }
938
939 /// Return a pair of bools (reads, writes) indicating if this instruction
940 /// reads or writes Reg. This also considers partial defines.
941 /// If Ops is not null, all operand indices for Reg are added.
942 std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
943 SmallVectorImpl<unsigned> *Ops = nullptr) const;
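A usage sketch (MI and Reg assumed in scope); note that a partial subregister def reports as both a read and a write, since it is a read-modify-write of the full virtual register:

  SmallVector<unsigned, 4> OpIndices; // receives MI's operand indices for Reg
  bool Reads, Writes;
  std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg, &OpIndices);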
944
945 /// Return true if the MachineInstr kills the specified register.
946 /// If TargetRegisterInfo is passed, then it also checks if there is
947 /// a kill of a super-register.
948 bool killsRegister(unsigned Reg,
949 const TargetRegisterInfo *TRI = nullptr) const {
950 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
951 }
952
953 /// Return true if the MachineInstr fully defines the specified register.
954 /// If TargetRegisterInfo is passed, then it also checks
955 /// if there is a def of a super-register.
956 /// NOTE: This ignores subreg indices on virtual registers.
957 bool definesRegister(unsigned Reg,
958 const TargetRegisterInfo *TRI = nullptr) const {
959 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
960 }
961
962 /// Return true if the MachineInstr modifies (fully define or partially
963 /// define) the specified register.
964 /// NOTE: This ignores subreg indices on virtual registers.
965 bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
966 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
967 }
968
969 /// Returns true if the register is dead in this machine instruction.
970 /// If TargetRegisterInfo is passed, then it also checks
971 /// if there is a dead def of a super-register.
972 bool registerDefIsDead(unsigned Reg,
973 const TargetRegisterInfo *TRI = nullptr) const {
974 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
975 }
976
977 /// Returns true if the MachineInstr has an implicit-use operand of exactly
978 /// the given register (not considering sub/super-registers).
979 bool hasRegisterImplicitUseOperand(unsigned Reg) const;
980
981 /// Returns the operand index that is a use of the specified register or -1
982 /// if it is not found. It further tightens the search criteria to a use
983 /// that kills the register if isKill is true.
984 int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
985 const TargetRegisterInfo *TRI = nullptr) const;
986
987 /// Wrapper for findRegisterUseOperandIdx, it returns
988 /// a pointer to the MachineOperand rather than an index.
989 MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
990 const TargetRegisterInfo *TRI = nullptr) {
991 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
992 return (Idx == -1) ? nullptr : &getOperand(Idx);
993 }
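A common pattern (sketch; MI, Reg, and TRI assumed): find the killing use and clear its kill flag, e.g. when the live range of Reg is about to be extended past this instruction:

  if (MachineOperand *MO = MI.findRegisterUseOperand(Reg, /*isKill=*/true, TRI))
    MO->setIsKill(false); // Reg stays live beyond MI now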
994
995 const MachineOperand *findRegisterUseOperand(
996 unsigned Reg, bool isKill = false,
997 const TargetRegisterInfo *TRI = nullptr) const {
998 return const_cast<MachineInstr *>(this)->
999 findRegisterUseOperand(Reg, isKill, TRI);
1000 }
1001
1002 /// Returns the operand index that is a def of the specified register or
1003 /// -1 if it is not found. If isDead is true, defs that are not dead are
1004 /// skipped. If Overlap is true, then it also looks for defs that merely
1005 /// overlap the specified register. If TargetRegisterInfo is non-null,
1006 /// then it also checks if there is a def of a super-register.
1007 /// This may also return a register mask operand when Overlap is true.
1008 int findRegisterDefOperandIdx(unsigned Reg,
1009 bool isDead = false, bool Overlap = false,
1010 const TargetRegisterInfo *TRI = nullptr) const;
1011
1012 /// Wrapper for findRegisterDefOperandIdx, it returns
1013 /// a pointer to the MachineOperand rather than an index.
1014 MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
1015 const TargetRegisterInfo *TRI = nullptr) {
1016 int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
1017 return (Idx == -1) ? nullptr : &getOperand(Idx);
1018 }
1019
1020 /// Find the index of the first operand in the
1021 /// operand list that is used to represent the predicate. It returns -1 if
1022 /// none is found.
1023 int findFirstPredOperandIdx() const;
1024
1025 /// Find the index of the flag word operand that
1026 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1027 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1028 ///
1029 /// If GroupNo is not NULL, it will receive the number of the operand group
1030 /// containing OpIdx.
1031 ///
1032 /// The flag operand is an immediate that can be decoded with methods like
1033 /// InlineAsm::hasRegClassConstraint().
1034 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
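A decoding sketch (MI, OpIdx, and TRI assumed): map an inline asm operand back to its flag word, then recover the register-class constraint if one is encoded:

  const TargetRegisterClass *RC = nullptr;
  unsigned RCID = 0;
  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx >= 0 &&
      InlineAsm::hasRegClassConstraint(
          unsigned(MI.getOperand(FlagIdx).getImm()), RCID))
    RC = TRI->getRegClass(RCID); // constraint class for the operand group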
1035
1036 /// Compute the static register class constraint for operand OpIdx.
1037 /// For normal instructions, this is derived from the MCInstrDesc.
1038 /// For inline assembly it is derived from the flag words.
1039 ///
1040 /// Returns NULL if the static register class constraint cannot be
1041 /// determined.
1042 const TargetRegisterClass*
1043 getRegClassConstraint(unsigned OpIdx,
1044 const TargetInstrInfo *TII,
1045 const TargetRegisterInfo *TRI) const;
1046
1047 /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
1048 /// the given \p CurRC.
1049 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1050 /// instructions inside the bundle will be taken into account. In other words,
1051 /// this method accumulates all the constraints of the operands of this MI and
1052 /// the related bundle if MI is a bundle or inside a bundle.
1053 ///
1054 /// Returns the register class that satisfies both \p CurRC and the
1055 /// constraints set by MI. Returns NULL if such a register class does not
1056 /// exist.
1057 ///
1058 /// \pre CurRC must not be NULL.
1059 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1060 unsigned Reg, const TargetRegisterClass *CurRC,
1061 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1062 bool ExploreBundle = false) const;
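A typical accumulation loop (sketch; Reg, MRI, TII, and TRI assumed): intersect the constraints from every instruction touching a virtual register, giving up once no common class remains:

  const TargetRegisterClass *CurRC = MRI.getRegClass(Reg);
  for (MachineInstr &UseMI : MRI.reg_nodbg_instructions(Reg)) {
    CurRC = UseMI.getRegClassConstraintEffectForVReg(Reg, CurRC, TII, TRI);
    if (!CurRC)
      break; // no register class satisfies all accumulated constraints
  }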
1063
1064 /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
1065 /// to the given \p CurRC.
1066 ///
1067 /// Returns the register class that satisfies both \p CurRC and the
1068 /// constraints set by the operand at \p OpIdx. Returns NULL if such a register
1069 /// class does not exist.
1070 ///
1071 /// \pre CurRC must not be NULL.
1072 /// \pre The operand at \p OpIdx must be a register.
1073 const TargetRegisterClass *
1074 getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
1075 const TargetInstrInfo *TII,
1076 const TargetRegisterInfo *TRI) const;
1077
1078 /// Add a tie between the register operands at DefIdx and UseIdx.
1079 /// The tie will cause the register allocator to ensure that the two
1080 /// operands are assigned the same physical register.
1081 ///
1082 /// Tied operands are managed automatically for explicit operands in the
1083 /// MCInstrDesc. This method is for exceptional cases like inline asm.
1084 void tieOperands(unsigned DefIdx, unsigned UseIdx);
1085
1086 /// Given the index of a tied register operand, find the
1087 /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
1088 /// index of the tied operand which must exist.
1089 unsigned findTiedOperandIdx(unsigned OpIdx) const;
1090
1091 /// Given the index of a register def operand,
1092 /// check if the register def is tied to a source operand, due to either
1093 /// two-address elimination or inline assembly constraints. Returns the
1094 /// first tied use operand index by reference if UseOpIdx is not null.
1095 bool isRegTiedToUseOperand(unsigned DefOpIdx,
1096 unsigned *UseOpIdx = nullptr) const {
1097 const MachineOperand &MO = getOperand(DefOpIdx);
1098 if (!MO.isReg() || !MO.isDef() || !MO.isTied())
1099 return false;
1100 if (UseOpIdx)
1101 *UseOpIdx = findTiedOperandIdx(DefOpIdx);
1102 return true;
1103 }
1104
1105 /// Return true if the use operand of the specified index is tied to a def
1106 /// operand. It also returns the def operand index by reference if DefOpIdx
1107 /// is not null.
1108 bool isRegTiedToDefOperand(unsigned UseOpIdx,
1109 unsigned *DefOpIdx = nullptr) const {
1110 const MachineOperand &MO = getOperand(UseOpIdx);
1111 if (!MO.isReg() || !MO.isUse() || !MO.isTied())
1112 return false;
1113 if (DefOpIdx)
1114 *DefOpIdx = findTiedOperandIdx(UseOpIdx);
1115 return true;
1116 }
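A two-address sketch: given a def at operand index 0, recover the use it must share a register with:

  unsigned UseIdx;
  if (MI.isRegTiedToUseOperand(0, &UseIdx))
    // The allocator must assign operand 0 and operand UseIdx the same
    // physical register.
    assert(MI.getOperand(UseIdx).isUse() && "tied operand must be a use");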
1117
1118 /// Clears kill flags on all operands.
1119 void clearKillInfo();
1120
1121 /// Replace all occurrences of FromReg with ToReg:SubIdx,
1122 /// properly composing subreg indices where necessary.
1123 void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
1124 const TargetRegisterInfo &RegInfo);
1125
1126 /// We have determined MI kills a register. Look for the
1127 /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
1128 /// add an implicit operand if it's not found. Returns true if the operand
1129 /// exists / is added.
1130 bool addRegisterKilled(unsigned IncomingReg,
1131 const TargetRegisterInfo *RegInfo,
1132 bool AddIfNotFound = false);
1133
1134 /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
1135 /// all aliasing registers.
1136 void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
1137
1138 /// We have determined MI defined a register without a use.
1139 /// Look for the operand that defines it and mark it as IsDead. If
1140 /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
1141 /// true if the operand exists / is added.
1142 bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
1143 bool AddIfNotFound = false);
1144
1145 /// Clear all dead flags on operands defining register @p Reg.
1146 void clearRegisterDeads(unsigned Reg);
1147
1148 /// Mark all subregister defs of register @p Reg with the undef flag.
1149 /// This function is used when we have determined that there is a subregister
1150 /// def in an otherwise undefined super register.
1151 void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
1152
1153 /// We have determined MI defines a register. Make sure there is an operand
1154 /// defining Reg.
1155 void addRegisterDefined(unsigned Reg,
1156 const TargetRegisterInfo *RegInfo = nullptr);
1157
1158 /// Mark every physreg used by this instruction as
1159 /// dead except those in the UsedRegs list.
1160 ///
1161 /// On instructions with register mask operands, also add implicit-def
1162 /// operands for all registers in UsedRegs.
1163 void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1164 const TargetRegisterInfo &TRI);
1165
1166 /// Return true if it is safe to move this instruction. If
1167 /// SawStore is set to true, it means that there is a store (or call) between
1168 /// the instruction's location and its intended destination.
1169 bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
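A calling sketch (names assumed): SawStore is an in/out flag, so a caller scanning several instructions threads one flag through all the queries:

  bool SawStore = false;
  for (MachineInstr &MI : make_range(Begin, End)) {
    if (!MI.isSafeToMove(AA, SawStore))
      break; // a store/call was seen earlier, or MI itself is unsafe to move
    // MI remains a candidate for motion across this range.
  }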
1170
1171 /// Returns true if this instruction's memory access aliases the memory
1172 /// access of Other.
1173 ///
1174 /// Assumes any physical registers used to compute addresses
1175 /// have the same value for both instructions. Returns false if neither
1176 /// instruction writes to memory.
1177 ///
1178 /// @param AA Optional alias analysis, used to compare memory operands.
1179 /// @param Other MachineInstr to check aliasing against.
1180 /// @param UseTBAA Whether to pass TBAA information to alias analysis.
1181 bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
1182
1183 /// Return true if this instruction may have an ordered
1184 /// or volatile memory reference, or if the information describing the memory
1185 /// reference is not available. Return false if it is known to have no
1186 /// ordered or volatile memory references.
1187 bool hasOrderedMemoryRef() const;
1188
1189 /// Return true if this load instruction never traps and points to a memory
1190 /// location whose value doesn't change during the execution of this function.
1191 ///
1192 /// Examples include loading a value from the constant pool or from the
1193 /// argument area of a function (if it does not change). If the instruction
1194 /// does multiple loads, this returns true only if all of the loads are
1195 /// dereferenceable and invariant.
1196 bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
1197
1198 /// If the specified instruction is a PHI that always merges together the
1199 /// same virtual register, return the register, otherwise return 0.
1200 unsigned isConstantValuePHI() const;
1201
1202 /// Return true if this instruction has side effects that are not modeled
1203 /// by mayLoad / mayStore, etc.
1204 /// For all instructions, the property is encoded in MCInstrDesc::Flags
1205 /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
1206 /// INLINEASM instruction, in which case the side effect property is encoded
1207 /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
1208 ///
1209 bool hasUnmodeledSideEffects() const;
1210
1211 /// Returns true if it is illegal to fold a load across this instruction.
1212 bool isLoadFoldBarrier() const;
1213
1214 /// Return true if all the defs of this instruction are dead.
1215 bool allDefsAreDead() const;
1216
1217 /// Copy implicit register operands from the specified
1218 /// instruction to this instruction.
1219 void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
1220
1221 /// Debugging support
1222 /// @{
1223 /// Print this MI to \p OS.
1224 /// If \p SkipOpers is true, print only the defs and the opcode.
1225 /// Otherwise, also print the operands. Unless \p SkipDebugLoc is true,
1226 /// the debug loc is printed as well, with a terminating newline.
1227 /// \p TII is used to print the opcode name. If it's not present, but the
1228 /// MI is in a function, the opcode will be printed using the function's TII.
1229 void print(raw_ostream &OS, bool SkipOpers = false, bool SkipDebugLoc = false,
1230 const TargetInstrInfo *TII = nullptr) const;
1231 void print(raw_ostream &OS, ModuleSlotTracker &MST, bool SkipOpers = false,
1232 bool SkipDebugLoc = false,
1233 const TargetInstrInfo *TII = nullptr) const;
1234 void dump() const;
1235 /// @}
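As a usage sketch, the stream operator defined at the bottom of this header lets an instruction be printed straight into a pass's debug output (the DEBUG/dbgs() machinery comes from llvm/Support/Debug.h):

  DEBUG(dbgs() << "examining: " << MI); // equivalent to MI.print(dbgs())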
1236
1237 //===--------------------------------------------------------------------===//
1238 // Accessors used to build up machine instructions.
1239
1240 /// Add the specified operand to the instruction. If it is an implicit
1241 /// operand, it is added to the end of the operand list. If it is an
1242 /// explicit operand it is added at the end of the explicit operand list
1243 /// (before the first implicit operand).
1244 ///
1245 /// MF must be the machine function that was used to allocate this
1246 /// instruction.
1247 ///
1248 /// MachineInstrBuilder provides a more convenient interface for creating
1249 /// instructions and adding operands.
1250 void addOperand(MachineFunction &MF, const MachineOperand &Op);
1251
1252 /// Add an operand without providing an MF reference. This only works for
1253 /// instructions that are inserted in a basic block.
1254 ///
1255 /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
1256 /// preferred.
1257 void addOperand(const MachineOperand &Op);
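As noted above, most clients build instructions through MachineInstrBuilder rather than raw addOperand calls; a sketch creating a full COPY (MBB, InsertPt, DL, TII, and the registers are assumed):

  BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg); // each add* call forwards to addOperand internally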
1258
1259 /// Replace the instruction descriptor (thus opcode) of
1260 /// the current instruction with a new one.
1261 void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
1262
1263 /// Replace the current debug location with a new one.
1264 /// Avoid using this; passing the location via the constructor is preferable.
1265 void setDebugLoc(DebugLoc dl) {
1266 debugLoc = std::move(dl);
1267 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1268 }
1269
1270 /// Erase an operand from an instruction, leaving it with one
1271 /// fewer operand than it started with.
1272 void RemoveOperand(unsigned i);
1273
1274 /// Add a MachineMemOperand to the machine instruction.
1275 /// This function should be used only occasionally. The setMemRefs function
1276 /// is the primary method for setting up a MachineInstr's MemRefs list.
1277 void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
1278
1279 /// Assign this MachineInstr's memory reference descriptor list.
1280 /// This does not transfer ownership.
1281 void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
1282 setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
1283 }
1284
1285 /// Assign this MachineInstr's memory reference descriptor list. First
1286 /// element in the pair is the begin iterator/pointer to the array; the
1287 /// second is the number of MemoryOperands. This does not transfer ownership
1288 /// of the underlying memory.
1289 void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
1290 MemRefs = NewMemRefs.first;
1291 NumMemRefs = uint8_t(NewMemRefs.second);
1292 assert(NumMemRefs == NewMemRefs.second &&
1293 "Too many memrefs - must drop memory operands");
1294 }
1295
1296 /// Return a set of memrefs (begin iterator, size) which conservatively
1297 /// describe the memory behavior of both MachineInstrs. This is appropriate
1298 /// for use when merging two MachineInstrs into one. This routine does not
1299 /// modify the memrefs of this MachineInstr.
1300 std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
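A merging sketch: when folding a hypothetical OtherMI into MI, install the conservative union so later alias queries remain sound:

  MI.setMemRefs(MI.mergeMemRefsWith(OtherMI));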
1301
1302 /// Clear this MachineInstr's memory reference descriptor list. This resets
1303 /// the memrefs to their most conservative state. This should be used only
1304 /// as a last resort since it greatly pessimizes our knowledge of the memory
1305 /// access performed by the instruction.
1306 void dropMemRefs() {
1307 MemRefs = nullptr;
1308 NumMemRefs = 0;
1309 }
1310
1311 /// Break any tie involving OpIdx.
1312 void untieRegOperand(unsigned OpIdx) {
1313 MachineOperand &MO = getOperand(OpIdx);
1314 if (MO.isReg() && MO.isTied()) {
1315 getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
1316 MO.TiedTo = 0;
1317 }
1318 }
1319
1320 /// Add all implicit def and use operands to this instruction.
1321 void addImplicitDefUseOperands(MachineFunction &MF);
1322
1323private:
1324 /// If this instruction is embedded into a MachineFunction, return the
1325 /// MachineRegisterInfo object for the current function, otherwise
1326 /// return null.
1327 MachineRegisterInfo *getRegInfo();
1328
1329 /// Unlink all of the register operands in this instruction from their
1330 /// respective use lists. This requires that the operands already be on their
1331 /// use lists.
1332 void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
1333
1334 /// Add all of the register operands in this instruction to their
1335 /// respective use lists. This requires that the operands not be on their
1336 /// use lists yet.
1337 void AddRegOperandsToUseLists(MachineRegisterInfo&);
1338
1339 /// Slow path for hasProperty when we're dealing with a bundle.
1340 bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
1341
1342 /// \brief Implements the logic of getRegClassConstraintEffectForVReg for
1343 /// this MI and the given operand index \p OpIdx.
1344 /// If the related operand does not constrain Reg, this returns CurRC.
1345 const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
1346 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
1347 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
1348};
1349
1350/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
1351/// instruction rather than by pointer value.
1352/// The hashing and equality testing functions ignore definitions so this is
1353/// useful for CSE, etc.
1354struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
1355 static inline MachineInstr *getEmptyKey() {
1356 return nullptr;
1357 }
1358
1359 static inline MachineInstr *getTombstoneKey() {
1360 return reinterpret_cast<MachineInstr*>(-1);
1361 }
1362
1363 static unsigned getHashValue(const MachineInstr* const &MI);
1364
1365 static bool isEqual(const MachineInstr* const &LHS,
1366 const MachineInstr* const &RHS) {
1367 if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
1368 LHS == getEmptyKey() || LHS == getTombstoneKey())
1369 return LHS == RHS;
1370 return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
1371 }
1372};
1373
1374//===----------------------------------------------------------------------===//
1375// Debugging Support
1376
1377inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
1378 MI.print(OS);
1379 return OS;
1380}
1381
1382} // end namespace llvm
1383
1384#endif // LLVM_CODEGEN_MACHINEINSTR_H