Bug Summary

File: lib/CodeGen/MachineInstr.cpp
Warning: line 239, column 19
Access to field 'ParentMI' results in a dereference of a null pointer (loaded from variable 'NewMO')
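The path below (steps 6-14) reduces to one pattern: the analyzer appears to treat the operand array returned by MF.allocateOperandArray() as possibly null, so the placement new at Operands + OpNo (with OpNo assumed to be 0) is handed null storage, the resulting NewMO is modelled as null, and the write to NewMO->ParentMI on line 239 is flagged as a null dereference. The following minimal, self-contained sketch reproduces that pattern; the names Operand, allocateArray, and addOperand are hypothetical stand-ins, not the LLVM API.

#include <new>

struct Operand {
  void *ParentMI = nullptr;
};

// Stand-in for MF.allocateOperandArray(): a possibly-null result is exactly
// the assumption that drives the analyzer's warning.
Operand *allocateArray(unsigned N) {
  return static_cast<Operand *>(::operator new(N * sizeof(Operand), std::nothrow));
}

void addOperand(Operand *&Ops, unsigned OpNo, const Operand &Src) {
  if (!Ops)
    Ops = allocateArray(1); // may be null in the analyzer's model
  // With Ops == nullptr and OpNo == 0, placement new is given null storage,
  // so the analyzer concludes the constructed pointer is itself null.
  Operand *NewMO = new (Ops + OpNo) Operand(Src);
  NewMO->ParentMI = nullptr; // <-- the kind of dereference reported at line 239
}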

Annotated Source Code


/build/llvm-toolchain-snapshot-6.0~svn321639/lib/CodeGen/MachineInstr.cpp

1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Methods common to all machine instructions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/MachineInstr.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/FoldingSet.h"
18#include "llvm/ADT/Hashing.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallBitVector.h"
22#include "llvm/ADT/SmallString.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Analysis/Loads.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
28#include "llvm/CodeGen/MachineBasicBlock.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineInstrBundle.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/MachineModuleInfo.h"
34#include "llvm/CodeGen/MachineOperand.h"
35#include "llvm/CodeGen/MachineRegisterInfo.h"
36#include "llvm/CodeGen/PseudoSourceValue.h"
37#include "llvm/CodeGen/TargetInstrInfo.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DebugInfoMetadata.h"
42#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/InlineAsm.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/LLVMContext.h"
49#include "llvm/IR/Metadata.h"
50#include "llvm/IR/Module.h"
51#include "llvm/IR/ModuleSlotTracker.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/MC/MCInstrDesc.h"
55#include "llvm/MC/MCRegisterInfo.h"
56#include "llvm/MC/MCSymbol.h"
57#include "llvm/Support/Casting.h"
58#include "llvm/Support/CommandLine.h"
59#include "llvm/Support/Compiler.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/ErrorHandling.h"
62#include "llvm/Support/LowLevelTypeImpl.h"
63#include "llvm/Support/MathExtras.h"
64#include "llvm/Support/raw_ostream.h"
65#include "llvm/Target/TargetIntrinsicInfo.h"
66#include "llvm/Target/TargetMachine.h"
67#include <algorithm>
68#include <cassert>
69#include <cstddef>
70#include <cstdint>
71#include <cstring>
72#include <iterator>
73#include <utility>
74
75using namespace llvm;
76
77void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
78 if (MCID->ImplicitDefs)
79 for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
80 ++ImpDefs)
81 addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
82 if (MCID->ImplicitUses)
83 for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
84 ++ImpUses)
85 addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
86}
87
88/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
89/// implicit operands. It reserves space for the number of operands specified by
90/// the MCInstrDesc.
91MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
92 DebugLoc dl, bool NoImp)
93 : MCID(&tid), debugLoc(std::move(dl)) {
94 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
95
96 // Reserve space for the expected number of operands.
97 if (unsigned NumOps = MCID->getNumOperands() +
98 MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
99 CapOperands = OperandCapacity::get(NumOps);
100 Operands = MF.allocateOperandArray(CapOperands);
101 }
102
103 if (!NoImp)
104 addImplicitDefUseOperands(MF);
105}
106
107/// MachineInstr ctor - Copies MachineInstr arg exactly
108///
109MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
110 : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
111 debugLoc(MI.getDebugLoc()) {
112 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
113
114 CapOperands = OperandCapacity::get(MI.getNumOperands());
115 Operands = MF.allocateOperandArray(CapOperands);
116
117 // Copy operands.
118 for (const MachineOperand &MO : MI.operands())
119 addOperand(MF, MO);
120
121 // Copy all the sensible flags.
122 setFlags(MI.Flags);
123}
124
125/// getRegInfo - If this instruction is embedded into a MachineFunction,
126/// return the MachineRegisterInfo object for the current function, otherwise
127/// return null.
128MachineRegisterInfo *MachineInstr::getRegInfo() {
129 if (MachineBasicBlock *MBB = getParent())
130 return &MBB->getParent()->getRegInfo();
131 return nullptr;
132}
133
134/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
135/// this instruction from their respective use lists. This requires that the
136/// operands already be on their use lists.
137void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
138 for (MachineOperand &MO : operands())
139 if (MO.isReg())
140 MRI.removeRegOperandFromUseList(&MO);
141}
142
143/// AddRegOperandsToUseLists - Add all of the register operands in
144/// this instruction to their respective use lists. This requires that the
145/// operands not be on their use lists yet.
146void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
147 for (MachineOperand &MO : operands())
148 if (MO.isReg())
149 MRI.addRegOperandToUseList(&MO);
150}
151
152void MachineInstr::addOperand(const MachineOperand &Op) {
153 MachineBasicBlock *MBB = getParent();
154 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
155 MachineFunction *MF = MBB->getParent();
156 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
157 addOperand(*MF, Op);
158}
159
160/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
161/// ranges. If MRI is non-null also update use-def chains.
162static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
163 unsigned NumOps, MachineRegisterInfo *MRI) {
164 if (MRI)
165 return MRI->moveOperands(Dst, Src, NumOps);
166
167 // MachineOperand is a trivially copyable type so we can just use memmove.
168 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
169}
170
171/// addOperand - Add the specified operand to the instruction. If it is an
172/// implicit operand, it is added to the end of the operand list. If it is
173/// an explicit operand it is added at the end of the explicit operand list
174/// (before the first implicit operand).
175void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
176 assert(MCID && "Cannot add operands before providing an instr descriptor");
177
178 // Check if we're adding one of our existing operands.
179 if (&Op >= Operands && &Op < Operands + NumOperands) {
180 // This is unusual: MI->addOperand(MI->getOperand(i)).
181 // If adding Op requires reallocating or moving existing operands around,
182 // the Op reference could go stale. Support it by copying Op.
183 MachineOperand CopyOp(Op);
184 return addOperand(MF, CopyOp);
185 }
186
187 // Find the insert location for the new operand. Implicit registers go at
188 // the end, everything else goes before the implicit regs.
189 //
190 // FIXME: Allow mixed explicit and implicit operands on inline asm.
191 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
192 // implicit-defs, but they must not be moved around. See the FIXME in
193 // InstrEmitter.cpp.
194 unsigned OpNo = getNumOperands();
195 bool isImpReg = Op.isReg() && Op.isImplicit();
196 if (!isImpReg && !isInlineAsm()) {
Step 6: Taking true branch
197 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
Step 7: Assuming 'OpNo' is 0
198 --OpNo;
199 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
200 }
201 }
202
203#ifndef NDEBUG
204 bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
205 // OpNo now points to the desired insertion point. Unless this is a variadic
206 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
207 // RegMask operands go between the explicit and implicit operands.
208 assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
209 OpNo < MCID->getNumOperands() || isMetaDataOp) &&
210 "Trying to add an operand to a machine instr that is already done!");
211#endif
212
213 MachineRegisterInfo *MRI = getRegInfo();
214
215 // Determine if the Operands array needs to be reallocated.
216 // Save the old capacity and operand array.
217 OperandCapacity OldCap = CapOperands;
218 MachineOperand *OldOperands = Operands;
219 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
Step 8: Assuming 'OldOperands' is null
220 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
Step 9: '?' condition is false
221 Operands = MF.allocateOperandArray(CapOperands);
222 // Move the operands before the insertion point.
223 if (OpNo)
Step 10: Taking false branch
224 moveOperands(Operands, OldOperands, OpNo, MRI);
225 }
226
227 // Move the operands following the insertion point.
228 if (OpNo != NumOperands)
Step 11: Taking false branch
229 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
230 MRI);
231 ++NumOperands;
232
233 // Deallocate the old operand array.
234 if (OldOperands != Operands && OldOperands)
Step 12: Assuming the condition is false
235 MF.deallocateOperandArray(OldCap, OldOperands);
236
237 // Copy Op into place. It still needs to be inserted into the MRI use lists.
238 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
Step 13: 'NewMO' initialized to a null pointer value
239 NewMO->ParentMI = this;
Step 14: Access to field 'ParentMI' results in a dereference of a null pointer (loaded from variable 'NewMO')
240
241 // When adding a register operand, tell MRI about it.
242 if (NewMO->isReg()) {
243 // Ensure isOnRegUseList() returns false, regardless of Op's status.
244 NewMO->Contents.Reg.Prev = nullptr;
245 // Ignore existing ties. This is not a property that can be copied.
246 NewMO->TiedTo = 0;
247 // Add the new operand to MRI, but only for instructions in an MBB.
248 if (MRI)
249 MRI->addRegOperandToUseList(NewMO);
250 // The MCID operand information isn't accurate until we start adding
251 // explicit operands. The implicit operands are added first, then the
252 // explicits are inserted before them.
253 if (!isImpReg) {
254 // Tie uses to defs as indicated in MCInstrDesc.
255 if (NewMO->isUse()) {
256 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
257 if (DefIdx != -1)
258 tieOperands(DefIdx, OpNo);
259 }
260 // If the register operand is flagged as early, mark the operand as such.
261 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
262 NewMO->setIsEarlyClobber(true);
263 }
264 }
265}
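The dereference reported at line 239 hinges on whether Operands can still be null after the reallocation branch above; the report itself does not establish that MF.allocateOperandArray() ever returns null, so this may well be a false positive. If the non-null invariant is intended, one way to make it explicit to the analyzer is an assertion just before the placement new (a sketch only, not necessarily the appropriate fix):

  // Hypothetical guard, not present in the LLVM source: documents the
  // assumption that the operand array has been allocated by this point.
  assert(Operands && "operand array must be allocated before constructing into it");
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;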
266
267/// RemoveOperand - Erase an operand from an instruction, leaving it with one
268/// fewer operand than it started with.
269///
270void MachineInstr::RemoveOperand(unsigned OpNo) {
271 assert(OpNo < getNumOperands() && "Invalid operand number");
272 untieRegOperand(OpNo);
273
274#ifndef NDEBUG
275 // Moving tied operands would break the ties.
276 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
277 if (Operands[i].isReg())
278 assert(!Operands[i].isTied() && "Cannot move tied operands");
279#endif
280
281 MachineRegisterInfo *MRI = getRegInfo();
282 if (MRI && Operands[OpNo].isReg())
283 MRI->removeRegOperandFromUseList(Operands + OpNo);
284
285 // Don't call the MachineOperand destructor. A lot of this code depends on
286 // MachineOperand having a trivial destructor anyway, and adding a call here
287 // wouldn't make it 'destructor-correct'.
288
289 if (unsigned N = NumOperands - 1 - OpNo)
290 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
291 --NumOperands;
292}
293
294/// addMemOperand - Add a MachineMemOperand to the machine instruction.
295/// This function should be used only occasionally. The setMemRefs function
296/// is the primary method for setting up a MachineInstr's MemRefs list.
297void MachineInstr::addMemOperand(MachineFunction &MF,
298 MachineMemOperand *MO) {
299 mmo_iterator OldMemRefs = MemRefs;
300 unsigned OldNumMemRefs = NumMemRefs;
301
302 unsigned NewNum = NumMemRefs + 1;
303 mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
304
305 std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
306 NewMemRefs[NewNum - 1] = MO;
307 setMemRefs(NewMemRefs, NewMemRefs + NewNum);
308}
309
310/// Check to see if the MMOs pointed to by the two MemRefs arrays are
311/// identical.
312static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
313 auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
314 auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
315 if ((E1 - I1) != (E2 - I2))
316 return false;
317 for (; I1 != E1; ++I1, ++I2) {
318 if (**I1 != **I2)
319 return false;
320 }
321 return true;
322}
323
324std::pair<MachineInstr::mmo_iterator, unsigned>
325MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {
326
327 // If either of the incoming memrefs are empty, we must be conservative and
328 // treat this as if we've exhausted our space for memrefs and dropped them.
329 if (memoperands_empty() || Other.memoperands_empty())
330 return std::make_pair(nullptr, 0);
331
332 // If both instructions have identical memrefs, we don't need to merge them.
333 // Since many instructions have a single memref, and we tend to merge things
334 // like pairs of loads from the same location, this catches a large number of
335 // cases in practice.
336 if (hasIdenticalMMOs(*this, Other))
337 return std::make_pair(MemRefs, NumMemRefs);
338
339 // TODO: consider uniquing elements within the operand lists to reduce
340 // space usage and fall back to conservative information less often.
341 size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;
342
343 // If we don't have enough room to store this many memrefs, be conservative
344 // and drop them. Otherwise, we'd fail asserts when trying to add them to
345 // the new instruction.
346 if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
347 return std::make_pair(nullptr, 0);
348
349 MachineFunction *MF = getMF();
350 mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
351 mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
352 MemBegin);
353 MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
354 MemEnd);
355 assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
356 "missing memrefs");
357
358 return std::make_pair(MemBegin, CombinedNumMemRefs);
359}
360
361bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
362 assert(!isBundledWithPred() && "Must be called on bundle header");
363 for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
364 if (MII->getDesc().getFlags() & Mask) {
365 if (Type == AnyInBundle)
366 return true;
367 } else {
368 if (Type == AllInBundle && !MII->isBundle())
369 return false;
370 }
371 // This was the last instruction in the bundle.
372 if (!MII->isBundledWithSucc())
373 return Type == AllInBundle;
374 }
375}
376
377bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
378 MICheckType Check) const {
379 // If opcodes or number of operands are not the same then the two
380 // instructions are obviously not identical.
381 if (Other.getOpcode() != getOpcode() ||
382 Other.getNumOperands() != getNumOperands())
383 return false;
384
385 if (isBundle()) {
386 // We have passed the test above that both instructions have the same
387 // opcode, so we know that both instructions are bundles here. Let's compare
388 // MIs inside the bundle.
389 assert(Other.isBundle() && "Expected that both instructions are bundles.");
390 MachineBasicBlock::const_instr_iterator I1 = getIterator();
391 MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
392 // Loop until we analysed the last instruction inside at least one of the
393 // bundles.
394 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
395 ++I1;
396 ++I2;
397 if (!I1->isIdenticalTo(*I2, Check))
398 return false;
399 }
400 // If we've reached the end of just one of the two bundles, but not both,
401 // the instructions are not identical.
402 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
403 return false;
404 }
405
406 // Check operands to make sure they match.
407 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
408 const MachineOperand &MO = getOperand(i);
409 const MachineOperand &OMO = Other.getOperand(i);
410 if (!MO.isReg()) {
411 if (!MO.isIdenticalTo(OMO))
412 return false;
413 continue;
414 }
415
416 // Clients may or may not want to ignore defs when testing for equality.
417 // For example, machine CSE pass only cares about finding common
418 // subexpressions, so it's safe to ignore virtual register defs.
419 if (MO.isDef()) {
420 if (Check == IgnoreDefs)
421 continue;
422 else if (Check == IgnoreVRegDefs) {
423 if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
424 !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
425 if (!MO.isIdenticalTo(OMO))
426 return false;
427 } else {
428 if (!MO.isIdenticalTo(OMO))
429 return false;
430 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
431 return false;
432 }
433 } else {
434 if (!MO.isIdenticalTo(OMO))
435 return false;
436 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
437 return false;
438 }
439 }
440 // If DebugLoc does not match then two dbg.values are not identical.
441 if (isDebugValue())
442 if (getDebugLoc() && Other.getDebugLoc() &&
443 getDebugLoc() != Other.getDebugLoc())
444 return false;
445 return true;
446}
447
448const MachineFunction *MachineInstr::getMF() const {
449 return getParent()->getParent();
450}
451
452MachineInstr *MachineInstr::removeFromParent() {
453 assert(getParent() && "Not embedded in a basic block!");
454 return getParent()->remove(this);
455}
456
457MachineInstr *MachineInstr::removeFromBundle() {
458 assert(getParent() && "Not embedded in a basic block!");
459 return getParent()->remove_instr(this);
460}
461
462void MachineInstr::eraseFromParent() {
463 assert(getParent() && "Not embedded in a basic block!");
464 getParent()->erase(this);
465}
466
467void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
468 assert(getParent() && "Not embedded in a basic block!");
469 MachineBasicBlock *MBB = getParent();
470 MachineFunction *MF = MBB->getParent();
471 assert(MF && "Not embedded in a function!");
472
473 MachineInstr *MI = (MachineInstr *)this;
474 MachineRegisterInfo &MRI = MF->getRegInfo();
475
476 for (const MachineOperand &MO : MI->operands()) {
477 if (!MO.isReg() || !MO.isDef())
478 continue;
479 unsigned Reg = MO.getReg();
480 if (!TargetRegisterInfo::isVirtualRegister(Reg))
481 continue;
482 MRI.markUsesInDebugValueAsUndef(Reg);
483 }
484 MI->eraseFromParent();
485}
486
487void MachineInstr::eraseFromBundle() {
488 assert(getParent() && "Not embedded in a basic block!");
489 getParent()->erase_instr(this);
490}
491
492/// getNumExplicitOperands - Returns the number of non-implicit operands.
493///
494unsigned MachineInstr::getNumExplicitOperands() const {
495 unsigned NumOperands = MCID->getNumOperands();
496 if (!MCID->isVariadic())
497 return NumOperands;
498
499 for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
500 const MachineOperand &MO = getOperand(i);
501 if (!MO.isReg() || !MO.isImplicit())
502 NumOperands++;
503 }
504 return NumOperands;
505}
506
507void MachineInstr::bundleWithPred() {
508 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
509 setFlag(BundledPred);
510 MachineBasicBlock::instr_iterator Pred = getIterator();
511 --Pred;
512 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
513 Pred->setFlag(BundledSucc);
514}
515
516void MachineInstr::bundleWithSucc() {
517 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
518 setFlag(BundledSucc);
519 MachineBasicBlock::instr_iterator Succ = getIterator();
520 ++Succ;
521 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
522 Succ->setFlag(BundledPred);
523}
524
525void MachineInstr::unbundleFromPred() {
526 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
527 clearFlag(BundledPred);
528 MachineBasicBlock::instr_iterator Pred = getIterator();
529 --Pred;
530 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
531 Pred->clearFlag(BundledSucc);
532}
533
534void MachineInstr::unbundleFromSucc() {
535 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
536 clearFlag(BundledSucc);
537 MachineBasicBlock::instr_iterator Succ = getIterator();
538 ++Succ;
539 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
540 Succ->clearFlag(BundledPred);
541}
542
543bool MachineInstr::isStackAligningInlineAsm() const {
544 if (isInlineAsm()) {
545 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
546 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
547 return true;
548 }
549 return false;
550}
551
552InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
553 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
554 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
555 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
556}
557
558int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
559 unsigned *GroupNo) const {
560 assert(isInlineAsm() && "Expected an inline asm instruction");
561 assert(OpIdx < getNumOperands() && "OpIdx out of range");
562
563 // Ignore queries about the initial operands.
564 if (OpIdx < InlineAsm::MIOp_FirstOperand)
565 return -1;
566
567 unsigned Group = 0;
568 unsigned NumOps;
569 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
570 i += NumOps) {
571 const MachineOperand &FlagMO = getOperand(i);
572 // If we reach the implicit register operands, stop looking.
573 if (!FlagMO.isImm())
574 return -1;
575 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
576 if (i + NumOps > OpIdx) {
577 if (GroupNo)
578 *GroupNo = Group;
579 return i;
580 }
581 ++Group;
582 }
583 return -1;
584}
585
586const DILocalVariable *MachineInstr::getDebugVariable() const {
587 assert(isDebugValue() && "not a DBG_VALUE");
588 return cast<DILocalVariable>(getOperand(2).getMetadata());
589}
590
591const DIExpression *MachineInstr::getDebugExpression() const {
592 assert(isDebugValue() && "not a DBG_VALUE");
593 return cast<DIExpression>(getOperand(3).getMetadata());
594}
595
596const TargetRegisterClass*
597MachineInstr::getRegClassConstraint(unsigned OpIdx,
598 const TargetInstrInfo *TII,
599 const TargetRegisterInfo *TRI) const {
600 assert(getParent() && "Can't have an MBB reference here!");
601 assert(getMF() && "Can't have an MF reference here!");
602 const MachineFunction &MF = *getMF();
603
604 // Most opcodes have fixed constraints in their MCInstrDesc.
605 if (!isInlineAsm())
606 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
607
608 if (!getOperand(OpIdx).isReg())
609 return nullptr;
610
611 // For tied uses on inline asm, get the constraint from the def.
612 unsigned DefIdx;
613 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
614 OpIdx = DefIdx;
615
616 // Inline asm stores register class constraints in the flag word.
617 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
618 if (FlagIdx < 0)
619 return nullptr;
620
621 unsigned Flag = getOperand(FlagIdx).getImm();
622 unsigned RCID;
623 if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
624 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
625 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
626 InlineAsm::hasRegClassConstraint(Flag, RCID))
627 return TRI->getRegClass(RCID);
628
629 // Assume that all registers in a memory operand are pointers.
630 if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
631 return TRI->getPointerRegClass(MF);
632
633 return nullptr;
634}
635
636const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
637 unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
638 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
639 // Check every operand inside the bundle if we have
640 // been asked to.
641 if (ExploreBundle)
642 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
643 ++OpndIt)
644 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
645 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
646 else
647 // Otherwise, just check the current operands.
648 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
649 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
650 return CurRC;
651}
652
653const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
654 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
655 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
656 assert(CurRC && "Invalid initial register class");
657 // Check if Reg is constrained by some of its use/def from MI.
658 const MachineOperand &MO = getOperand(OpIdx);
659 if (!MO.isReg() || MO.getReg() != Reg)
660 return CurRC;
661 // If yes, accumulate the constraints through the operand.
662 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
663}
664
665const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
666 unsigned OpIdx, const TargetRegisterClass *CurRC,
667 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
668 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
669 const MachineOperand &MO = getOperand(OpIdx);
670 assert(MO.isReg() &&
671 "Cannot get register constraints for non-register operand");
672 assert(CurRC && "Invalid initial register class");
673 if (unsigned SubIdx = MO.getSubReg()) {
674 if (OpRC)
675 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
676 else
677 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
678 } else if (OpRC)
679 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
680 return CurRC;
681}
682
683/// Return the number of instructions inside the MI bundle, not counting the
684/// header instruction.
685unsigned MachineInstr::getBundleSize() const {
686 MachineBasicBlock::const_instr_iterator I = getIterator();
687 unsigned Size = 0;
688 while (I->isBundledWithSucc()) {
689 ++Size;
690 ++I;
691 }
692 return Size;
693}
694
695/// Returns true if the MachineInstr has an implicit-use operand of exactly
696/// the given register (not considering sub/super-registers).
697bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
698 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
699 const MachineOperand &MO = getOperand(i);
700 if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
701 return true;
702 }
703 return false;
704}
705
706/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
707/// the specific register or -1 if it is not found. It further tightens
708/// the search criteria to a use that kills the register if isKill is true.
709int MachineInstr::findRegisterUseOperandIdx(
710 unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
711 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
712 const MachineOperand &MO = getOperand(i);
713 if (!MO.isReg() || !MO.isUse())
714 continue;
715 unsigned MOReg = MO.getReg();
716 if (!MOReg)
717 continue;
718 if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
719 TargetRegisterInfo::isPhysicalRegister(Reg) &&
720 TRI->isSubRegister(MOReg, Reg)))
721 if (!isKill || MO.isKill())
722 return i;
723 }
724 return -1;
725}
726
727/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
728/// indicating if this instruction reads or writes Reg. This also considers
729/// partial defines.
730std::pair<bool,bool>
731MachineInstr::readsWritesVirtualRegister(unsigned Reg,
732 SmallVectorImpl<unsigned> *Ops) const {
733 bool PartDef = false; // Partial redefine.
734 bool FullDef = false; // Full define.
735 bool Use = false;
736
737 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
738 const MachineOperand &MO = getOperand(i);
739 if (!MO.isReg() || MO.getReg() != Reg)
740 continue;
741 if (Ops)
742 Ops->push_back(i);
743 if (MO.isUse())
744 Use |= !MO.isUndef();
745 else if (MO.getSubReg() && !MO.isUndef())
746 // A partial def undef doesn't count as reading the register.
747 PartDef = true;
748 else
749 FullDef = true;
750 }
751 // A partial redefine uses Reg unless there is also a full define.
752 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
753}
754
755/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
756/// the specified register or -1 if it is not found. If isDead is true, defs
757/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
758/// also checks if there is a def of a super-register.
759int
760MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
761 const TargetRegisterInfo *TRI) const {
762 bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
763 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
764 const MachineOperand &MO = getOperand(i);
765 // Accept regmask operands when Overlap is set.
766 // Ignore them when looking for a specific def operand (Overlap == false).
767 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
768 return i;
769 if (!MO.isReg() || !MO.isDef())
770 continue;
771 unsigned MOReg = MO.getReg();
772 bool Found = (MOReg == Reg);
773 if (!Found && TRI && isPhys &&
774 TargetRegisterInfo::isPhysicalRegister(MOReg)) {
775 if (Overlap)
776 Found = TRI->regsOverlap(MOReg, Reg);
777 else
778 Found = TRI->isSubRegister(MOReg, Reg);
779 }
780 if (Found && (!isDead || MO.isDead()))
781 return i;
782 }
783 return -1;
784}
785
786/// findFirstPredOperandIdx() - Find the index of the first operand in the
787/// operand list that is used to represent the predicate. It returns -1 if
788/// none is found.
789int MachineInstr::findFirstPredOperandIdx() const {
790 // Don't call MCID.findFirstPredOperandIdx() because this variant
791 // is sometimes called on an instruction that's not yet complete, and
792 // so the number of operands is less than the MCID indicates. In
793 // particular, the PTX target does this.
794 const MCInstrDesc &MCID = getDesc();
795 if (MCID.isPredicable()) {
796 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
797 if (MCID.OpInfo[i].isPredicate())
798 return i;
799 }
800
801 return -1;
802}
803
804// MachineOperand::TiedTo is 4 bits wide.
805const unsigned TiedMax = 15;
806
807/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
808///
809/// Use and def operands can be tied together, indicated by a non-zero TiedTo
810/// field. TiedTo can have these values:
811///
812/// 0: Operand is not tied to anything.
813/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
814/// TiedMax: Tied to an operand >= TiedMax-1.
815///
816/// The tied def must be one of the first TiedMax operands on a normal
817/// instruction. INLINEASM instructions allow more tied defs.
818///
819void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
820 MachineOperand &DefMO = getOperand(DefIdx);
821 MachineOperand &UseMO = getOperand(UseIdx);
822 assert(DefMO.isDef() && "DefIdx must be a def operand");
823 assert(UseMO.isUse() && "UseIdx must be a use operand");
824 assert(!DefMO.isTied() && "Def is already tied to another use");
825 assert(!UseMO.isTied() && "Use is already tied to another def");
826
827 if (DefIdx < TiedMax)
828 UseMO.TiedTo = DefIdx + 1;
829 else {
830 // Inline asm can use the group descriptors to find tied operands, but on
831 // a normal instruction, the tied def must be within the first TiedMax
832 // operands.
833 assert(isInlineAsm() && "DefIdx out of range");
834 UseMO.TiedTo = TiedMax;
835 }
836
837 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
838 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
839}
840
841/// Given the index of a tied register operand, find the operand it is tied to.
842/// Defs are tied to uses and vice versa. Returns the index of the tied operand
843/// which must exist.
844unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
845 const MachineOperand &MO = getOperand(OpIdx);
846 assert(MO.isTied() && "Operand isn't tied");
847
848 // Normally TiedTo is in range.
849 if (MO.TiedTo < TiedMax)
850 return MO.TiedTo - 1;
851
852 // Uses on normal instructions can be out of range.
853 if (!isInlineAsm()) {
854 // Normal tied defs must be in the 0..TiedMax-1 range.
855 if (MO.isUse())
856 return TiedMax - 1;
857 // MO is a def. Search for the tied use.
858 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
859 const MachineOperand &UseMO = getOperand(i);
860 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
861 return i;
862 }
863 llvm_unreachable("Can't find tied use");
864 }
865
866 // Now deal with inline asm by parsing the operand group descriptor flags.
867 // Find the beginning of each operand group.
868 SmallVector<unsigned, 8> GroupIdx;
869 unsigned OpIdxGroup = ~0u;
870 unsigned NumOps;
871 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
872 i += NumOps) {
873 const MachineOperand &FlagMO = getOperand(i);
874 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
875 unsigned CurGroup = GroupIdx.size();
876 GroupIdx.push_back(i);
877 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
878 // OpIdx belongs to this operand group.
879 if (OpIdx > i && OpIdx < i + NumOps)
880 OpIdxGroup = CurGroup;
881 unsigned TiedGroup;
882 if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
883 continue;
884 // Operands in this group are tied to operands in TiedGroup which must be
885 // earlier. Find the number of operands between the two groups.
886 unsigned Delta = i - GroupIdx[TiedGroup];
887
888 // OpIdx is a use tied to TiedGroup.
889 if (OpIdxGroup == CurGroup)
890 return OpIdx - Delta;
891
892 // OpIdx is a def tied to this use group.
893 if (OpIdxGroup == TiedGroup)
894 return OpIdx + Delta;
895 }
896 llvm_unreachable("Invalid tied operand on inline asm");
897}
898
899/// clearKillInfo - Clears kill flags on all operands.
900///
901void MachineInstr::clearKillInfo() {
902 for (MachineOperand &MO : operands()) {
903 if (MO.isReg() && MO.isUse())
904 MO.setIsKill(false);
905 }
906}
907
908void MachineInstr::substituteRegister(unsigned FromReg,
909 unsigned ToReg,
910 unsigned SubIdx,
911 const TargetRegisterInfo &RegInfo) {
912 if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
913 if (SubIdx)
914 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
915 for (MachineOperand &MO : operands()) {
916 if (!MO.isReg() || MO.getReg() != FromReg)
917 continue;
918 MO.substPhysReg(ToReg, RegInfo);
919 }
920 } else {
921 for (MachineOperand &MO : operands()) {
922 if (!MO.isReg() || MO.getReg() != FromReg)
923 continue;
924 MO.substVirtReg(ToReg, SubIdx, RegInfo);
925 }
926 }
927}
928
929/// isSafeToMove - Return true if it is safe to move this instruction. If
930/// SawStore is set to true, it means that there is a store (or call) between
931/// the instruction's location and its intended destination.
932bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
933 // Ignore stuff that we obviously can't move.
934 //
935 // Treat volatile loads as stores. This is not strictly necessary for
936 // volatiles, but it is required for atomic loads. It is not allowed to move
937 // a load across an atomic load with Ordering > Monotonic.
938 if (mayStore() || isCall() || isPHI() ||
939 (mayLoad() && hasOrderedMemoryRef())) {
940 SawStore = true;
941 return false;
942 }
943
944 if (isPosition() || isDebugValue() || isTerminator() ||
945 hasUnmodeledSideEffects())
946 return false;
947
948 // See if this instruction does a load. If so, we have to guarantee that the
949 // loaded value doesn't change between the load and its intended
950 // destination. The check for isInvariantLoad gives the target the chance to
951 // classify the load as always returning a constant, e.g. a constant pool
952 // load.
953 if (mayLoad() && !isDereferenceableInvariantLoad(AA))
954 // Otherwise, this is a real load. If there is a store between the load and
955 // end of block, we can't move it.
956 return !SawStore;
957
958 return true;
959}
960
961bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
962 bool UseTBAA) {
963 const MachineFunction *MF = getMF();
964 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
965 const MachineFrameInfo &MFI = MF->getFrameInfo();
966
967 // If neither instruction stores to memory, they can't alias in any
968 // meaningful way, even if they read from the same address.
969 if (!mayStore() && !Other.mayStore())
970 return false;
971
972 // Let the target decide if memory accesses cannot possibly overlap.
973 if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
974 return false;
975
976 // FIXME: Need to handle multiple memory operands to support all targets.
977 if (!hasOneMemOperand() || !Other.hasOneMemOperand())
978 return true;
979
980 MachineMemOperand *MMOa = *memoperands_begin();
981 MachineMemOperand *MMOb = *Other.memoperands_begin();
982
983 // The following interface to AA is fashioned after DAGCombiner::isAlias
984 // and operates with MachineMemOperand offset with some important
985 // assumptions:
986 // - LLVM fundamentally assumes flat address spaces.
987 // - MachineOperand offset can *only* result from legalization and
988 // cannot affect queries other than the trivial case of overlap
989 // checking.
990 // - These offsets never wrap and never step outside
991 // of allocated objects.
992 // - There should never be any negative offsets here.
993 //
994 // FIXME: Modify API to hide this math from "user"
995 // Even before we go to AA we can reason locally about some
996 // memory objects. It can save compile time, and possibly catch some
997 // corner cases not currently covered.
998
999 int64_t OffsetA = MMOa->getOffset();
1000 int64_t OffsetB = MMOb->getOffset();
1001
1002 int64_t MinOffset = std::min(OffsetA, OffsetB);
1003 int64_t WidthA = MMOa->getSize();
1004 int64_t WidthB = MMOb->getSize();
1005 const Value *ValA = MMOa->getValue();
1006 const Value *ValB = MMOb->getValue();
1007 bool SameVal = (ValA && ValB && (ValA == ValB));
1008 if (!SameVal) {
1009 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1010 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1011 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1012 return false;
1013 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1014 return false;
1015 if (PSVa && PSVb && (PSVa == PSVb))
1016 SameVal = true;
1017 }
1018
1019 if (SameVal) {
1020 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1021 int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
1022 return (MinOffset + LowWidth > MaxOffset);
1023 }
1024
1025 if (!AA)
1026 return true;
1027
1028 if (!ValA || !ValB)
1029 return true;
1030
1031 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1032 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1033
1034 int64_t Overlapa = WidthA + OffsetA - MinOffset;
1035 int64_t Overlapb = WidthB + OffsetB - MinOffset;
1036
1037 AliasResult AAResult = AA->alias(
1038 MemoryLocation(ValA, Overlapa,
1039 UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1040 MemoryLocation(ValB, Overlapb,
1041 UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1042
1043 return (AAResult != NoAlias);
1044}
1045
1046/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1047/// or volatile memory reference, or if the information describing the memory
1048/// reference is not available. Return false if it is known to have no ordered
1049/// memory references.
1050bool MachineInstr::hasOrderedMemoryRef() const {
1051 // An instruction known never to access memory won't have a volatile access.
1052 if (!mayStore() &&
1053 !mayLoad() &&
1054 !isCall() &&
1055 !hasUnmodeledSideEffects())
1056 return false;
1057
1058 // Otherwise, if the instruction has no memory reference information,
1059 // conservatively assume it wasn't preserved.
1060 if (memoperands_empty())
1061 return true;
1062
1063 // Check if any of our memory operands are ordered.
1064 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1065 return !MMO->isUnordered();
1066 });
1067}
1068
1069/// isDereferenceableInvariantLoad - Return true if this instruction will never
1070/// trap and is loading from a location whose value is invariant across a run of
1071/// this function.
1072bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1073 // If the instruction doesn't load at all, it isn't an invariant load.
1074 if (!mayLoad())
1075 return false;
1076
1077 // If the instruction has lost its memoperands, conservatively assume that
1078 // it may not be an invariant load.
1079 if (memoperands_empty())
1080 return false;
1081
1082 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1083
1084 for (MachineMemOperand *MMO : memoperands()) {
1085 if (MMO->isVolatile()) return false;
1086 if (MMO->isStore()) return false;
1087 if (MMO->isInvariant() && MMO->isDereferenceable())
1088 continue;
1089
1090 // A load from a constant PseudoSourceValue is invariant.
1091 if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1092 if (PSV->isConstant(&MFI))
1093 continue;
1094
1095 if (const Value *V = MMO->getValue()) {
1096 // If we have an AliasAnalysis, ask it whether the memory is constant.
1097 if (AA &&
1098 AA->pointsToConstantMemory(
1099 MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1100 continue;
1101 }
1102
1103 // Otherwise assume conservatively.
1104 return false;
1105 }
1106
1107 // Everything checks out.
1108 return true;
1109}
1110
1111/// isConstantValuePHI - If the specified instruction is a PHI that always
1112/// merges together the same virtual register, return the register, otherwise
1113/// return 0.
1114unsigned MachineInstr::isConstantValuePHI() const {
1115 if (!isPHI())
1116 return 0;
1117 assert(getNumOperands() >= 3 &&
1118 "It's illegal to have a PHI without source operands");
1119
1120 unsigned Reg = getOperand(1).getReg();
1121 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1122 if (getOperand(i).getReg() != Reg)
1123 return 0;
1124 return Reg;
1125}
1126
1127bool MachineInstr::hasUnmodeledSideEffects() const {
1128 if (hasProperty(MCID::UnmodeledSideEffects))
1129 return true;
1130 if (isInlineAsm()) {
1131 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1132 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1133 return true;
1134 }
1135
1136 return false;
1137}
1138
1139bool MachineInstr::isLoadFoldBarrier() const {
1140 return mayStore() || isCall() || hasUnmodeledSideEffects();
1141}
1142
1143/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1144///
1145bool MachineInstr::allDefsAreDead() const {
1146 for (const MachineOperand &MO : operands()) {
1147 if (!MO.isReg() || MO.isUse())
1148 continue;
1149 if (!MO.isDead())
1150 return false;
1151 }
1152 return true;
1153}
1154
1155/// copyImplicitOps - Copy implicit register operands from specified
1156/// instruction to this instruction.
1157void MachineInstr::copyImplicitOps(MachineFunction &MF,
1158 const MachineInstr &MI) {
1159 for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1160 i != e; ++i) {
1161 const MachineOperand &MO = MI.getOperand(i);
1162 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1163 addOperand(MF, MO);
1164 }
1165}
1166
1167bool MachineInstr::hasComplexRegisterTies() const {
1168 const MCInstrDesc &MCID = getDesc();
1169 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1170 const auto &Operand = getOperand(I);
1171 if (!Operand.isReg() || Operand.isDef())
1172 // Ignore the defined registers as MCID marks only the uses as tied.
1173 continue;
1174 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1175 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1176 if (ExpectedTiedIdx != TiedIdx)
1177 return true;
1178 }
1179 return false;
1180}
1181
1182LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1183 const MachineRegisterInfo &MRI) const {
1184 const MachineOperand &Op = getOperand(OpIdx);
1185 if (!Op.isReg())
1186 return LLT{};
1187
1188 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1189 return MRI.getType(Op.getReg());
1190
1191 auto &OpInfo = getDesc().OpInfo[OpIdx];
1192 if (!OpInfo.isGenericType())
1193 return MRI.getType(Op.getReg());
1194
1195 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1196 return LLT{};
1197
1198 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1199 return MRI.getType(Op.getReg());
1200}
1201
1202#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1203LLVM_DUMP_METHOD void MachineInstr::dump() const {
1204 dbgs() << " ";
1205 print(dbgs());
1206}
1207#endif
1208
1209void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc,
1210 const TargetInstrInfo *TII) const {
1211 const Module *M = nullptr;
1212 if (const MachineBasicBlock *MBB = getParent())
1213 if (const MachineFunction *MF = MBB->getParent())
1214 M = MF->getFunction().getParent();
1215
1216 ModuleSlotTracker MST(M);
1217 print(OS, MST, SkipOpers, SkipDebugLoc, TII);
1218}
1219
1220void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1221 bool SkipOpers, bool SkipDebugLoc,
1222 const TargetInstrInfo *TII) const {
1223 // We can be a bit tidier if we know the MachineFunction.
1224 const MachineFunction *MF = nullptr;
1225 const TargetRegisterInfo *TRI = nullptr;
1226 const MachineRegisterInfo *MRI = nullptr;
1227 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1228
1229 if (const MachineBasicBlock *MBB = getParent()) {
1230 MF = MBB->getParent();
1231 if (MF) {
1232 MRI = &MF->getRegInfo();
1233 TRI = MF->getSubtarget().getRegisterInfo();
1234 if (!TII)
1235 TII = MF->getSubtarget().getInstrInfo();
1236 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
1237 }
1238 }
1239
1240 // Save a list of virtual registers.
1241 SmallVector<unsigned, 8> VirtRegs;
1242
1243 SmallBitVector PrintedTypes(8);
1244 bool ShouldPrintRegisterTies = hasComplexRegisterTies();
1245 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1246 if (!ShouldPrintRegisterTies)
1247 return 0U;
1248 const MachineOperand &MO = getOperand(OpIdx);
1249 if (MO.isReg() && MO.isTied() && !MO.isDef())
1250 return findTiedOperandIdx(OpIdx);
1251 return 0U;
1252 };
1253 // Print explicitly defined operands on the left of an assignment syntax.
1254 unsigned StartOp = 0, e = getNumOperands();
1255 for (; StartOp < e && getOperand(StartOp).isReg() &&
1256 getOperand(StartOp).isDef() && !getOperand(StartOp).isImplicit();
1257 ++StartOp) {
1258 if (StartOp != 0)
1259 OS << ", ";
1260 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1261 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1262 getOperand(StartOp).print(OS, MST, TypeToPrint, /*PrintDef=*/false,
1263 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1264 IntrinsicInfo);
1265 unsigned Reg = getOperand(StartOp).getReg();
1266 if (TargetRegisterInfo::isVirtualRegister(Reg))
1267 VirtRegs.push_back(Reg);
1268 }
1269
1270 if (StartOp != 0)
1271 OS << " = ";
1272
1273 // Print the opcode name.
1274 if (TII)
1275 OS << TII->getName(getOpcode());
1276 else
1277 OS << "UNKNOWN";
1278
1279 if (SkipOpers)
1280 return;
1281
1282 // Print the rest of the operands.
1283 bool FirstOp = true;
1284 unsigned AsmDescOp = ~0u;
1285 unsigned AsmOpCount = 0;
1286
1287 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1288 // Print asm string.
1289 OS << " ";
1290 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1291 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1292 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1293 getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true,
1294 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1295 IntrinsicInfo);
1296
1297 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1298 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1299 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1300 OS << " [sideeffect]";
1301 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1302 OS << " [mayload]";
1303 if (ExtraInfo & InlineAsm::Extra_MayStore)
1304 OS << " [maystore]";
1305 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1306 OS << " [isconvergent]";
1307 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1308 OS << " [alignstack]";
1309 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1310 OS << " [attdialect]";
1311 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1312 OS << " [inteldialect]";
1313
1314 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1315 FirstOp = false;
1316 }
1317
1318 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1319 const MachineOperand &MO = getOperand(i);
1320
1321 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1322 VirtRegs.push_back(MO.getReg());
1323
1324 if (FirstOp) FirstOp = false; else OS << ",";
1325 OS << " ";
1326 if (i < getDesc().NumOperands) {
1327 const MCOperandInfo &MCOI = getDesc().OpInfo[i];
1328 if (MCOI.isPredicate())
1329 OS << "pred:";
1330 if (MCOI.isOptionalDef())
1331 OS << "opt:";
1332 }
1333 if (isDebugValue() && MO.isMetadata()) {
1334 // Pretty print DBG_VALUE instructions.
1335 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1336 if (DIV && !DIV->getName().empty())
1337 OS << "!\"" << DIV->getName() << '\"';
1338 else {
1339 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1340 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1341 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true,
1342 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1343 }
1344 } else if (TRI && (isInsertSubreg() || isRegSequence() ||
1345 (isSubregToReg() && i == 3)) && MO.isImm()) {
1346 OS << TRI->getSubRegIndexName(MO.getImm());
1347 } else if (i == AsmDescOp && MO.isImm()) {
1348 // Pretty print the inline asm operand descriptor.
1349 OS << '$' << AsmOpCount++;
1350 unsigned Flag = MO.getImm();
1351 switch (InlineAsm::getKind(Flag)) {
1352 case InlineAsm::Kind_RegUse: OS << ":[reguse"; break;
1353 case InlineAsm::Kind_RegDef: OS << ":[regdef"; break;
1354 case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1355 case InlineAsm::Kind_Clobber: OS << ":[clobber"; break;
1356 case InlineAsm::Kind_Imm: OS << ":[imm"; break;
1357 case InlineAsm::Kind_Mem: OS << ":[mem"; break;
1358 default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1359 }
1360
1361 unsigned RCID = 0;
1362 if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1363 InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1364 if (TRI) {
1365 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1366 } else
1367 OS << ":RC" << RCID;
1368 }
1369
1370 if (InlineAsm::isMemKind(Flag)) {
1371 unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1372 switch (MCID) {
1373 case InlineAsm::Constraint_es: OS << ":es"; break;
1374 case InlineAsm::Constraint_i: OS << ":i"; break;
1375 case InlineAsm::Constraint_m: OS << ":m"; break;
1376 case InlineAsm::Constraint_o: OS << ":o"; break;
1377 case InlineAsm::Constraint_v: OS << ":v"; break;
1378 case InlineAsm::Constraint_Q: OS << ":Q"; break;
1379 case InlineAsm::Constraint_R: OS << ":R"; break;
1380 case InlineAsm::Constraint_S: OS << ":S"; break;
1381 case InlineAsm::Constraint_T: OS << ":T"; break;
1382 case InlineAsm::Constraint_Um: OS << ":Um"; break;
1383 case InlineAsm::Constraint_Un: OS << ":Un"; break;
1384 case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1385 case InlineAsm::Constraint_Us: OS << ":Us"; break;
1386 case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1387 case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1388 case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1389 case InlineAsm::Constraint_X: OS << ":X"; break;
1390 case InlineAsm::Constraint_Z: OS << ":Z"; break;
1391 case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1392 case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1393 default: OS << ":?"; break;
1394 }
1395 }
1396
1397 unsigned TiedTo = 0;
1398 if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1399 OS << " tiedto:$" << TiedTo;
1400
1401 OS << ']';
1402
1403 // Compute the index of the next operand descriptor.
1404 AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1405 } else {
1406 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1407 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1408 if (MO.isImm() && isOperandSubregIdx(i))
1409 MachineOperand::printSubregIdx(OS, MO.getImm(), TRI);
1410 else
1411 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true,
1412 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1413 }
1414 }
1415
1416 bool HaveSemi = false;
1417 const unsigned PrintableFlags = FrameSetup | FrameDestroy;
1418 if (Flags & PrintableFlags) {
1419 if (!HaveSemi) {
1420 OS << ";";
1421 HaveSemi = true;
1422 }
1423 OS << " flags: ";
1424
1425 if (Flags & FrameSetup)
1426 OS << "FrameSetup";
1427
1428 if (Flags & FrameDestroy)
1429 OS << "FrameDestroy";
1430 }
1431
1432 if (!memoperands_empty()) {
1433 if (!HaveSemi) {
1434 OS << ";";
1435 HaveSemi = true;
1436 }
1437
1438 OS << " mem:";
1439 for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
1440 i != e; ++i) {
1441 (*i)->print(OS, MST);
1442 if (std::next(i) != e)
1443 OS << " ";
1444 }
1445 }
1446
1447 // Print the regclass of any virtual registers encountered.
1448 if (MRI && !VirtRegs.empty()) {
1449 if (!HaveSemi) {
1450 OS << ";";
1451 HaveSemi = true;
1452 }
1453 for (unsigned i = 0; i != VirtRegs.size(); ++i) {
1454 const RegClassOrRegBank &RC = MRI->getRegClassOrRegBank(VirtRegs[i]);
1455 if (!RC)
1456 continue;
1457 // Generic virtual registers do not have register classes.
1458 if (RC.is<const RegisterBank *>())
1459 OS << " " << RC.get<const RegisterBank *>()->getName();
1460 else
1461 OS << " "
1462 << TRI->getRegClassName(RC.get<const TargetRegisterClass *>());
1463 OS << ':' << printReg(VirtRegs[i]);
1464 for (unsigned j = i+1; j != VirtRegs.size();) {
1465 if (MRI->getRegClassOrRegBank(VirtRegs[j]) != RC) {
1466 ++j;
1467 continue;
1468 }
1469 if (VirtRegs[i] != VirtRegs[j])
1470 OS << "," << printReg(VirtRegs[j]);
1471 VirtRegs.erase(VirtRegs.begin()+j);
1472 }
1473 }
1474 }
1475
1476 // Print debug location information.
1477 if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1478 if (!HaveSemi)
1479 OS << ";";
1480 auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1481 OS << " line no:" << DV->getLine();
1482 if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1483 DebugLoc InlinedAtDL(InlinedAt);
1484 if (InlinedAtDL && MF) {
1485 OS << " inlined @[ ";
1486 InlinedAtDL.print(OS);
1487 OS << " ]";
1488 }
1489 }
1490 if (isIndirectDebugValue())
1491 OS << " indirect";
1492 } else if (SkipDebugLoc) {
1493 return;
1494 } else if (debugLoc && MF) {
1495 if (!HaveSemi)
1496 OS << ";";
1497 OS << " dbg:";
1498 debugLoc.print(OS);
1499 }
1500
1501 OS << '\n';
1502}
1503
1504bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
1505 const TargetRegisterInfo *RegInfo,
1506 bool AddIfNotFound) {
1507 bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
1508 bool hasAliases = isPhysReg &&
1509 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
1510 bool Found = false;
1511 SmallVector<unsigned,4> DeadOps;
1512 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1513 MachineOperand &MO = getOperand(i);
1514 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
1515 continue;
1516
1517 // DEBUG_VALUE nodes do not contribute to code generation and should
1518 // always be ignored. Failure to do so may result in trying to modify
1519 // KILL flags on DEBUG_VALUE nodes.
1520 if (MO.isDebug())
1521 continue;
1522
1523 unsigned Reg = MO.getReg();
1524 if (!Reg)
1525 continue;
1526
1527 if (Reg == IncomingReg) {
1528 if (!Found) {
1529 if (MO.isKill())
1530 // The register is already marked kill.
1531 return true;
1532 if (isPhysReg && isRegTiedToDefOperand(i))
1533 // Two-address uses of physregs must not be marked kill.
1534 return true;
1535 MO.setIsKill();
1536 Found = true;
1537 }
1538 } else if (hasAliases && MO.isKill() &&
1539 TargetRegisterInfo::isPhysicalRegister(Reg)) {
1540 // A super-register kill already exists.
1541 if (RegInfo->isSuperRegister(IncomingReg, Reg))
1542 return true;
1543 if (RegInfo->isSubRegister(IncomingReg, Reg))
1544 DeadOps.push_back(i);
1545 }
1546 }
1547
1548 // Trim unneeded kill operands.
1549 while (!DeadOps.empty()) {
1550 unsigned OpIdx = DeadOps.back();
1551 if (getOperand(OpIdx).isImplicit())
1552 RemoveOperand(OpIdx);
1553 else
1554 getOperand(OpIdx).setIsKill(false);
1555 DeadOps.pop_back();
1556 }
1557
1558 // If not found, this means an alias of one of the operands is killed. Add a
1559 // new implicit operand if required.
1560 if (!Found && AddIfNotFound) {
1561 addOperand(MachineOperand::CreateReg(IncomingReg,
1562 false /*IsDef*/,
1563 true /*IsImp*/,
1564 true /*IsKill*/));
1565 return true;
1566 }
1567 return Found;
1568}
1569
1570void MachineInstr::clearRegisterKills(unsigned Reg,
1571 const TargetRegisterInfo *RegInfo) {
1572 if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1573 RegInfo = nullptr;
1574 for (MachineOperand &MO : operands()) {
1575 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1576 continue;
1577 unsigned OpReg = MO.getReg();
1578 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1579 MO.setIsKill(false);
1580 }
1581}
1582
1583bool MachineInstr::addRegisterDead(unsigned Reg,
1584 const TargetRegisterInfo *RegInfo,
1585 bool AddIfNotFound) {
1586 bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
1587 bool hasAliases = isPhysReg &&
1588 MCRegAliasIterator(Reg, RegInfo, false).isValid();
1589 bool Found = false;
1590 SmallVector<unsigned,4> DeadOps;
1591 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1592 MachineOperand &MO = getOperand(i);
1593 if (!MO.isReg() || !MO.isDef())
1594 continue;
1595 unsigned MOReg = MO.getReg();
1596 if (!MOReg)
1597 continue;
1598
1599 if (MOReg == Reg) {
1600 MO.setIsDead();
1601 Found = true;
1602 } else if (hasAliases && MO.isDead() &&
1603 TargetRegisterInfo::isPhysicalRegister(MOReg)) {
1604 // There exists a super-register that's marked dead.
1605 if (RegInfo->isSuperRegister(Reg, MOReg))
1606 return true;
1607 if (RegInfo->isSubRegister(Reg, MOReg))
1608 DeadOps.push_back(i);
1609 }
1610 }
1611
1612 // Trim unneeded dead operands.
1613 while (!DeadOps.empty()) {
1614 unsigned OpIdx = DeadOps.back();
1615 if (getOperand(OpIdx).isImplicit())
1616 RemoveOperand(OpIdx);
1617 else
1618 getOperand(OpIdx).setIsDead(false);
1619 DeadOps.pop_back();
1620 }
1621
1622 // If not found, this means an alias of one of the operands is dead. Add a
1623 // new implicit operand if required.
1624 if (Found || !AddIfNotFound)
1625 return Found;
1626
1627 addOperand(MachineOperand::CreateReg(Reg,
1628 true /*IsDef*/,
1629 true /*IsImp*/,
1630 false /*IsKill*/,
1631 true /*IsDead*/));
1632 return true;
1633}
1634
1635void MachineInstr::clearRegisterDeads(unsigned Reg) {
1636 for (MachineOperand &MO : operands()) {
1637 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1638 continue;
1639 MO.setIsDead(false);
1640 }
1641}
1642
1643void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1644 for (MachineOperand &MO : operands()) {
1645 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1646 continue;
1647 MO.setIsUndef(IsUndef);
1648 }
1649}
1650
1651void MachineInstr::addRegisterDefined(unsigned Reg,
1652 const TargetRegisterInfo *RegInfo) {
1653 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1654 MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1655 if (MO)
1656 return;
1657 } else {
1658 for (const MachineOperand &MO : operands()) {
1659 if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1660 MO.getSubReg() == 0)
1661 return;
1662 }
1663 }
1664 addOperand(MachineOperand::CreateReg(Reg,
1665 true /*IsDef*/,
1666 true /*IsImp*/));
1667}
1668
1669void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1670 const TargetRegisterInfo &TRI) {
1671 bool HasRegMask = false;
1672 for (MachineOperand &MO : operands()) {
1673 if (MO.isRegMask()) {
1674 HasRegMask = true;
1675 continue;
1676 }
1677 if (!MO.isReg() || !MO.isDef()) continue;
1678 unsigned Reg = MO.getReg();
1679 if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1680 // If there are no uses, including partial uses, the def is dead.
1681 if (llvm::none_of(UsedRegs,
1682 [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1683 MO.setIsDead();
1684 }
1685
1686 // This is a call with a register mask operand.
1687 // Mask clobbers are always dead, so add defs for the non-dead defines.
1688 if (HasRegMask)
1689 for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1690 I != E; ++I)
1691 addRegisterDefined(*I, &TRI);
1692}
1693
1694unsigned
1695MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1696 // Build up a buffer of hash code components.
1697 SmallVector<size_t, 8> HashComponents;
1698 HashComponents.reserve(MI->getNumOperands() + 1);
1699 HashComponents.push_back(MI->getOpcode());
1700 for (const MachineOperand &MO : MI->operands()) {
1701 if (MO.isReg() && MO.isDef() &&
1702 TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1703 continue; // Skip virtual register defs.
1704
1705 HashComponents.push_back(hash_value(MO));
1706 }
1707 return hash_combine_range(HashComponents.begin(), HashComponents.end());
1708}
1709
1710void MachineInstr::emitError(StringRef Msg) const {
1711 // Find the source location cookie.
1712 unsigned LocCookie = 0;
1713 const MDNode *LocMD = nullptr;
1714 for (unsigned i = getNumOperands(); i != 0; --i) {
1715 if (getOperand(i-1).isMetadata() &&
1716 (LocMD = getOperand(i-1).getMetadata()) &&
1717 LocMD->getNumOperands() != 0) {
1718 if (const ConstantInt *CI =
1719 mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1720 LocCookie = CI->getZExtValue();
1721 break;
1722 }
1723 }
1724 }
1725
1726 if (const MachineBasicBlock *MBB = getParent())
1727 if (const MachineFunction *MF = MBB->getParent())
1728 return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1729 report_fatal_error(Msg);
1730}
1731
1732MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1733 const MCInstrDesc &MCID, bool IsIndirect,
1734 unsigned Reg, const MDNode *Variable,
1735 const MDNode *Expr) {
1736 assert(isa<DILocalVariable>(Variable) && "not a variable");
1737 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1738 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1739 "Expected inlined-at fields to agree");
1740 if (IsIndirect)
2. Assuming 'IsIndirect' is 0
3. Taking false branch
1741 return BuildMI(MF, DL, MCID)
1742 .addReg(Reg, RegState::Debug)
1743 .addImm(0U)
1744 .addMetadata(Variable)
1745 .addMetadata(Expr);
1746 else
1747 return BuildMI(MF, DL, MCID)
4. Calling 'MachineInstrBuilder::addMetadata'
1748 .addReg(Reg, RegState::Debug)
1749 .addReg(0U, RegState::Debug)
1750 .addMetadata(Variable)
1751 .addMetadata(Expr);
1752}
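// A minimal sketch of the two operand layouts this builder produces, assuming
// a hypothetical register %vreg0, variable !var, and expression !expr:
//
//   IsIndirect == true:   DBG_VALUE debug-use %vreg0, 0, !var, !expr
//   IsIndirect == false:  DBG_VALUE debug-use %vreg0, debug-use %noreg, !var, !expr
//
// So a DBG_VALUE is register-indirect exactly when its second operand is an
// immediate, the convention documented on this builder in MachineInstrBuilder.h.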
1753
1754MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1755 MachineBasicBlock::iterator I,
1756 const DebugLoc &DL, const MCInstrDesc &MCID,
1757 bool IsIndirect, unsigned Reg,
1758 const MDNode *Variable, const MDNode *Expr) {
1759 assert(isa<DILocalVariable>(Variable) && "not a variable");
1760 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1761 MachineFunction &MF = *BB.getParent();
1762 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1. Calling 'BuildMI'
1763 BB.insert(I, MI);
1764 return MachineInstrBuilder(MF, MI);
1765}
1766
1767/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
1768/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
1769static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
1770 assert(MI.getOperand(0).isReg() && "can't spill non-register");
1771 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
1772 "Expected inlined-at fields to agree");
1773
1774 const DIExpression *Expr = MI.getDebugExpression();
1775 if (MI.isIndirectDebugValue()) {
1776 assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
1777 Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
1778 }
1779 return Expr;
1780}
1781
1782MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
1783 MachineBasicBlock::iterator I,
1784 const MachineInstr &Orig,
1785 int FrameIndex) {
1786 const DIExpression *Expr = computeExprForSpill(Orig);
1787 return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
1788 .addFrameIndex(FrameIndex)
1789 .addImm(0U)
1790 .addMetadata(Orig.getDebugVariable())
1791 .addMetadata(Expr);
1792}
1793
1794void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
1795 const DIExpression *Expr = computeExprForSpill(Orig);
1796 Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
1797 Orig.getOperand(1).ChangeToImmediate(0U);
1798 Orig.getOperand(3).setMetadata(Expr);
1799}
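// A minimal usage sketch, assuming a DBG_VALUE instruction DV whose current
// register location has just been spilled to frame index FI (both names are
// hypothetical):
//
//   // Clone DV as a frame-index DBG_VALUE next to it, leaving DV intact.
//   MachineInstr *NewDV =
//       buildDbgValueForSpill(*DV->getParent(), DV, *DV, FI);
//   // Or rewrite DV in place, e.g. while iterating over the basic block.
//   updateDbgValueForSpill(*DV, FI);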

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/CodeGen/MachineInstrBuilder.h

1//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes a function named BuildMI, which is useful for dramatically
11// simplifying how MachineInstrs are created. It allows use of code like this:
12//
13// M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
14// .addReg(argVal1)
15// .addReg(argVal2);
16//
17//===----------------------------------------------------------------------===//
18
19#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
20#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
21
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFunction.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBundle.h"
27#include "llvm/CodeGen/MachineOperand.h"
28#include "llvm/CodeGen/TargetRegisterInfo.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/Support/ErrorHandling.h"
32#include <cassert>
33#include <cstdint>
34#include <utility>
35
36namespace llvm {
37
38class MCInstrDesc;
39class MDNode;
40
41namespace RegState {
42
43 enum {
44 Define = 0x2,
45 Implicit = 0x4,
46 Kill = 0x8,
47 Dead = 0x10,
48 Undef = 0x20,
49 EarlyClobber = 0x40,
50 Debug = 0x80,
51 InternalRead = 0x100,
52 Renamable = 0x200,
53 DefineNoRead = Define | Undef,
54 ImplicitDefine = Implicit | Define,
55 ImplicitKill = Implicit | Kill
56 };
57
58} // end namespace RegState
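// A minimal sketch of how these flags are typically combined when adding
// register operands through the MachineInstrBuilder defined below (MIB and
// Reg are hypothetical names):
//
//   MIB.addReg(Reg, RegState::ImplicitDefine);            // implicit def
//   MIB.addReg(Reg, RegState::Implicit | RegState::Kill); // implicit use, killed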
59
60class MachineInstrBuilder {
61 MachineFunction *MF = nullptr;
62 MachineInstr *MI = nullptr;
63
64public:
65 MachineInstrBuilder() = default;
66
67 /// Create a MachineInstrBuilder for manipulating an existing instruction.
68 /// F must be the machine function that was used to allocate I.
69 MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
70 MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
71 : MF(&F), MI(&*I) {}
72
73 /// Allow automatic conversion to the machine instruction we are working on.
74 operator MachineInstr*() const { return MI; }
75 MachineInstr *operator->() const { return MI; }
76 operator MachineBasicBlock::iterator() const { return MI; }
77
78 /// If conversion operators fail, use this method to get the MachineInstr
79 /// explicitly.
80 MachineInstr *getInstr() const { return MI; }
81
82 /// Add a new virtual register operand.
83 const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
84 unsigned SubReg = 0) const {
85 assert((flags & 0x1) == 0 &&
86 "Passing in 'true' to addReg is forbidden! Use enums instead.");
87 MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
88 flags & RegState::Define,
89 flags & RegState::Implicit,
90 flags & RegState::Kill,
91 flags & RegState::Dead,
92 flags & RegState::Undef,
93 flags & RegState::EarlyClobber,
94 SubReg,
95 flags & RegState::Debug,
96 flags & RegState::InternalRead,
97 flags & RegState::Renamable));
98 return *this;
99 }
100
101 /// Add a virtual register definition operand.
102 const MachineInstrBuilder &addDef(unsigned RegNo, unsigned Flags = 0,
103 unsigned SubReg = 0) const {
104 return addReg(RegNo, Flags | RegState::Define, SubReg);
105 }
106
107 /// Add a virtual register use operand. It is an error for Flags to contain
108 /// `RegState::Define` when calling this function.
109 const MachineInstrBuilder &addUse(unsigned RegNo, unsigned Flags = 0,
110 unsigned SubReg = 0) const {
111 assert(!(Flags & RegState::Define) &&
112 "Misleading addUse defines register, use addReg instead.");
113 return addReg(RegNo, Flags, SubReg);
114 }
115
116 /// Add a new immediate operand.
117 const MachineInstrBuilder &addImm(int64_t Val) const {
118 MI->addOperand(*MF, MachineOperand::CreateImm(Val));
119 return *this;
120 }
121
122 const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
123 MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
124 return *this;
125 }
126
127 const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
128 MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
129 return *this;
130 }
131
132 const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
133 unsigned char TargetFlags = 0) const {
134 MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
135 return *this;
136 }
137
138 const MachineInstrBuilder &addFrameIndex(int Idx) const {
139 MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
140 return *this;
141 }
142
143 const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
144 int Offset = 0,
145 unsigned char TargetFlags = 0) const {
146 MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
147 return *this;
148 }
149
150 const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
151 unsigned char TargetFlags = 0) const {
152 MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
153 TargetFlags));
154 return *this;
155 }
156
157 const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
158 unsigned char TargetFlags = 0) const {
159 MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
160 return *this;
161 }
162
163 const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
164 int64_t Offset = 0,
165 unsigned char TargetFlags = 0) const {
166 MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
167 return *this;
168 }
169
170 const MachineInstrBuilder &addExternalSymbol(const char *FnName,
171 unsigned char TargetFlags = 0) const {
172 MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
173 return *this;
174 }
175
176 const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
177 int64_t Offset = 0,
178 unsigned char TargetFlags = 0) const {
179 MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
180 return *this;
181 }
182
183 const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
184 MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
185 return *this;
186 }
187
188 const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
189 MI->addMemOperand(*MF, MMO);
190 return *this;
191 }
192
193 const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
194 MachineInstr::mmo_iterator e) const {
195 MI->setMemRefs(b, e);
196 return *this;
197 }
198
199 const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
200 unsigned> MemOperandsRef) const {
201 MI->setMemRefs(MemOperandsRef);
202 return *this;
203 }
204
205 const MachineInstrBuilder &add(const MachineOperand &MO) const {
206 MI->addOperand(*MF, MO);
207 return *this;
208 }
209
210 const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
211 for (const MachineOperand &MO : MOs) {
212 MI->addOperand(*MF, MO);
213 }
214 return *this;
215 }
216
217 const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
218 MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
5. Calling 'MachineInstr::addOperand'
219 assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
220 : true) &&
221 "first MDNode argument of a DBG_VALUE not a variable");
222 return *this;
223 }
224
225 const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
226 MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
227 return *this;
228 }
229
230 const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
231 MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
232 return *this;
233 }
234
235 const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
236 MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
237 return *this;
238 }
239
240 const MachineInstrBuilder &addSym(MCSymbol *Sym,
241 unsigned char TargetFlags = 0) const {
242 MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
243 return *this;
244 }
245
246 const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
247 MI->setFlags(Flags);
248 return *this;
249 }
250
251 const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
252 MI->setFlag(Flag);
253 return *this;
254 }
255
256 // Add a displacement from an existing MachineOperand with an added offset.
257 const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
258 unsigned char TargetFlags = 0) const {
259 // If caller specifies new TargetFlags then use it, otherwise the
260 // default behavior is to copy the target flags from the existing
261 // MachineOperand. This means if the caller wants to clear the
262 // target flags it needs to do so explicitly.
263 if (0 == TargetFlags)
264 TargetFlags = Disp.getTargetFlags();
265
266 switch (Disp.getType()) {
267 default:
268 llvm_unreachable("Unhandled operand type in addDisp()");
269 case MachineOperand::MO_Immediate:
270 return addImm(Disp.getImm() + off);
271 case MachineOperand::MO_ConstantPoolIndex:
272 return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
273 TargetFlags);
274 case MachineOperand::MO_GlobalAddress:
275 return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
276 TargetFlags);
277 }
278 }
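// A minimal sketch, assuming MemOp is a global-address operand copied from an
// existing instruction and MIB is a builder (both hypothetical names):
//
//   // Re-emit the same address displaced by 8 bytes; since no TargetFlags are
//   // passed, the target flags of MemOp are copied onto the new operand.
//   MIB.addDisp(MemOp, /*off=*/8);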
279
280 /// Copy all the implicit operands from OtherMI onto this one.
281 const MachineInstrBuilder &
282 copyImplicitOps(const MachineInstr &OtherMI) const {
283 MI->copyImplicitOps(*MF, OtherMI);
284 return *this;
285 }
286};
287
288/// Builder interface. Specify how to create the initial instruction itself.
289inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
290 const MCInstrDesc &MCID) {
291 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
292}
293
294/// This version of the builder sets up the first operand as a
295/// destination virtual register.
296inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
297 const MCInstrDesc &MCID, unsigned DestReg) {
298 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
299 .addReg(DestReg, RegState::Define);
300}
301
302/// This version of the builder inserts the newly-built instruction before
303/// the given position in the given MachineBasicBlock, and sets up the first
304/// operand as a destination virtual register.
305inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
306 MachineBasicBlock::iterator I,
307 const DebugLoc &DL, const MCInstrDesc &MCID,
308 unsigned DestReg) {
309 MachineFunction &MF = *BB.getParent();
310 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
311 BB.insert(I, MI);
312 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
313}
314
315/// This version of the builder inserts the newly-built instruction before
316/// the given position in the given MachineBasicBlock, and sets up the first
317/// operand as a destination virtual register.
318///
319/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
320/// added to the same bundle.
321inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
322 MachineBasicBlock::instr_iterator I,
323 const DebugLoc &DL, const MCInstrDesc &MCID,
324 unsigned DestReg) {
325 MachineFunction &MF = *BB.getParent();
326 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
327 BB.insert(I, MI);
328 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
329}
330
331inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
332 const DebugLoc &DL, const MCInstrDesc &MCID,
333 unsigned DestReg) {
334 // Calling the overload for instr_iterator is always correct. However, the
335 // definition is not available in headers, so inline the check.
336 if (I.isInsideBundle())
337 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
338 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
339}
340
341inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
342 const DebugLoc &DL, const MCInstrDesc &MCID,
343 unsigned DestReg) {
344 return BuildMI(BB, *I, DL, MCID, DestReg);
345}
346
347/// This version of the builder inserts the newly-built instruction before the
348/// given position in the given MachineBasicBlock, and does NOT take a
349/// destination register.
350inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
351 MachineBasicBlock::iterator I,
352 const DebugLoc &DL,
353 const MCInstrDesc &MCID) {
354 MachineFunction &MF = *BB.getParent();
355 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
356 BB.insert(I, MI);
357 return MachineInstrBuilder(MF, MI);
358}
359
360inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
361 MachineBasicBlock::instr_iterator I,
362 const DebugLoc &DL,
363 const MCInstrDesc &MCID) {
364 MachineFunction &MF = *BB.getParent();
365 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
366 BB.insert(I, MI);
367 return MachineInstrBuilder(MF, MI);
368}
369
370inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
371 const DebugLoc &DL,
372 const MCInstrDesc &MCID) {
373 // Calling the overload for instr_iterator is always correct. However, the
374 // definition is not available in headers, so inline the check.
375 if (I.isInsideBundle())
376 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
377 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
378}
379
380inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
381 const DebugLoc &DL,
382 const MCInstrDesc &MCID) {
383 return BuildMI(BB, *I, DL, MCID);
384}
385
386/// This version of the builder inserts the newly-built instruction at the end
387/// of the given MachineBasicBlock, and does NOT take a destination register.
388inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
389 const MCInstrDesc &MCID) {
390 return BuildMI(*BB, BB->end(), DL, MCID);
391}
392
393/// This version of the builder inserts the newly-built instruction at the
394/// end of the given MachineBasicBlock, and sets up the first operand as a
395/// destination virtual register.
396inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
397 const MCInstrDesc &MCID, unsigned DestReg) {
398 return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
399}
400
401/// This version of the builder builds a DBG_VALUE intrinsic
402/// for either a value in a register or a register-indirect
403/// address. The convention is that a DBG_VALUE is indirect iff the
404/// second operand is an immediate.
405MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
406 const MCInstrDesc &MCID, bool IsIndirect,
407 unsigned Reg, const MDNode *Variable,
408 const MDNode *Expr);
409
410/// This version of the builder builds a DBG_VALUE intrinsic
411/// for either a value in a register or a register-indirect
412/// address and inserts it at position I.
413MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
414 MachineBasicBlock::iterator I, const DebugLoc &DL,
415 const MCInstrDesc &MCID, bool IsIndirect,
416 unsigned Reg, const MDNode *Variable,
417 const MDNode *Expr);
418
419/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
420MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
421 MachineBasicBlock::iterator I,
422 const MachineInstr &Orig, int FrameIndex);
423
424/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
425/// modifying an instruction in place while iterating over a basic block.
426void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex);
427
428inline unsigned getDefRegState(bool B) {
429 return B ? RegState::Define : 0;
430}
431inline unsigned getImplRegState(bool B) {
432 return B ? RegState::Implicit : 0;
433}
434inline unsigned getKillRegState(bool B) {
435 return B ? RegState::Kill : 0;
436}
437inline unsigned getDeadRegState(bool B) {
438 return B ? RegState::Dead : 0;
439}
440inline unsigned getUndefRegState(bool B) {
441 return B ? RegState::Undef : 0;
442}
443inline unsigned getInternalReadRegState(bool B) {
444 return B ? RegState::InternalRead : 0;
445}
446inline unsigned getDebugRegState(bool B) {
447 return B ? RegState::Debug : 0;
448}
449inline unsigned getRenamableRegState(bool B) {
450 return B ? RegState::Renamable : 0;
451}
452
453/// Get all register state flags from machine operand \p RegOp.
454inline unsigned getRegState(const MachineOperand &RegOp) {
455 assert(RegOp.isReg() && "Not a register operand");
456 return getDefRegState(RegOp.isDef()) |
457 getImplRegState(RegOp.isImplicit()) |
458 getKillRegState(RegOp.isKill()) |
459 getDeadRegState(RegOp.isDead()) |
460 getUndefRegState(RegOp.isUndef()) |
461 getInternalReadRegState(RegOp.isInternalRead()) |
462 getDebugRegState(RegOp.isDebug()) |
463 getRenamableRegState(
464 TargetRegisterInfo::isPhysicalRegister(RegOp.getReg()) &&
465 RegOp.isRenamable());
466}
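// A minimal sketch of the usual idiom, assuming MO is an existing register
// operand being copied onto a builder MIB (both hypothetical names):
//
//   MIB.addReg(MO.getReg(), getRegState(MO), MO.getSubReg());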
467
468/// Helper class for constructing bundles of MachineInstrs.
469///
470/// MIBundleBuilder can create a bundle from scratch by inserting new
471/// MachineInstrs one at a time, or it can create a bundle from a sequence of
472/// existing MachineInstrs in a basic block.
473class MIBundleBuilder {
474 MachineBasicBlock &MBB;
475 MachineBasicBlock::instr_iterator Begin;
476 MachineBasicBlock::instr_iterator End;
477
478public:
479 /// Create an MIBundleBuilder that inserts instructions into a new bundle in
480 /// BB above the bundle or instruction at Pos.
481 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
482 : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
483
484 /// Create a bundle from the sequence of instructions between B and E.
485 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
486 MachineBasicBlock::iterator E)
487 : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
488 assert(B != E && "No instructions to bundle");
489 ++B;
490 while (B != E) {
491 MachineInstr &MI = *B;
492 ++B;
493 MI.bundleWithPred();
494 }
495 }
496
497 /// Create an MIBundleBuilder representing an existing instruction or bundle
498 /// that has MI as its head.
499 explicit MIBundleBuilder(MachineInstr *MI)
500 : MBB(*MI->getParent()), Begin(MI),
501 End(getBundleEnd(MI->getIterator())) {}
502
503 /// Return a reference to the basic block containing this bundle.
504 MachineBasicBlock &getMBB() const { return MBB; }
505
506 /// Return true if no instructions have been inserted in this bundle yet.
507 /// Empty bundles aren't representable in a MachineBasicBlock.
508 bool empty() const { return Begin == End; }
509
510 /// Return an iterator to the first bundled instruction.
511 MachineBasicBlock::instr_iterator begin() const { return Begin; }
512
513 /// Return an iterator beyond the last bundled instruction.
514 MachineBasicBlock::instr_iterator end() const { return End; }
515
516 /// Insert MI into this bundle before I which must point to an instruction in
517 /// the bundle, or end().
518 MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
519 MachineInstr *MI) {
520 MBB.insert(I, MI);
521 if (I == Begin) {
522 if (!empty())
523 MI->bundleWithSucc();
524 Begin = MI->getIterator();
525 return *this;
526 }
527 if (I == End) {
528 MI->bundleWithPred();
529 return *this;
530 }
531 // MI was inserted in the middle of the bundle, so its neighbors' flags are
532 // already fine. Update MI's bundle flags manually.
533 MI->setFlag(MachineInstr::BundledPred);
534 MI->setFlag(MachineInstr::BundledSucc);
535 return *this;
536 }
537
538 /// Insert MI into MBB by prepending it to the instructions in the bundle.
539 /// MI will become the first instruction in the bundle.
540 MIBundleBuilder &prepend(MachineInstr *MI) {
541 return insert(begin(), MI);
542 }
543
544 /// Insert MI into MBB by appending it to the instructions in the bundle.
545 /// MI will become the last instruction in the bundle.
546 MIBundleBuilder &append(MachineInstr *MI) {
547 return insert(end(), MI);
548 }
549};
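// A minimal sketch, assuming a block MBB, an insertion point Pos, and two
// MachineInstrs MI0 and MI1 already created from the same MachineFunction
// (all hypothetical names):
//
//   MIBundleBuilder Bundle(MBB, Pos);
//   Bundle.append(MI0); // becomes the head of the new bundle
//   Bundle.append(MI1); // bundled with its predecessor MI0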
550
551} // end namespace llvm
552
553#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H