Bug Summary

File: lib/CodeGen/MachineInstr.cpp
Warning: line 192, column 3
Null pointer passed to 2nd parameter expecting 'nonnull'
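
In short, the analyzer's path through MachineInstr::addOperand can be condensed into the sketch below (illustrative only; the names come from the annotated source that follows, and the control flow is simplified):

    MachineRegisterInfo *MRI = getRegInfo();       // steps 17-21: returns nullptr when
                                                   // the instruction has no parent MBB
    MachineOperand *OldOperands = Operands;        // steps 22-24: assumed to be null
    if (!OldOperands || OldCap.getSize() == getNumOperands()) {
      Operands = MF.allocateOperandArray(CapOperands);
      if (OpNo)                                    // step 26: assumed non-zero
        moveOperands(Operands, OldOperands, OpNo, MRI);  // step 28: Src is null
    }
    // Inside moveOperands, with MRI also null (steps 29.1-30), the memmove path is taken:
    std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));  // step 31: reported call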

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineInstr.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn374877/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374877/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374877=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-15-233810-7101-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374877/lib/CodeGen/MachineInstr.cpp

/build/llvm-toolchain-snapshot-10~svn374877/lib/CodeGen/MachineInstr.cpp

1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/ADT/APFloat.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/FoldingSet.h"
17#include "llvm/ADT/Hashing.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallBitVector.h"
21#include "llvm/ADT/SmallString.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/Analysis/Loads.h"
25#include "llvm/Analysis/MemoryLocation.h"
26#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineInstrBundle.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/MachineModuleInfo.h"
34#include "llvm/CodeGen/MachineOperand.h"
35#include "llvm/CodeGen/MachineRegisterInfo.h"
36#include "llvm/CodeGen/PseudoSourceValue.h"
37#include "llvm/CodeGen/TargetInstrInfo.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/Config/llvm-config.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DebugInfoMetadata.h"
43#include "llvm/IR/DebugLoc.h"
44#include "llvm/IR/DerivedTypes.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/InlineAsm.h"
47#include "llvm/IR/InstrTypes.h"
48#include "llvm/IR/Intrinsics.h"
49#include "llvm/IR/LLVMContext.h"
50#include "llvm/IR/Metadata.h"
51#include "llvm/IR/Module.h"
52#include "llvm/IR/ModuleSlotTracker.h"
53#include "llvm/IR/Operator.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Value.h"
56#include "llvm/MC/MCInstrDesc.h"
57#include "llvm/MC/MCRegisterInfo.h"
58#include "llvm/MC/MCSymbol.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/CommandLine.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/Debug.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/LowLevelTypeImpl.h"
65#include "llvm/Support/MathExtras.h"
66#include "llvm/Support/raw_ostream.h"
67#include "llvm/Target/TargetIntrinsicInfo.h"
68#include "llvm/Target/TargetMachine.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <cstring>
74#include <iterator>
75#include <utility>
76
77using namespace llvm;
78
79static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
80 if (const MachineBasicBlock *MBB = MI.getParent())
81 if (const MachineFunction *MF = MBB->getParent())
82 return MF;
83 return nullptr;
84}
85
86// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
87// it.
88static void tryToGetTargetInfo(const MachineInstr &MI,
89 const TargetRegisterInfo *&TRI,
90 const MachineRegisterInfo *&MRI,
91 const TargetIntrinsicInfo *&IntrinsicInfo,
92 const TargetInstrInfo *&TII) {
93
94 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
95 TRI = MF->getSubtarget().getRegisterInfo();
96 MRI = &MF->getRegInfo();
97 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
98 TII = MF->getSubtarget().getInstrInfo();
99 }
100}
101
102void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
103 if (MCID->ImplicitDefs)
104 for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
105 ++ImpDefs)
106 addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
107 if (MCID->ImplicitUses)
108 for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
109 ++ImpUses)
110 addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
111}
112
113/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
114/// implicit operands. It reserves space for the number of operands specified by
115/// the MCInstrDesc.
116MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
117 DebugLoc dl, bool NoImp)
118 : MCID(&tid), debugLoc(std::move(dl)) {
119 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
120
121 // Reserve space for the expected number of operands.
122 if (unsigned NumOps = MCID->getNumOperands() +
123 MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
124 CapOperands = OperandCapacity::get(NumOps);
125 Operands = MF.allocateOperandArray(CapOperands);
126 }
127
128 if (!NoImp)
129 addImplicitDefUseOperands(MF);
130}
131
132/// MachineInstr ctor - Copies MachineInstr arg exactly
133///
134MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
135 : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
136 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
137
138 CapOperands = OperandCapacity::get(MI.getNumOperands());
139 Operands = MF.allocateOperandArray(CapOperands);
140
141 // Copy operands.
142 for (const MachineOperand &MO : MI.operands())
143 addOperand(MF, MO);
144
145 // Copy all the sensible flags.
146 setFlags(MI.Flags);
147}
148
149/// getRegInfo - If this instruction is embedded into a MachineFunction,
150/// return the MachineRegisterInfo object for the current function, otherwise
151/// return null.
152MachineRegisterInfo *MachineInstr::getRegInfo() {
153 if (MachineBasicBlock *MBB = getParent())
18 Assuming 'MBB' is null
19 Taking false branch
154 return &MBB->getParent()->getRegInfo();
155 return nullptr;
20 Returning null pointer, which participates in a condition later
156}
157
158/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
159/// this instruction from their respective use lists. This requires that the
160/// operands already be on their use lists.
161void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
162 for (MachineOperand &MO : operands())
163 if (MO.isReg())
164 MRI.removeRegOperandFromUseList(&MO);
165}
166
167/// AddRegOperandsToUseLists - Add all of the register operands in
168/// this instruction from their respective use lists. This requires that the
169/// operands not be on their use lists yet.
170void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
171 for (MachineOperand &MO : operands())
172 if (MO.isReg())
173 MRI.addRegOperandToUseList(&MO);
174}
175
176void MachineInstr::addOperand(const MachineOperand &Op) {
177 MachineBasicBlock *MBB = getParent();
178 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
179 MachineFunction *MF = MBB->getParent();
180 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
181 addOperand(*MF, Op);
182}
183
184/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
185/// ranges. If MRI is non-null also update use-def chains.
186static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
187 unsigned NumOps, MachineRegisterInfo *MRI) {
188 if (MRI)
29.1 'MRI' is null
30 Taking false branch
189 return MRI->moveOperands(Dst, Src, NumOps);
190
191 // MachineOperand is a trivially copyable type so we can just use memmove.
192 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
31 Null pointer passed to 2nd parameter expecting 'nonnull'
193}
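
One way to make the precondition of moveOperands explicit to the analyzer would be an assertion on its arguments. This is a sketch only, assuming every caller passes a non-null Src and a non-zero NumOps; it is not a change present in the file above:

    static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                             unsigned NumOps, MachineRegisterInfo *MRI) {
      assert(Dst && Src && NumOps && "moveOperands expects non-empty ranges");
      if (MRI)
        return MRI->moveOperands(Dst, Src, NumOps);
      // MachineOperand is trivially copyable, so memmove is enough here.
      std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
    }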
194
195/// addOperand - Add the specified operand to the instruction. If it is an
196/// implicit operand, it is added to the end of the operand list. If it is
197/// an explicit operand it is added at the end of the explicit operand list
198/// (before the first implicit operand).
199void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
200 assert(MCID && "Cannot add operands before providing an instr descriptor");
6 Assuming field 'MCID' is non-null
7 '?' condition is true
201
202 // Check if we're adding one of our existing operands.
203 if (&Op >= Operands && &Op < Operands + NumOperands) {
8 Assuming the condition is false
204 // This is unusual: MI->addOperand(MI->getOperand(i)).
205 // If adding Op requires reallocating or moving existing operands around,
206 // the Op reference could go stale. Support it by copying Op.
207 MachineOperand CopyOp(Op);
208 return addOperand(MF, CopyOp);
209 }
210
211 // Find the insert location for the new operand. Implicit registers go at
212 // the end, everything else goes before the implicit regs.
213 //
214 // FIXME: Allow mixed explicit and implicit operands on inline asm.
215 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
216 // implicit-defs, but they must not be moved around. See the FIXME in
217 // InstrEmitter.cpp.
218 unsigned OpNo = getNumOperands();
219 bool isImpReg = Op.isReg() && Op.isImplicit();
220 if (!isImpReg && !isInlineAsm()) {
8.1 'isImpReg' is false
9 Calling 'MachineInstr::isInlineAsm'
13 Returning from 'MachineInstr::isInlineAsm'
14 Taking false branch
221 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
222 --OpNo;
223 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
224 }
225 }
226
227#ifndef NDEBUG
228 bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata ||
229 Op.getType() == MachineOperand::MO_MCSymbol;
230 // OpNo now points as the desired insertion point. Unless this is a variadic
231 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
232 // RegMask operands go between the explicit and implicit operands.
233 assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
234 OpNo < MCID->getNumOperands() || isDebugOp) &&
235 "Trying to add an operand to a machine instr that is already done!");
14.1 'isImpReg' is false
15 Assuming the condition is true
16 '?' condition is true
236#endif
237
238 MachineRegisterInfo *MRI = getRegInfo();
17 Calling 'MachineInstr::getRegInfo'
21 Returning from 'MachineInstr::getRegInfo'
239
240 // Determine if the Operands array needs to be reallocated.
241 // Save the old capacity and operand array.
242 OperandCapacity OldCap = CapOperands;
243 MachineOperand *OldOperands = Operands;
22 'OldOperands' initialized here
244 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
23 Assuming pointer value is null
24 Assuming 'OldOperands' is null
245 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
24.1 'OldOperands' is null
25 '?' condition is false
246 Operands = MF.allocateOperandArray(CapOperands);
247 // Move the operands before the insertion point.
248 if (OpNo)
26 Assuming 'OpNo' is not equal to 0
27 Taking true branch
249 moveOperands(Operands, OldOperands, OpNo, MRI);
28 Passing null pointer value via 2nd parameter 'Src'
29 Calling 'moveOperands'
250 }
251
252 // Move the operands following the insertion point.
253 if (OpNo != NumOperands)
254 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
255 MRI);
256 ++NumOperands;
257
258 // Deallocate the old operand array.
259 if (OldOperands != Operands && OldOperands)
260 MF.deallocateOperandArray(OldCap, OldOperands);
261
262 // Copy Op into place. It still needs to be inserted into the MRI use lists.
263 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
264 NewMO->ParentMI = this;
265
266 // When adding a register operand, tell MRI about it.
267 if (NewMO->isReg()) {
268 // Ensure isOnRegUseList() returns false, regardless of Op's status.
269 NewMO->Contents.Reg.Prev = nullptr;
270 // Ignore existing ties. This is not a property that can be copied.
271 NewMO->TiedTo = 0;
272 // Add the new operand to MRI, but only for instructions in an MBB.
273 if (MRI)
274 MRI->addRegOperandToUseList(NewMO);
275 // The MCID operand information isn't accurate until we start adding
276 // explicit operands. The implicit operands are added first, then the
277 // explicits are inserted before them.
278 if (!isImpReg) {
279 // Tie uses to defs as indicated in MCInstrDesc.
280 if (NewMO->isUse()) {
281 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
282 if (DefIdx != -1)
283 tieOperands(DefIdx, OpNo);
284 }
285 // If the register operand is flagged as early, mark the operand as such.
286 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
287 NewMO->setIsEarlyClobber(true);
288 }
289 }
290}
291
292/// RemoveOperand - Erase an operand from an instruction, leaving it with one
293/// fewer operand than it started with.
294///
295void MachineInstr::RemoveOperand(unsigned OpNo) {
296 assert(OpNo < getNumOperands() && "Invalid operand number");
297 untieRegOperand(OpNo);
298
299#ifndef NDEBUG
300 // Moving tied operands would break the ties.
301 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
302 if (Operands[i].isReg())
303 assert(!Operands[i].isTied() && "Cannot move tied operands");
304#endif
305
306 MachineRegisterInfo *MRI = getRegInfo();
307 if (MRI && Operands[OpNo].isReg())
308 MRI->removeRegOperandFromUseList(Operands + OpNo);
309
310 // Don't call the MachineOperand destructor. A lot of this code depends on
311 // MachineOperand having a trivial destructor anyway, and adding a call here
312 // wouldn't make it 'destructor-correct'.
313
314 if (unsigned N = NumOperands - 1 - OpNo)
315 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
316 --NumOperands;
317}
318
319void MachineInstr::dropMemRefs(MachineFunction &MF) {
320 if (memoperands_empty())
321 return;
322
323 // See if we can just drop all of our extra info.
324 if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
325 Info.clear();
326 return;
327 }
328 if (!getPostInstrSymbol()) {
329 Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
330 return;
331 }
332 if (!getPreInstrSymbol()) {
333 Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
334 return;
335 }
336
337 // Otherwise allocate a fresh extra info with just these symbols.
338 Info.set<EIIK_OutOfLine>(
339 MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
340}
341
342void MachineInstr::setMemRefs(MachineFunction &MF,
343 ArrayRef<MachineMemOperand *> MMOs) {
344 if (MMOs.empty()) {
345 dropMemRefs(MF);
346 return;
347 }
348
349 // Try to store a single MMO inline.
350 if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
351 Info.set<EIIK_MMO>(MMOs[0]);
352 return;
353 }
354
355 // Otherwise create an extra info struct with all of our info.
356 Info.set<EIIK_OutOfLine>(
357 MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
358}
359
360void MachineInstr::addMemOperand(MachineFunction &MF,
361 MachineMemOperand *MO) {
362 SmallVector<MachineMemOperand *, 2> MMOs;
363 MMOs.append(memoperands_begin(), memoperands_end());
364 MMOs.push_back(MO);
365 setMemRefs(MF, MMOs);
366}
367
368void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
369 if (this == &MI)
370 // Nothing to do for a self-clone!
371 return;
372
373 assert(&MF == MI.getMF() &&
374 "Invalid machine functions when cloning memory refrences!");
375 // See if we can just steal the extra info already allocated for the
376 // instruction. We can do this whenever the pre- and post-instruction symbols
377 // are the same (including null).
378 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
379 getPostInstrSymbol() == MI.getPostInstrSymbol()) {
380 Info = MI.Info;
381 return;
382 }
383
384 // Otherwise, fall back on a copy-based clone.
385 setMemRefs(MF, MI.memoperands());
386}
387
388/// Check to see if the MMOs pointed to by the two MemRefs arrays are
389/// identical.
390static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
391 ArrayRef<MachineMemOperand *> RHS) {
392 if (LHS.size() != RHS.size())
393 return false;
394
395 auto LHSPointees = make_pointee_range(LHS);
396 auto RHSPointees = make_pointee_range(RHS);
397 return std::equal(LHSPointees.begin(), LHSPointees.end(),
398 RHSPointees.begin());
399}
400
401void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
402 ArrayRef<const MachineInstr *> MIs) {
403 // Try handling easy numbers of MIs with simpler mechanisms.
404 if (MIs.empty()) {
405 dropMemRefs(MF);
406 return;
407 }
408 if (MIs.size() == 1) {
409 cloneMemRefs(MF, *MIs[0]);
410 return;
411 }
412 // Because an empty memoperands list provides *no* information and must be
413 // handled conservatively (assuming the instruction can do anything), the only
414 // way to merge with it is to drop all other memoperands.
415 if (MIs[0]->memoperands_empty()) {
416 dropMemRefs(MF);
417 return;
418 }
419
420 // Handle the general case.
421 SmallVector<MachineMemOperand *, 2> MergedMMOs;
422 // Start with the first instruction.
423 assert(&MF == MIs[0]->getMF() &&
424 "Invalid machine functions when cloning memory references!");
425 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
426 // Now walk all the other instructions and accumulate any different MMOs.
427 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
428 assert(&MF == MI.getMF() &&
429 "Invalid machine functions when cloning memory references!");
430
431 // Skip MIs with identical operands to the first. This is a somewhat
432 // arbitrary hack but will catch common cases without being quadratic.
433 // TODO: We could fully implement merge semantics here if needed.
434 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
435 continue;
436
437 // Because an empty memoperands list provides *no* information and must be
438 // handled conservatively (assuming the instruction can do anything), the
439 // only way to merge with it is to drop all other memoperands.
440 if (MI.memoperands_empty()) {
441 dropMemRefs(MF);
442 return;
443 }
444
445 // Otherwise accumulate these into our temporary buffer of the merged state.
446 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
447 }
448
449 setMemRefs(MF, MergedMMOs);
450}
451
452void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
453 MCSymbol *OldSymbol = getPreInstrSymbol();
454 if (OldSymbol == Symbol)
455 return;
456 if (OldSymbol && !Symbol) {
457 // We're removing a symbol rather than adding one. Try to clean up any
458 // extra info carried around.
459 if (Info.is<EIIK_PreInstrSymbol>()) {
460 Info.clear();
461 return;
462 }
463
464 if (memoperands_empty()) {
465 assert(getPostInstrSymbol() &&
466 "Should never have only a single symbol allocated out-of-line!");
467 Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
468 return;
469 }
470
471 // Otherwise fallback on the generic update.
472 } else if (!Info || Info.is<EIIK_PreInstrSymbol>()) {
473 // If we don't have any other extra info, we can store this inline.
474 Info.set<EIIK_PreInstrSymbol>(Symbol);
475 return;
476 }
477
478 // Otherwise, allocate a full new set of extra info.
479 // FIXME: Maybe we should make the symbols in the extra info mutable?
480 Info.set<EIIK_OutOfLine>(
481 MF.createMIExtraInfo(memoperands(), Symbol, getPostInstrSymbol()));
482}
483
484void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
485 MCSymbol *OldSymbol = getPostInstrSymbol();
486 if (OldSymbol == Symbol)
487 return;
488 if (OldSymbol && !Symbol) {
489 // We're removing a symbol rather than adding one. Try to clean up any
490 // extra info carried around.
491 if (Info.is<EIIK_PostInstrSymbol>()) {
492 Info.clear();
493 return;
494 }
495
496 if (memoperands_empty()) {
497 assert(getPreInstrSymbol() &&
498 "Should never have only a single symbol allocated out-of-line!");
499 Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
500 return;
501 }
502
503 // Otherwise fallback on the generic update.
504 } else if (!Info || Info.is<EIIK_PostInstrSymbol>()) {
505 // If we don't have any other extra info, we can store this inline.
506 Info.set<EIIK_PostInstrSymbol>(Symbol);
507 return;
508 }
509
510 // Otherwise, allocate a full new set of extra info.
511 // FIXME: Maybe we should make the symbols in the extra info mutable?
512 Info.set<EIIK_OutOfLine>(
513 MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol));
514}
515
516void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
517 const MachineInstr &MI) {
518 if (this == &MI)
519 // Nothing to do for a self-clone!
520 return;
521
522 assert(&MF == MI.getMF() &&
523 "Invalid machine functions when cloning instruction symbols!");
524
525 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
526 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
527}
528
529uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
530 // For now, the just return the union of the flags. If the flags get more
531 // complicated over time, we might need more logic here.
532 return getFlags() | Other.getFlags();
533}
534
535uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
536 uint16_t MIFlags = 0;
537 // Copy the wrapping flags.
538 if (const OverflowingBinaryOperator *OB =
539 dyn_cast<OverflowingBinaryOperator>(&I)) {
540 if (OB->hasNoSignedWrap())
541 MIFlags |= MachineInstr::MIFlag::NoSWrap;
542 if (OB->hasNoUnsignedWrap())
543 MIFlags |= MachineInstr::MIFlag::NoUWrap;
544 }
545
546 // Copy the exact flag.
547 if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
548 if (PE->isExact())
549 MIFlags |= MachineInstr::MIFlag::IsExact;
550
551 // Copy the fast-math flags.
552 if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
553 const FastMathFlags Flags = FP->getFastMathFlags();
554 if (Flags.noNaNs())
555 MIFlags |= MachineInstr::MIFlag::FmNoNans;
556 if (Flags.noInfs())
557 MIFlags |= MachineInstr::MIFlag::FmNoInfs;
558 if (Flags.noSignedZeros())
559 MIFlags |= MachineInstr::MIFlag::FmNsz;
560 if (Flags.allowReciprocal())
561 MIFlags |= MachineInstr::MIFlag::FmArcp;
562 if (Flags.allowContract())
563 MIFlags |= MachineInstr::MIFlag::FmContract;
564 if (Flags.approxFunc())
565 MIFlags |= MachineInstr::MIFlag::FmAfn;
566 if (Flags.allowReassoc())
567 MIFlags |= MachineInstr::MIFlag::FmReassoc;
568 }
569
570 return MIFlags;
571}
572
573void MachineInstr::copyIRFlags(const Instruction &I) {
574 Flags = copyFlagsFromInstruction(I);
575}
576
577bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
578 assert(!isBundledWithPred() && "Must be called on bundle header");
579 for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
580 if (MII->getDesc().getFlags() & Mask) {
581 if (Type == AnyInBundle)
582 return true;
583 } else {
584 if (Type == AllInBundle && !MII->isBundle())
585 return false;
586 }
587 // This was the last instruction in the bundle.
588 if (!MII->isBundledWithSucc())
589 return Type == AllInBundle;
590 }
591}
592
593bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
594 MICheckType Check) const {
595 // If opcodes or number of operands are not the same then the two
596 // instructions are obviously not identical.
597 if (Other.getOpcode() != getOpcode() ||
598 Other.getNumOperands() != getNumOperands())
599 return false;
600
601 if (isBundle()) {
602 // We have passed the test above that both instructions have the same
603 // opcode, so we know that both instructions are bundles here. Let's compare
604 // MIs inside the bundle.
605 assert(Other.isBundle() && "Expected that both instructions are bundles.");
606 MachineBasicBlock::const_instr_iterator I1 = getIterator();
607 MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
608 // Loop until we analysed the last intruction inside at least one of the
609 // bundles.
610 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
611 ++I1;
612 ++I2;
613 if (!I1->isIdenticalTo(*I2, Check))
614 return false;
615 }
616 // If we've reached the end of just one of the two bundles, but not both,
617 // the instructions are not identical.
618 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
619 return false;
620 }
621
622 // Check operands to make sure they match.
623 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
624 const MachineOperand &MO = getOperand(i);
625 const MachineOperand &OMO = Other.getOperand(i);
626 if (!MO.isReg()) {
627 if (!MO.isIdenticalTo(OMO))
628 return false;
629 continue;
630 }
631
632 // Clients may or may not want to ignore defs when testing for equality.
633 // For example, machine CSE pass only cares about finding common
634 // subexpressions, so it's safe to ignore virtual register defs.
635 if (MO.isDef()) {
636 if (Check == IgnoreDefs)
637 continue;
638 else if (Check == IgnoreVRegDefs) {
639 if (!Register::isVirtualRegister(MO.getReg()) ||
640 !Register::isVirtualRegister(OMO.getReg()))
641 if (!MO.isIdenticalTo(OMO))
642 return false;
643 } else {
644 if (!MO.isIdenticalTo(OMO))
645 return false;
646 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
647 return false;
648 }
649 } else {
650 if (!MO.isIdenticalTo(OMO))
651 return false;
652 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
653 return false;
654 }
655 }
656 // If DebugLoc does not match then two debug instructions are not identical.
657 if (isDebugInstr())
658 if (getDebugLoc() && Other.getDebugLoc() &&
659 getDebugLoc() != Other.getDebugLoc())
660 return false;
661 return true;
662}
663
664const MachineFunction *MachineInstr::getMF() const {
665 return getParent()->getParent();
666}
667
668MachineInstr *MachineInstr::removeFromParent() {
669 assert(getParent() && "Not embedded in a basic block!");
670 return getParent()->remove(this);
671}
672
673MachineInstr *MachineInstr::removeFromBundle() {
674 assert(getParent() && "Not embedded in a basic block!");
675 return getParent()->remove_instr(this);
676}
677
678void MachineInstr::eraseFromParent() {
679 assert(getParent() && "Not embedded in a basic block!");
680 getParent()->erase(this);
681}
682
683void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
684 assert(getParent() && "Not embedded in a basic block!");
685 MachineBasicBlock *MBB = getParent();
686 MachineFunction *MF = MBB->getParent();
687 assert(MF && "Not embedded in a function!");
688
689 MachineInstr *MI = (MachineInstr *)this;
690 MachineRegisterInfo &MRI = MF->getRegInfo();
691
692 for (const MachineOperand &MO : MI->operands()) {
693 if (!MO.isReg() || !MO.isDef())
694 continue;
695 Register Reg = MO.getReg();
696 if (!Reg.isVirtual())
697 continue;
698 MRI.markUsesInDebugValueAsUndef(Reg);
699 }
700 MI->eraseFromParent();
701}
702
703void MachineInstr::eraseFromBundle() {
704 assert(getParent() && "Not embedded in a basic block!");
705 getParent()->erase_instr(this);
706}
707
708unsigned MachineInstr::getNumExplicitOperands() const {
709 unsigned NumOperands = MCID->getNumOperands();
710 if (!MCID->isVariadic())
711 return NumOperands;
712
713 for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
714 const MachineOperand &MO = getOperand(I);
715 // The operands must always be in the following order:
716 // - explicit reg defs,
717 // - other explicit operands (reg uses, immediates, etc.),
718 // - implicit reg defs
719 // - implicit reg uses
720 if (MO.isReg() && MO.isImplicit())
721 break;
722 ++NumOperands;
723 }
724 return NumOperands;
725}
726
727unsigned MachineInstr::getNumExplicitDefs() const {
728 unsigned NumDefs = MCID->getNumDefs();
729 if (!MCID->isVariadic())
730 return NumDefs;
731
732 for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
733 const MachineOperand &MO = getOperand(I);
734 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
735 break;
736 ++NumDefs;
737 }
738 return NumDefs;
739}
740
741void MachineInstr::bundleWithPred() {
742 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
743 setFlag(BundledPred);
744 MachineBasicBlock::instr_iterator Pred = getIterator();
745 --Pred;
746 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
747 Pred->setFlag(BundledSucc);
748}
749
750void MachineInstr::bundleWithSucc() {
751 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
752 setFlag(BundledSucc);
753 MachineBasicBlock::instr_iterator Succ = getIterator();
754 ++Succ;
755 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
756 Succ->setFlag(BundledPred);
757}
758
759void MachineInstr::unbundleFromPred() {
760 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
761 clearFlag(BundledPred);
762 MachineBasicBlock::instr_iterator Pred = getIterator();
763 --Pred;
764 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
765 Pred->clearFlag(BundledSucc);
766}
767
768void MachineInstr::unbundleFromSucc() {
769 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
770 clearFlag(BundledSucc);
771 MachineBasicBlock::instr_iterator Succ = getIterator();
772 ++Succ;
773 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
774 Succ->clearFlag(BundledPred);
775}
776
777bool MachineInstr::isStackAligningInlineAsm() const {
778 if (isInlineAsm()) {
779 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
780 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
781 return true;
782 }
783 return false;
784}
785
786InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
787 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
788 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
789 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
790}
791
792int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
793 unsigned *GroupNo) const {
794 assert(isInlineAsm() && "Expected an inline asm instruction");
795 assert(OpIdx < getNumOperands() && "OpIdx out of range");
796
797 // Ignore queries about the initial operands.
798 if (OpIdx < InlineAsm::MIOp_FirstOperand)
799 return -1;
800
801 unsigned Group = 0;
802 unsigned NumOps;
803 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
804 i += NumOps) {
805 const MachineOperand &FlagMO = getOperand(i);
806 // If we reach the implicit register operands, stop looking.
807 if (!FlagMO.isImm())
808 return -1;
809 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
810 if (i + NumOps > OpIdx) {
811 if (GroupNo)
812 *GroupNo = Group;
813 return i;
814 }
815 ++Group;
816 }
817 return -1;
818}
819
820const DILabel *MachineInstr::getDebugLabel() const {
821 assert(isDebugLabel() && "not a DBG_LABEL");
822 return cast<DILabel>(getOperand(0).getMetadata());
823}
824
825const DILocalVariable *MachineInstr::getDebugVariable() const {
826 assert(isDebugValue() && "not a DBG_VALUE");
827 return cast<DILocalVariable>(getOperand(2).getMetadata());
828}
829
830const DIExpression *MachineInstr::getDebugExpression() const {
831 assert(isDebugValue() && "not a DBG_VALUE");
832 return cast<DIExpression>(getOperand(3).getMetadata());
833}
834
835const TargetRegisterClass*
836MachineInstr::getRegClassConstraint(unsigned OpIdx,
837 const TargetInstrInfo *TII,
838 const TargetRegisterInfo *TRI) const {
839 assert(getParent() && "Can't have an MBB reference here!");
840 assert(getMF() && "Can't have an MF reference here!");
841 const MachineFunction &MF = *getMF();
842
843 // Most opcodes have fixed constraints in their MCInstrDesc.
844 if (!isInlineAsm())
845 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
846
847 if (!getOperand(OpIdx).isReg())
848 return nullptr;
849
850 // For tied uses on inline asm, get the constraint from the def.
851 unsigned DefIdx;
852 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
853 OpIdx = DefIdx;
854
855 // Inline asm stores register class constraints in the flag word.
856 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
857 if (FlagIdx < 0)
858 return nullptr;
859
860 unsigned Flag = getOperand(FlagIdx).getImm();
861 unsigned RCID;
862 if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
863 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
864 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
865 InlineAsm::hasRegClassConstraint(Flag, RCID))
866 return TRI->getRegClass(RCID);
867
868 // Assume that all registers in a memory operand are pointers.
869 if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
870 return TRI->getPointerRegClass(MF);
871
872 return nullptr;
873}
874
875const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
876 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
877 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
878 // Check every operands inside the bundle if we have
879 // been asked to.
880 if (ExploreBundle)
881 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
882 ++OpndIt)
883 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
884 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
885 else
886 // Otherwise, just check the current operands.
887 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
888 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
889 return CurRC;
890}
891
892const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
893 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
894 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
895 assert(CurRC && "Invalid initial register class");
896 // Check if Reg is constrained by some of its use/def from MI.
897 const MachineOperand &MO = getOperand(OpIdx);
898 if (!MO.isReg() || MO.getReg() != Reg)
899 return CurRC;
900 // If yes, accumulate the constraints through the operand.
901 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
902}
903
904const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
905 unsigned OpIdx, const TargetRegisterClass *CurRC,
906 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
907 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
908 const MachineOperand &MO = getOperand(OpIdx);
909 assert(MO.isReg() &&
910 "Cannot get register constraints for non-register operand");
911 assert(CurRC && "Invalid initial register class");
912 if (unsigned SubIdx = MO.getSubReg()) {
913 if (OpRC)
914 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
915 else
916 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
917 } else if (OpRC)
918 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
919 return CurRC;
920}
921
922/// Return the number of instructions inside the MI bundle, not counting the
923/// header instruction.
924unsigned MachineInstr::getBundleSize() const {
925 MachineBasicBlock::const_instr_iterator I = getIterator();
926 unsigned Size = 0;
927 while (I->isBundledWithSucc()) {
928 ++Size;
929 ++I;
930 }
931 return Size;
932}
933
934/// Returns true if the MachineInstr has an implicit-use operand of exactly
935/// the given register (not considering sub/super-registers).
936bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
937 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
938 const MachineOperand &MO = getOperand(i);
939 if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
940 return true;
941 }
942 return false;
943}
944
945/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
946/// the specific register or -1 if it is not found. It further tightens
947/// the search criteria to a use that kills the register if isKill is true.
948int MachineInstr::findRegisterUseOperandIdx(
949 Register Reg, bool isKill, const TargetRegisterInfo *TRI) const {
950 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
951 const MachineOperand &MO = getOperand(i);
952 if (!MO.isReg() || !MO.isUse())
953 continue;
954 Register MOReg = MO.getReg();
955 if (!MOReg)
956 continue;
957 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
958 if (!isKill || MO.isKill())
959 return i;
960 }
961 return -1;
962}
963
964/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
965/// indicating if this instruction reads or writes Reg. This also considers
966/// partial defines.
967std::pair<bool,bool>
968MachineInstr::readsWritesVirtualRegister(Register Reg,
969 SmallVectorImpl<unsigned> *Ops) const {
970 bool PartDef = false; // Partial redefine.
971 bool FullDef = false; // Full define.
972 bool Use = false;
973
974 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
975 const MachineOperand &MO = getOperand(i);
976 if (!MO.isReg() || MO.getReg() != Reg)
977 continue;
978 if (Ops)
979 Ops->push_back(i);
980 if (MO.isUse())
981 Use |= !MO.isUndef();
982 else if (MO.getSubReg() && !MO.isUndef())
983 // A partial def undef doesn't count as reading the register.
984 PartDef = true;
985 else
986 FullDef = true;
987 }
988 // A partial redefine uses Reg unless there is also a full define.
989 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
990}
991
992/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
993/// the specified register or -1 if it is not found. If isDead is true, defs
994/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
995/// also checks if there is a def of a super-register.
996int
997MachineInstr::findRegisterDefOperandIdx(Register Reg, bool isDead, bool Overlap,
998 const TargetRegisterInfo *TRI) const {
999 bool isPhys = Register::isPhysicalRegister(Reg);
1000 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1001 const MachineOperand &MO = getOperand(i);
1002 // Accept regmask operands when Overlap is set.
1003 // Ignore them when looking for a specific def operand (Overlap == false).
1004 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1005 return i;
1006 if (!MO.isReg() || !MO.isDef())
1007 continue;
1008 Register MOReg = MO.getReg();
1009 bool Found = (MOReg == Reg);
1010 if (!Found && TRI && isPhys && Register::isPhysicalRegister(MOReg)) {
1011 if (Overlap)
1012 Found = TRI->regsOverlap(MOReg, Reg);
1013 else
1014 Found = TRI->isSubRegister(MOReg, Reg);
1015 }
1016 if (Found && (!isDead || MO.isDead()))
1017 return i;
1018 }
1019 return -1;
1020}
1021
1022/// findFirstPredOperandIdx() - Find the index of the first operand in the
1023/// operand list that is used to represent the predicate. It returns -1 if
1024/// none is found.
1025int MachineInstr::findFirstPredOperandIdx() const {
1026 // Don't call MCID.findFirstPredOperandIdx() because this variant
1027 // is sometimes called on an instruction that's not yet complete, and
1028 // so the number of operands is less than the MCID indicates. In
1029 // particular, the PTX target does this.
1030 const MCInstrDesc &MCID = getDesc();
1031 if (MCID.isPredicable()) {
1032 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1033 if (MCID.OpInfo[i].isPredicate())
1034 return i;
1035 }
1036
1037 return -1;
1038}
1039
1040// MachineOperand::TiedTo is 4 bits wide.
1041const unsigned TiedMax = 15;
1042
1043/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1044///
1045/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1046/// field. TiedTo can have these values:
1047///
1048/// 0: Operand is not tied to anything.
1049/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1050/// TiedMax: Tied to an operand >= TiedMax-1.
1051///
1052/// The tied def must be one of the first TiedMax operands on a normal
1053/// instruction. INLINEASM instructions allow more tied defs.
1054///
1055void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1056 MachineOperand &DefMO = getOperand(DefIdx);
1057 MachineOperand &UseMO = getOperand(UseIdx);
1058 assert(DefMO.isDef() && "DefIdx must be a def operand");
1059 assert(UseMO.isUse() && "UseIdx must be a use operand");
1060 assert(!DefMO.isTied() && "Def is already tied to another use");
1061 assert(!UseMO.isTied() && "Use is already tied to another def");
1062
1063 if (DefIdx < TiedMax)
1064 UseMO.TiedTo = DefIdx + 1;
1065 else {
1066 // Inline asm can use the group descriptors to find tied operands, but on
1067 // normal instruction, the tied def must be within the first TiedMax
1068 // operands.
1069 assert(isInlineAsm() && "DefIdx out of range");
1070 UseMO.TiedTo = TiedMax;
1071 }
1072
1073 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1074 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1075}
1076
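// Worked example of the TiedTo encoding described above, as standalone C++
// (independent of LLVM; the value 15 mirrors TiedMax defined earlier):
#include <algorithm>
#include <cassert>
int main() {
  const unsigned TiedMax = 15;
  // tieOperands(DefIdx = 0, UseIdx = 2) on a normal instruction:
  unsigned UseTiedTo = 0 + 1;                      // use points at operand 0
  unsigned DefTiedTo = std::min(2u + 1, TiedMax);  // def points at operand 2
  assert(UseTiedTo == 1 && DefTiedTo == 3);
  // A large use index saturates; findTiedOperandIdx() then searches for it.
  assert(std::min(40u + 1, TiedMax) == TiedMax);
  return 0;
}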
1077/// Given the index of a tied register operand, find the operand it is tied to.
1078/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1079/// which must exist.
1080unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1081 const MachineOperand &MO = getOperand(OpIdx);
1082 assert(MO.isTied() && "Operand isn't tied");
1083
1084 // Normally TiedTo is in range.
1085 if (MO.TiedTo < TiedMax)
1086 return MO.TiedTo - 1;
1087
1088 // Uses on normal instructions can be out of range.
1089 if (!isInlineAsm()) {
1090 // Normal tied defs must be in the 0..TiedMax-1 range.
1091 if (MO.isUse())
1092 return TiedMax - 1;
1093 // MO is a def. Search for the tied use.
1094 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1095 const MachineOperand &UseMO = getOperand(i);
1096 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1097 return i;
1098 }
1099 llvm_unreachable("Can't find tied use");
1100 }
1101
1102 // Now deal with inline asm by parsing the operand group descriptor flags.
1103 // Find the beginning of each operand group.
1104 SmallVector<unsigned, 8> GroupIdx;
1105 unsigned OpIdxGroup = ~0u;
1106 unsigned NumOps;
1107 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1108 i += NumOps) {
1109 const MachineOperand &FlagMO = getOperand(i);
1110 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1111 unsigned CurGroup = GroupIdx.size();
1112 GroupIdx.push_back(i);
1113 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
1114 // OpIdx belongs to this operand group.
1115 if (OpIdx > i && OpIdx < i + NumOps)
1116 OpIdxGroup = CurGroup;
1117 unsigned TiedGroup;
1118 if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
1119 continue;
1120 // Operands in this group are tied to operands in TiedGroup which must be
1121 // earlier. Find the number of operands between the two groups.
1122 unsigned Delta = i - GroupIdx[TiedGroup];
1123
1124 // OpIdx is a use tied to TiedGroup.
1125 if (OpIdxGroup == CurGroup)
1126 return OpIdx - Delta;
1127
1128 // OpIdx is a def tied to this use group.
1129 if (OpIdxGroup == TiedGroup)
1130 return OpIdx + Delta;
1131 }
1132 llvm_unreachable("Invalid tied operand on inline asm");
1133}
1134
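// Worked example of the group-descriptor arithmetic above, as standalone C++
// with invented operand indices: a def group whose flag operand sits at
// index 1 and a tied use group whose flag operand sits at index 5, each
// group laid out as {flag, reg}.
#include <cassert>
int main() {
  unsigned DefGroupFlagIdx = 1, UseGroupFlagIdx = 5;
  unsigned Delta = UseGroupFlagIdx - DefGroupFlagIdx; // groups lie 4 operands apart
  unsigned UseRegIdx = 6;               // register operand inside the use group
  assert(UseRegIdx - Delta == 2);       // maps onto the tied def register
  unsigned DefRegIdx = 2;
  assert(DefRegIdx + Delta == 6);       // and the def maps back onto the use
  return 0;
}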
1135/// clearKillInfo - Clears kill flags on all operands.
1136///
1137void MachineInstr::clearKillInfo() {
1138 for (MachineOperand &MO : operands()) {
1139 if (MO.isReg() && MO.isUse())
1140 MO.setIsKill(false);
1141 }
1142}
1143
1144void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1145 unsigned SubIdx,
1146 const TargetRegisterInfo &RegInfo) {
1147 if (Register::isPhysicalRegister(ToReg)) {
1148 if (SubIdx)
1149 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1150 for (MachineOperand &MO : operands()) {
1151 if (!MO.isReg() || MO.getReg() != FromReg)
1152 continue;
1153 MO.substPhysReg(ToReg, RegInfo);
1154 }
1155 } else {
1156 for (MachineOperand &MO : operands()) {
1157 if (!MO.isReg() || MO.getReg() != FromReg)
1158 continue;
1159 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1160 }
1161 }
1162}
1163
1164/// isSafeToMove - Return true if it is safe to move this instruction. If
1165/// SawStore is set to true, it means that there is a store (or call) between
1166/// the instruction's location and its intended destination.
1167bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
1168 // Ignore stuff that we obviously can't move.
1169 //
1170 // Treat volatile loads as stores. This is not strictly necessary for
1171 // volatiles, but it is required for atomic loads. It is not allowed to move
1172 // a load across an atomic load with Ordering > Monotonic.
1173 if (mayStore() || isCall() || isPHI() ||
1174 (mayLoad() && hasOrderedMemoryRef())) {
1175 SawStore = true;
1176 return false;
1177 }
1178
1179 if (isPosition() || isDebugInstr() || isTerminator() ||
1180 mayRaiseFPException() || hasUnmodeledSideEffects())
1181 return false;
1182
1183 // See if this instruction does a load. If so, we have to guarantee that the
1184 // loaded value doesn't change between the load and its intended
1185 // destination. The check for isInvariantLoad gives the target the chance to
1186 // classify the load as always returning a constant, e.g. a constant pool
1187 // load.
1188 if (mayLoad() && !isDereferenceableInvariantLoad(AA))
1189 // Otherwise, this is a real load. If there is a store between the load and
1190 // end of block, we can't move it.
1191 return !SawStore;
1192
1193 return true;
1194}
1195
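// Illustrative sketch, not part of MachineInstr.cpp: the usual calling
// pattern for isSafeToMove(), letting earlier instructions accumulate their
// store/call side effects into SawStore before querying the candidate.
// `canHoistAboveBlock` is an invented name; AA may be null.
static bool canHoistAboveBlock(MachineBasicBlock &MBB, MachineInstr &Candidate,
                               AliasAnalysis *AA) {
  bool SawStore = false;
  for (MachineInstr &MI : MBB) {
    if (&MI == &Candidate)
      return MI.isSafeToMove(AA, SawStore);
    (void)MI.isSafeToMove(AA, SawStore); // only used to update SawStore here
  }
  return false;
}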
1196bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
1197 bool UseTBAA) const {
1198 const MachineFunction *MF = getMF();
1199 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1200 const MachineFrameInfo &MFI = MF->getFrameInfo();
1201
1202 // If neither instruction stores to memory, they can't alias in any
1203 // meaningful way, even if they read from the same address.
1204 if (!mayStore() && !Other.mayStore())
1205 return false;
1206
1207 // Let the target decide if memory accesses cannot possibly overlap.
1208 if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
1209 return false;
1210
1211 // FIXME: Need to handle multiple memory operands to support all targets.
1212 if (!hasOneMemOperand() || !Other.hasOneMemOperand())
1213 return true;
1214
1215 MachineMemOperand *MMOa = *memoperands_begin();
1216 MachineMemOperand *MMOb = *Other.memoperands_begin();
1217
1218 // The following interface to AA is fashioned after DAGCombiner::isAlias
1219 // and operates with MachineMemOperand offset with some important
1220 // assumptions:
1221 // - LLVM fundamentally assumes flat address spaces.
1222 // - MachineOperand offset can *only* result from legalization and
1223 // cannot affect queries other than the trivial case of overlap
1224 // checking.
1225 // - These offsets never wrap and never step outside
1226 // of allocated objects.
1227 // - There should never be any negative offsets here.
1228 //
1229 // FIXME: Modify API to hide this math from "user"
1230 // Even before we go to AA we can reason locally about some
1231 // memory objects. It can save compile time, and possibly catch some
1232 // corner cases not currently covered.
1233
1234 int64_t OffsetA = MMOa->getOffset();
1235 int64_t OffsetB = MMOb->getOffset();
1236 int64_t MinOffset = std::min(OffsetA, OffsetB);
1237
1238 uint64_t WidthA = MMOa->getSize();
1239 uint64_t WidthB = MMOb->getSize();
1240 bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
1241 bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
1242
1243 const Value *ValA = MMOa->getValue();
1244 const Value *ValB = MMOb->getValue();
1245 bool SameVal = (ValA && ValB && (ValA == ValB));
1246 if (!SameVal) {
1247 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1248 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1249 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1250 return false;
1251 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1252 return false;
1253 if (PSVa && PSVb && (PSVa == PSVb))
1254 SameVal = true;
1255 }
1256
1257 if (SameVal) {
1258 if (!KnownWidthA || !KnownWidthB)
1259 return true;
1260 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1261 int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
1262 return (MinOffset + LowWidth > MaxOffset);
1263 }
1264
1265 if (!AA)
1266 return true;
1267
1268 if (!ValA || !ValB)
1269 return true;
1270
1271 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1272 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1273
1274 int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
1275 : MemoryLocation::UnknownSize;
1276 int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
1277 : MemoryLocation::UnknownSize;
1278
1279 AliasResult AAResult = AA->alias(
1280 MemoryLocation(ValA, OverlapA,
1281 UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1282 MemoryLocation(ValB, OverlapB,
1283 UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1284
1285 return (AAResult != NoAlias);
1286}
1287
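// Worked example of the same-value overlap test above, as standalone C++:
// two accesses off the same underlying value, 8 bytes at offset 0 and
// 4 bytes at offset 4, overlap; 4 bytes at offset 0 and 4 bytes at offset 8
// do not.
#include <algorithm>
#include <cassert>
#include <cstdint>
int main() {
  int64_t OffsetA = 0, WidthA = 8, OffsetB = 4, WidthB = 4;
  int64_t MinOffset = std::min(OffsetA, OffsetB);
  int64_t MaxOffset = std::max(OffsetA, OffsetB);
  int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
  assert(MinOffset + LowWidth > MaxOffset);     // [0,8) overlaps [4,8)
  OffsetB = 8; WidthA = 4;
  MinOffset = std::min(OffsetA, OffsetB);
  MaxOffset = std::max(OffsetA, OffsetB);
  LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
  assert(!(MinOffset + LowWidth > MaxOffset));  // [0,4) and [8,12) are disjoint
  return 0;
}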
1288/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1289/// or volatile memory reference, or if the information describing the memory
1290/// reference is not available. Return false if it is known to have no ordered
1291/// memory references.
1292bool MachineInstr::hasOrderedMemoryRef() const {
1293 // An instruction known never to access memory won't have a volatile access.
1294 if (!mayStore() &&
1295 !mayLoad() &&
1296 !isCall() &&
1297 !hasUnmodeledSideEffects())
1298 return false;
1299
1300 // Otherwise, if the instruction has no memory reference information,
1301 // conservatively assume it wasn't preserved.
1302 if (memoperands_empty())
1303 return true;
1304
1305 // Check if any of our memory operands are ordered.
1306 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1307 return !MMO->isUnordered();
1308 });
1309}
1310
1311/// isDereferenceableInvariantLoad - Return true if this instruction will never
1312/// trap and is loading from a location whose value is invariant across a run of
1313/// this function.
1314bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1315 // If the instruction doesn't load at all, it isn't an invariant load.
1316 if (!mayLoad())
1317 return false;
1318
1319 // If the instruction has lost its memoperands, conservatively assume that
1320 // it may not be an invariant load.
1321 if (memoperands_empty())
1322 return false;
1323
1324 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1325
1326 for (MachineMemOperand *MMO : memoperands()) {
1327 if (!MMO->isUnordered())
1328 // If the memory operand has ordering side effects, we can't move the
1329 // instruction. Such an instruction is technically an invariant load,
1330 // but the caller code would need to be updated to expect that.
1331 return false;
1332 if (MMO->isStore()) return false;
1333 if (MMO->isInvariant() && MMO->isDereferenceable())
1334 continue;
1335
1336 // A load from a constant PseudoSourceValue is invariant.
1337 if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1338 if (PSV->isConstant(&MFI))
1339 continue;
1340
1341 if (const Value *V = MMO->getValue()) {
1342 // If we have an AliasAnalysis, ask it whether the memory is constant.
1343 if (AA &&
1344 AA->pointsToConstantMemory(
1345 MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1346 continue;
1347 }
1348
1349 // Otherwise assume conservatively.
1350 return false;
1351 }
1352
1353 // Everything checks out.
1354 return true;
1355}
1356
1357/// isConstantValuePHI - If the specified instruction is a PHI that always
1358/// merges together the same virtual register, return the register, otherwise
1359/// return 0.
1360unsigned MachineInstr::isConstantValuePHI() const {
1361 if (!isPHI())
1362 return 0;
1363 assert(getNumOperands() >= 3 &&
1364        "It's illegal to have a PHI without source operands");
1365
1366 Register Reg = getOperand(1).getReg();
1367 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1368 if (getOperand(i).getReg() != Reg)
1369 return 0;
1370 return Reg;
1371}
1372
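// Illustrative example (invented MIR, not taken from any test): a PHI whose
// incoming value is the same virtual register on every edge, so
// isConstantValuePHI() returns that register instead of 0:
//
//   %5:gr32 = PHI %1, %bb.1, %1, %bb.2, %1, %bb.3
//
// Any mismatch, e.g. "%5:gr32 = PHI %1, %bb.1, %2, %bb.2", yields 0.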
1373bool MachineInstr::hasUnmodeledSideEffects() const {
1374 if (hasProperty(MCID::UnmodeledSideEffects))
1375 return true;
1376 if (isInlineAsm()) {
1377 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1378 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1379 return true;
1380 }
1381
1382 return false;
1383}
1384
1385bool MachineInstr::isLoadFoldBarrier() const {
1386 return mayStore() || isCall() || hasUnmodeledSideEffects();
1387}
1388
1389/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1390///
1391bool MachineInstr::allDefsAreDead() const {
1392 for (const MachineOperand &MO : operands()) {
1393 if (!MO.isReg() || MO.isUse())
1394 continue;
1395 if (!MO.isDead())
1396 return false;
1397 }
1398 return true;
1399}
1400
1401/// copyImplicitOps - Copy implicit register operands from specified
1402/// instruction to this instruction.
1403void MachineInstr::copyImplicitOps(MachineFunction &MF,
1404 const MachineInstr &MI) {
1405 for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1406 i != e; ++i) {
1407 const MachineOperand &MO = MI.getOperand(i);
1408 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1409 addOperand(MF, MO);
1410 }
1411}
1412
1413bool MachineInstr::hasComplexRegisterTies() const {
1414 const MCInstrDesc &MCID = getDesc();
1415 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1416 const auto &Operand = getOperand(I);
1417 if (!Operand.isReg() || Operand.isDef())
1418 // Ignore the defined registers as MCID marks only the uses as tied.
1419 continue;
1420 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1421 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1422 if (ExpectedTiedIdx != TiedIdx)
1423 return true;
1424 }
1425 return false;
1426}
1427
1428LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1429 const MachineRegisterInfo &MRI) const {
1430 const MachineOperand &Op = getOperand(OpIdx);
1431 if (!Op.isReg())
1432 return LLT{};
1433
1434 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1435 return MRI.getType(Op.getReg());
1436
1437 auto &OpInfo = getDesc().OpInfo[OpIdx];
1438 if (!OpInfo.isGenericType())
1439 return MRI.getType(Op.getReg());
1440
1441 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1442 return LLT{};
1443
1444 LLT TypeToPrint = MRI.getType(Op.getReg());
1445 // Don't mark the type index printed if it wasn't actually printed: maybe
1446 // another operand with the same type index has an actual type attached:
1447 if (TypeToPrint.isValid())
1448 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1449 return TypeToPrint;
1450}
1451
1452#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1453 LLVM_DUMP_METHOD void MachineInstr::dump() const {
1454 dbgs() << " ";
1455 print(dbgs());
1456}
1457#endif
1458
1459void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1460 bool SkipDebugLoc, bool AddNewLine,
1461 const TargetInstrInfo *TII) const {
1462 const Module *M = nullptr;
1463 const Function *F = nullptr;
1464 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1465 F = &MF->getFunction();
1466 M = F->getParent();
1467 if (!TII)
1468 TII = MF->getSubtarget().getInstrInfo();
1469 }
1470
1471 ModuleSlotTracker MST(M);
1472 if (F)
1473 MST.incorporateFunction(*F);
1474 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1475}
1476
1477void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1478 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1479 bool AddNewLine, const TargetInstrInfo *TII) const {
1480 // We can be a bit tidier if we know the MachineFunction.
1481 const MachineFunction *MF = nullptr;
1482 const TargetRegisterInfo *TRI = nullptr;
1483 const MachineRegisterInfo *MRI = nullptr;
1484 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1485 tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1486
1487 if (isCFIInstruction())
1488 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1489
1490 SmallBitVector PrintedTypes(8);
1491 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1492 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1493 if (!ShouldPrintRegisterTies)
1494 return 0U;
1495 const MachineOperand &MO = getOperand(OpIdx);
1496 if (MO.isReg() && MO.isTied() && !MO.isDef())
1497 return findTiedOperandIdx(OpIdx);
1498 return 0U;
1499 };
1500 unsigned StartOp = 0;
1501 unsigned e = getNumOperands();
1502
1503 // Print explicitly defined operands on the left of an assignment syntax.
1504 while (StartOp < e) {
1505 const MachineOperand &MO = getOperand(StartOp);
1506 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1507 break;
1508
1509 if (StartOp != 0)
1510 OS << ", ";
1511
1512 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1513 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1514 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
1515 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1516 ++StartOp;
1517 }
1518
1519 if (StartOp != 0)
1520 OS << " = ";
1521
1522 if (getFlag(MachineInstr::FrameSetup))
1523 OS << "frame-setup ";
1524 if (getFlag(MachineInstr::FrameDestroy))
1525 OS << "frame-destroy ";
1526 if (getFlag(MachineInstr::FmNoNans))
1527 OS << "nnan ";
1528 if (getFlag(MachineInstr::FmNoInfs))
1529 OS << "ninf ";
1530 if (getFlag(MachineInstr::FmNsz))
1531 OS << "nsz ";
1532 if (getFlag(MachineInstr::FmArcp))
1533 OS << "arcp ";
1534 if (getFlag(MachineInstr::FmContract))
1535 OS << "contract ";
1536 if (getFlag(MachineInstr::FmAfn))
1537 OS << "afn ";
1538 if (getFlag(MachineInstr::FmReassoc))
1539 OS << "reassoc ";
1540 if (getFlag(MachineInstr::NoUWrap))
1541 OS << "nuw ";
1542 if (getFlag(MachineInstr::NoSWrap))
1543 OS << "nsw ";
1544 if (getFlag(MachineInstr::IsExact))
1545 OS << "exact ";
1546 if (getFlag(MachineInstr::FPExcept))
1547 OS << "fpexcept ";
1548
1549 // Print the opcode name.
1550 if (TII)
1551 OS << TII->getName(getOpcode());
1552 else
1553 OS << "UNKNOWN";
1554
1555 if (SkipOpers)
1556 return;
1557
1558 // Print the rest of the operands.
1559 bool FirstOp = true;
1560 unsigned AsmDescOp = ~0u;
1561 unsigned AsmOpCount = 0;
1562
1563 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1564 // Print asm string.
1565 OS << " ";
1566 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1567 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1568 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1569 getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1570 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1571 IntrinsicInfo);
1572
1573 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1574 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1575 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1576 OS << " [sideeffect]";
1577 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1578 OS << " [mayload]";
1579 if (ExtraInfo & InlineAsm::Extra_MayStore)
1580 OS << " [maystore]";
1581 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1582 OS << " [isconvergent]";
1583 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1584 OS << " [alignstack]";
1585 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1586 OS << " [attdialect]";
1587 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1588 OS << " [inteldialect]";
1589
1590 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1591 FirstOp = false;
1592 }
1593
1594 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1595 const MachineOperand &MO = getOperand(i);
1596
1597 if (FirstOp) FirstOp = false; else OS << ",";
1598 OS << " ";
1599
1600 if (isDebugValue() && MO.isMetadata()) {
1601 // Pretty print DBG_VALUE instructions.
1602 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1603 if (DIV && !DIV->getName().empty())
1604 OS << "!\"" << DIV->getName() << '\"';
1605 else {
1606 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1607 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1608 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1609 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1610 }
1611 } else if (isDebugLabel() && MO.isMetadata()) {
1612 // Pretty print DBG_LABEL instructions.
1613 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1614 if (DIL && !DIL->getName().empty())
1615 OS << "\"" << DIL->getName() << '\"';
1616 else {
1617 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1618 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1619 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1620 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1621 }
1622 } else if (i == AsmDescOp && MO.isImm()) {
1623 // Pretty print the inline asm operand descriptor.
1624 OS << '$' << AsmOpCount++;
1625 unsigned Flag = MO.getImm();
1626 switch (InlineAsm::getKind(Flag)) {
1627 case InlineAsm::Kind_RegUse: OS << ":[reguse"; break;
1628 case InlineAsm::Kind_RegDef: OS << ":[regdef"; break;
1629 case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1630 case InlineAsm::Kind_Clobber: OS << ":[clobber"; break;
1631 case InlineAsm::Kind_Imm: OS << ":[imm"; break;
1632 case InlineAsm::Kind_Mem: OS << ":[mem"; break;
1633 default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1634 }
1635
1636 unsigned RCID = 0;
1637 if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1638 InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1639 if (TRI) {
1640 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1641 } else
1642 OS << ":RC" << RCID;
1643 }
1644
1645 if (InlineAsm::isMemKind(Flag)) {
1646 unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1647 switch (MCID) {
1648 case InlineAsm::Constraint_es: OS << ":es"; break;
1649 case InlineAsm::Constraint_i: OS << ":i"; break;
1650 case InlineAsm::Constraint_m: OS << ":m"; break;
1651 case InlineAsm::Constraint_o: OS << ":o"; break;
1652 case InlineAsm::Constraint_v: OS << ":v"; break;
1653 case InlineAsm::Constraint_Q: OS << ":Q"; break;
1654 case InlineAsm::Constraint_R: OS << ":R"; break;
1655 case InlineAsm::Constraint_S: OS << ":S"; break;
1656 case InlineAsm::Constraint_T: OS << ":T"; break;
1657 case InlineAsm::Constraint_Um: OS << ":Um"; break;
1658 case InlineAsm::Constraint_Un: OS << ":Un"; break;
1659 case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1660 case InlineAsm::Constraint_Us: OS << ":Us"; break;
1661 case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1662 case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1663 case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1664 case InlineAsm::Constraint_X: OS << ":X"; break;
1665 case InlineAsm::Constraint_Z: OS << ":Z"; break;
1666 case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1667 case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1668 default: OS << ":?"; break;
1669 }
1670 }
1671
1672 unsigned TiedTo = 0;
1673 if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1674 OS << " tiedto:$" << TiedTo;
1675
1676 OS << ']';
1677
1678 // Compute the index of the next operand descriptor.
1679 AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1680 } else {
1681 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1682 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1683 if (MO.isImm() && isOperandSubregIdx(i))
1684 MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1685 else
1686 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1687 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1688 }
1689 }
1690
1691 // Print any optional symbols attached to this instruction as if they were
1692 // operands.
1693 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1694 if (!FirstOp) {
1695 FirstOp = false;
1696 OS << ',';
1697 }
1698 OS << " pre-instr-symbol ";
1699 MachineOperand::printSymbol(OS, *PreInstrSymbol);
1700 }
1701 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1702 if (!FirstOp) {
1703 FirstOp = false;
1704 OS << ',';
1705 }
1706 OS << " post-instr-symbol ";
1707 MachineOperand::printSymbol(OS, *PostInstrSymbol);
1708 }
1709
1710 if (!SkipDebugLoc) {
1711 if (const DebugLoc &DL = getDebugLoc()) {
1712 if (!FirstOp)
1713 OS << ',';
1714 OS << " debug-location ";
1715 DL->printAsOperand(OS, MST);
1716 }
1717 }
1718
1719 if (!memoperands_empty()) {
1720 SmallVector<StringRef, 0> SSNs;
1721 const LLVMContext *Context = nullptr;
1722 std::unique_ptr<LLVMContext> CtxPtr;
1723 const MachineFrameInfo *MFI = nullptr;
1724 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1725 MFI = &MF->getFrameInfo();
1726 Context = &MF->getFunction().getContext();
1727 } else {
1728 CtxPtr = std::make_unique<LLVMContext>();
1729 Context = CtxPtr.get();
1730 }
1731
1732 OS << " :: ";
1733 bool NeedComma = false;
1734 for (const MachineMemOperand *Op : memoperands()) {
1735 if (NeedComma)
1736 OS << ", ";
1737 Op->print(OS, MST, SSNs, *Context, MFI, TII);
1738 NeedComma = true;
1739 }
1740 }
1741
1742 if (SkipDebugLoc)
1743 return;
1744
1745 bool HaveSemi = false;
1746
1747 // Print debug location information.
1748 if (const DebugLoc &DL = getDebugLoc()) {
1749 if (!HaveSemi) {
1750 OS << ';';
1751 HaveSemi = true;
1752 }
1753 OS << ' ';
1754 DL.print(OS);
1755 }
1756
1757 // Print extra comments for DEBUG_VALUE.
1758 if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1759 if (!HaveSemi) {
1760 OS << ";";
1761 HaveSemi = true;
1762 }
1763 auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1764 OS << " line no:" << DV->getLine();
1765 if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1766 DebugLoc InlinedAtDL(InlinedAt);
1767 if (InlinedAtDL && MF) {
1768 OS << " inlined @[ ";
1769 InlinedAtDL.print(OS);
1770 OS << " ]";
1771 }
1772 }
1773 if (isIndirectDebugValue())
1774 OS << " indirect";
1775 }
1776 // TODO: DBG_LABEL
1777
1778 if (AddNewLine)
1779 OS << '\n';
1780}
1781
1782bool MachineInstr::addRegisterKilled(Register IncomingReg,
1783 const TargetRegisterInfo *RegInfo,
1784 bool AddIfNotFound) {
1785 bool isPhysReg = Register::isPhysicalRegister(IncomingReg);
1786 bool hasAliases = isPhysReg &&
1787 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
1788 bool Found = false;
1789 SmallVector<unsigned,4> DeadOps;
1790 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1791 MachineOperand &MO = getOperand(i);
1792 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
1793 continue;
1794
1795 // DEBUG_VALUE nodes do not contribute to code generation and should
1796 // always be ignored. Failure to do so may result in trying to modify
1797 // KILL flags on DEBUG_VALUE nodes.
1798 if (MO.isDebug())
1799 continue;
1800
1801 Register Reg = MO.getReg();
1802 if (!Reg)
1803 continue;
1804
1805 if (Reg == IncomingReg) {
1806 if (!Found) {
1807 if (MO.isKill())
1808 // The register is already marked kill.
1809 return true;
1810 if (isPhysReg && isRegTiedToDefOperand(i))
1811 // Two-address uses of physregs must not be marked kill.
1812 return true;
1813 MO.setIsKill();
1814 Found = true;
1815 }
1816 } else if (hasAliases && MO.isKill() && Register::isPhysicalRegister(Reg)) {
1817 // A super-register kill already exists.
1818 if (RegInfo->isSuperRegister(IncomingReg, Reg))
1819 return true;
1820 if (RegInfo->isSubRegister(IncomingReg, Reg))
1821 DeadOps.push_back(i);
1822 }
1823 }
1824
1825 // Trim unneeded kill operands.
1826 while (!DeadOps.empty()) {
1827 unsigned OpIdx = DeadOps.back();
1828 if (getOperand(OpIdx).isImplicit() &&
1829 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
1830 RemoveOperand(OpIdx);
1831 else
1832 getOperand(OpIdx).setIsKill(false);
1833 DeadOps.pop_back();
1834 }
1835
1836 // If not found, this means an alias of one of the operands is killed. Add a
1837 // new implicit operand if required.
1838 if (!Found && AddIfNotFound) {
1839 addOperand(MachineOperand::CreateReg(IncomingReg,
1840 false /*IsDef*/,
1841 true /*IsImp*/,
1842 true /*IsKill*/));
1843 return true;
1844 }
1845 return Found;
1846}
1847
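// Illustrative sketch, not part of MachineInstr.cpp: a typical call into
// addRegisterKilled() once a pass has determined that PhysReg's live range
// ends at MI. `markKillAt` is an invented name.
static void markKillAt(MachineInstr &MI, Register PhysReg,
                       const TargetRegisterInfo *TRI) {
  // With AddIfNotFound == true an implicit kill operand is appended if no
  // existing use of PhysReg (or an aliasing register) could carry the flag,
  // so the call always reports success.
  bool Marked = MI.addRegisterKilled(PhysReg, TRI, /*AddIfNotFound=*/true);
  (void)Marked;
}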
1848void MachineInstr::clearRegisterKills(Register Reg,
1849 const TargetRegisterInfo *RegInfo) {
1850 if (!Register::isPhysicalRegister(Reg))
1851 RegInfo = nullptr;
1852 for (MachineOperand &MO : operands()) {
1853 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1854 continue;
1855 Register OpReg = MO.getReg();
1856 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1857 MO.setIsKill(false);
1858 }
1859}
1860
1861bool MachineInstr::addRegisterDead(Register Reg,
1862 const TargetRegisterInfo *RegInfo,
1863 bool AddIfNotFound) {
1864 bool isPhysReg = Register::isPhysicalRegister(Reg);
1865 bool hasAliases = isPhysReg &&
1866 MCRegAliasIterator(Reg, RegInfo, false).isValid();
1867 bool Found = false;
1868 SmallVector<unsigned,4> DeadOps;
1869 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1870 MachineOperand &MO = getOperand(i);
1871 if (!MO.isReg() || !MO.isDef())
1872 continue;
1873 Register MOReg = MO.getReg();
1874 if (!MOReg)
1875 continue;
1876
1877 if (MOReg == Reg) {
1878 MO.setIsDead();
1879 Found = true;
1880 } else if (hasAliases && MO.isDead() &&
1881 Register::isPhysicalRegister(MOReg)) {
1882 // There exists a super-register that's marked dead.
1883 if (RegInfo->isSuperRegister(Reg, MOReg))
1884 return true;
1885 if (RegInfo->isSubRegister(Reg, MOReg))
1886 DeadOps.push_back(i);
1887 }
1888 }
1889
1890 // Trim unneeded dead operands.
1891 while (!DeadOps.empty()) {
1892 unsigned OpIdx = DeadOps.back();
1893 if (getOperand(OpIdx).isImplicit() &&
1894 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
1895 RemoveOperand(OpIdx);
1896 else
1897 getOperand(OpIdx).setIsDead(false);
1898 DeadOps.pop_back();
1899 }
1900
1901 // If not found, this means an alias of one of the operands is dead. Add a
1902 // new implicit operand if required.
1903 if (Found || !AddIfNotFound)
1904 return Found;
1905
1906 addOperand(MachineOperand::CreateReg(Reg,
1907 true /*IsDef*/,
1908 true /*IsImp*/,
1909 false /*IsKill*/,
1910 true /*IsDead*/));
1911 return true;
1912}
1913
1914void MachineInstr::clearRegisterDeads(Register Reg) {
1915 for (MachineOperand &MO : operands()) {
1916 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1917 continue;
1918 MO.setIsDead(false);
1919 }
1920}
1921
1922void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
1923 for (MachineOperand &MO : operands()) {
1924 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1925 continue;
1926 MO.setIsUndef(IsUndef);
1927 }
1928}
1929
1930void MachineInstr::addRegisterDefined(Register Reg,
1931 const TargetRegisterInfo *RegInfo) {
1932 if (Register::isPhysicalRegister(Reg)) {
1933 MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
1934 if (MO)
1935 return;
1936 } else {
1937 for (const MachineOperand &MO : operands()) {
1938 if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1939 MO.getSubReg() == 0)
1940 return;
1941 }
1942 }
1943 addOperand(MachineOperand::CreateReg(Reg,
1944 true /*IsDef*/,
1945 true /*IsImp*/));
1946}
1947
1948void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
1949 const TargetRegisterInfo &TRI) {
1950 bool HasRegMask = false;
1951 for (MachineOperand &MO : operands()) {
1952 if (MO.isRegMask()) {
1953 HasRegMask = true;
1954 continue;
1955 }
1956 if (!MO.isReg() || !MO.isDef()) continue;
1957 Register Reg = MO.getReg();
1958 if (!Reg.isPhysical())
1959 continue;
1960 // If there are no uses, including partial uses, the def is dead.
1961 if (llvm::none_of(UsedRegs,
1962 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
1963 MO.setIsDead();
1964 }
1965
1966 // This is a call with a register mask operand.
1967 // Mask clobbers are always dead, so add defs for the non-dead defines.
1968 if (HasRegMask)
1969 for (ArrayRef<Register>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1970 I != E; ++I)
1971 addRegisterDefined(*I, &TRI);
1972}
1973
1974unsigned
1975MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1976 // Build up a buffer of hash code components.
1977 SmallVector<size_t, 8> HashComponents;
1978 HashComponents.reserve(MI->getNumOperands() + 1);
1979 HashComponents.push_back(MI->getOpcode());
1980 for (const MachineOperand &MO : MI->operands()) {
1981 if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
1982 continue; // Skip virtual register defs.
1983
1984 HashComponents.push_back(hash_value(MO));
1985 }
1986 return hash_combine_range(HashComponents.begin(), HashComponents.end());
1987}
1988
1989void MachineInstr::emitError(StringRef Msg) const {
1990 // Find the source location cookie.
1991 unsigned LocCookie = 0;
1992 const MDNode *LocMD = nullptr;
1993 for (unsigned i = getNumOperands(); i != 0; --i) {
1994 if (getOperand(i-1).isMetadata() &&
1995 (LocMD = getOperand(i-1).getMetadata()) &&
1996 LocMD->getNumOperands() != 0) {
1997 if (const ConstantInt *CI =
1998 mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1999 LocCookie = CI->getZExtValue();
2000 break;
2001 }
2002 }
2003 }
2004
2005 if (const MachineBasicBlock *MBB = getParent())
2006 if (const MachineFunction *MF = MBB->getParent())
2007 return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
2008 report_fatal_error(Msg);
2009}
2010
2011MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2012 const MCInstrDesc &MCID, bool IsIndirect,
2013 Register Reg, const MDNode *Variable,
2014 const MDNode *Expr) {
2015 assert(isa<DILocalVariable>(Variable) && "not a variable");
2016 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2017 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2018        "Expected inlined-at fields to agree");
2019 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
2020 if (IsIndirect)
2021 MIB.addImm(0U);
2022 else
2023 MIB.addReg(0U, RegState::Debug);
2024 return MIB.addMetadata(Variable).addMetadata(Expr);
2025}
2026
2027MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2028 const MCInstrDesc &MCID, bool IsIndirect,
2029 MachineOperand &MO, const MDNode *Variable,
2030 const MDNode *Expr) {
2031 assert(isa<DILocalVariable>(Variable) && "not a variable");
2032 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2033 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2034        "Expected inlined-at fields to agree");
2035 if (MO.isReg())
2036 return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
2037
2038 auto MIB = BuildMI(MF, DL, MCID).add(MO);
2039 if (IsIndirect)
2040 MIB.addImm(0U);
2041 else
2042 MIB.addReg(0U, RegState::Debug);
2043 return MIB.addMetadata(Variable).addMetadata(Expr);
2044 }
2045
2046MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2047 MachineBasicBlock::iterator I,
2048 const DebugLoc &DL, const MCInstrDesc &MCID,
2049 bool IsIndirect, Register Reg,
2050 const MDNode *Variable, const MDNode *Expr) {
2051 MachineFunction &MF = *BB.getParent();
2052 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2053 BB.insert(I, MI);
2054 return MachineInstrBuilder(MF, MI);
2055}
2056
2057MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2058 MachineBasicBlock::iterator I,
2059 const DebugLoc &DL, const MCInstrDesc &MCID,
2060 bool IsIndirect, MachineOperand &MO,
2061 const MDNode *Variable, const MDNode *Expr) {
2062 MachineFunction &MF = *BB.getParent();
2063 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
2064 BB.insert(I, MI);
2065 return MachineInstrBuilder(MF, *MI);
2066}
2067
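// Illustrative sketch, not part of MachineInstr.cpp: building an indirect
// DBG_VALUE with the block-inserting overload above. `emitDbgValue` is an
// invented name; Var and Expr are assumed to be a DILocalVariable and a
// DIExpression that are valid for DL.
static MachineInstrBuilder emitDbgValue(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator InsertPt,
                                        const DebugLoc &DL,
                                        const TargetInstrInfo &TII,
                                        Register Reg, const MDNode *Var,
                                        const MDNode *Expr) {
  return BuildMI(MBB, InsertPt, DL, TII.get(TargetOpcode::DBG_VALUE),
                 /*IsIndirect=*/true, Reg, Var, Expr);
}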
2068/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2069/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2070static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
2071 assert(MI.getOperand(0).isReg() && "can't spill non-register");
2072 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2073        "Expected inlined-at fields to agree");
2074
2075 const DIExpression *Expr = MI.getDebugExpression();
2076 if (MI.isIndirectDebugValue()) {
2077 assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
2078 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2079 }
2080 return Expr;
2081}
2082
2083MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2084 MachineBasicBlock::iterator I,
2085 const MachineInstr &Orig,
2086 int FrameIndex) {
2087 const DIExpression *Expr = computeExprForSpill(Orig);
2088 return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
 1. Calling 'BuildMI'
 3. Returning from 'BuildMI'
 4. Calling 'MachineInstrBuilder::addFrameIndex'
2089 .addFrameIndex(FrameIndex)
2090 .addImm(0U)
2091 .addMetadata(Orig.getDebugVariable())
2092 .addMetadata(Expr);
2093}
2094
2095void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
2096 const DIExpression *Expr = computeExprForSpill(Orig);
2097 Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
2098 Orig.getOperand(1).ChangeToImmediate(0U);
2099 Orig.getOperand(3).setMetadata(Expr);
2100}
2101
2102void MachineInstr::collectDebugValues(
2103 SmallVectorImpl<MachineInstr *> &DbgValues) {
2104 MachineInstr &MI = *this;
2105 if (!MI.getOperand(0).isReg())
2106 return;
2107
2108 MachineBasicBlock::iterator DI = MI; ++DI;
2109 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2110 DI != DE; ++DI) {
2111 if (!DI->isDebugValue())
2112 return;
2113 if (DI->getOperand(0).isReg() &&
2114 DI->getOperand(0).getReg() == MI.getOperand(0).getReg())
2115 DbgValues.push_back(&*DI);
2116 }
2117}
2118
2119void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2120 // Collect matching debug values.
2121 SmallVector<MachineInstr *, 2> DbgValues;
2122
2123 if (!getOperand(0).isReg())
2124 return;
2125
2126 unsigned DefReg = getOperand(0).getReg();
2127 auto *MRI = getRegInfo();
2128 for (auto &MO : MRI->use_operands(DefReg)) {
2129 auto *DI = MO.getParent();
2130 if (!DI->isDebugValue())
2131 continue;
2132 if (DI->getOperand(0).isReg() &&
2133 DI->getOperand(0).getReg() == DefReg){
2134 DbgValues.push_back(DI);
2135 }
2136 }
2137
2138 // Propagate Reg to debug value instructions.
2139 for (auto *DBI : DbgValues)
2140 DBI->getOperand(0).setReg(Reg);
2141}
2142
2143using MMOList = SmallVector<const MachineMemOperand *, 2>;
2144
2145static unsigned getSpillSlotSize(MMOList &Accesses,
2146 const MachineFrameInfo &MFI) {
2147 unsigned Size = 0;
2148 for (auto A : Accesses)
2149 if (MFI.isSpillSlotObjectIndex(
2150 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2151 ->getFrameIndex()))
2152 Size += A->getSize();
2153 return Size;
2154}
2155
2156Optional<unsigned>
2157MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2158 int FI;
2159 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2160 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2161 if (MFI.isSpillSlotObjectIndex(FI))
2162 return (*memoperands_begin())->getSize();
2163 }
2164 return None;
2165}
2166
2167Optional<unsigned>
2168MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2169 MMOList Accesses;
2170 if (TII->hasStoreToStackSlot(*this, Accesses))
2171 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2172 return None;
2173}
2174
2175Optional<unsigned>
2176MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2177 int FI;
2178 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2179 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2180 if (MFI.isSpillSlotObjectIndex(FI))
2181 return (*memoperands_begin())->getSize();
2182 }
2183 return None;
2184}
2185
2186Optional<unsigned>
2187MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2188 MMOList Accesses;
2189 if (TII->hasLoadFromStackSlot(*this, Accesses))
2190 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2191 return None;
2192}
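// Illustrative sketch, not part of MachineInstr.cpp: reporting the bytes an
// instruction spills or reloads using the four queries above.
// `describeSpill` is an invented name.
static void describeSpill(const MachineInstr &MI, const TargetInstrInfo *TII,
                          raw_ostream &OS) {
  if (Optional<unsigned> Size = MI.getSpillSize(TII))
    OS << "spills " << *Size << " bytes\n";
  else if (Optional<unsigned> Size = MI.getFoldedSpillSize(TII))
    OS << "folded spill of " << *Size << " bytes\n";
  else if (Optional<unsigned> Size = MI.getRestoreSize(TII))
    OS << "reloads " << *Size << " bytes\n";
  else if (Optional<unsigned> Size = MI.getFoldedRestoreSize(TII))
    OS << "folded reload of " << *Size << " bytes\n";
}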

/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/CodeGen/MachineInstrBuilder.h

1//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes a function named BuildMI, which is useful for dramatically
10// simplifying how MachineInstrs are created. It allows use of code like this:
11//
12// M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
13// .addReg(argVal1)
14// .addReg(argVal2);
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
19#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
20
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/CodeGen/GlobalISel/Utils.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFunction.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBundle.h"
27#include "llvm/CodeGen/MachineOperand.h"
28#include "llvm/CodeGen/TargetRegisterInfo.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/Support/ErrorHandling.h"
32#include <cassert>
33#include <cstdint>
34#include <utility>
35
36namespace llvm {
37
38class MCInstrDesc;
39class MDNode;
40
41namespace RegState {
42
43 enum {
44 Define = 0x2,
45 Implicit = 0x4,
46 Kill = 0x8,
47 Dead = 0x10,
48 Undef = 0x20,
49 EarlyClobber = 0x40,
50 Debug = 0x80,
51 InternalRead = 0x100,
52 Renamable = 0x200,
53 DefineNoRead = Define | Undef,
54 ImplicitDefine = Implicit | Define,
55 ImplicitKill = Implicit | Kill
56 };
57
58} // end namespace RegState
59
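// Illustrative note, not part of this header: the RegState bits above are
// OR'd together when adding register operands, for example
//
//   BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
//       .addReg(SrcReg, RegState::Kill)
//       .addReg(ScratchReg, RegState::Implicit | RegState::Define
//                               | RegState::Dead);
//
// Passing a bare boolean instead of these enums trips the assertion in
// addReg() below.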
60class MachineInstrBuilder {
61 MachineFunction *MF = nullptr;
62 MachineInstr *MI = nullptr;
63
64public:
65 MachineInstrBuilder() = default;
66
67 /// Create a MachineInstrBuilder for manipulating an existing instruction.
68 /// F must be the machine function that was used to allocate I.
69 MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
70 MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
71 : MF(&F), MI(&*I) {}
72
73 /// Allow automatic conversion to the machine instruction we are working on.
74 operator MachineInstr*() const { return MI; }
75 MachineInstr *operator->() const { return MI; }
76 operator MachineBasicBlock::iterator() const { return MI; }
77
78 /// If conversion operators fail, use this method to get the MachineInstr
79 /// explicitly.
80 MachineInstr *getInstr() const { return MI; }
81
82 /// Get the register for the operand index.
83 /// The operand at the index should be a register (asserted by
84 /// MachineOperand).
85 Register getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }
86
87 /// Add a new virtual register operand.
88 const MachineInstrBuilder &addReg(Register RegNo, unsigned flags = 0,
89 unsigned SubReg = 0) const {
90 assert((flags & 0x1) == 0 &&
91        "Passing in 'true' to addReg is forbidden! Use enums instead.");
92 MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
93 flags & RegState::Define,
94 flags & RegState::Implicit,
95 flags & RegState::Kill,
96 flags & RegState::Dead,
97 flags & RegState::Undef,
98 flags & RegState::EarlyClobber,
99 SubReg,
100 flags & RegState::Debug,
101 flags & RegState::InternalRead,
102 flags & RegState::Renamable));
103 return *this;
104 }
105
106 /// Add a virtual register definition operand.
107 const MachineInstrBuilder &addDef(Register RegNo, unsigned Flags = 0,
108 unsigned SubReg = 0) const {
109 return addReg(RegNo, Flags | RegState::Define, SubReg);
110 }
111
112 /// Add a virtual register use operand. It is an error for Flags to contain
113 /// `RegState::Define` when calling this function.
114 const MachineInstrBuilder &addUse(Register RegNo, unsigned Flags = 0,
115 unsigned SubReg = 0) const {
116 assert(!(Flags & RegState::Define) &&
117        "Misleading addUse defines register, use addReg instead.");
118 return addReg(RegNo, Flags, SubReg);
119 }
120
121 /// Add a new immediate operand.
122 const MachineInstrBuilder &addImm(int64_t Val) const {
123 MI->addOperand(*MF, MachineOperand::CreateImm(Val));
124 return *this;
125 }
126
127 const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
128 MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
129 return *this;
130 }
131
132 const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
133 MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
134 return *this;
135 }
136
137 const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
138 unsigned TargetFlags = 0) const {
139 MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
140 return *this;
141 }
142
143 const MachineInstrBuilder &addFrameIndex(int Idx) const {
144 MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
 5. Calling 'MachineInstr::addOperand'
145 return *this;
146 }
147
148 const MachineInstrBuilder &
149 addConstantPoolIndex(unsigned Idx, int Offset = 0,
150 unsigned TargetFlags = 0) const {
151 MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
152 return *this;
153 }
154
155 const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
156 unsigned TargetFlags = 0) const {
157 MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
158 TargetFlags));
159 return *this;
160 }
161
162 const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
163 unsigned TargetFlags = 0) const {
164 MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
165 return *this;
166 }
167
168 const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
169 int64_t Offset = 0,
170 unsigned TargetFlags = 0) const {
171 MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
172 return *this;
173 }
174
175 const MachineInstrBuilder &addExternalSymbol(const char *FnName,
176 unsigned TargetFlags = 0) const {
177 MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
178 return *this;
179 }
180
181 const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
182 int64_t Offset = 0,
183 unsigned TargetFlags = 0) const {
184 MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
185 return *this;
186 }
187
188 const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
189 MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
190 return *this;
191 }
192
193 const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
194 MI->addMemOperand(*MF, MMO);
195 return *this;
196 }
197
198 const MachineInstrBuilder &
199 setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
200 MI->setMemRefs(*MF, MMOs);
201 return *this;
202 }
203
204 const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
205 MI->cloneMemRefs(*MF, OtherMI);
206 return *this;
207 }
208
209 const MachineInstrBuilder &
210 cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
211 MI->cloneMergedMemRefs(*MF, OtherMIs);
212 return *this;
213 }
214
215 const MachineInstrBuilder &add(const MachineOperand &MO) const {
216 MI->addOperand(*MF, MO);
217 return *this;
218 }
219
220 const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
221 for (const MachineOperand &MO : MOs) {
222 MI->addOperand(*MF, MO);
223 }
224 return *this;
225 }
226
227 const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
228 MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
229 assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
230 : true) &&
231 "first MDNode argument of a DBG_VALUE not a variable");
232 assert((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel())
233 : true) &&
234 "first MDNode argument of a DBG_LABEL not a label");
235 return *this;
236 }
237
238 const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
239 MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
240 return *this;
241 }
242
243 const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
244 MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
245 return *this;
246 }
247
248 const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
249 MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
250 return *this;
251 }
252
253 const MachineInstrBuilder &addShuffleMask(const Constant *Val) const {
254 MI->addOperand(*MF, MachineOperand::CreateShuffleMask(Val));
255 return *this;
256 }
257
258 const MachineInstrBuilder &addSym(MCSymbol *Sym,
259 unsigned char TargetFlags = 0) const {
260 MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
261 return *this;
262 }
263
264 const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
265 MI->setFlags(Flags);
266 return *this;
267 }
268
269 const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
270 MI->setFlag(Flag);
271 return *this;
272 }
273
274 // Add a displacement from an existing MachineOperand with an added offset.
275 const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
276 unsigned char TargetFlags = 0) const {
277 // If caller specifies new TargetFlags then use it, otherwise the
278 // default behavior is to copy the target flags from the existing
279 // MachineOperand. This means if the caller wants to clear the
280 // target flags it needs to do so explicitly.
281 if (0 == TargetFlags)
282 TargetFlags = Disp.getTargetFlags();
283
284 switch (Disp.getType()) {
285 default:
286 llvm_unreachable("Unhandled operand type in addDisp()");
287 case MachineOperand::MO_Immediate:
288 return addImm(Disp.getImm() + off);
289 case MachineOperand::MO_ConstantPoolIndex:
290 return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
291 TargetFlags);
292 case MachineOperand::MO_GlobalAddress:
293 return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
294 TargetFlags);
295 case MachineOperand::MO_BlockAddress:
296 return addBlockAddress(Disp.getBlockAddress(), Disp.getOffset() + off,
297 TargetFlags);
298 }
299 }
300
301 /// Copy all the implicit operands from OtherMI onto this one.
302 const MachineInstrBuilder &
303 copyImplicitOps(const MachineInstr &OtherMI) const {
304 MI->copyImplicitOps(*MF, OtherMI);
305 return *this;
306 }
307
308 bool constrainAllUses(const TargetInstrInfo &TII,
309 const TargetRegisterInfo &TRI,
310 const RegisterBankInfo &RBI) const {
311 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
312 }
313};
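The builder above is normally reached through the BuildMI helpers declared below, with each add* call chained onto the previous one. A minimal sketch of that pattern (the opcode X86::ADD32ri and the names DstReg, SrcReg, MBB, InsertPt, DL and TII are placeholders supplied by the calling pass, not part of this header):

BuildMI(MBB, InsertPt, DL, TII.get(X86::ADD32ri), DstReg)
    .addReg(SrcReg, RegState::Kill)    // use operand; last use of SrcReg
    .addImm(1)                         // immediate operand
    .setMIFlag(MachineInstr::NoSWrap); // purely illustrative flag choice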
314
315/// Builder interface. Specify how to create the initial instruction itself.
316inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
317 const MCInstrDesc &MCID) {
318 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
319}
320
321/// This version of the builder sets up the first operand as a
322/// destination virtual register.
323inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
324 const MCInstrDesc &MCID, Register DestReg) {
325 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
326 .addReg(DestReg, RegState::Define);
327}
328
329/// This version of the builder inserts the newly-built instruction before
330/// the given position in the given MachineBasicBlock, and sets up the first
331/// operand as a destination virtual register.
332inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
333 MachineBasicBlock::iterator I,
334 const DebugLoc &DL, const MCInstrDesc &MCID,
335 Register DestReg) {
336 MachineFunction &MF = *BB.getParent();
337 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
338 BB.insert(I, MI);
339 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
340}
341
342/// This version of the builder inserts the newly-built instruction before
343/// the given position in the given MachineBasicBlock, and sets up the first
344/// operand as a destination virtual register.
345///
346/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
347/// added to the same bundle.
348inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
349 MachineBasicBlock::instr_iterator I,
350 const DebugLoc &DL, const MCInstrDesc &MCID,
351 Register DestReg) {
352 MachineFunction &MF = *BB.getParent();
353 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
354 BB.insert(I, MI);
355 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
356}
357
358inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
359 const DebugLoc &DL, const MCInstrDesc &MCID,
360 Register DestReg) {
361 // Calling the overload for instr_iterator is always correct. However, the
362 // definition is not available in headers, so inline the check.
363 if (I.isInsideBundle())
364 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
365 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
366}
367
368inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
369 const DebugLoc &DL, const MCInstrDesc &MCID,
370 Register DestReg) {
371 return BuildMI(BB, *I, DL, MCID, DestReg);
372}
373
374/// This version of the builder inserts the newly-built instruction before the
375/// given position in the given MachineBasicBlock, and does NOT take a
376/// destination register.
377inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
378 MachineBasicBlock::iterator I,
379 const DebugLoc &DL,
380 const MCInstrDesc &MCID) {
381 MachineFunction &MF = *BB.getParent();
382 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
383 BB.insert(I, MI);
2
Value assigned to field 'Operands'
384 return MachineInstrBuilder(MF, MI);
385}
386
387inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
388 MachineBasicBlock::instr_iterator I,
389 const DebugLoc &DL,
390 const MCInstrDesc &MCID) {
391 MachineFunction &MF = *BB.getParent();
392 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
393 BB.insert(I, MI);
394 return MachineInstrBuilder(MF, MI);
395}
396
397inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
398 const DebugLoc &DL,
399 const MCInstrDesc &MCID) {
400 // Calling the overload for instr_iterator is always correct. However, the
401 // definition is not available in headers, so inline the check.
402 if (I.isInsideBundle())
403 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
404 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
405}
406
407inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
408 const DebugLoc &DL,
409 const MCInstrDesc &MCID) {
410 return BuildMI(BB, *I, DL, MCID);
411}
412
413/// This version of the builder inserts the newly-built instruction at the end
414/// of the given MachineBasicBlock, and does NOT take a destination register.
415inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
416 const MCInstrDesc &MCID) {
417 return BuildMI(*BB, BB->end(), DL, MCID);
418}
419
420/// This version of the builder inserts the newly-built instruction at the
421/// end of the given MachineBasicBlock, and sets up the first operand as a
422/// destination virtual register.
423inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
424 const MCInstrDesc &MCID, Register DestReg) {
425 return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
426}
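For illustration, a minimal sketch of appending an instruction with the overload above (DstReg, SrcReg and DL are placeholders; TargetOpcode::COPY is used only as a convenient target-independent opcode):

const TargetInstrInfo &TII = *MBB->getParent()->getSubtarget().getInstrInfo();
BuildMI(MBB, DL, TII.get(TargetOpcode::COPY), DstReg) // appended at MBB->end()
    .addReg(SrcReg);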
427
428/// This version of the builder builds a DBG_VALUE intrinsic
429/// for either a value in a register or a register-indirect
430/// address. The convention is that a DBG_VALUE is indirect iff the
431/// second operand is an immediate.
432MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
433 const MCInstrDesc &MCID, bool IsIndirect,
434 Register Reg, const MDNode *Variable,
435 const MDNode *Expr);
436
437/// This version of the builder builds a DBG_VALUE intrinsic
438/// for a MachineOperand.
439MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
440 const MCInstrDesc &MCID, bool IsIndirect,
441 MachineOperand &MO, const MDNode *Variable,
442 const MDNode *Expr);
443
444/// This version of the builder builds a DBG_VALUE intrinsic
445/// for either a value in a register or a register-indirect
446/// address and inserts it at position I.
447MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
448 MachineBasicBlock::iterator I, const DebugLoc &DL,
449 const MCInstrDesc &MCID, bool IsIndirect,
450 Register Reg, const MDNode *Variable,
451 const MDNode *Expr);
452
453/// This version of the builder builds a DBG_VALUE intrinsic
454/// for a machine operand and inserts it at position I.
455MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
456 MachineBasicBlock::iterator I, const DebugLoc &DL,
457 const MCInstrDesc &MCID, bool IsIndirect,
458 MachineOperand &MO, const MDNode *Variable,
459 const MDNode *Expr);
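A hedged sketch of how the register form above is typically invoked; Reg, Variable (a DILocalVariable) and Expr (a DIExpression) are placeholders provided by the caller:

BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE),
        /*IsIndirect=*/false, Reg, Variable, Expr);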
460
461/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
462MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
463 MachineBasicBlock::iterator I,
464 const MachineInstr &Orig, int FrameIndex);
465
466/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
467/// modifying an instruction in place while iterating over a basic block.
468void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex);
469
470inline unsigned getDefRegState(bool B) {
471 return B ? RegState::Define : 0;
472}
473inline unsigned getImplRegState(bool B) {
474 return B ? RegState::Implicit : 0;
475}
476inline unsigned getKillRegState(bool B) {
477 return B ? RegState::Kill : 0;
478}
479inline unsigned getDeadRegState(bool B) {
480 return B ? RegState::Dead : 0;
481}
482inline unsigned getUndefRegState(bool B) {
483 return B ? RegState::Undef : 0;
484}
485inline unsigned getInternalReadRegState(bool B) {
486 return B ? RegState::InternalRead : 0;
487}
488inline unsigned getDebugRegState(bool B) {
489 return B ? RegState::Debug : 0;
490}
491inline unsigned getRenamableRegState(bool B) {
492 return B ? RegState::Renamable : 0;
493}
494
495/// Get all register state flags from machine operand \p RegOp.
496inline unsigned getRegState(const MachineOperand &RegOp) {
497 assert(RegOp.isReg() && "Not a register operand");
498 return getDefRegState(RegOp.isDef()) | getImplRegState(RegOp.isImplicit()) |
499 getKillRegState(RegOp.isKill()) | getDeadRegState(RegOp.isDead()) |
500 getUndefRegState(RegOp.isUndef()) |
501 getInternalReadRegState(RegOp.isInternalRead()) |
502 getDebugRegState(RegOp.isDebug()) |
503 getRenamableRegState(Register::isPhysicalRegister(RegOp.getReg()) &&
504 RegOp.isRenamable());
505}
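These helpers compose into the flag word accepted by addReg(). A small sketch of re-emitting a register operand elsewhere while preserving its liveness flags (MI, MIB and the operand index are placeholders):

const MachineOperand &MO = MI.getOperand(1);
MIB.addReg(MO.getReg(), getRegState(MO), MO.getSubReg());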
506
507/// Helper class for constructing bundles of MachineInstrs.
508///
509/// MIBundleBuilder can create a bundle from scratch by inserting new
510/// MachineInstrs one at a time, or it can create a bundle from a sequence of
511/// existing MachineInstrs in a basic block.
512class MIBundleBuilder {
513 MachineBasicBlock &MBB;
514 MachineBasicBlock::instr_iterator Begin;
515 MachineBasicBlock::instr_iterator End;
516
517public:
518 /// Create an MIBundleBuilder that inserts instructions into a new bundle in
519 /// BB above the bundle or instruction at Pos.
520 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
521 : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
522
523 /// Create a bundle from the sequence of instructions between B and E.
524 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
525 MachineBasicBlock::iterator E)
526 : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
527 assert(B != E && "No instructions to bundle");
528 ++B;
529 while (B != E) {
530 MachineInstr &MI = *B;
531 ++B;
532 MI.bundleWithPred();
533 }
534 }
535
536 /// Create an MIBundleBuilder representing an existing instruction or bundle
537 /// that has MI as its head.
538 explicit MIBundleBuilder(MachineInstr *MI)
539 : MBB(*MI->getParent()), Begin(MI),
540 End(getBundleEnd(MI->getIterator())) {}
541
542 /// Return a reference to the basic block containing this bundle.
543 MachineBasicBlock &getMBB() const { return MBB; }
544
545 /// Return true if no instructions have been inserted in this bundle yet.
546 /// Empty bundles aren't representable in a MachineBasicBlock.
547 bool empty() const { return Begin == End; }
548
549 /// Return an iterator to the first bundled instruction.
550 MachineBasicBlock::instr_iterator begin() const { return Begin; }
551
552 /// Return an iterator beyond the last bundled instruction.
553 MachineBasicBlock::instr_iterator end() const { return End; }
554
555 /// Insert MI into this bundle before I which must point to an instruction in
556 /// the bundle, or end().
557 MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
558 MachineInstr *MI) {
559 MBB.insert(I, MI);
560 if (I == Begin) {
561 if (!empty())
562 MI->bundleWithSucc();
563 Begin = MI->getIterator();
564 return *this;
565 }
566 if (I == End) {
567 MI->bundleWithPred();
568 return *this;
569 }
570 // MI was inserted in the middle of the bundle, so its neighbors' flags are
571 // already fine. Update MI's bundle flags manually.
572 MI->setFlag(MachineInstr::BundledPred);
573 MI->setFlag(MachineInstr::BundledSucc);
574 return *this;
575 }
576
577 /// Insert MI into MBB by prepending it to the instructions in the bundle.
578 /// MI will become the first instruction in the bundle.
579 MIBundleBuilder &prepend(MachineInstr *MI) {
580 return insert(begin(), MI);
581 }
582
583 /// Insert MI into MBB by appending it to the instructions in the bundle.
584 /// MI will become the last instruction in the bundle.
585 MIBundleBuilder &append(MachineInstr *MI) {
586 return insert(end(), MI);
587 }
588};
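A minimal sketch of bundling two freshly built instructions in front of InsertPt (OpA, OpB, R0, MBB and InsertPt are placeholders; finalizeBundle() is declared in llvm/CodeGen/MachineInstrBundle.h):

MIBundleBuilder Bundler(MBB, InsertPt);
Bundler.append(BuildMI(MF, DL, TII.get(OpA), R0));                        // defines R0
Bundler.append(BuildMI(MF, DL, TII.get(OpB)).addReg(R0, RegState::Kill)); // last use of R0
finalizeBundle(MBB, Bundler.begin(), Bundler.end());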
589
590} // end namespace llvm
591
592#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H

/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineInstr class, which is the
10// basic representation for all target dependent machine instructions used by
11// the back end.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEINSTR_H
16#define LLVM_CODEGEN_MACHINEINSTR_H
17
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/PointerSumType.h"
20#include "llvm/ADT/ilist.h"
21#include "llvm/ADT/ilist_node.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/CodeGen/MachineMemOperand.h"
25#include "llvm/CodeGen/MachineOperand.h"
26#include "llvm/CodeGen/TargetOpcodes.h"
27#include "llvm/IR/DebugInfoMetadata.h"
28#include "llvm/IR/DebugLoc.h"
29#include "llvm/IR/InlineAsm.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCSymbol.h"
32#include "llvm/Support/ArrayRecycler.h"
33#include "llvm/Support/TrailingObjects.h"
34#include <algorithm>
35#include <cassert>
36#include <cstdint>
37#include <utility>
38
39namespace llvm {
40
41template <typename T> class ArrayRef;
42class DIExpression;
43class DILocalVariable;
44class MachineBasicBlock;
45class MachineFunction;
46class MachineMemOperand;
47class MachineRegisterInfo;
48class ModuleSlotTracker;
49class raw_ostream;
50template <typename T> class SmallVectorImpl;
51class SmallBitVector;
52class StringRef;
53class TargetInstrInfo;
54class TargetRegisterClass;
55class TargetRegisterInfo;
56
57//===----------------------------------------------------------------------===//
58/// Representation of each machine instruction.
59///
60/// This class isn't a POD type, but it must have a trivial destructor. When a
61/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
62/// without having their destructor called.
63///
64class MachineInstr
65 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
66 ilist_sentinel_tracking<true>> {
67public:
68 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
69
70 /// Flags to specify different kinds of comments to output in
71 /// assembly code. These flags carry semantic information not
72 /// otherwise easily derivable from the IR text.
73 ///
74 enum CommentFlag {
75 ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
76 NoSchedComment = 0x2,
77 TAsmComments = 0x4 // Target Asm comments should start from this value.
78 };
79
80 enum MIFlag {
81 NoFlags = 0,
82 FrameSetup = 1 << 0, // Instruction is used as a part of
83 // function frame setup code.
84 FrameDestroy = 1 << 1, // Instruction is used as a part of
85 // function frame destruction code.
86 BundledPred = 1 << 2, // Instruction has bundled predecessors.
87 BundledSucc = 1 << 3, // Instruction has bundled successors.
88 FmNoNans = 1 << 4, // Instruction does not support Fast
89 // math nan values.
90 FmNoInfs = 1 << 5, // Instruction does not support Fast
91 // math infinity values.
92 FmNsz = 1 << 6, // Instruction is not required to retain
93 // signed zero values.
94 FmArcp = 1 << 7, // Instruction supports Fast math
95 // reciprocal approximations.
96 FmContract = 1 << 8, // Instruction supports Fast math
97 // contraction operations like fma.
98 FmAfn = 1 << 9, // Instruction may map to Fast math
99 // intrinsic approximation.
100 FmReassoc = 1 << 10, // Instruction supports Fast math
101 // reassociation of operand order.
102 NoUWrap = 1 << 11, // Instruction supports binary operator
103 // no unsigned wrap.
104 NoSWrap = 1 << 12, // Instruction supports binary operator
105 // no signed wrap.
106 IsExact = 1 << 13, // Instruction supports division is
107 // known to be exact.
108 FPExcept = 1 << 14, // Instruction may raise floating-point
109 // exceptions.
110 };
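As a hedged usage sketch, these flags are set and queried through setFlag()/getFlag() declared further down (MI stands for an arbitrary instruction in some calling pass):
  // MI.setFlag(MachineInstr::FrameSetup);
  // if (MI.getFlag(MachineInstr::FmReassoc) && MI.getFlag(MachineInstr::FmContract))
  //   ...; // both reassociation and contraction are permitted on MI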
111
112private:
113 const MCInstrDesc *MCID; // Instruction descriptor.
114 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
115
116 // Operands are allocated by an ArrayRecycler.
117 MachineOperand *Operands = nullptr; // Pointer to the first operand.
118 unsigned NumOperands = 0; // Number of operands on instruction.
119 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
120 OperandCapacity CapOperands; // Capacity of the Operands array.
121
122 uint16_t Flags = 0; // Various bits of additional
123 // information about machine
124 // instruction.
125
126 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
127 // the AsmPrinter to emit helpful
128 // comments. This is *not* semantic
129 // information. Do not use this for
130 // anything other than to convey comment
131 // information to AsmPrinter.
132
133 /// Internal implementation detail class that provides out-of-line storage for
134 /// extra info used by the machine instruction when this info cannot be stored
135 /// in-line within the instruction itself.
136 ///
137 /// This has to be defined eagerly due to the implementation constraints of
138 /// `PointerSumType` where it is used.
139 class ExtraInfo final
140 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
141 public:
142 static ExtraInfo *create(BumpPtrAllocator &Allocator,
143 ArrayRef<MachineMemOperand *> MMOs,
144 MCSymbol *PreInstrSymbol = nullptr,
145 MCSymbol *PostInstrSymbol = nullptr) {
146 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
147 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
148 auto *Result = new (Allocator.Allocate(
149 totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
150 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
151 alignof(ExtraInfo)))
152 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
153
154 // Copy the actual data into the trailing objects.
155 std::copy(MMOs.begin(), MMOs.end(),
156 Result->getTrailingObjects<MachineMemOperand *>());
157
158 if (HasPreInstrSymbol)
159 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
160 if (HasPostInstrSymbol)
161 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
162 PostInstrSymbol;
163
164 return Result;
165 }
166
167 ArrayRef<MachineMemOperand *> getMMOs() const {
168 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
169 }
170
171 MCSymbol *getPreInstrSymbol() const {
172 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
173 }
174
175 MCSymbol *getPostInstrSymbol() const {
176 return HasPostInstrSymbol
177 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
178 : nullptr;
179 }
180
181 private:
182 friend TrailingObjects;
183
184 // Description of the extra info, used to interpret the actual optional
185 // data appended.
186 //
187 // Note that this is not terribly space optimized. This leaves a great deal
188 // of flexibility to fit more in here later.
189 const int NumMMOs;
190 const bool HasPreInstrSymbol;
191 const bool HasPostInstrSymbol;
192
193 // Implement the `TrailingObjects` internal API.
194 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
195 return NumMMOs;
196 }
197 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
198 return HasPreInstrSymbol + HasPostInstrSymbol;
199 }
200
201 // Just a boring constructor to allow us to initialize the sizes. Always use
202 // the `create` routine above.
203 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
204 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
205 HasPostInstrSymbol(HasPostInstrSymbol) {}
206 };
207
208 /// Enumeration of the kinds of inline extra info available. It is important
209 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
210 /// it accessible as an `ArrayRef`.
211 enum ExtraInfoInlineKinds {
212 EIIK_MMO = 0,
213 EIIK_PreInstrSymbol,
214 EIIK_PostInstrSymbol,
215 EIIK_OutOfLine
216 };
217
218 // We store extra information about the instruction here. The common case is
219 // expected to be nothing or a single pointer (typically a MMO or a symbol).
220 // We work to optimize this common case by storing it inline here rather than
221 // requiring a separate allocation, but we fall back to an allocation when
222 // multiple pointers are needed.
223 PointerSumType<ExtraInfoInlineKinds,
224 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
225 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
226 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
227 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
228 Info;
229
230 DebugLoc debugLoc; // Source line information.
231
232 // Intrusive list support
233 friend struct ilist_traits<MachineInstr>;
234 friend struct ilist_callback_traits<MachineBasicBlock>;
235 void setParent(MachineBasicBlock *P) { Parent = P; }
236
237 /// This constructor creates a copy of the given
238 /// MachineInstr in the given MachineFunction.
239 MachineInstr(MachineFunction &, const MachineInstr &);
240
241 /// This constructor creates a MachineInstr and adds the implicit operands.
242 /// It reserves space for number of operands specified by
243 /// MCInstrDesc. An explicit DebugLoc is supplied.
244 MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
245 bool NoImp = false);
246
247 // MachineInstrs are pool-allocated and owned by MachineFunction.
248 friend class MachineFunction;
249
250public:
251 MachineInstr(const MachineInstr &) = delete;
252 MachineInstr &operator=(const MachineInstr &) = delete;
253 // Use MachineFunction::DeleteMachineInstr() instead.
254 ~MachineInstr() = delete;
255
256 const MachineBasicBlock* getParent() const { return Parent; }
257 MachineBasicBlock* getParent() { return Parent; }
258
259 /// Return the function that contains the basic block that this instruction
260 /// belongs to.
261 ///
262 /// Note: this is undefined behaviour if the instruction does not have a
263 /// parent.
264 const MachineFunction *getMF() const;
265 MachineFunction *getMF() {
266 return const_cast<MachineFunction *>(
267 static_cast<const MachineInstr *>(this)->getMF());
268 }
269
270 /// Return the asm printer flags bitvector.
271 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
272
273 /// Clear the AsmPrinter bitvector.
274 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
275
276 /// Return whether an AsmPrinter flag is set.
277 bool getAsmPrinterFlag(CommentFlag Flag) const {
278 return AsmPrinterFlags & Flag;
279 }
280
281 /// Set a flag for the AsmPrinter.
282 void setAsmPrinterFlag(uint8_t Flag) {
283 AsmPrinterFlags |= Flag;
284 }
285
286 /// Clear specific AsmPrinter flags.
287 void clearAsmPrinterFlag(CommentFlag Flag) {
288 AsmPrinterFlags &= ~Flag;
289 }
290
291 /// Return the MI flags bitvector.
292 uint16_t getFlags() const {
293 return Flags;
294 }
295
296 /// Return whether an MI flag is set.
297 bool getFlag(MIFlag Flag) const {
298 return Flags & Flag;
299 }
300
301 /// Set a MI flag.
302 void setFlag(MIFlag Flag) {
303 Flags |= (uint16_t)Flag;
304 }
305
306 void setFlags(unsigned flags) {
307 // Filter out the automatically maintained flags.
308 unsigned Mask = BundledPred | BundledSucc;
309 Flags = (Flags & Mask) | (flags & ~Mask);
310 }
311
312 /// clearFlag - Clear a MI flag.
313 void clearFlag(MIFlag Flag) {
314 Flags &= ~((uint16_t)Flag);
315 }
316
317 /// Return true if MI is in a bundle (but not the first MI in a bundle).
318 ///
319 /// A bundle looks like this before it's finalized:
320 /// ----------------
321 /// | MI |
322 /// ----------------
323 /// |
324 /// ----------------
325 /// | MI * |
326 /// ----------------
327 /// |
328 /// ----------------
329 /// | MI * |
330 /// ----------------
331 /// In this case, the first MI starts a bundle but is not inside a bundle; the
332 /// next 2 MIs are considered "inside" the bundle.
333 ///
334 /// After a bundle is finalized, it looks like this:
335 /// ----------------
336 /// | Bundle |
337 /// ----------------
338 /// |
339 /// ----------------
340 /// | MI * |
341 /// ----------------
342 /// |
343 /// ----------------
344 /// | MI * |
345 /// ----------------
346 /// |
347 /// ----------------
348 /// | MI * |
349 /// ----------------
350 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
351 /// a bundle, but the next three MIs are.
352 bool isInsideBundle() const {
353 return getFlag(BundledPred);
354 }
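A brief sketch of how these bundle predicates are usually combined by callers (MI is an arbitrary instruction):
  // if (MI.isBundled() && !MI.isBundledWithPred())
  //   ...; // MI heads its bundle; the bundled instructions follow it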
355
356 /// Return true if this instruction is part of a bundle. This is true
357 /// if either this instruction or the instruction following it is marked "InsideBundle".
358 bool isBundled() const {
359 return isBundledWithPred() || isBundledWithSucc();
360 }
361
362 /// Return true if this instruction is part of a bundle, and it is not the
363 /// first instruction in the bundle.
364 bool isBundledWithPred() const { return getFlag(BundledPred); }
365
366 /// Return true if this instruction is part of a bundle, and it is not the
367 /// last instruction in the bundle.
368 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
369
370 /// Bundle this instruction with its predecessor. This can be an unbundled
371 /// instruction, or it can be the first instruction in a bundle.
372 void bundleWithPred();
373
374 /// Bundle this instruction with its successor. This can be an unbundled
375 /// instruction, or it can be the last instruction in a bundle.
376 void bundleWithSucc();
377
378 /// Break bundle above this instruction.
379 void unbundleFromPred();
380
381 /// Break bundle below this instruction.
382 void unbundleFromSucc();
383
384 /// Returns the debug location id of this MachineInstr.
385 const DebugLoc &getDebugLoc() const { return debugLoc; }
386
387 /// Return the debug variable referenced by
388 /// this DBG_VALUE instruction.
389 const DILocalVariable *getDebugVariable() const;
390
391 /// Return the complex address expression referenced by
392 /// this DBG_VALUE instruction.
393 const DIExpression *getDebugExpression() const;
394
395 /// Return the debug label referenced by
396 /// this DBG_LABEL instruction.
397 const DILabel *getDebugLabel() const;
398
399 /// Emit an error referring to the source location of this instruction.
400 /// This should only be used for inline assembly that is somehow
401 /// impossible to compile. Other errors should have been handled much
402 /// earlier.
403 ///
404 /// If this method returns, the caller should try to recover from the error.
405 void emitError(StringRef Msg) const;
406
407 /// Returns the target instruction descriptor of this MachineInstr.
408 const MCInstrDesc &getDesc() const { return *MCID; }
409
410 /// Returns the opcode of this MachineInstr.
411 unsigned getOpcode() const { return MCID->Opcode; }
412
413 /// Returns the total number of operands.
414 unsigned getNumOperands() const { return NumOperands; }
415
416 const MachineOperand& getOperand(unsigned i) const {
417 assert(i < getNumOperands() && "getOperand() out of range!");
418 return Operands[i];
419 }
420 MachineOperand& getOperand(unsigned i) {
421 assert(i < getNumOperands() && "getOperand() out of range!");
422 return Operands[i];
423 }
424
425 /// Returns the total number of definitions.
426 unsigned getNumDefs() const {
427 return getNumExplicitDefs() + MCID->getNumImplicitDefs();
428 }
429
430 /// Returns true if the instruction has an implicit definition.
431 bool hasImplicitDef() const {
432 for (unsigned I = getNumExplicitOperands(), E = getNumOperands();
433 I != E; ++I) {
434 const MachineOperand &MO = getOperand(I);
435 if (MO.isDef() && MO.isImplicit())
436 return true;
437 }
438 return false;
439 }
440
441 /// Returns the number of implicit operands.
442 unsigned getNumImplicitOperands() const {
443 return getNumOperands() - getNumExplicitOperands();
444 }
445
446 /// Return true if operand \p OpIdx is a subregister index.
447 bool isOperandSubregIdx(unsigned OpIdx) const {
448 assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
449 "Expected MO_Immediate operand type.");
450 if (isExtractSubreg() && OpIdx == 2)
451 return true;
452 if (isInsertSubreg() && OpIdx == 3)
453 return true;
454 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
455 return true;
456 if (isSubregToReg() && OpIdx == 3)
457 return true;
458 return false;
459 }
460
461 /// Returns the number of non-implicit operands.
462 unsigned getNumExplicitOperands() const;
463
464 /// Returns the number of non-implicit definitions.
465 unsigned getNumExplicitDefs() const;
466
467 /// iterator/begin/end - Iterate over all operands of a machine instruction.
468 using mop_iterator = MachineOperand *;
469 using const_mop_iterator = const MachineOperand *;
470
471 mop_iterator operands_begin() { return Operands; }
472 mop_iterator operands_end() { return Operands + NumOperands; }
473
474 const_mop_iterator operands_begin() const { return Operands; }
475 const_mop_iterator operands_end() const { return Operands + NumOperands; }
476
477 iterator_range<mop_iterator> operands() {
478 return make_range(operands_begin(), operands_end());
479 }
480 iterator_range<const_mop_iterator> operands() const {
481 return make_range(operands_begin(), operands_end());
482 }
483 iterator_range<mop_iterator> explicit_operands() {
484 return make_range(operands_begin(),
485 operands_begin() + getNumExplicitOperands());
486 }
487 iterator_range<const_mop_iterator> explicit_operands() const {
488 return make_range(operands_begin(),
489 operands_begin() + getNumExplicitOperands());
490 }
491 iterator_range<mop_iterator> implicit_operands() {
492 return make_range(explicit_operands().end(), operands_end());
493 }
494 iterator_range<const_mop_iterator> implicit_operands() const {
495 return make_range(explicit_operands().end(), operands_end());
496 }
497 /// Returns a range over all explicit operands that are register definitions.
498 /// Implicit definitions are not included!
499 iterator_range<mop_iterator> defs() {
500 return make_range(operands_begin(),
501 operands_begin() + getNumExplicitDefs());
502 }
503 /// \copydoc defs()
504 iterator_range<const_mop_iterator> defs() const {
505 return make_range(operands_begin(),
506 operands_begin() + getNumExplicitDefs());
507 }
508 /// Returns a range that includes all operands that are register uses.
509 /// This may include unrelated operands which are not register uses.
510 iterator_range<mop_iterator> uses() {
511 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
512 }
513 /// \copydoc uses()
514 iterator_range<const_mop_iterator> uses() const {
515 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
516 }
517 iterator_range<mop_iterator> explicit_uses() {
518 return make_range(operands_begin() + getNumExplicitDefs(),
519 operands_begin() + getNumExplicitOperands());
520 }
521 iterator_range<const_mop_iterator> explicit_uses() const {
522 return make_range(operands_begin() + getNumExplicitDefs(),
523 operands_begin() + getNumExplicitOperands());
524 }
525
526 /// Returns the number of the operand iterator \p I points to.
527 unsigned getOperandNo(const_mop_iterator I) const {
528 return I - operands_begin();
529 }
530
531 /// Access to memory operands of the instruction. If there are none, that does
532 /// not imply anything about whether the function accesses memory. Instead,
533 /// the caller must behave conservatively.
534 ArrayRef<MachineMemOperand *> memoperands() const {
535 if (!Info)
536 return {};
537
538 if (Info.is<EIIK_MMO>())
539 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
540
541 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
542 return EI->getMMOs();
543
544 return {};
545 }
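A minimal sketch of the usual conservative query over memory operands (the surrounding analysis and its return convention are assumed):
  // if (MI.memoperands_empty())
  //   return false;                    // unknown access: be conservative
  // for (const MachineMemOperand *MMO : MI.memoperands())
  //   if (MMO->isVolatile())
  //     return false;                  // never reorder volatile accesses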
546
547 /// Access to memory operands of the instruction.
548 ///
549 /// If `memoperands_begin() == memoperands_end()`, that does not imply
550 /// anything about whether the function accesses memory. Instead, the caller
551 /// must behave conservatively.
552 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
553
554 /// Access to memory operands of the instruction.
555 ///
556 /// If `memoperands_begin() == memoperands_end()`, that does not imply
557 /// anything about whether the function accesses memory. Instead, the caller
558 /// must behave conservatively.
559 mmo_iterator memoperands_end() const { return memoperands().end(); }
560
561 /// Return true if we don't have any memory operands which describe the
562 /// memory access done by this instruction. If this is true, calling code
563 /// must be conservative.
564 bool memoperands_empty() const { return memoperands().empty(); }
565
566 /// Return true if this instruction has exactly one MachineMemOperand.
567 bool hasOneMemOperand() const { return memoperands().size() == 1; }
568
569 /// Return the number of memory operands.
570 unsigned getNumMemOperands() const { return memoperands().size(); }
571
572 /// Helper to extract a pre-instruction symbol if one has been added.
573 MCSymbol *getPreInstrSymbol() const {
574 if (!Info)
575 return nullptr;
576 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
577 return S;
578 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
579 return EI->getPreInstrSymbol();
580
581 return nullptr;
582 }
583
584 /// Helper to extract a post-instruction symbol if one has been added.
585 MCSymbol *getPostInstrSymbol() const {
586 if (!Info)
587 return nullptr;
588 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
589 return S;
590 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
591 return EI->getPostInstrSymbol();
592
593 return nullptr;
594 }
595
596 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
597 /// queries but they are bundle aware.
598
599 enum QueryType {
600 IgnoreBundle, // Ignore bundles
601 AnyInBundle, // Return true if any instruction in bundle has property
602 AllInBundle // Return true if all instructions in bundle have property
603 };
604
605 /// Return true if the instruction (or in the case of a bundle,
606 /// the instructions inside the bundle) has the specified property.
607 /// The first argument is the property being queried.
608 /// The second argument indicates whether the query should look inside
609 /// instruction bundles.
610 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
611 assert(MCFlag < 64 &&
612 "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
613 // Inline the fast path for unbundled or bundle-internal instructions.
614 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
615 return getDesc().getFlags() & (1ULL << MCFlag);
616
617 // If this is the first instruction in a bundle, take the slow path.
618 return hasPropertyInBundle(1ULL << MCFlag, Type);
619 }
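A short sketch of a bundle-aware property query from a caller's perspective (MI is an arbitrary, possibly bundled, instruction):
  // if (MI.hasProperty(MCID::Terminator, MachineInstr::AnyInBundle))
  //   ...; // some instruction in MI's bundle terminates the block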
620
621 /// Return true if this is an instruction that should go through the usual
622 /// legalization steps.
623 bool isPreISelOpcode(QueryType Type = IgnoreBundle) const {
624 return hasProperty(MCID::PreISelOpcode, Type);
625 }
626
627 /// Return true if this instruction can have a variable number of operands.
628 /// In this case, the variable operands will be after the normal
629 /// operands but before the implicit definitions and uses (if any are
630 /// present).
631 bool isVariadic(QueryType Type = IgnoreBundle) const {
632 return hasProperty(MCID::Variadic, Type);
633 }
634
635 /// Set if this instruction has an optional definition, e.g.
636 /// ARM instructions which can set the condition code if the 's' bit is set.
637 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
638 return hasProperty(MCID::HasOptionalDef, Type);
639 }
640
641 /// Return true if this is a pseudo instruction that doesn't
642 /// correspond to a real machine instruction.
643 bool isPseudo(QueryType Type = IgnoreBundle) const {
644 return hasProperty(MCID::Pseudo, Type);
645 }
646
647 bool isReturn(QueryType Type = AnyInBundle) const {
648 return hasProperty(MCID::Return, Type);
649 }
650
651 /// Return true if this is an instruction that marks the end of an EH scope,
652 /// i.e., a catchpad or a cleanuppad instruction.
653 bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
654 return hasProperty(MCID::EHScopeReturn, Type);
655 }
656
657 bool isCall(QueryType Type = AnyInBundle) const {
658 return hasProperty(MCID::Call, Type);
659 }
660
661 /// Returns true if the specified instruction stops control flow
662 /// from executing the instruction immediately following it. Examples include
663 /// unconditional branches and return instructions.
664 bool isBarrier(QueryType Type = AnyInBundle) const {
665 return hasProperty(MCID::Barrier, Type);
666 }
667
668 /// Returns true if this instruction is part of the terminator for a basic block.
669 /// Typically this is things like return and branch instructions.
670 ///
671 /// Various passes use this to insert code into the bottom of a basic block,
672 /// but before control flow occurs.
673 bool isTerminator(QueryType Type = AnyInBundle) const {
674 return hasProperty(MCID::Terminator, Type);
675 }
676
677 /// Returns true if this is a conditional, unconditional, or indirect branch.
678 /// Predicates below can be used to discriminate between
679 /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
680 /// get more information.
681 bool isBranch(QueryType Type = AnyInBundle) const {
682 return hasProperty(MCID::Branch, Type);
683 }
684
685 /// Return true if this is an indirect branch, such as a
686 /// branch through a register.
687 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
688 return hasProperty(MCID::IndirectBranch, Type);
689 }
690
691 /// Return true if this is a branch which may fall
692 /// through to the next instruction or may transfer control flow to some other
693 /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
694 /// information about this branch.
695 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
696 return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type);
697 }
698
699 /// Return true if this is a branch which always
700 /// transfers control flow to some other block. The
701 /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
702 /// about this branch.
703 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
704 return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type);
705 }
706
707 /// Return true if this instruction has a predicate operand that
708 /// controls execution. It may be set to 'always', or may be set to other
709 /// values. There are various methods in TargetInstrInfo that can be used to
710 /// control and modify the predicate in this instruction.
711 bool isPredicable(QueryType Type = AllInBundle) const {
712 // If it's a bundle then all bundled instructions must be predicable for this
713 // to return true.
714 return hasProperty(MCID::Predicable, Type);
715 }
716
717 /// Return true if this instruction is a comparison.
718 bool isCompare(QueryType Type = IgnoreBundle) const {
719 return hasProperty(MCID::Compare, Type);
720 }
721
722 /// Return true if this instruction is a move immediate
723 /// (including conditional moves) instruction.
724 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
725 return hasProperty(MCID::MoveImm, Type);
726 }
727
728 /// Return true if this instruction is a register move.
729 /// (including moving values from subreg to reg)
730 bool isMoveReg(QueryType Type = IgnoreBundle) const {
731 return hasProperty(MCID::MoveReg, Type);
732 }
733
734 /// Return true if this instruction is a bitcast instruction.
735 bool isBitcast(QueryType Type = IgnoreBundle) const {
736 return hasProperty(MCID::Bitcast, Type);
737 }
738
739 /// Return true if this instruction is a select instruction.
740 bool isSelect(QueryType Type = IgnoreBundle) const {
741 return hasProperty(MCID::Select, Type);
742 }
743
744 /// Return true if this instruction cannot be safely duplicated.
745 /// For example, if the instruction has a unique label attached
746 /// to it, duplicating it would cause multiple definition errors.
747 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
748 return hasProperty(MCID::NotDuplicable, Type);
749 }
750
751 /// Return true if this instruction is convergent.
752 /// Convergent instructions can not be made control-dependent on any
753 /// additional values.
754 bool isConvergent(QueryType Type = AnyInBundle) const {
755 if (isInlineAsm()) {
756 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
757 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
758 return true;
759 }
760 return hasProperty(MCID::Convergent, Type);
761 }
762
763 /// Returns true if the specified instruction has a delay slot
764 /// which must be filled by the code generator.
765 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
766 return hasProperty(MCID::DelaySlot, Type);
767 }
768
769 /// Return true for instructions that can be folded as
770 /// memory operands in other instructions. The most common use for this
771 /// is instructions that are simple loads from memory that don't modify
772 /// the loaded value in any way, but it can also be used for instructions
773 /// that can be expressed as constant-pool loads, such as V_SETALLONES
774 /// on x86, to allow them to be folded when it is beneficial.
775 /// This should only be set on instructions that return a value in their
776 /// only virtual register definition.
777 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
778 return hasProperty(MCID::FoldableAsLoad, Type);
779 }
780
781 /// Return true if this instruction behaves
782 /// the same way as the generic REG_SEQUENCE instructions.
783 /// E.g., on ARM,
784 /// dX VMOVDRR rY, rZ
785 /// is equivalent to
786 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
787 ///
788 /// Note that for the optimizers to be able to take advantage of
789 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
790 /// overridden accordingly.
791 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
792 return hasProperty(MCID::RegSequence, Type);
793 }
794
795 /// Return true if this instruction behaves
796 /// the same way as the generic EXTRACT_SUBREG instructions.
797 /// E.g., on ARM,
798 /// rX, rY VMOVRRD dZ
799 /// is equivalent to two EXTRACT_SUBREG:
800 /// rX = EXTRACT_SUBREG dZ, ssub_0
801 /// rY = EXTRACT_SUBREG dZ, ssub_1
802 ///
803 /// Note that for the optimizers to be able to take advantage of
804 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
805 /// overridden accordingly.
806 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
807 return hasProperty(MCID::ExtractSubreg, Type);
808 }
809
810 /// Return true if this instruction behaves
811 /// the same way as the generic INSERT_SUBREG instructions.
812 /// E.g., on ARM,
813 /// dX = VSETLNi32 dY, rZ, Imm
814 /// is equivalent to an INSERT_SUBREG:
815 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
816 ///
817 /// Note that for the optimizers to be able to take advantage of
818 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
819 /// override accordingly.
820 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
821 return hasProperty(MCID::InsertSubreg, Type);
822 }
823
824 //===--------------------------------------------------------------------===//
825 // Side Effect Analysis
826 //===--------------------------------------------------------------------===//
827
828 /// Return true if this instruction could possibly read memory.
829 /// Instructions with this flag set are not necessarily simple load
830 /// instructions, they may load a value and modify it, for example.
831 bool mayLoad(QueryType Type = AnyInBundle) const {
832 if (isInlineAsm()) {
833 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
834 if (ExtraInfo & InlineAsm::Extra_MayLoad)
835 return true;
836 }
837 return hasProperty(MCID::MayLoad, Type);
838 }
839
840 /// Return true if this instruction could possibly modify memory.
841 /// Instructions with this flag set are not necessarily simple store
842 /// instructions, they may store a modified value based on their operands, or
843 /// may not actually modify anything, for example.
844 bool mayStore(QueryType Type = AnyInBundle) const {
845 if (isInlineAsm()) {
846 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
847 if (ExtraInfo & InlineAsm::Extra_MayStore)
848 return true;
849 }
850 return hasProperty(MCID::MayStore, Type);
851 }
852
853 /// Return true if this instruction could possibly read or modify memory.
854 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
855 return mayLoad(Type) || mayStore(Type);
856 }
857
858 /// Return true if this instruction could possibly raise a floating-point
859 /// exception. This is the case if the instruction is a floating-point
860 /// instruction that can in principle raise an exception, as indicated
861 /// by the MCID::MayRaiseFPException property, *and* at the same time,
862 /// the instruction is used in a context where we expect floating-point
863 /// exceptions might be enabled, as indicated by the FPExcept MI flag.
864 bool mayRaiseFPException() const {
865 return hasProperty(MCID::MayRaiseFPException) &&
866 getFlag(MachineInstr::MIFlag::FPExcept);
867 }
868
869 //===--------------------------------------------------------------------===//
870 // Flags that indicate whether an instruction can be modified by a method.
871 //===--------------------------------------------------------------------===//
872
873 /// Return true if this may be a 2- or 3-address
874 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
875 /// result if Y and Z are exchanged. If this flag is set, then the
876 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
877 /// instruction.
878 ///
879 /// Note that this flag may be set on instructions that are only commutable
880 /// sometimes. In these cases, the call to commuteInstruction will fail.
881 /// Also note that some instructions require non-trivial modification to
882 /// commute them.
883 bool isCommutable(QueryType Type = IgnoreBundle) const {
884 return hasProperty(MCID::Commutable, Type);
885 }
886
887 /// Return true if this is a 2-address instruction
888 /// which can be changed into a 3-address instruction if needed. Doing this
889 /// transformation can be profitable in the register allocator, because it
890 /// means that the instruction can use a 2-address form if possible, but
891 /// degrade into a less efficient form if the source and dest register cannot
892 /// be assigned to the same register. For example, this allows the x86
893 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
894 /// is the same speed as the shift but has bigger code size.
895 ///
896 /// If this returns true, then the target must implement the
897 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
898 /// is allowed to fail if the transformation isn't valid for this specific
899 /// instruction (e.g. shl reg, 4 on x86).
900 ///
901 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
902 return hasProperty(MCID::ConvertibleTo3Addr, Type);
903 }
904
905 /// Return true if this instruction requires
906 /// custom insertion support when the DAG scheduler is inserting it into a
907 /// machine basic block. If this is true for the instruction, it basically
908 /// means that it is a pseudo instruction used at SelectionDAG time that is
909 /// expanded out into magic code by the target when MachineInstrs are formed.
910 ///
911 /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
912 /// is used to insert this into the MachineBasicBlock.
913 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
914 return hasProperty(MCID::UsesCustomInserter, Type);
915 }
916
917 /// Return true if this instruction requires *adjustment*
918 /// after instruction selection by calling a target hook. For example, this
919 /// can be used to fill in ARM 's' optional operand depending on whether
920 /// the conditional flag register is used.
921 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
922 return hasProperty(MCID::HasPostISelHook, Type);
923 }
924
925 /// Returns true if this instruction is a candidate for remat.
926 /// This flag is deprecated, please don't use it anymore. If this
927 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
928 /// verify the instruction is really rematerializable.
929 bool isRematerializable(QueryType Type = AllInBundle) const {
930 // It's only possible to re-mat a bundle if all bundled instructions are
931 // re-materializable.
932 return hasProperty(MCID::Rematerializable, Type);
933 }
934
935 /// Returns true if this instruction has the same cost (or less) than a move
936 /// instruction. This is useful during certain types of optimizations
937 /// (e.g., remat during two-address conversion or machine licm)
938 /// where we would like to remat or hoist the instruction, but not if it costs
939 /// more than moving the instruction into the appropriate register. Note, we
940 /// are not marking copies from and to the same register class with this flag.
941 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
942 // Only returns true for a bundle if all bundled instructions are cheap.
943 return hasProperty(MCID::CheapAsAMove, Type);
944 }
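A hypothetical cost guard combining the two bundle-aware queries above; the helper is illustrative and not part of this header.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Both queries default to AllInBundle, so a bundle qualifies only when every
// bundled instruction is rematerializable and copy-cheap.
static bool isCheapRematCandidate(const MachineInstr &MI) {
  return MI.isRematerializable() && MI.isAsCheapAsAMove();
}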
945
946 /// Returns true if this instruction's source operands
947 /// have special register allocation requirements that are not captured by the
948 /// operand register classes. e.g. ARM::STRD's two source registers must be an
949 /// even / odd pair, ARM::STM registers have to be in ascending order.
950 /// Post-register allocation passes should not attempt to change allocations
951 /// for sources of instructions with this flag.
952 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
953 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
954 }
955
956 /// Returns true if this instruction's def operands
957 /// have special register allocation requirements that are not captured by the
958 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
959 /// even / odd pair, ARM::LDM registers have to be in ascending order.
960 /// Post-register allocation passes should not attempt to change allocations
961 /// for definitions of instructions with this flag.
962 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
963 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
964 }
965
966 enum MICheckType {
967 CheckDefs, // Check all operands for equality
968 CheckKillDead, // Check all operands including kill / dead markers
969 IgnoreDefs, // Ignore all definitions
970 IgnoreVRegDefs // Ignore virtual register definitions
971 };
972
973 /// Return true if this instruction is identical to \p Other.
974 /// Two instructions are identical if they have the same opcode and all their
975 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
976 /// Note that this means liveness related flags (dead, undef, kill) do not
977 /// affect the notion of identical.
978 bool isIdenticalTo(const MachineInstr &Other,
979 MICheckType Check = CheckDefs) const;
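A small sketch of the CSE-style value comparison this enables, ignoring virtual register definitions (the same check MachineInstrExpressionTrait uses near the end of this header); the helper name is made up.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Two instructions computing the same value from the same operands compare
// equal even though they define different virtual registers.
static bool sameExpression(const MachineInstr &A, const MachineInstr &B) {
  return A.isIdenticalTo(B, MachineInstr::IgnoreVRegDefs);
}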
980
981 /// Unlink 'this' from the containing basic block, and return it without
982 /// deleting it.
983 ///
984 /// This function can not be used on bundled instructions, use
985 /// removeFromBundle() to remove individual instructions from a bundle.
986 MachineInstr *removeFromParent();
987
988 /// Unlink this instruction from its basic block and return it without
989 /// deleting it.
990 ///
991 /// If the instruction is part of a bundle, the other instructions in the
992 /// bundle remain bundled.
993 MachineInstr *removeFromBundle();
994
995 /// Unlink 'this' from the containing basic block and delete it.
996 ///
997 /// If this instruction is the header of a bundle, the whole bundle is erased.
998 /// This function can not be used for instructions inside a bundle, use
999 /// eraseFromBundle() to erase individual bundled instructions.
1000 void eraseFromParent();
1001
1002 /// Unlink 'this' from the containing basic block and delete it.
1003 ///
1004 /// For all definitions mark their uses in DBG_VALUE nodes
1005 /// as undefined. Otherwise like eraseFromParent().
1006 void eraseFromParentAndMarkDBGValuesForRemoval();
1007
1008 /// Unlink 'this' from its basic block and delete it.
1009 ///
1010 /// If the instruction is part of a bundle, the other instructions in the
1011 /// bundle remain bundled.
1012 void eraseFromBundle();
1013
1014 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
1015 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
1016 bool isAnnotationLabel() const {
1017 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
1018 }
1019
1020 /// Returns true if the MachineInstr represents a label.
1021 bool isLabel() const {
1022 return isEHLabel() || isGCLabel() || isAnnotationLabel();
1023 }
1024
1025 bool isCFIInstruction() const {
1026 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
1027 }
1028
1029 // True if the instruction represents a position in the function.
1030 bool isPosition() const { return isLabel() || isCFIInstruction(); }
1031
1032 bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
1033 bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
1034 bool isDebugInstr() const { return isDebugValue() || isDebugLabel(); }
1035
1036 /// A DBG_VALUE is indirect iff the first operand is a register and
1037 /// the second operand is an immediate.
1038 bool isIndirectDebugValue() const {
1039 return isDebugValue()
1040 && getOperand(0).isReg()
1041 && getOperand(1).isImm();
1042 }
1043
1044 /// A DBG_VALUE is an entry value iff its debug expression contains the
1045 /// DW_OP_entry_value DWARF operation.
1046 bool isDebugEntryValue() const {
1047 return isDebugValue() && getDebugExpression()->isEntryValue();
1048 }
1049
1050 /// Return true if the instruction is a debug value which describes a part of
1051 /// a variable as unavailable.
1052 bool isUndefDebugValue() const {
1053 return isDebugValue() && getOperand(0).isReg() && !getOperand(0).getReg().isValid();
1054 }
1055
1056 bool isPHI() const {
1057 return getOpcode() == TargetOpcode::PHI ||
1058 getOpcode() == TargetOpcode::G_PHI;
1059 }
1060 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
1061 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
1062 bool isInlineAsm() const {
1063 return getOpcode() == TargetOpcode::INLINEASM ||
10. Assuming the condition is false
12. Returning the value 1, which participates in a condition later
1064 getOpcode() == TargetOpcode::INLINEASM_BR;
11. Assuming the condition is true
1065 }
1066
1067 /// FIXME: Seems like a layering violation that the AsmDialect, which is X86
1068 /// specific, be attached to a generic MachineInstr.
1069 bool isMSInlineAsm() const {
1070 return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
1071 }
1072
1073 bool isStackAligningInlineAsm() const;
1074 InlineAsm::AsmDialect getInlineAsmDialect() const;
1075
1076 bool isInsertSubreg() const {
1077 return getOpcode() == TargetOpcode::INSERT_SUBREG;
1078 }
1079
1080 bool isSubregToReg() const {
1081 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
1082 }
1083
1084 bool isRegSequence() const {
1085 return getOpcode() == TargetOpcode::REG_SEQUENCE;
1086 }
1087
1088 bool isBundle() const {
1089 return getOpcode() == TargetOpcode::BUNDLE;
1090 }
1091
1092 bool isCopy() const {
1093 return getOpcode() == TargetOpcode::COPY;
1094 }
1095
1096 bool isFullCopy() const {
1097 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
1098 }
1099
1100 bool isExtractSubreg() const {
1101 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
1102 }
1103
1104 /// Return true if the instruction behaves like a copy.
1105 /// This does not include native copy instructions.
1106 bool isCopyLike() const {
1107 return isCopy() || isSubregToReg();
1108 }
1109
1110 /// Return true if the instruction is an identity copy.
1111 bool isIdentityCopy() const {
1112 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
1113 getOperand(0).getSubReg() == getOperand(1).getSubReg();
1114 }
1115
1116 /// Return true if this instruction doesn't produce any output in the form of
1117 /// executable instructions.
1118 bool isMetaInstruction() const {
1119 switch (getOpcode()) {
1120 default:
1121 return false;
1122 case TargetOpcode::IMPLICIT_DEF:
1123 case TargetOpcode::KILL:
1124 case TargetOpcode::CFI_INSTRUCTION:
1125 case TargetOpcode::EH_LABEL:
1126 case TargetOpcode::GC_LABEL:
1127 case TargetOpcode::DBG_VALUE:
1128 case TargetOpcode::DBG_LABEL:
1129 case TargetOpcode::LIFETIME_START:
1130 case TargetOpcode::LIFETIME_END:
1131 return true;
1132 }
1133 }
1134
1135 /// Return true if this is a transient instruction that is either very likely
1136 /// to be eliminated during register allocation (such as copy-like
1137 /// instructions), or if this instruction doesn't have an execution-time cost.
1138 bool isTransient() const {
1139 switch (getOpcode()) {
1140 default:
1141 return isMetaInstruction();
1142 // Copy-like instructions are usually eliminated during register allocation.
1143 case TargetOpcode::PHI:
1144 case TargetOpcode::G_PHI:
1145 case TargetOpcode::COPY:
1146 case TargetOpcode::INSERT_SUBREG:
1147 case TargetOpcode::SUBREG_TO_REG:
1148 case TargetOpcode::REG_SEQUENCE:
1149 return true;
1150 }
1151 }
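A hypothetical code-size style estimate built on this predicate; the helper is not part of the header.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Count instructions expected to survive to emission, skipping copies/PHIs and
// meta instructions such as DBG_VALUE or CFI_INSTRUCTION.
static unsigned countNonTransient(const MachineBasicBlock &MBB) {
  unsigned Count = 0;
  for (const MachineInstr &MI : MBB)
    if (!MI.isTransient())
      ++Count;
  return Count;
}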
1152
1153 /// Return the number of instructions inside the MI bundle, excluding the
1154 /// bundle header.
1155 ///
1156 /// This is the number of instructions that MachineBasicBlock::iterator
1157 /// skips, 0 for unbundled instructions.
1158 unsigned getBundleSize() const;
1159
1160 /// Return true if the MachineInstr reads the specified register.
1161 /// If TargetRegisterInfo is passed, then it also checks if there
1162 /// is a read of a super-register.
1163 /// This does not count partial redefines of virtual registers as reads:
1164 /// %reg1024:6 = OP.
1165 bool readsRegister(Register Reg,
1166 const TargetRegisterInfo *TRI = nullptr) const {
1167 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
1168 }
1169
1170 /// Return true if the MachineInstr reads the specified virtual register.
1171 /// Take into account that a partial define is a
1172 /// read-modify-write operation.
1173 bool readsVirtualRegister(Register Reg) const {
1174 return readsWritesVirtualRegister(Reg).first;
1175 }
1176
1177 /// Return a pair of bools (reads, writes) indicating if this instruction
1178 /// reads or writes Reg. This also considers partial defines.
1179 /// If Ops is not null, all operand indices for Reg are added.
1180 std::pair<bool,bool> readsWritesVirtualRegister(Register Reg,
1181 SmallVectorImpl<unsigned> *Ops = nullptr) const;
1182
1183 /// Return true if the MachineInstr kills the specified register.
1184 /// If TargetRegisterInfo is passed, then it also checks if there is
1185 /// a kill of a super-register.
1186 bool killsRegister(Register Reg,
1187 const TargetRegisterInfo *TRI = nullptr) const {
1188 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
1189 }
1190
1191 /// Return true if the MachineInstr fully defines the specified register.
1192 /// If TargetRegisterInfo is passed, then it also checks
1193 /// if there is a def of a super-register.
1194 /// NOTE: It's ignoring subreg indices on virtual registers.
1195 bool definesRegister(Register Reg,
1196 const TargetRegisterInfo *TRI = nullptr) const {
1197 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
1198 }
1199
1200 /// Return true if the MachineInstr modifies (fully define or partially
1201 /// define) the specified register.
1202 /// NOTE: It's ignoring subreg indices on virtual registers.
1203 bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const {
1204 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
1205 }
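A conservative sketch combining the read/modify queries above; names are assumptions. Passing a null TRI means super-register overlap is not considered, per the comments above.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
using namespace llvm;

// True if MI either reads Reg or fully/partially defines it.
static bool readsOrModifies(const MachineInstr &MI, Register Reg,
                            const TargetRegisterInfo *TRI) {
  return MI.readsRegister(Reg, TRI) || MI.modifiesRegister(Reg, TRI);
}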
1206
1207 /// Returns true if the register is dead in this machine instruction.
1208 /// If TargetRegisterInfo is passed, then it also checks
1209 /// if there is a dead def of a super-register.
1210 bool registerDefIsDead(Register Reg,
1211 const TargetRegisterInfo *TRI = nullptr) const {
1212 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
1213 }
1214
1215 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1216 /// the given register (not considering sub/super-registers).
1217 bool hasRegisterImplicitUseOperand(Register Reg) const;
1218
1219 /// Returns the operand index that is a use of the specific register or -1
1220 /// if it is not found. It further tightens the search criteria to a use
1221 /// that kills the register if isKill is true.
1222 int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
1223 const TargetRegisterInfo *TRI = nullptr) const;
1224
1225 /// Wrapper for findRegisterUseOperandIdx, it returns
1226 /// a pointer to the MachineOperand rather than an index.
1227 MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false,
1228 const TargetRegisterInfo *TRI = nullptr) {
1229 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
1230 return (Idx == -1) ? nullptr : &getOperand(Idx);
1231 }
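A sketch of the pointer-returning wrapper in use, clearing a kill marker if one exists; the helper is hypothetical.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Drop the kill flag on Reg's use operand, if this instruction kills it.
static void dropKillOf(MachineInstr &MI, Register Reg,
                       const TargetRegisterInfo *TRI) {
  if (MachineOperand *MO = MI.findRegisterUseOperand(Reg, /*isKill=*/true, TRI))
    MO->setIsKill(false);
}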
1232
1233 const MachineOperand *findRegisterUseOperand(
1234 Register Reg, bool isKill = false,
1235 const TargetRegisterInfo *TRI = nullptr) const {
1236 return const_cast<MachineInstr *>(this)->
1237 findRegisterUseOperand(Reg, isKill, TRI);
1238 }
1239
1240 /// Returns the operand index that is a def of the specified register or
1241 /// -1 if it is not found. If isDead is true, defs that are not dead are
1242 /// skipped. If Overlap is true, then it also looks for defs that merely
1243 /// overlap the specified register. If TargetRegisterInfo is non-null,
1244 /// then it also checks if there is a def of a super-register.
1245 /// This may also return a register mask operand when Overlap is true.
1246 int findRegisterDefOperandIdx(Register Reg,
1247 bool isDead = false, bool Overlap = false,
1248 const TargetRegisterInfo *TRI = nullptr) const;
1249
1250 /// Wrapper for findRegisterDefOperandIdx, it returns
1251 /// a pointer to the MachineOperand rather than an index.
1252 MachineOperand *
1253 findRegisterDefOperand(Register Reg, bool isDead = false,
1254 bool Overlap = false,
1255 const TargetRegisterInfo *TRI = nullptr) {
1256 int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
1257 return (Idx == -1) ? nullptr : &getOperand(Idx);
1258 }
1259
1260 const MachineOperand *
1261 findRegisterDefOperand(Register Reg, bool isDead = false,
1262 bool Overlap = false,
1263 const TargetRegisterInfo *TRI = nullptr) const {
1264 return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
1265 Reg, isDead, Overlap, TRI);
1266 }
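A matching sketch for the def-side wrapper, marking a def dead when present; the helper and its context are assumed.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// If MI defines Reg (exact def, no overlap search), flag that def as dead.
static void markDefDead(MachineInstr &MI, Register Reg,
                        const TargetRegisterInfo *TRI) {
  if (MachineOperand *Def = MI.findRegisterDefOperand(Reg, /*isDead=*/false,
                                                      /*Overlap=*/false, TRI))
    Def->setIsDead();
}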
1267
1268 /// Find the index of the first operand in the
1269 /// operand list that is used to represent the predicate. It returns -1 if
1270 /// none is found.
1271 int findFirstPredOperandIdx() const;
1272
1273 /// Find the index of the flag word operand that
1274 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1275 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1276 ///
1277 /// If GroupNo is not NULL, it will receive the number of the operand group
1278 /// containing OpIdx.
1279 ///
1280 /// The flag operand is an immediate that can be decoded with methods like
1281 /// InlineAsm::hasRegClassConstraint().
1282 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
1283
1284 /// Compute the static register class constraint for operand OpIdx.
1285 /// For normal instructions, this is derived from the MCInstrDesc.
1286 /// For inline assembly it is derived from the flag words.
1287 ///
1288 /// Returns NULL if the static register class constraint cannot be
1289 /// determined.
1290 const TargetRegisterClass*
1291 getRegClassConstraint(unsigned OpIdx,
1292 const TargetInstrInfo *TII,
1293 const TargetRegisterInfo *TRI) const;
1294
1295 /// Applies the constraints (def/use) implied by this MI on \p Reg to
1296 /// the given \p CurRC.
1297 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1298 /// instructions inside the bundle will be taken into account. In other words,
1299 /// this method accumulates all the constraints of the operand of this MI and
1300 /// the related bundle if MI is a bundle or inside a bundle.
1301 ///
1302 /// Returns the register class that satisfies both \p CurRC and the
1303 /// constraints set by MI. Returns NULL if such a register class does not
1304 /// exist.
1305 ///
1306 /// \pre CurRC must not be NULL.
1307 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1308 Register Reg, const TargetRegisterClass *CurRC,
1309 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1310 bool ExploreBundle = false) const;
1311
1312 /// Applies the constraints (def/use) implied by the \p OpIdx operand
1313 /// to the given \p CurRC.
1314 ///
1315 /// Returns the register class that satisfies both \p CurRC and the
1316 /// constraints set by \p OpIdx MI. Returns NULL if such a register class
1317 /// does not exist.
1318 ///
1319 /// \pre CurRC must not be NULL.
1320 /// \pre The operand at \p OpIdx must be a register.
1321 const TargetRegisterClass *
1322 getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
1323 const TargetInstrInfo *TII,
1324 const TargetRegisterInfo *TRI) const;
1325
1326 /// Add a tie between the register operands at DefIdx and UseIdx.
1327 /// The tie will cause the register allocator to ensure that the two
1328 /// operands are assigned the same physical register.
1329 ///
1330 /// Tied operands are managed automatically for explicit operands in the
1331 /// MCInstrDesc. This method is for exceptional cases like inline asm.
1332 void tieOperands(unsigned DefIdx, unsigned UseIdx);
1333
1334 /// Given the index of a tied register operand, find the
1335 /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
1336 /// index of the tied operand which must exist.
1337 unsigned findTiedOperandIdx(unsigned OpIdx) const;
1338
1339 /// Given the index of a register def operand,
1340 /// check if the register def is tied to a source operand, due to either
1341 /// two-address elimination or inline assembly constraints. Returns the
1342 /// first tied use operand index by reference if UseOpIdx is not null.
1343 bool isRegTiedToUseOperand(unsigned DefOpIdx,
1344 unsigned *UseOpIdx = nullptr) const {
1345 const MachineOperand &MO = getOperand(DefOpIdx);
1346 if (!MO.isReg() || !MO.isDef() || !MO.isTied())
1347 return false;
1348 if (UseOpIdx)
1349 *UseOpIdx = findTiedOperandIdx(DefOpIdx);
1350 return true;
1351 }
1352
1353 /// Return true if the use operand of the specified index is tied to a def
1354 /// operand. It also returns the def operand index by reference if DefOpIdx
1355 /// is not null.
1356 bool isRegTiedToDefOperand(unsigned UseOpIdx,
1357 unsigned *DefOpIdx = nullptr) const {
1358 const MachineOperand &MO = getOperand(UseOpIdx);
1359 if (!MO.isReg() || !MO.isUse() || !MO.isTied())
1360 return false;
1361 if (DefOpIdx)
1362 *DefOpIdx = findTiedOperandIdx(UseOpIdx);
1363 return true;
1364 }
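A small illustrative wrapper over the tied-operand query; not part of the class itself.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Return the def operand index a use is tied to, or -1 if the use is untied.
static int tiedDefIndex(const MachineInstr &MI, unsigned UseOpIdx) {
  unsigned DefOpIdx = 0;
  return MI.isRegTiedToDefOperand(UseOpIdx, &DefOpIdx) ? int(DefOpIdx) : -1;
}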
1365
1366 /// Clears kill flags on all operands.
1367 void clearKillInfo();
1368
1369 /// Replace all occurrences of FromReg with ToReg:SubIdx,
1370 /// properly composing subreg indices where necessary.
1371 void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx,
1372 const TargetRegisterInfo &RegInfo);
1373
1374 /// We have determined MI kills a register. Look for the
1375 /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
1376 /// add an implicit operand if it's not found. Returns true if the operand
1377 /// exists / is added.
1378 bool addRegisterKilled(Register IncomingReg,
1379 const TargetRegisterInfo *RegInfo,
1380 bool AddIfNotFound = false);
1381
1382 /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
1383 /// all aliasing registers.
1384 void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo);
1385
1386 /// We have determined MI defined a register without a use.
1387 /// Look for the operand that defines it and mark it as IsDead. If
1388 /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
1389 /// true if the operand exists / is added.
1390 bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo,
1391 bool AddIfNotFound = false);
1392
1393 /// Clear all dead flags on operands defining register @p Reg.
1394 void clearRegisterDeads(Register Reg);
1395
1396 /// Mark all subregister defs of register @p Reg with the undef flag.
1397 /// This function is used when we determined to have a subregister def in an
1398 /// otherwise undefined super register.
1399 void setRegisterDefReadUndef(Register Reg, bool IsUndef = true);
1400
1401 /// We have determined MI defines a register. Make sure there is an operand
1402 /// defining Reg.
1403 void addRegisterDefined(Register Reg,
1404 const TargetRegisterInfo *RegInfo = nullptr);
1405
1406 /// Mark every physreg used by this instruction as
1407 /// dead except those in the UsedRegs list.
1408 ///
1409 /// On instructions with register mask operands, also add implicit-def
1410 /// operands for all registers in UsedRegs.
1411 void setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
1412 const TargetRegisterInfo &TRI);
1413
1414 /// Return true if it is safe to move this instruction. If
1415 /// SawStore is set to true, it means that there is a store (or call) between
1416 /// the instruction's location and its intended destination.
1417 bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
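A minimal sketch of the common calling pattern, assuming no store or call has been seen on the intended path (hence SawStore starts false); the helper is illustrative.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// isSafeToMove also sets SawStore if MI itself may store.
static bool safeToHoist(const MachineInstr &MI, AliasAnalysis *AA) {
  bool SawStore = false;
  return MI.isSafeToMove(AA, SawStore);
}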
1418
1419 /// Returns true if this instruction's memory access aliases the memory
1420 /// access of Other.
1421 ///
1422 /// Assumes any physical registers used to compute addresses
1423 /// have the same value for both instructions. Returns false if neither
1424 /// instruction writes to memory.
1425 ///
1426 /// @param AA Optional alias analysis, used to compare memory operands.
1427 /// @param Other MachineInstr to check aliasing against.
1428 /// @param UseTBAA Whether to pass TBAA information to alias analysis.
1429 bool mayAlias(AliasAnalysis *AA, const MachineInstr &Other, bool UseTBAA) const;
1430
1431 /// Return true if this instruction may have an ordered
1432 /// or volatile memory reference, or if the information describing the memory
1433 /// reference is not available. Return false if it is known to have no
1434 /// ordered or volatile memory references.
1435 bool hasOrderedMemoryRef() const;
1436
1437 /// Return true if this load instruction never traps and points to a memory
1438 /// location whose value doesn't change during the execution of this function.
1439 ///
1440 /// Examples include loading a value from the constant pool or from the
1441 /// argument area of a function (if it does not change). If the instruction
1442 /// does multiple loads, this returns true only if all of the loads are
1443 /// dereferenceable and invariant.
1444 bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
1445
1446 /// If the specified instruction is a PHI that always merges together the
1447 /// same virtual register, return the register, otherwise return 0.
1448 unsigned isConstantValuePHI() const;
1449
1450 /// Return true if this instruction has side effects that are not modeled
1451 /// by mayLoad / mayStore, etc.
1452 /// For all instructions, the property is encoded in MCInstrDesc::Flags
1453 /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
1454 /// INLINEASM instruction, in which case the side effect property is encoded
1455 /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
1456 ///
1457 bool hasUnmodeledSideEffects() const;
1458
1459 /// Returns true if it is illegal to fold a load across this instruction.
1460 bool isLoadFoldBarrier() const;
1461
1462 /// Return true if all the defs of this instruction are dead.
1463 bool allDefsAreDead() const;
1464
1465 /// Return a valid size if the instruction is a spill instruction.
1466 Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
1467
1468 /// Return a valid size if the instruction is a folded spill instruction.
1469 Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
1470
1471 /// Return a valid size if the instruction is a restore instruction.
1472 Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
1473
1474 /// Return a valid size if the instruction is a folded restore instruction.
1475 Optional<unsigned>
1476 getFoldedRestoreSize(const TargetInstrInfo *TII) const;
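A hypothetical classifier built on the four size queries above, e.g. for spill statistics; nothing here is prescribed by the header.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

// True if MI spills or reloads a stack slot, directly or as a folded access.
static bool touchesSpillSlot(const MachineInstr &MI, const TargetInstrInfo *TII) {
  return MI.getSpillSize(TII) || MI.getFoldedSpillSize(TII) ||
         MI.getRestoreSize(TII) || MI.getFoldedRestoreSize(TII);
}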
1477
1478 /// Copy implicit register operands from the specified
1479 /// instruction to this instruction.
1480 void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
1481
1482 /// Debugging support
1483 /// @{
1484 /// Determine the generic type to be printed (if needed) on uses and defs.
1485 LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1486 const MachineRegisterInfo &MRI) const;
1487
1488 /// Return true when an instruction has a tied register that can't be determined
1489 /// by the instruction's descriptor. This is useful for MIR printing, to
1490 /// determine whether we need to print the ties or not.
1491 bool hasComplexRegisterTies() const;
1492
1493 /// Print this MI to \p OS.
1494 /// Don't print information that can be inferred from other instructions if
1495 /// \p IsStandalone is false. It is usually true when only a fragment of the
1496 /// function is printed.
1497 /// Only print the defs and the opcode if \p SkipOpers is true.
1498 /// Otherwise, also print the operands; and unless \p SkipDebugLoc is true,
1499 /// also print the debug loc, with a terminating newline.
1500 /// \p TII is used to print the opcode name. If it's not present, but the
1501 /// MI is in a function, the opcode will be printed using the function's TII.
1502 void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
1503 bool SkipDebugLoc = false, bool AddNewLine = true,
1504 const TargetInstrInfo *TII = nullptr) const;
1505 void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
1506 bool SkipOpers = false, bool SkipDebugLoc = false,
1507 bool AddNewLine = true,
1508 const TargetInstrInfo *TII = nullptr) const;
1509 void dump() const;
1510 /// @}
1511
1512 //===--------------------------------------------------------------------===//
1513 // Accessors used to build up machine instructions.
1514
1515 /// Add the specified operand to the instruction. If it is an implicit
1516 /// operand, it is added to the end of the operand list. If it is an
1517 /// explicit operand it is added at the end of the explicit operand list
1518 /// (before the first implicit operand).
1519 ///
1520 /// MF must be the machine function that was used to allocate this
1521 /// instruction.
1522 ///
1523 /// MachineInstrBuilder provides a more convenient interface for creating
1524 /// instructions and adding operands.
1525 void addOperand(MachineFunction &MF, const MachineOperand &Op);
1526
1527 /// Add an operand without providing an MF reference. This only works for
1528 /// instructions that are inserted in a basic block.
1529 ///
1530 /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
1531 /// preferred.
1532 void addOperand(const MachineOperand &Op);
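As the comments above suggest, MachineInstrBuilder is the usual way to build instructions rather than calling addOperand() directly; a sketch with an assumed descriptor and registers:

#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

// Build "Dst = Desc Src" at InsertPt; BuildMI adds the def, addReg adds the use.
static MachineInstr *buildTwoReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator InsertPt,
                                 const DebugLoc &DL, const MCInstrDesc &Desc,
                                 Register Dst, Register Src) {
  return BuildMI(MBB, InsertPt, DL, Desc, Dst).addReg(Src).getInstr();
}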
1533
1534 /// Replace the instruction descriptor (thus opcode) of
1535 /// the current instruction with a new one.
1536 void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
1537
1538 /// Replace the current debug location (source information) with a new one.
1539 /// Avoid using this; the constructor argument is preferable.
1540 void setDebugLoc(DebugLoc dl) {
1541 debugLoc = std::move(dl);
1542 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1543 }
1544
1545 /// Erase an operand from an instruction, leaving it with one
1546 /// fewer operand than it started with.
1547 void RemoveOperand(unsigned OpNo);
1548
1549 /// Clear this MachineInstr's memory reference descriptor list. This resets
1550 /// the memrefs to their most conservative state. This should be used only
1551 /// as a last resort since it greatly pessimizes our knowledge of the memory
1552 /// access performed by the instruction.
1553 void dropMemRefs(MachineFunction &MF);
1554
1555 /// Assign this MachineInstr's memory reference descriptor list.
1556 ///
1557 /// Unlike other methods, this *will* allocate them into a new array
1558 /// associated with the provided `MachineFunction`.
1559 void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
1560
1561 /// Add a MachineMemOperand to the machine instruction.
1562 /// This function should be used only occasionally. The setMemRefs function
1563 /// is the primary method for setting up a MachineInstr's MemRefs list.
1564 void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
1565
1566 /// Clone another MachineInstr's memory reference descriptor list and replace
1567 /// ours with it.
1568 ///
1569 /// Note that `*this` may be the incoming MI!
1570 ///
1571 /// Prefer this API whenever possible as it can avoid allocations in common
1572 /// cases.
1573 void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
1574
1575 /// Clone the merge of multiple MachineInstrs' memory reference descriptors
1576 /// list and replace ours with it.
1577 ///
1578 /// Note that `*this` may be one of the incoming MIs!
1579 ///
1580 /// Prefer this API whenever possible as it can avoid allocations in common
1581 /// cases.
1582 void cloneMergedMemRefs(MachineFunction &MF,
1583 ArrayRef<const MachineInstr *> MIs);
1584
1585 /// Set a symbol that will be emitted just prior to the instruction itself.
1586 ///
1587 /// Setting this to a null pointer will remove any such symbol.
1588 ///
1589 /// FIXME: This is not fully implemented yet.
1590 void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
1591
1592 /// Set a symbol that will be emitted just after the instruction itself.
1593 ///
1594 /// Setting this to a null pointer will remove any such symbol.
1595 ///
1596 /// FIXME: This is not fully implemented yet.
1597 void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
1598
1599 /// Clone another MachineInstr's pre- and post- instruction symbols and
1600 /// replace ours with it.
1601 void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI);
1602
1603 /// Return the MIFlags which represent both MachineInstrs. This
1604 /// should be used when merging two MachineInstrs into one. This routine does
1605 /// not modify the MIFlags of this MachineInstr.
1606 uint16_t mergeFlagsWith(const MachineInstr& Other) const;
1607
1608 static uint16_t copyFlagsFromInstruction(const Instruction &I);
1609
1610 /// Copy all flags to MachineInstr MIFlags.
1611 void copyIRFlags(const Instruction &I);
1612
1613 /// Break any tie involving OpIdx.
1614 void untieRegOperand(unsigned OpIdx) {
1615 MachineOperand &MO = getOperand(OpIdx);
1616 if (MO.isReg() && MO.isTied()) {
1617 getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
1618 MO.TiedTo = 0;
1619 }
1620 }
1621
1622 /// Add all implicit def and use operands to this instruction.
1623 void addImplicitDefUseOperands(MachineFunction &MF);
1624
1625 /// Scan instructions following MI and collect any matching DBG_VALUEs.
1626 void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
1627
1628 /// Find all DBG_VALUEs that point to the register def in this instruction
1629 /// and point them to \p Reg instead.
1630 void changeDebugValuesDefReg(Register Reg);
1631
1632 /// Returns the Intrinsic::ID for this instruction.
1633 /// \pre Must have an intrinsic ID operand.
1634 unsigned getIntrinsicID() const {
1635 return getOperand(getNumExplicitDefs()).getIntrinsicID();
1636 }
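A sketch of reading the ID on a GlobalISel G_INTRINSIC-style instruction; only meaningful when the precondition above holds.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

// The raw operand value is the Intrinsic::ID enumerator.
static Intrinsic::ID intrinsicOf(const MachineInstr &MI) {
  return static_cast<Intrinsic::ID>(MI.getIntrinsicID());
}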
1637
1638private:
1639 /// If this instruction is embedded into a MachineFunction, return the
1640 /// MachineRegisterInfo object for the current function, otherwise
1641 /// return null.
1642 MachineRegisterInfo *getRegInfo();
1643
1644 /// Unlink all of the register operands in this instruction from their
1645 /// respective use lists. This requires that the operands already be on their
1646 /// use lists.
1647 void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
1648
1649 /// Add all of the register operands in this instruction to their
1650 /// respective use lists. This requires that the operands not be on their
1651 /// use lists yet.
1652 void AddRegOperandsToUseLists(MachineRegisterInfo&);
1653
1654 /// Slow path for hasProperty when we're dealing with a bundle.
1655 bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
1656
1657 /// Implements the logic of getRegClassConstraintEffectForVReg for
1658 /// this MI and the given operand index \p OpIdx.
1659 /// If the related operand does not constrain Reg, this returns CurRC.
1660 const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
1661 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1662 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
1663};
1664
1665/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
1666/// instruction rather than by pointer value.
1667/// The hashing and equality testing functions ignore definitions so this is
1668/// useful for CSE, etc.
1669struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
1670 static inline MachineInstr *getEmptyKey() {
1671 return nullptr;
1672 }
1673
1674 static inline MachineInstr *getTombstoneKey() {
1675 return reinterpret_cast<MachineInstr*>(-1);
1676 }
1677
1678 static unsigned getHashValue(const MachineInstr* const &MI);
1679
1680 static bool isEqual(const MachineInstr* const &LHS,
1681 const MachineInstr* const &RHS) {
1682 if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
1683 LHS == getEmptyKey() || LHS == getTombstoneKey())
1684 return LHS == RHS;
1685 return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
1686 }
1687};
1688
1689//===----------------------------------------------------------------------===//
1690// Debugging Support
1691
1692inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
1693 MI.print(OS);
1694 return OS;
1695}
1696
1697} // end namespace llvm
1698
1699#endif // LLVM_CODEGEN_MACHINEINSTR_H