Bug Summary

File: include/llvm/CodeGen/MachineInstr.h
Warning: line 153, column 17
Called C++ object pointer is null
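
The path reported below (steps 1-5 in the annotated source) boils down to this: the analyzer treats the pointer returned by Allocator.Allocate(...) inside MachineInstr::ExtraInfo::create as possibly null, the placement new forwards that null into 'Result', and the member call Result->getTrailingObjects<MachineMemOperand *>() at MachineInstr.h:153 then goes through a null object pointer. The shape of the diagnosis, reduced to a minimal sketch (illustrative only, not LLVM code; Node, create, and the use of std::malloc are invented for the example):

    #include <cstdlib>
    #include <new>

    struct Node {
      int Value = 0;
      void reset() { Value = 0; }            // member call target
    };

    Node *create() {
      void *Mem = std::malloc(sizeof(Node)); // analyzer: may return null
      Node *N = new (Mem) Node();            // placement new forwards the null pointer
      N->reset();                            // "Called C++ object pointer is null"
      return N;
    }

Whether the LLVM report itself is actionable depends on whether that allocation can actually fail; the sketch only shows the shape of the diagnosis.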

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineFunction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn362543/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn362543=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-06-05-060531-1271-1 -x c++ /build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/MachineFunction.cpp -faddrsig

/build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/MachineFunction.cpp

1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/CodeGen/MachineFunction.h"
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallString.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/Analysis/ConstantFolding.h"
25#include "llvm/Analysis/EHPersonalities.h"
26#include "llvm/CodeGen/MachineBasicBlock.h"
27#include "llvm/CodeGen/MachineConstantPool.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineInstr.h"
30#include "llvm/CodeGen/MachineJumpTableInfo.h"
31#include "llvm/CodeGen/MachineMemOperand.h"
32#include "llvm/CodeGen/MachineModuleInfo.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/CodeGen/PseudoSourceValue.h"
35#include "llvm/CodeGen/TargetFrameLowering.h"
36#include "llvm/CodeGen/TargetLowering.h"
37#include "llvm/CodeGen/TargetRegisterInfo.h"
38#include "llvm/CodeGen/TargetSubtargetInfo.h"
39#include "llvm/CodeGen/WasmEHFuncInfo.h"
40#include "llvm/CodeGen/WinEHFuncInfo.h"
41#include "llvm/Config/llvm-config.h"
42#include "llvm/IR/Attributes.h"
43#include "llvm/IR/BasicBlock.h"
44#include "llvm/IR/Constant.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/DebugInfoMetadata.h"
47#include "llvm/IR/DerivedTypes.h"
48#include "llvm/IR/Function.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/Instruction.h"
51#include "llvm/IR/Instructions.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/ModuleSlotTracker.h"
55#include "llvm/IR/Value.h"
56#include "llvm/MC/MCContext.h"
57#include "llvm/MC/MCSymbol.h"
58#include "llvm/MC/SectionKind.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/CommandLine.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/DOTGraphTraits.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/ErrorHandling.h"
65#include "llvm/Support/GraphWriter.h"
66#include "llvm/Support/raw_ostream.h"
67#include "llvm/Target/TargetMachine.h"
68#include <algorithm>
69#include <cassert>
70#include <cstddef>
71#include <cstdint>
72#include <iterator>
73#include <string>
74#include <utility>
75#include <vector>
76
77using namespace llvm;
78
79#define DEBUG_TYPE "codegen"
80
81static cl::opt<unsigned>
82AlignAllFunctions("align-all-functions",
83 cl::desc("Force the alignment of all functions."),
84 cl::init(0), cl::Hidden);
85
86static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
87 using P = MachineFunctionProperties::Property;
88
89 switch(Prop) {
90 case P::FailedISel: return "FailedISel";
91 case P::IsSSA: return "IsSSA";
92 case P::Legalized: return "Legalized";
93 case P::NoPHIs: return "NoPHIs";
94 case P::NoVRegs: return "NoVRegs";
95 case P::RegBankSelected: return "RegBankSelected";
96 case P::Selected: return "Selected";
97 case P::TracksLiveness: return "TracksLiveness";
98 }
99 llvm_unreachable("Invalid machine function property");
100}
101
102// Pin the vtable to this file.
103void MachineFunction::Delegate::anchor() {}
104
105void MachineFunctionProperties::print(raw_ostream &OS) const {
106 const char *Separator = "";
107 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
108 if (!Properties[I])
109 continue;
110 OS << Separator << getPropertyName(static_cast<Property>(I));
111 Separator = ", ";
112 }
113}
114
115//===----------------------------------------------------------------------===//
116// MachineFunction implementation
117//===----------------------------------------------------------------------===//
118
119// Out-of-line virtual method.
120MachineFunctionInfo::~MachineFunctionInfo() = default;
121
122void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
123 MBB->getParent()->DeleteMachineBasicBlock(MBB);
124}
125
126static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
127 const Function &F) {
128 if (F.hasFnAttribute(Attribute::StackAlignment))
129 return F.getFnStackAlignment();
130 return STI->getFrameLowering()->getStackAlignment();
131}
132
133MachineFunction::MachineFunction(const Function &F,
134 const LLVMTargetMachine &Target,
135 const TargetSubtargetInfo &STI,
136 unsigned FunctionNum, MachineModuleInfo &mmi)
137 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
138 FunctionNumber = FunctionNum;
139 init();
140}
141
142void MachineFunction::handleInsertion(MachineInstr &MI) {
143 if (TheDelegate)
144 TheDelegate->MF_HandleInsertion(MI);
145}
146
147void MachineFunction::handleRemoval(MachineInstr &MI) {
148 if (TheDelegate)
149 TheDelegate->MF_HandleRemoval(MI);
150}
151
152void MachineFunction::init() {
153 // Assume the function starts in SSA form with correct liveness.
154 Properties.set(MachineFunctionProperties::Property::IsSSA);
155 Properties.set(MachineFunctionProperties::Property::TracksLiveness);
156 if (STI->getRegisterInfo())
157 RegInfo = new (Allocator) MachineRegisterInfo(this);
158 else
159 RegInfo = nullptr;
160
161 MFInfo = nullptr;
162 // We can realign the stack if the target supports it and the user hasn't
163 // explicitly asked us not to.
164 bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
165 !F.hasFnAttribute("no-realign-stack");
166 FrameInfo = new (Allocator) MachineFrameInfo(
167 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
168 /*ForceRealign=*/CanRealignSP &&
169 F.hasFnAttribute(Attribute::StackAlignment));
170
171 if (F.hasFnAttribute(Attribute::StackAlignment))
172 FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
173
174 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
175 Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
176
177 // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
178 // FIXME: Use Function::hasOptSize().
179 if (!F.hasFnAttribute(Attribute::OptimizeForSize))
180 Alignment = std::max(Alignment,
181 STI->getTargetLowering()->getPrefFunctionAlignment());
182
183 if (AlignAllFunctions)
184 Alignment = AlignAllFunctions;
185
186 JumpTableInfo = nullptr;
187
188 if (isFuncletEHPersonality(classifyEHPersonality(
189 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
190 WinEHInfo = new (Allocator) WinEHFuncInfo();
191 }
192
193 if (isScopedEHPersonality(classifyEHPersonality(
194 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
195 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
196 }
197
198 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
199 "Can't create a MachineFunction using a Module with a "
200 "Target-incompatible DataLayout attached\n");
201
202 PSVManager =
203 llvm::make_unique<PseudoSourceValueManager>(*(getSubtarget().
204 getInstrInfo()));
205}
206
207MachineFunction::~MachineFunction() {
208 clear();
209}
210
211void MachineFunction::clear() {
212 Properties.reset();
213 // Don't call destructors on MachineInstr and MachineOperand. All of their
214 // memory comes from the BumpPtrAllocator which is about to be purged.
215 //
216 // Do call MachineBasicBlock destructors, it contains std::vectors.
217 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
218 I->Insts.clearAndLeakNodesUnsafely();
219 MBBNumbering.clear();
220
221 InstructionRecycler.clear(Allocator);
222 OperandRecycler.clear(Allocator);
223 BasicBlockRecycler.clear(Allocator);
224 CodeViewAnnotations.clear();
225 VariableDbgInfos.clear();
226 if (RegInfo) {
227 RegInfo->~MachineRegisterInfo();
228 Allocator.Deallocate(RegInfo);
229 }
230 if (MFInfo) {
231 MFInfo->~MachineFunctionInfo();
232 Allocator.Deallocate(MFInfo);
233 }
234
235 FrameInfo->~MachineFrameInfo();
236 Allocator.Deallocate(FrameInfo);
237
238 ConstantPool->~MachineConstantPool();
239 Allocator.Deallocate(ConstantPool);
240
241 if (JumpTableInfo) {
242 JumpTableInfo->~MachineJumpTableInfo();
243 Allocator.Deallocate(JumpTableInfo);
244 }
245
246 if (WinEHInfo) {
247 WinEHInfo->~WinEHFuncInfo();
248 Allocator.Deallocate(WinEHInfo);
249 }
250
251 if (WasmEHInfo) {
252 WasmEHInfo->~WasmEHFuncInfo();
253 Allocator.Deallocate(WasmEHInfo);
254 }
255}
256
257const DataLayout &MachineFunction::getDataLayout() const {
258 return F.getParent()->getDataLayout();
259}
260
261/// Get the JumpTableInfo for this function.
262/// If it does not already exist, allocate one.
263MachineJumpTableInfo *MachineFunction::
264getOrCreateJumpTableInfo(unsigned EntryKind) {
265 if (JumpTableInfo) return JumpTableInfo;
266
267 JumpTableInfo = new (Allocator)
268 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
269 return JumpTableInfo;
270}
271
272/// Should we be emitting segmented stack stuff for the function
273bool MachineFunction::shouldSplitStack() const {
274 return getFunction().hasFnAttribute("split-stack");
275}
276
277LLVM_NODISCARD unsigned
278MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
279 FrameInstructions.push_back(Inst);
280 return FrameInstructions.size() - 1;
281}
282
283/// This discards all of the MachineBasicBlock numbers and recomputes them.
284/// This guarantees that the MBB numbers are sequential, dense, and match the
285/// ordering of the blocks within the function. If a specific MachineBasicBlock
286/// is specified, only that block and those after it are renumbered.
287void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
288 if (empty()) { MBBNumbering.clear(); return; }
289 MachineFunction::iterator MBBI, E = end();
290 if (MBB == nullptr)
291 MBBI = begin();
292 else
293 MBBI = MBB->getIterator();
294
295 // Figure out the block number this should have.
296 unsigned BlockNo = 0;
297 if (MBBI != begin())
298 BlockNo = std::prev(MBBI)->getNumber() + 1;
299
300 for (; MBBI != E; ++MBBI, ++BlockNo) {
301 if (MBBI->getNumber() != (int)BlockNo) {
302 // Remove use of the old number.
303 if (MBBI->getNumber() != -1) {
304 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
305 "MBB number mismatch!");
306 MBBNumbering[MBBI->getNumber()] = nullptr;
307 }
308
309 // If BlockNo is already taken, set that block's number to -1.
310 if (MBBNumbering[BlockNo])
311 MBBNumbering[BlockNo]->setNumber(-1);
312
313 MBBNumbering[BlockNo] = &*MBBI;
314 MBBI->setNumber(BlockNo);
315 }
316 }
317
318 // Okay, all the blocks are renumbered. If we have compactified the block
319 // numbering, shrink MBBNumbering now.
320 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
321 MBBNumbering.resize(BlockNo);
322}
323
324/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
325MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
326 const DebugLoc &DL,
327 bool NoImp) {
328 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
329 MachineInstr(*this, MCID, DL, NoImp);
330}
331
332/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
333/// identical in all ways except the instruction has no parent, prev, or next.
334MachineInstr *
335MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
336 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
337 MachineInstr(*this, *Orig);
338}
339
340MachineInstr &MachineFunction::CloneMachineInstrBundle(MachineBasicBlock &MBB,
341 MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) {
342 MachineInstr *FirstClone = nullptr;
343 MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
344 while (true) {
345 MachineInstr *Cloned = CloneMachineInstr(&*I);
346 MBB.insert(InsertBefore, Cloned);
347 if (FirstClone == nullptr) {
348 FirstClone = Cloned;
349 } else {
350 Cloned->bundleWithPred();
351 }
352
353 if (!I->isBundledWithSucc())
354 break;
355 ++I;
356 }
357 return *FirstClone;
358}
359
360/// Delete the given MachineInstr.
361///
362/// This function also serves as the MachineInstr destructor - the real
363/// ~MachineInstr() destructor must be empty.
364void
365MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
366 // Strip it for parts. The operand array and the MI object itself are
367 // independently recyclable.
368 if (MI->Operands)
369 deallocateOperandArray(MI->CapOperands, MI->Operands);
370 // Don't call ~MachineInstr() which must be trivial anyway because
371 // ~MachineFunction drops whole lists of MachineInstrs without calling their
372 // destructors.
373 InstructionRecycler.Deallocate(Allocator, MI);
374}
375
376/// Allocate a new MachineBasicBlock. Use this instead of
377/// `new MachineBasicBlock'.
378MachineBasicBlock *
379MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
380 return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
381 MachineBasicBlock(*this, bb);
382}
383
384/// Delete the given MachineBasicBlock.
385void
386MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
387 assert(MBB->getParent() == this && "MBB parent mismatch!");
388 MBB->~MachineBasicBlock();
389 BasicBlockRecycler.Deallocate(Allocator, MBB);
390}
391
392MachineMemOperand *MachineFunction::getMachineMemOperand(
393 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
394 unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
395 SyncScope::ID SSID, AtomicOrdering Ordering,
396 AtomicOrdering FailureOrdering) {
397 return new (Allocator)
398 MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
399 SSID, Ordering, FailureOrdering);
400}
401
402MachineMemOperand *
403MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
404 int64_t Offset, uint64_t Size) {
405 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
406
407 // If there is no pointer value, the offset isn't tracked so we need to adjust
408 // the base alignment.
409 unsigned Align = PtrInfo.V.isNull()
410 ? MinAlign(MMO->getBaseAlignment(), Offset)
411 : MMO->getBaseAlignment();
412
413 return new (Allocator)
414 MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
415 Align, AAMDNodes(), nullptr, MMO->getSyncScopeID(),
416 MMO->getOrdering(), MMO->getFailureOrdering());
417}
418
419MachineMemOperand *
420MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
421 const AAMDNodes &AAInfo) {
422 MachinePointerInfo MPI = MMO->getValue() ?
423 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
424 MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
425
426 return new (Allocator)
427 MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
428 MMO->getBaseAlignment(), AAInfo,
429 MMO->getRanges(), MMO->getSyncScopeID(),
430 MMO->getOrdering(), MMO->getFailureOrdering());
431}
432
433MachineInstr::ExtraInfo *
434MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
435 MCSymbol *PreInstrSymbol,
436 MCSymbol *PostInstrSymbol) {
437 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
  [Step 1] Calling 'ExtraInfo::create'
438 PostInstrSymbol);
439}
440
441const char *MachineFunction::createExternalSymbolName(StringRef Name) {
442 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
443 llvm::copy(Name, Dest);
444 Dest[Name.size()] = 0;
445 return Dest;
446}
447
448uint32_t *MachineFunction::allocateRegMask() {
449 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
450 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
451 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
452 memset(Mask, 0, Size * sizeof(Mask[0]));
453 return Mask;
454}
455
456#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
457LLVM_DUMP_METHOD void MachineFunction::dump() const {
458 print(dbgs());
459}
460#endif
461
462StringRef MachineFunction::getName() const {
463 return getFunction().getName();
464}
465
466void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
467 OS << "# Machine code for function " << getName() << ": ";
468 getProperties().print(OS);
469 OS << '\n';
470
471 // Print Frame Information
472 FrameInfo->print(*this, OS);
473
474 // Print JumpTable Information
475 if (JumpTableInfo)
476 JumpTableInfo->print(OS);
477
478 // Print Constant Pool
479 ConstantPool->print(OS);
480
481 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
482
483 if (RegInfo && !RegInfo->livein_empty()) {
484 OS << "Function Live Ins: ";
485 for (MachineRegisterInfo::livein_iterator
486 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
487 OS << printReg(I->first, TRI);
488 if (I->second)
489 OS << " in " << printReg(I->second, TRI);
490 if (std::next(I) != E)
491 OS << ", ";
492 }
493 OS << '\n';
494 }
495
496 ModuleSlotTracker MST(getFunction().getParent());
497 MST.incorporateFunction(getFunction());
498 for (const auto &BB : *this) {
499 OS << '\n';
500 // If we print the whole function, print it at its most verbose level.
501 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
502 }
503
504 OS << "\n# End machine code for function " << getName() << ".\n\n";
505}
506
507namespace llvm {
508
509 template<>
510 struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
511 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
512
513 static std::string getGraphName(const MachineFunction *F) {
514 return ("CFG for '" + F->getName() + "' function").str();
515 }
516
517 std::string getNodeLabel(const MachineBasicBlock *Node,
518 const MachineFunction *Graph) {
519 std::string OutStr;
520 {
521 raw_string_ostream OSS(OutStr);
522
523 if (isSimple()) {
524 OSS << printMBBReference(*Node);
525 if (const BasicBlock *BB = Node->getBasicBlock())
526 OSS << ": " << BB->getName();
527 } else
528 Node->print(OSS);
529 }
530
531 if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
532
533 // Process string output to make it nicer...
534 for (unsigned i = 0; i != OutStr.length(); ++i)
535 if (OutStr[i] == '\n') { // Left justify
536 OutStr[i] = '\\';
537 OutStr.insert(OutStr.begin()+i+1, 'l');
538 }
539 return OutStr;
540 }
541 };
542
543} // end namespace llvm
544
545void MachineFunction::viewCFG() const
546{
547#ifndef NDEBUG
548 ViewGraph(this, "mf" + getName());
549#else
550 errs() << "MachineFunction::viewCFG is only available in debug builds on "
551 << "systems with Graphviz or gv!\n";
552#endif // NDEBUG
553}
554
555void MachineFunction::viewCFGOnly() const
556{
557#ifndef NDEBUG
558 ViewGraph(this, "mf" + getName(), true);
559#else
560 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
561 << "systems with Graphviz or gv!\n";
562#endif // NDEBUG
563}
564
565/// Add the specified physical register as a live-in value and
566/// create a corresponding virtual register for it.
567unsigned MachineFunction::addLiveIn(unsigned PReg,
568 const TargetRegisterClass *RC) {
569 MachineRegisterInfo &MRI = getRegInfo();
570 unsigned VReg = MRI.getLiveInVirtReg(PReg);
571 if (VReg) {
572 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
573 (void)VRegRC;
574 // A physical register can be added several times.
575 // Between two calls, the register class of the related virtual register
576 // may have been constrained to match some operation constraints.
577 // In that case, check that the current register class includes the
578 // physical register and is a sub class of the specified RC.
579 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
580 RC->hasSubClassEq(VRegRC))) &&
581 "Register class mismatch!");
582 return VReg;
583 }
584 VReg = MRI.createVirtualRegister(RC);
585 MRI.addLiveIn(PReg, VReg);
586 return VReg;
587}
588
589/// Return the MCSymbol for the specified non-empty jump table.
590/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
591/// normal 'L' label is returned.
592MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
593 bool isLinkerPrivate) const {
594 const DataLayout &DL = getDataLayout();
595 assert(JumpTableInfo && "No jump tables");
596 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
597
598 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
599 : DL.getPrivateGlobalPrefix();
600 SmallString<60> Name;
601 raw_svector_ostream(Name)
602 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
603 return Ctx.getOrCreateSymbol(Name);
604}
605
606/// Return a function-local symbol to represent the PIC base.
607MCSymbol *MachineFunction::getPICBaseSymbol() const {
608 const DataLayout &DL = getDataLayout();
609 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
610 Twine(getFunctionNumber()) + "$pb");
611}
612
613/// \name Exception Handling
614/// \{
615
616LandingPadInfo &
617MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
618 unsigned N = LandingPads.size();
619 for (unsigned i = 0; i < N; ++i) {
620 LandingPadInfo &LP = LandingPads[i];
621 if (LP.LandingPadBlock == LandingPad)
622 return LP;
623 }
624
625 LandingPads.push_back(LandingPadInfo(LandingPad));
626 return LandingPads[N];
627}
628
629void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
630 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
631 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
632 LP.BeginLabels.push_back(BeginLabel);
633 LP.EndLabels.push_back(EndLabel);
634}
635
636MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
637 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
638 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
639 LP.LandingPadLabel = LandingPadLabel;
640
641 const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
642 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
643 if (const auto *PF =
644 dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
645 getMMI().addPersonality(PF);
646
647 if (LPI->isCleanup())
648 addCleanup(LandingPad);
649
650 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
651 // correct, but we need to do it this way because of how the DWARF EH
652 // emitter processes the clauses.
653 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
654 Value *Val = LPI->getClause(I - 1);
655 if (LPI->isCatch(I - 1)) {
656 addCatchTypeInfo(LandingPad,
657 dyn_cast<GlobalValue>(Val->stripPointerCasts()));
658 } else {
659 // Add filters in a list.
660 auto *CVal = cast<Constant>(Val);
661 SmallVector<const GlobalValue *, 4> FilterList;
662 for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
663 II != IE; ++II)
664 FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
665
666 addFilterTypeInfo(LandingPad, FilterList);
667 }
668 }
669
670 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
671 for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
672 Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
673 addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
674 }
675
676 } else {
677 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
678 }
679
680 return LandingPadLabel;
681}
682
683void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
684 ArrayRef<const GlobalValue *> TyInfo) {
685 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
686 for (unsigned N = TyInfo.size(); N; --N)
687 LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
688}
689
690void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
691 ArrayRef<const GlobalValue *> TyInfo) {
692 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
693 std::vector<unsigned> IdsInFilter(TyInfo.size());
694 for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
695 IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
696 LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
697}
698
699void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
700 bool TidyIfNoBeginLabels) {
701 for (unsigned i = 0; i != LandingPads.size(); ) {
702 LandingPadInfo &LandingPad = LandingPads[i];
703 if (LandingPad.LandingPadLabel &&
704 !LandingPad.LandingPadLabel->isDefined() &&
705 (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
706 LandingPad.LandingPadLabel = nullptr;
707
708 // Special case: we *should* emit LPs with null LP MBB. This indicates
709 // "nounwind" case.
710 if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
711 LandingPads.erase(LandingPads.begin() + i);
712 continue;
713 }
714
715 if (TidyIfNoBeginLabels) {
716 for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
717 MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
718 MCSymbol *EndLabel = LandingPad.EndLabels[j];
719 if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
720 (EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
721 continue;
722
723 LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
724 LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
725 --j;
726 --e;
727 }
728
729 // Remove landing pads with no try-ranges.
730 if (LandingPads[i].BeginLabels.empty()) {
731 LandingPads.erase(LandingPads.begin() + i);
732 continue;
733 }
734 }
735
736 // If there is no landing pad, ensure that the list of typeids is empty.
737 // If the only typeid is a cleanup, this is the same as having no typeids.
738 if (!LandingPad.LandingPadBlock ||
739 (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
740 LandingPad.TypeIds.clear();
741 ++i;
742 }
743}
744
745void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
746 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
747 LP.TypeIds.push_back(0);
748}
749
750void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
751 const Function *Filter,
752 const BlockAddress *RecoverBA) {
753 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
754 SEHHandler Handler;
755 Handler.FilterOrFinally = Filter;
756 Handler.RecoverBA = RecoverBA;
757 LP.SEHHandlers.push_back(Handler);
758}
759
760void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
761 const Function *Cleanup) {
762 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
763 SEHHandler Handler;
764 Handler.FilterOrFinally = Cleanup;
765 Handler.RecoverBA = nullptr;
766 LP.SEHHandlers.push_back(Handler);
767}
768
769void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
770 ArrayRef<unsigned> Sites) {
771 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
772}
773
774unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
775 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
776 if (TypeInfos[i] == TI) return i + 1;
777
778 TypeInfos.push_back(TI);
779 return TypeInfos.size();
780}
781
782int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
783 // If the new filter coincides with the tail of an existing filter, then
784 // re-use the existing filter. Folding filters more than this requires
785 // re-ordering filters and/or their elements - probably not worth it.
786 for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
787 E = FilterEnds.end(); I != E; ++I) {
788 unsigned i = *I, j = TyIds.size();
789
790 while (i && j)
791 if (FilterIds[--i] != TyIds[--j])
792 goto try_next;
793
794 if (!j)
795 // The new filter coincides with range [i, end) of the existing filter.
796 return -(1 + i);
797
798try_next:;
799 }
800
801 // Add the new filter.
802 int FilterID = -(1 + FilterIds.size());
803 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
804 FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
805 FilterEnds.push_back(FilterIds.size());
806 FilterIds.push_back(0); // terminator
807 return FilterID;
808}
809
810void MachineFunction::addCodeViewHeapAllocSite(MachineInstr *I, MDNode *MD) {
811 MCSymbol *BeginLabel = Ctx.createTempSymbol("heapallocsite", true);
812 MCSymbol *EndLabel = Ctx.createTempSymbol("heapallocsite", true);
813 I->setPreInstrSymbol(*this, BeginLabel);
814 I->setPostInstrSymbol(*this, EndLabel);
815
816 DIType *DI = dyn_cast<DIType>(MD);
817 CodeViewHeapAllocSites.push_back(std::make_tuple(BeginLabel, EndLabel, DI));
818}
819
820/// \}
821
822//===----------------------------------------------------------------------===//
823// MachineJumpTableInfo implementation
824//===----------------------------------------------------------------------===//
825
826/// Return the size of each entry in the jump table.
827unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
828 // The size of a jump table entry is 4 bytes unless the entry is just the
829 // address of a block, in which case it is the pointer size.
830 switch (getEntryKind()) {
831 case MachineJumpTableInfo::EK_BlockAddress:
832 return TD.getPointerSize();
833 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
834 return 8;
835 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
836 case MachineJumpTableInfo::EK_LabelDifference32:
837 case MachineJumpTableInfo::EK_Custom32:
838 return 4;
839 case MachineJumpTableInfo::EK_Inline:
840 return 0;
841 }
842 llvm_unreachable("Unknown jump table encoding!");
843}
844
845/// Return the alignment of each entry in the jump table.
846unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
847 // The alignment of a jump table entry is the alignment of int32 unless the
848 // entry is just the address of a block, in which case it is the pointer
849 // alignment.
850 switch (getEntryKind()) {
851 case MachineJumpTableInfo::EK_BlockAddress:
852 return TD.getPointerABIAlignment(0);
853 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
854 return TD.getABIIntegerTypeAlignment(64);
855 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
856 case MachineJumpTableInfo::EK_LabelDifference32:
857 case MachineJumpTableInfo::EK_Custom32:
858 return TD.getABIIntegerTypeAlignment(32);
859 case MachineJumpTableInfo::EK_Inline:
860 return 1;
861 }
862 llvm_unreachable("Unknown jump table encoding!");
863}
864
865/// Create a new jump table entry in the jump table info.
866unsigned MachineJumpTableInfo::createJumpTableIndex(
867 const std::vector<MachineBasicBlock*> &DestBBs) {
868 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
869 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
870 return JumpTables.size()-1;
871}
872
873/// If Old is the target of any jump tables, update the jump tables to branch
874/// to New instead.
875bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
876 MachineBasicBlock *New) {
877 assert(Old != New && "Not making a change?");
878 bool MadeChange = false;
879 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
880 ReplaceMBBInJumpTable(i, Old, New);
881 return MadeChange;
882}
883
884/// If Old is a target of the jump tables, update the jump table to branch to
885/// New instead.
886bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
887 MachineBasicBlock *Old,
888 MachineBasicBlock *New) {
889 assert(Old != New && "Not making a change?");
890 bool MadeChange = false;
891 MachineJumpTableEntry &JTE = JumpTables[Idx];
892 for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
893 if (JTE.MBBs[j] == Old) {
894 JTE.MBBs[j] = New;
895 MadeChange = true;
896 }
897 return MadeChange;
898}
899
900void MachineJumpTableInfo::print(raw_ostream &OS) const {
901 if (JumpTables.empty()) return;
902
903 OS << "Jump Tables:\n";
904
905 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
906 OS << printJumpTableEntryReference(i) << ": ";
907 for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
908 OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
909 }
910
911 OS << '\n';
912}
913
914#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
915LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
916#endif
917
918Printable llvm::printJumpTableEntryReference(unsigned Idx) {
919 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
920}
921
922//===----------------------------------------------------------------------===//
923// MachineConstantPool implementation
924//===----------------------------------------------------------------------===//
925
926void MachineConstantPoolValue::anchor() {}
927
928Type *MachineConstantPoolEntry::getType() const {
929 if (isMachineConstantPoolEntry())
930 return Val.MachineCPVal->getType();
931 return Val.ConstVal->getType();
932}
933
934bool MachineConstantPoolEntry::needsRelocation() const {
935 if (isMachineConstantPoolEntry())
936 return true;
937 return Val.ConstVal->needsRelocation();
938}
939
940SectionKind
941MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
942 if (needsRelocation())
943 return SectionKind::getReadOnlyWithRel();
944 switch (DL->getTypeAllocSize(getType())) {
945 case 4:
946 return SectionKind::getMergeableConst4();
947 case 8:
948 return SectionKind::getMergeableConst8();
949 case 16:
950 return SectionKind::getMergeableConst16();
951 case 32:
952 return SectionKind::getMergeableConst32();
953 default:
954 return SectionKind::getReadOnly();
955 }
956}
957
958MachineConstantPool::~MachineConstantPool() {
959 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
960 // so keep track of which we've deleted to avoid double deletions.
961 DenseSet<MachineConstantPoolValue*> Deleted;
962 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
963 if (Constants[i].isMachineConstantPoolEntry()) {
964 Deleted.insert(Constants[i].Val.MachineCPVal);
965 delete Constants[i].Val.MachineCPVal;
966 }
967 for (DenseSet<MachineConstantPoolValue*>::iterator I =
968 MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
969 I != E; ++I) {
970 if (Deleted.count(*I) == 0)
971 delete *I;
972 }
973}
974
975/// Test whether the given two constants can be allocated the same constant pool
976/// entry.
977static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
978 const DataLayout &DL) {
979 // Handle the trivial case quickly.
980 if (A == B) return true;
981
982 // If they have the same type but weren't the same constant, quickly
983 // reject them.
984 if (A->getType() == B->getType()) return false;
985
986 // We can't handle structs or arrays.
987 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
988 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
989 return false;
990
991 // For now, only support constants with the same size.
992 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
993 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
994 return false;
995
996 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
997
998 // Try constant folding a bitcast of both instructions to an integer. If we
999 // get two identical ConstantInt's, then we are good to share them. We use
1000 // the constant folding APIs to do this so that we get the benefit of
1001 // DataLayout.
1002 if (isa<PointerType>(A->getType()))
1003 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1004 const_cast<Constant *>(A), IntTy, DL);
1005 else if (A->getType() != IntTy)
1006 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1007 IntTy, DL);
1008 if (isa<PointerType>(B->getType()))
1009 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1010 const_cast<Constant *>(B), IntTy, DL);
1011 else if (B->getType() != IntTy)
1012 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1013 IntTy, DL);
1014
1015 return A == B;
1016}
1017
1018/// Create a new entry in the constant pool or return an existing one.
1019/// User must specify the log2 of the minimum required alignment for the object.
1020unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
1021 unsigned Alignment) {
1022 assert(Alignment && "Alignment must be specified!");
1023 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1024
1025 // Check to see if we already have this constant.
1026 //
1027 // FIXME, this could be made much more efficient for large constant pools.
1028 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1029 if (!Constants[i].isMachineConstantPoolEntry() &&
1030 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1031 if ((unsigned)Constants[i].getAlignment() < Alignment)
1032 Constants[i].Alignment = Alignment;
1033 return i;
1034 }
1035
1036 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1037 return Constants.size()-1;
1038}
1039
1040unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
1041 unsigned Alignment) {
1042 assert(Alignment && "Alignment must be specified!");
1043 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1044
1045 // Check to see if we already have this constant.
1046 //
1047 // FIXME, this could be made much more efficient for large constant pools.
1048 int Idx = V->getExistingMachineCPValue(this, Alignment);
1049 if (Idx != -1) {
1050 MachineCPVsSharingEntries.insert(V);
1051 return (unsigned)Idx;
1052 }
1053
1054 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1055 return Constants.size()-1;
1056}
1057
1058void MachineConstantPool::print(raw_ostream &OS) const {
1059 if (Constants.empty()) return;
1060
1061 OS << "Constant Pool:\n";
1062 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1063 OS << " cp#" << i << ": ";
1064 if (Constants[i].isMachineConstantPoolEntry())
1065 Constants[i].Val.MachineCPVal->print(OS);
1066 else
1067 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1068 OS << ", align=" << Constants[i].getAlignment();
1069 OS << "\n";
1070 }
1071}
1072
1073#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1074LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
1075#endif

/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineInstr class, which is the
10// basic representation for all target dependent machine instructions used by
11// the back end.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEINSTR_H
16#define LLVM_CODEGEN_MACHINEINSTR_H
17
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/PointerSumType.h"
20#include "llvm/ADT/ilist.h"
21#include "llvm/ADT/ilist_node.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/CodeGen/MachineMemOperand.h"
25#include "llvm/CodeGen/MachineOperand.h"
26#include "llvm/CodeGen/TargetOpcodes.h"
27#include "llvm/IR/DebugLoc.h"
28#include "llvm/IR/InlineAsm.h"
29#include "llvm/MC/MCInstrDesc.h"
30#include "llvm/MC/MCSymbol.h"
31#include "llvm/Support/ArrayRecycler.h"
32#include "llvm/Support/TrailingObjects.h"
33#include <algorithm>
34#include <cassert>
35#include <cstdint>
36#include <utility>
37
38namespace llvm {
39
40template <typename T> class ArrayRef;
41class DIExpression;
42class DILocalVariable;
43class MachineBasicBlock;
44class MachineFunction;
45class MachineMemOperand;
46class MachineRegisterInfo;
47class ModuleSlotTracker;
48class raw_ostream;
49template <typename T> class SmallVectorImpl;
50class SmallBitVector;
51class StringRef;
52class TargetInstrInfo;
53class TargetRegisterClass;
54class TargetRegisterInfo;
55
56//===----------------------------------------------------------------------===//
57/// Representation of each machine instruction.
58///
59/// This class isn't a POD type, but it must have a trivial destructor. When a
60/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
61/// without having their destructor called.
62///
63class MachineInstr
64 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
65 ilist_sentinel_tracking<true>> {
66public:
67 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
68
69 /// Flags to specify different kinds of comments to output in
70 /// assembly code. These flags carry semantic information not
71 /// otherwise easily derivable from the IR text.
72 ///
73 enum CommentFlag {
74 ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
75 NoSchedComment = 0x2,
76 TAsmComments = 0x4 // Target Asm comments should start from this value.
77 };
78
79 enum MIFlag {
80 NoFlags = 0,
81 FrameSetup = 1 << 0, // Instruction is used as a part of
82 // function frame setup code.
83 FrameDestroy = 1 << 1, // Instruction is used as a part of
84 // function frame destruction code.
85 BundledPred = 1 << 2, // Instruction has bundled predecessors.
86 BundledSucc = 1 << 3, // Instruction has bundled successors.
87 FmNoNans = 1 << 4, // Instruction does not support Fast
88 // math nan values.
89 FmNoInfs = 1 << 5, // Instruction does not support Fast
90 // math infinity values.
91 FmNsz = 1 << 6, // Instruction is not required to retain
92 // signed zero values.
93 FmArcp = 1 << 7, // Instruction supports Fast math
94 // reciprocal approximations.
95 FmContract = 1 << 8, // Instruction supports Fast math
96 // contraction operations like fma.
97 FmAfn = 1 << 9, // Instruction may map to Fast math
98 // intrinsic approximation.
99 FmReassoc = 1 << 10, // Instruction supports Fast math
100 // reassociation of operand order.
101 NoUWrap = 1 << 11, // Instruction supports binary operator
102 // no unsigned wrap.
103 NoSWrap = 1 << 12, // Instruction supports binary operator
104 // no signed wrap.
105 IsExact = 1 << 13 // Instruction supports division is
106 // known to be exact.
107 };
108
109private:
110 const MCInstrDesc *MCID; // Instruction descriptor.
111 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
112
113 // Operands are allocated by an ArrayRecycler.
114 MachineOperand *Operands = nullptr; // Pointer to the first operand.
115 unsigned NumOperands = 0; // Number of operands on instruction.
116 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
117 OperandCapacity CapOperands; // Capacity of the Operands array.
118
119 uint16_t Flags = 0; // Various bits of additional
120 // information about machine
121 // instruction.
122
123 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
124 // the AsmPrinter to emit helpful
125 // comments. This is *not* semantic
126 // information. Do not use this for
127 // anything other than to convey comment
128 // information to AsmPrinter.
129
130 /// Internal implementation detail class that provides out-of-line storage for
131 /// extra info used by the machine instruction when this info cannot be stored
132 /// in-line within the instruction itself.
133 ///
134 /// This has to be defined eagerly due to the implementation constraints of
135 /// `PointerSumType` where it is used.
136 class ExtraInfo final
137 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
138 public:
139 static ExtraInfo *create(BumpPtrAllocator &Allocator,
140 ArrayRef<MachineMemOperand *> MMOs,
141 MCSymbol *PreInstrSymbol = nullptr,
142 MCSymbol *PostInstrSymbol = nullptr) {
143 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  [Step 2] Assuming the condition is false
144 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  [Step 3] Assuming the condition is false
145 auto *Result = new (Allocator.Allocate(
  [Step 4] 'Result' initialized to a null pointer value
146 totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
147 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
148 alignof(ExtraInfo)))
149 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
150
151 // Copy the actual data into the trailing objects.
152 std::copy(MMOs.begin(), MMOs.end(),
153 Result->getTrailingObjects<MachineMemOperand *>());
  [Step 5] Called C++ object pointer is null
154
155 if (HasPreInstrSymbol)
156 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
157 if (HasPostInstrSymbol)
158 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
159 PostInstrSymbol;
160
161 return Result;
162 }
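
  [Note] The report hinges on the analyzer treating the result of Allocator.Allocate(...) on lines 145-148 as possibly null; the placement new then yields a null 'Result', which is used as the object pointer of the member call on line 153. If that allocation is in fact expected never to fail, one way to make the assumption explicit (a sketch only, not an upstream LLVM change) is to split the allocation out and assert on it before the placement new:

    // Sketch under the assumption that Allocate() cannot return null here;
    // the assert records that fact for readers and for the analyzer.
    void *Mem = Allocator.Allocate(
        totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
            MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
        alignof(ExtraInfo));
    assert(Mem && "BumpPtrAllocator::Allocate returned null");
    auto *Result =
        new (Mem) ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);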
163
164 ArrayRef<MachineMemOperand *> getMMOs() const {
165 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
166 }
167
168 MCSymbol *getPreInstrSymbol() const {
169 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
170 }
171
172 MCSymbol *getPostInstrSymbol() const {
173 return HasPostInstrSymbol
174 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
175 : nullptr;
176 }
177
178 private:
179 friend TrailingObjects;
180
181 // Description of the extra info, used to interpret the actual optional
182 // data appended.
183 //
184 // Note that this is not terribly space optimized. This leaves a great deal
185 // of flexibility to fit more in here later.
186 const int NumMMOs;
187 const bool HasPreInstrSymbol;
188 const bool HasPostInstrSymbol;
189
190 // Implement the `TrailingObjects` internal API.
191 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
192 return NumMMOs;
193 }
194 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
195 return HasPreInstrSymbol + HasPostInstrSymbol;
196 }
197
198 // Just a boring constructor to allow us to initialize the sizes. Always use
199 // the `create` routine above.
200 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
201 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
202 HasPostInstrSymbol(HasPostInstrSymbol) {}
203 };
204
205 /// Enumeration of the kinds of inline extra info available. It is important
206 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
207 /// it accessible as an `ArrayRef`.
208 enum ExtraInfoInlineKinds {
209 EIIK_MMO = 0,
210 EIIK_PreInstrSymbol,
211 EIIK_PostInstrSymbol,
212 EIIK_OutOfLine
213 };
214
215 // We store extra information about the instruction here. The common case is
216 // expected to be nothing or a single pointer (typically a MMO or a symbol).
217 // We work to optimize this common case by storing it inline here rather than
218 // requiring a separate allocation, but we fall back to an allocation when
219 // multiple pointers are needed.
220 PointerSumType<ExtraInfoInlineKinds,
221 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
222 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
223 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
224 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
225 Info;
226
227 DebugLoc debugLoc; // Source line information.
228
229 // Intrusive list support
230 friend struct ilist_traits<MachineInstr>;
231 friend struct ilist_callback_traits<MachineBasicBlock>;
232 void setParent(MachineBasicBlock *P) { Parent = P; }
233
234 /// This constructor creates a copy of the given
235 /// MachineInstr in the given MachineFunction.
236 MachineInstr(MachineFunction &, const MachineInstr &);
237
238 /// This constructor create a MachineInstr and add the implicit operands.
239 /// It reserves space for number of operands specified by
240 /// MCInstrDesc. An explicit DebugLoc is supplied.
241 MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
242 bool NoImp = false);
243
244 // MachineInstrs are pool-allocated and owned by MachineFunction.
245 friend class MachineFunction;
246
247public:
248 MachineInstr(const MachineInstr &) = delete;
249 MachineInstr &operator=(const MachineInstr &) = delete;
250 // Use MachineFunction::DeleteMachineInstr() instead.
251 ~MachineInstr() = delete;
252
253 const MachineBasicBlock* getParent() const { return Parent; }
254 MachineBasicBlock* getParent() { return Parent; }
255
256 /// Return the function that contains the basic block that this instruction
257 /// belongs to.
258 ///
259 /// Note: this is undefined behaviour if the instruction does not have a
260 /// parent.
261 const MachineFunction *getMF() const;
262 MachineFunction *getMF() {
263 return const_cast<MachineFunction *>(
264 static_cast<const MachineInstr *>(this)->getMF());
265 }
266
267 /// Return the asm printer flags bitvector.
268 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
269
270 /// Clear the AsmPrinter bitvector.
271 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
272
273 /// Return whether an AsmPrinter flag is set.
274 bool getAsmPrinterFlag(CommentFlag Flag) const {
275 return AsmPrinterFlags & Flag;
276 }
277
278 /// Set a flag for the AsmPrinter.
279 void setAsmPrinterFlag(uint8_t Flag) {
280 AsmPrinterFlags |= Flag;
281 }
282
283 /// Clear specific AsmPrinter flags.
284 void clearAsmPrinterFlag(CommentFlag Flag) {
285 AsmPrinterFlags &= ~Flag;
286 }
287
288 /// Return the MI flags bitvector.
289 uint16_t getFlags() const {
290 return Flags;
291 }
292
293 /// Return whether an MI flag is set.
294 bool getFlag(MIFlag Flag) const {
295 return Flags & Flag;
296 }
297
298 /// Set a MI flag.
299 void setFlag(MIFlag Flag) {
300 Flags |= (uint16_t)Flag;
301 }
302
303 void setFlags(unsigned flags) {
304 // Filter out the automatically maintained flags.
305 unsigned Mask = BundledPred | BundledSucc;
306 Flags = (Flags & Mask) | (flags & ~Mask);
307 }
308
309 /// clearFlag - Clear a MI flag.
310 void clearFlag(MIFlag Flag) {
311 Flags &= ~((uint16_t)Flag);
312 }
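// Usage sketch (editorial example, not part of MachineInstr.h): copy the
// FmNoNans fast-math MIFlag from one instruction to another with the flag API
// above. setFlag() is used instead of setFlags() so the automatically
// maintained bundle-tracking bits are left untouched. The helper name is
// hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
static void copyFmNoNans(const llvm::MachineInstr &From,
                         llvm::MachineInstr &To) {
  if (From.getFlag(llvm::MachineInstr::FmNoNans))
    To.setFlag(llvm::MachineInstr::FmNoNans);
}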
313
314 /// Return true if MI is in a bundle (but not the first MI in a bundle).
315 ///
316 /// A bundle looks like this before it's finalized:
317 /// ----------------
318 /// | MI |
319 /// ----------------
320 /// |
321 /// ----------------
322 /// | MI * |
323 /// ----------------
324 /// |
325 /// ----------------
326 /// | MI * |
327 /// ----------------
328 /// In this case, the first MI starts a bundle but is not inside a bundle, the
329 /// next 2 MIs are considered "inside" the bundle.
330 ///
331 /// After a bundle is finalized, it looks like this:
332 /// ----------------
333 /// | Bundle |
334 /// ----------------
335 /// |
336 /// ----------------
337 /// | MI * |
338 /// ----------------
339 /// |
340 /// ----------------
341 /// | MI * |
342 /// ----------------
343 /// |
344 /// ----------------
345 /// | MI * |
346 /// ----------------
347 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
348 /// a bundle, but the next three MIs are.
349 bool isInsideBundle() const {
350 return getFlag(BundledPred);
351 }
352
353 /// Return true if this instruction is part of a bundle. This is true
354 /// if either this instruction or its following instruction is marked "InsideBundle".
355 bool isBundled() const {
356 return isBundledWithPred() || isBundledWithSucc();
357 }
358
359 /// Return true if this instruction is part of a bundle, and it is not the
360 /// first instruction in the bundle.
361 bool isBundledWithPred() const { return getFlag(BundledPred); }
362
363 /// Return true if this instruction is part of a bundle, and it is not the
364 /// last instruction in the bundle.
365 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
366
367 /// Bundle this instruction with its predecessor. This can be an unbundled
368 /// instruction, or it can be the first instruction in a bundle.
369 void bundleWithPred();
370
371 /// Bundle this instruction with its successor. This can be an unbundled
372 /// instruction, or it can be the last instruction in a bundle.
373 void bundleWithSucc();
374
375 /// Break bundle above this instruction.
376 void unbundleFromPred();
377
378 /// Break bundle below this instruction.
379 void unbundleFromSucc();
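// Usage sketch (editorial example, not part of MachineInstr.h): count the
// instructions bundled after MI by following the BundledSucc flag, mirroring
// what getBundleSize() below computes. Assumes MI lives in a MachineBasicBlock
// so its intrusive-list iterator is valid; the helper name is hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
static unsigned countBundledInstrs(const llvm::MachineInstr &MI) {
  unsigned N = 0;
  // Each step moves to the next instruction inside the same bundle.
  for (auto I = MI.getIterator(); I->isBundledWithSucc(); ++I)
    ++N;
  return N;
}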
380
381 /// Returns the debug location id of this MachineInstr.
382 const DebugLoc &getDebugLoc() const { return debugLoc; }
383
384 /// Return the debug variable referenced by
385 /// this DBG_VALUE instruction.
386 const DILocalVariable *getDebugVariable() const;
387
388 /// Return the complex address expression referenced by
389 /// this DBG_VALUE instruction.
390 const DIExpression *getDebugExpression() const;
391
392 /// Return the debug label referenced by
393 /// this DBG_LABEL instruction.
394 const DILabel *getDebugLabel() const;
395
396 /// Emit an error referring to the source location of this instruction.
397 /// This should only be used for inline assembly that is somehow
398 /// impossible to compile. Other errors should have been handled much
399 /// earlier.
400 ///
401 /// If this method returns, the caller should try to recover from the error.
402 void emitError(StringRef Msg) const;
403
404 /// Returns the target instruction descriptor of this MachineInstr.
405 const MCInstrDesc &getDesc() const { return *MCID; }
406
407 /// Returns the opcode of this MachineInstr.
408 unsigned getOpcode() const { return MCID->Opcode; }
409
410 /// Returns the total number of operands.
411 unsigned getNumOperands() const { return NumOperands; }
412
413 const MachineOperand& getOperand(unsigned i) const {
414 assert(i < getNumOperands() && "getOperand() out of range!");
415 return Operands[i];
416 }
417 MachineOperand& getOperand(unsigned i) {
418 assert(i < getNumOperands() && "getOperand() out of range!");
419 return Operands[i];
420 }
421
422 /// Returns the total number of definitions.
423 unsigned getNumDefs() const {
424 return getNumExplicitDefs() + MCID->getNumImplicitDefs();
425 }
426
427 /// Return true if operand \p OpIdx is a subregister index.
428 bool isOperandSubregIdx(unsigned OpIdx) const {
429 assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
430 "Expected MO_Immediate operand type.");
431 if (isExtractSubreg() && OpIdx == 2)
432 return true;
433 if (isInsertSubreg() && OpIdx == 3)
434 return true;
435 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
436 return true;
437 if (isSubregToReg() && OpIdx == 3)
438 return true;
439 return false;
440 }
441
442 /// Returns the number of non-implicit operands.
443 unsigned getNumExplicitOperands() const;
444
445 /// Returns the number of non-implicit definitions.
446 unsigned getNumExplicitDefs() const;
447
448 /// iterator/begin/end - Iterate over all operands of a machine instruction.
449 using mop_iterator = MachineOperand *;
450 using const_mop_iterator = const MachineOperand *;
451
452 mop_iterator operands_begin() { return Operands; }
453 mop_iterator operands_end() { return Operands + NumOperands; }
454
455 const_mop_iterator operands_begin() const { return Operands; }
456 const_mop_iterator operands_end() const { return Operands + NumOperands; }
457
458 iterator_range<mop_iterator> operands() {
459 return make_range(operands_begin(), operands_end());
460 }
461 iterator_range<const_mop_iterator> operands() const {
462 return make_range(operands_begin(), operands_end());
463 }
464 iterator_range<mop_iterator> explicit_operands() {
465 return make_range(operands_begin(),
466 operands_begin() + getNumExplicitOperands());
467 }
468 iterator_range<const_mop_iterator> explicit_operands() const {
469 return make_range(operands_begin(),
470 operands_begin() + getNumExplicitOperands());
471 }
472 iterator_range<mop_iterator> implicit_operands() {
473 return make_range(explicit_operands().end(), operands_end());
474 }
475 iterator_range<const_mop_iterator> implicit_operands() const {
476 return make_range(explicit_operands().end(), operands_end());
477 }
478 /// Returns a range over all explicit operands that are register definitions.
479 /// Implicit definitions are not included!
480 iterator_range<mop_iterator> defs() {
481 return make_range(operands_begin(),
482 operands_begin() + getNumExplicitDefs());
483 }
484 /// \copydoc defs()
485 iterator_range<const_mop_iterator> defs() const {
486 return make_range(operands_begin(),
487 operands_begin() + getNumExplicitDefs());
488 }
489 /// Returns a range that includes all operands that are register uses.
490 /// This may include unrelated operands which are not register uses.
491 iterator_range<mop_iterator> uses() {
492 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
493 }
494 /// \copydoc uses()
495 iterator_range<const_mop_iterator> uses() const {
496 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
497 }
498 iterator_range<mop_iterator> explicit_uses() {
499 return make_range(operands_begin() + getNumExplicitDefs(),
500 operands_begin() + getNumExplicitOperands());
501 }
502 iterator_range<const_mop_iterator> explicit_uses() const {
503 return make_range(operands_begin() + getNumExplicitDefs(),
504 operands_begin() + getNumExplicitOperands());
505 }
506
507 /// Returns the number of the operand iterator \p I points to.
508 unsigned getOperandNo(const_mop_iterator I) const {
509 return I - operands_begin();
510 }
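// Usage sketch (editorial example, not part of MachineInstr.h): collect every
// virtual register appearing in the use operands of MI via the uses() range
// above. Assumes the unsigned-register API of this LLVM snapshot; the helper
// name is hypothetical.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
static void collectVirtualUses(const llvm::MachineInstr &MI,
                               llvm::SmallVectorImpl<unsigned> &VRegs) {
  for (const llvm::MachineOperand &MO : MI.uses())
    if (MO.isReg() && llvm::TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      VRegs.push_back(MO.getReg());
}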
511
512 /// Access to memory operands of the instruction. If there are none, that does
513 /// not imply anything about whether the function accesses memory. Instead,
514 /// the caller must behave conservatively.
515 ArrayRef<MachineMemOperand *> memoperands() const {
516 if (!Info)
517 return {};
518
519 if (Info.is<EIIK_MMO>())
520 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
521
522 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
523 return EI->getMMOs();
524
525 return {};
526 }
527
528 /// Access to memory operands of the instruction.
529 ///
530 /// If `memoperands_begin() == memoperands_end()`, that does not imply
531 /// anything about whether the function accesses memory. Instead, the caller
532 /// must behave conservatively.
533 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
534
535 /// Access to memory operands of the instruction.
536 ///
537 /// If `memoperands_begin() == memoperands_end()`, that does not imply
538 /// anything about whether the function accesses memory. Instead, the caller
539 /// must behave conservatively.
540 mmo_iterator memoperands_end() const { return memoperands().end(); }
541
542 /// Return true if we don't have any memory operands which describe the
543 /// memory access done by this instruction. If this is true, calling code
544 /// must be conservative.
545 bool memoperands_empty() const { return memoperands().empty(); }
546
547 /// Return true if this instruction has exactly one MachineMemOperand.
548 bool hasOneMemOperand() const { return memoperands().size() == 1; }
549
550 /// Return the number of memory operands.
551 unsigned getNumMemOperands() const { return memoperands().size(); }
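// Usage sketch (editorial example, not part of MachineInstr.h): treat an empty
// memoperand list conservatively, as the comments above require, and otherwise
// scan the MachineMemOperands for a volatile access. The helper name is
// hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
static bool mayTouchVolatileMemory(const llvm::MachineInstr &MI) {
  if (MI.memoperands_empty())
    return MI.mayLoadOrStore(); // No MMOs: assume the worst.
  for (const llvm::MachineMemOperand *MMO : MI.memoperands())
    if (MMO->isVolatile())
      return true;
  return false;
}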
552
553 /// Helper to extract a pre-instruction symbol if one has been added.
554 MCSymbol *getPreInstrSymbol() const {
555 if (!Info)
556 return nullptr;
557 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
558 return S;
559 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
560 return EI->getPreInstrSymbol();
561
562 return nullptr;
563 }
564
565 /// Helper to extract a post-instruction symbol if one has been added.
566 MCSymbol *getPostInstrSymbol() const {
567 if (!Info)
568 return nullptr;
569 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
570 return S;
571 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
572 return EI->getPostInstrSymbol();
573
574 return nullptr;
575 }
576
577 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
578 /// queries but they are bundle aware.
579
580 enum QueryType {
581 IgnoreBundle, // Ignore bundles
582 AnyInBundle, // Return true if any instruction in bundle has property
583 AllInBundle // Return true if all instructions in bundle have property
584 };
585
586 /// Return true if the instruction (or in the case of a bundle,
587 /// the instructions inside the bundle) has the specified property.
588 /// The first argument is the property being queried.
589 /// The second argument indicates whether the query should look inside
590 /// instruction bundles.
591 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
592 assert(MCFlag < 64 &&
593 "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
594 // Inline the fast path for unbundled or bundle-internal instructions.
595 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
596 return getDesc().getFlags() & (1ULL << MCFlag);
597
598 // If this is the first instruction in a bundle, take the slow path.
599 return hasPropertyInBundle(1ULL << MCFlag, Type);
600 }
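// Usage sketch (editorial example, not part of MachineInstr.h): the same
// property queried with different bundle semantics. AnyInBundle asks whether
// any instruction in MI's bundle is a call; IgnoreBundle looks only at MI
// itself. The helper names are hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
static bool bundleContainsCall(const llvm::MachineInstr &MI) {
  return MI.hasProperty(llvm::MCID::Call, llvm::MachineInstr::AnyInBundle);
}
static bool thisInstrIsCall(const llvm::MachineInstr &MI) {
  return MI.hasProperty(llvm::MCID::Call, llvm::MachineInstr::IgnoreBundle);
}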
601
602 /// Return true if this instruction can have a variable number of operands.
603 /// In this case, the variable operands will be after the normal
604 /// operands but before the implicit definitions and uses (if any are
605 /// present).
606 bool isVariadic(QueryType Type = IgnoreBundle) const {
607 return hasProperty(MCID::Variadic, Type);
608 }
609
610 /// Set if this instruction has an optional definition, e.g.
611 /// ARM instructions which can set condition code if 's' bit is set.
612 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
613 return hasProperty(MCID::HasOptionalDef, Type);
614 }
615
616 /// Return true if this is a pseudo instruction that doesn't
617 /// correspond to a real machine instruction.
618 bool isPseudo(QueryType Type = IgnoreBundle) const {
619 return hasProperty(MCID::Pseudo, Type);
620 }
621
622 bool isReturn(QueryType Type = AnyInBundle) const {
623 return hasProperty(MCID::Return, Type);
624 }
625
626 /// Return true if this is an instruction that marks the end of an EH scope,
627 /// i.e., a catchpad or a cleanuppad instruction.
628 bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
629 return hasProperty(MCID::EHScopeReturn, Type);
630 }
631
632 bool isCall(QueryType Type = AnyInBundle) const {
633 return hasProperty(MCID::Call, Type);
634 }
635
636 /// Returns true if the specified instruction stops control flow
637 /// from executing the instruction immediately following it. Examples include
638 /// unconditional branches and return instructions.
639 bool isBarrier(QueryType Type = AnyInBundle) const {
640 return hasProperty(MCID::Barrier, Type);
641 }
642
643 /// Returns true if this instruction is part of the terminator for a basic block.
644 /// Typically these are things like return and branch instructions.
645 ///
646 /// Various passes use this to insert code into the bottom of a basic block,
647 /// but before control flow occurs.
648 bool isTerminator(QueryType Type = AnyInBundle) const {
649 return hasProperty(MCID::Terminator, Type);
650 }
651
652 /// Returns true if this is a conditional, unconditional, or indirect branch.
653 /// Predicates below can be used to discriminate between
654 /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
655 /// get more information.
656 bool isBranch(QueryType Type = AnyInBundle) const {
657 return hasProperty(MCID::Branch, Type);
658 }
659
660 /// Return true if this is an indirect branch, such as a
661 /// branch through a register.
662 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
663 return hasProperty(MCID::IndirectBranch, Type);
664 }
665
666 /// Return true if this is a branch which may fall
667 /// through to the next instruction or may transfer control flow to some other
668 /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
669 /// information about this branch.
670 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
671 return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type);
672 }
673
674 /// Return true if this is a branch which always
675 /// transfers control flow to some other block. The
676 /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
677 /// about this branch.
678 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
679 return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type);
680 }
681
682 /// Return true if this instruction has a predicate operand that
683 /// controls execution. It may be set to 'always', or may be set to other
684 /// values. There are various methods in TargetInstrInfo that can be used to
685 /// control and modify the predicate in this instruction.
686 bool isPredicable(QueryType Type = AllInBundle) const {
687 // If it's a bundle, then all bundled instructions must be predicable for this
688 // to return true.
689 return hasProperty(MCID::Predicable, Type);
690 }
691
692 /// Return true if this instruction is a comparison.
693 bool isCompare(QueryType Type = IgnoreBundle) const {
694 return hasProperty(MCID::Compare, Type);
695 }
696
697 /// Return true if this instruction is a move immediate
698 /// (including conditional moves) instruction.
699 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
700 return hasProperty(MCID::MoveImm, Type);
701 }
702
703 /// Return true if this instruction is a register move.
704 /// (including moving values from subreg to reg)
705 bool isMoveReg(QueryType Type = IgnoreBundle) const {
706 return hasProperty(MCID::MoveReg, Type);
707 }
708
709 /// Return true if this instruction is a bitcast instruction.
710 bool isBitcast(QueryType Type = IgnoreBundle) const {
711 return hasProperty(MCID::Bitcast, Type);
712 }
713
714 /// Return true if this instruction is a select instruction.
715 bool isSelect(QueryType Type = IgnoreBundle) const {
716 return hasProperty(MCID::Select, Type);
717 }
718
719 /// Return true if this instruction cannot be safely duplicated.
720 /// For example, if the instruction has unique labels attached
721 /// to it, duplicating it would cause multiple definition errors.
722 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
723 return hasProperty(MCID::NotDuplicable, Type);
724 }
725
726 /// Return true if this instruction is convergent.
727 /// Convergent instructions can not be made control-dependent on any
728 /// additional values.
729 bool isConvergent(QueryType Type = AnyInBundle) const {
730 if (isInlineAsm()) {
731 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
732 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
733 return true;
734 }
735 return hasProperty(MCID::Convergent, Type);
736 }
737
738 /// Returns true if the specified instruction has a delay slot
739 /// which must be filled by the code generator.
740 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
741 return hasProperty(MCID::DelaySlot, Type);
742 }
743
744 /// Return true for instructions that can be folded as
745 /// memory operands in other instructions. The most common use for this
746 /// is instructions that are simple loads from memory that don't modify
747 /// the loaded value in any way, but it can also be used for instructions
748 /// that can be expressed as constant-pool loads, such as V_SETALLONES
749 /// on x86, to allow them to be folded when it is beneficial.
750 /// This should only be set on instructions that return a value in their
751 /// only virtual register definition.
752 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
753 return hasProperty(MCID::FoldableAsLoad, Type);
754 }
755
756 /// Return true if this instruction behaves
757 /// the same way as the generic REG_SEQUENCE instructions.
758 /// E.g., on ARM,
759 /// dX VMOVDRR rY, rZ
760 /// is equivalent to
761 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
762 ///
763 /// Note that for the optimizers to be able to take advantage of
764 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
765 /// overridden accordingly.
766 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
767 return hasProperty(MCID::RegSequence, Type);
768 }
769
770 /// Return true if this instruction behaves
771 /// the same way as the generic EXTRACT_SUBREG instructions.
772 /// E.g., on ARM,
773 /// rX, rY VMOVRRD dZ
774 /// is equivalent to two EXTRACT_SUBREG:
775 /// rX = EXTRACT_SUBREG dZ, ssub_0
776 /// rY = EXTRACT_SUBREG dZ, ssub_1
777 ///
778 /// Note that for the optimizers to be able to take advantage of
779 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
780 /// overridden accordingly.
781 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
782 return hasProperty(MCID::ExtractSubreg, Type);
783 }
784
785 /// Return true if this instruction behaves
786 /// the same way as the generic INSERT_SUBREG instructions.
787 /// E.g., on ARM,
788 /// dX = VSETLNi32 dY, rZ, Imm
789 /// is equivalent to an INSERT_SUBREG:
790 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
791 ///
792 /// Note that for the optimizers to be able to take advantage of
793 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
794 /// overridden accordingly.
795 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
796 return hasProperty(MCID::InsertSubreg, Type);
797 }
798
799 //===--------------------------------------------------------------------===//
800 // Side Effect Analysis
801 //===--------------------------------------------------------------------===//
802
803 /// Return true if this instruction could possibly read memory.
804 /// Instructions with this flag set are not necessarily simple load
805 /// instructions, they may load a value and modify it, for example.
806 bool mayLoad(QueryType Type = AnyInBundle) const {
807 if (isInlineAsm()) {
808 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
809 if (ExtraInfo & InlineAsm::Extra_MayLoad)
810 return true;
811 }
812 return hasProperty(MCID::MayLoad, Type);
813 }
814
815 /// Return true if this instruction could possibly modify memory.
816 /// Instructions with this flag set are not necessarily simple store
817 /// instructions, they may store a modified value based on their operands, or
818 /// may not actually modify anything, for example.
819 bool mayStore(QueryType Type = AnyInBundle) const {
820 if (isInlineAsm()) {
821 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
822 if (ExtraInfo & InlineAsm::Extra_MayStore)
823 return true;
824 }
825 return hasProperty(MCID::MayStore, Type);
826 }
827
828 /// Return true if this instruction could possibly read or modify memory.
829 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
830 return mayLoad(Type) || mayStore(Type);
831 }
832
833 //===--------------------------------------------------------------------===//
834 // Flags that indicate whether an instruction can be modified by a method.
835 //===--------------------------------------------------------------------===//
836
837 /// Return true if this may be a 2- or 3-address
838 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
839 /// result if Y and Z are exchanged. If this flag is set, then the
840 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
841 /// instruction.
842 ///
843 /// Note that this flag may be set on instructions that are only commutable
844 /// sometimes. In these cases, the call to commuteInstruction will fail.
845 /// Also note that some instructions require non-trivial modification to
846 /// commute them.
847 bool isCommutable(QueryType Type = IgnoreBundle) const {
848 return hasProperty(MCID::Commutable, Type);
849 }
850
851 /// Return true if this is a 2-address instruction
852 /// which can be changed into a 3-address instruction if needed. Doing this
853 /// transformation can be profitable in the register allocator, because it
854 /// means that the instruction can use a 2-address form if possible, but
855 /// degrade into a less efficient form if the source and dest register cannot
856 /// be assigned to the same register. For example, this allows the x86
857 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
858 /// is the same speed as the shift but has bigger code size.
859 ///
860 /// If this returns true, then the target must implement the
861 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
862 /// is allowed to fail if the transformation isn't valid for this specific
863 /// instruction (e.g. shl reg, 4 on x86).
864 ///
865 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
866 return hasProperty(MCID::ConvertibleTo3Addr, Type);
867 }
868
869 /// Return true if this instruction requires
870 /// custom insertion support when the DAG scheduler is inserting it into a
871 /// machine basic block. If this is true for the instruction, it basically
872 /// means that it is a pseudo instruction used at SelectionDAG time that is
873 /// expanded out into magic code by the target when MachineInstrs are formed.
874 ///
875 /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
876 /// is used to insert this into the MachineBasicBlock.
877 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
878 return hasProperty(MCID::UsesCustomInserter, Type);
879 }
880
881 /// Return true if this instruction requires *adjustment*
882 /// after instruction selection by calling a target hook. For example, this
883 /// can be used to fill in ARM 's' optional operand depending on whether
884 /// the conditional flag register is used.
885 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
886 return hasProperty(MCID::HasPostISelHook, Type);
887 }
888
889 /// Returns true if this instruction is a candidate for remat.
890 /// This flag is deprecated, please don't use it anymore. If this
891 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
892 /// verify the instruction is really rematerializable.
893 bool isRematerializable(QueryType Type = AllInBundle) const {
894 // It's only possible to re-mat a bundle if all bundled instructions are
895 // re-materializable.
896 return hasProperty(MCID::Rematerializable, Type);
897 }
898
899 /// Returns true if this instruction has the same cost (or less) than a move
900 /// instruction. This is useful during certain types of optimizations
901 /// (e.g., remat during two-address conversion or machine licm)
902 /// where we would like to remat or hoist the instruction, but not if it costs
903 /// more than moving the instruction into the appropriate register. Note, we
904 /// are not marking copies from and to the same register class with this flag.
905 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
906 // Only returns true for a bundle if all bundled instructions are cheap.
907 return hasProperty(MCID::CheapAsAMove, Type);
908 }
909
910 /// Returns true if this instruction's source operands
911 /// have special register allocation requirements that are not captured by the
912 /// operand register classes. e.g. ARM::STRD's two source registers must be an
913 /// even / odd pair, ARM::STM registers have to be in ascending order.
914 /// Post-register allocation passes should not attempt to change allocations
915 /// for sources of instructions with this flag.
916 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
917 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
918 }
919
920 /// Returns true if this instruction's def operands
921 /// have special register allocation requirements that are not captured by the
922 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
923 /// even / odd pair, ARM::LDM registers have to be in ascending order.
924 /// Post-register allocation passes should not attempt to change allocations
925 /// for definitions of instructions with this flag.
926 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
927 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
928 }
929
930 enum MICheckType {
931 CheckDefs, // Check all operands for equality
932 CheckKillDead, // Check all operands including kill / dead markers
933 IgnoreDefs, // Ignore all definitions
934 IgnoreVRegDefs // Ignore virtual register definitions
935 };
936
937 /// Return true if this instruction is identical to \p Other.
938 /// Two instructions are identical if they have the same opcode and all their
939 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
940 /// Note that this means liveness related flags (dead, undef, kill) do not
941 /// affect the notion of identical.
942 bool isIdenticalTo(const MachineInstr &Other,
943 MICheckType Check = CheckDefs) const;
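// Usage sketch (editorial example, not part of MachineInstr.h): a CSE-style
// comparison that ignores which virtual registers the two instructions define,
// using the IgnoreVRegDefs check kind declared above. The helper name is
// hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
static bool sameComputation(const llvm::MachineInstr &A,
                            const llvm::MachineInstr &B) {
  return A.isIdenticalTo(B, llvm::MachineInstr::IgnoreVRegDefs);
}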
944
945 /// Unlink 'this' from the containing basic block, and return it without
946 /// deleting it.
947 ///
948 /// This function can not be used on bundled instructions, use
949 /// removeFromBundle() to remove individual instructions from a bundle.
950 MachineInstr *removeFromParent();
951
952 /// Unlink this instruction from its basic block and return it without
953 /// deleting it.
954 ///
955 /// If the instruction is part of a bundle, the other instructions in the
956 /// bundle remain bundled.
957 MachineInstr *removeFromBundle();
958
959 /// Unlink 'this' from the containing basic block and delete it.
960 ///
961 /// If this instruction is the header of a bundle, the whole bundle is erased.
962 /// This function can not be used for instructions inside a bundle, use
963 /// eraseFromBundle() to erase individual bundled instructions.
964 void eraseFromParent();
965
966 /// Unlink 'this' from the containing basic block and delete it.
967 ///
968 /// For all definitions mark their uses in DBG_VALUE nodes
969 /// as undefined. Otherwise like eraseFromParent().
970 void eraseFromParentAndMarkDBGValuesForRemoval();
971
972 /// Unlink 'this' from its basic block and delete it.
973 ///
974 /// If the instruction is part of a bundle, the other instructions in the
975 /// bundle remain bundled.
976 void eraseFromBundle();
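// Usage sketch (editorial example, not part of MachineInstr.h): erase identity
// copies from a block. The iterator is advanced before eraseFromParent() so
// iteration stays valid; per the comments above, eraseFromParent() must not be
// used on instructions inside a bundle. The helper name is hypothetical.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
static void eraseIdentityCopies(llvm::MachineBasicBlock &MBB) {
  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
    llvm::MachineInstr &MI = *I++;
    if (MI.isIdentityCopy())
      MI.eraseFromParent();
  }
}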
977
978 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
979 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
980 bool isAnnotationLabel() const {
981 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
982 }
983
984 /// Returns true if the MachineInstr represents a label.
985 bool isLabel() const {
986 return isEHLabel() || isGCLabel() || isAnnotationLabel();
987 }
988
989 bool isCFIInstruction() const {
990 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
991 }
992
993 // True if the instruction represents a position in the function.
994 bool isPosition() const { return isLabel() || isCFIInstruction(); }
995
996 bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
997 bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
998 bool isDebugInstr() const { return isDebugValue() || isDebugLabel(); }
999
1000 /// A DBG_VALUE is indirect iff the first operand is a register and
1001 /// the second operand is an immediate.
1002 bool isIndirectDebugValue() const {
1003 return isDebugValue()
1004 && getOperand(0).isReg()
1005 && getOperand(1).isImm();
1006 }
1007
1008 /// Return true if the instruction is a debug value which describes a part of
1009 /// a variable as unavailable.
1010 bool isUndefDebugValue() const {
1011 return isDebugValue() && getOperand(0).isReg() && !getOperand(0).getReg();
1012 }
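// Usage sketch (editorial example, not part of MachineInstr.h): does this
// DBG_VALUE describe Reg directly in a register, i.e. it is neither indirect
// nor an undef debug value? The helper name is hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
static bool isDirectDbgValueOf(const llvm::MachineInstr &MI, unsigned Reg) {
  return MI.isDebugValue() && MI.getOperand(0).isReg() &&
         !MI.isIndirectDebugValue() && !MI.isUndefDebugValue() &&
         MI.getOperand(0).getReg() == Reg;
}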
1013
1014 bool isPHI() const {
1015 return getOpcode() == TargetOpcode::PHI ||
1016 getOpcode() == TargetOpcode::G_PHI;
1017 }
1018 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
1019 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
1020 bool isInlineAsm() const {
1021 return getOpcode() == TargetOpcode::INLINEASM ||
1022 getOpcode() == TargetOpcode::INLINEASM_BR;
1023 }
1024
1025 /// FIXME: Seems like a layering violation that the AsmDialect, which is X86
1026 /// specific, be attached to a generic MachineInstr.
1027 bool isMSInlineAsm() const {
1028 return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
1029 }
1030
1031 bool isStackAligningInlineAsm() const;
1032 InlineAsm::AsmDialect getInlineAsmDialect() const;
1033
1034 bool isInsertSubreg() const {
1035 return getOpcode() == TargetOpcode::INSERT_SUBREG;
1036 }
1037
1038 bool isSubregToReg() const {
1039 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
1040 }
1041
1042 bool isRegSequence() const {
1043 return getOpcode() == TargetOpcode::REG_SEQUENCE;
1044 }
1045
1046 bool isBundle() const {
1047 return getOpcode() == TargetOpcode::BUNDLE;
1048 }
1049
1050 bool isCopy() const {
1051 return getOpcode() == TargetOpcode::COPY;
1052 }
1053
1054 bool isFullCopy() const {
1055 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
1056 }
1057
1058 bool isExtractSubreg() const {
1059 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
1060 }
1061
1062 /// Return true if the instruction behaves like a copy.
1063 /// This does not include native copy instructions.
1064 bool isCopyLike() const {
1065 return isCopy() || isSubregToReg();
1066 }
1067
1068 /// Return true if the instruction is an identity copy.
1069 bool isIdentityCopy() const {
1070 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
1071 getOperand(0).getSubReg() == getOperand(1).getSubReg();
1072 }
1073
1074 /// Return true if this instruction doesn't produce any output in the form of
1075 /// executable instructions.
1076 bool isMetaInstruction() const {
1077 switch (getOpcode()) {
1078 default:
1079 return false;
1080 case TargetOpcode::IMPLICIT_DEF:
1081 case TargetOpcode::KILL:
1082 case TargetOpcode::CFI_INSTRUCTION:
1083 case TargetOpcode::EH_LABEL:
1084 case TargetOpcode::GC_LABEL:
1085 case TargetOpcode::DBG_VALUE:
1086 case TargetOpcode::DBG_LABEL:
1087 case TargetOpcode::LIFETIME_START:
1088 case TargetOpcode::LIFETIME_END:
1089 return true;
1090 }
1091 }
1092
1093 /// Return true if this is a transient instruction that is either very likely
1094 /// to be eliminated during register allocation (such as copy-like
1095 /// instructions), or if this instruction doesn't have an execution-time cost.
1096 bool isTransient() const {
1097 switch (getOpcode()) {
1098 default:
1099 return isMetaInstruction();
1100 // Copy-like instructions are usually eliminated during register allocation.
1101 case TargetOpcode::PHI:
1102 case TargetOpcode::G_PHI:
1103 case TargetOpcode::COPY:
1104 case TargetOpcode::INSERT_SUBREG:
1105 case TargetOpcode::SUBREG_TO_REG:
1106 case TargetOpcode::REG_SEQUENCE:
1107 return true;
1108 }
1109 }
1110
1111 /// Return the number of instructions inside the MI bundle, excluding the
1112 /// bundle header.
1113 ///
1114 /// This is the number of instructions that MachineBasicBlock::iterator
1115 /// skips, 0 for unbundled instructions.
1116 unsigned getBundleSize() const;
1117
1118 /// Return true if the MachineInstr reads the specified register.
1119 /// If TargetRegisterInfo is passed, then it also checks if there
1120 /// is a read of a super-register.
1121 /// This does not count partial redefines of virtual registers as reads:
1122 /// %reg1024:6 = OP.
1123 bool readsRegister(unsigned Reg,
1124 const TargetRegisterInfo *TRI = nullptr) const {
1125 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
1126 }
1127
1128 /// Return true if the MachineInstr reads the specified virtual register.
1129 /// Take into account that a partial define is a
1130 /// read-modify-write operation.
1131 bool readsVirtualRegister(unsigned Reg) const {
1132 return readsWritesVirtualRegister(Reg).first;
1133 }
1134
1135 /// Return a pair of bools (reads, writes) indicating if this instruction
1136 /// reads or writes Reg. This also considers partial defines.
1137 /// If Ops is not null, all operand indices for Reg are added.
1138 std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
1139 SmallVectorImpl<unsigned> *Ops = nullptr) const;
1140
1141 /// Return true if the MachineInstr kills the specified register.
1142 /// If TargetRegisterInfo is passed, then it also checks if there is
1143 /// a kill of a super-register.
1144 bool killsRegister(unsigned Reg,
1145 const TargetRegisterInfo *TRI = nullptr) const {
1146 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
1147 }
1148
1149 /// Return true if the MachineInstr fully defines the specified register.
1150 /// If TargetRegisterInfo is passed, then it also checks
1151 /// if there is a def of a super-register.
1152 /// NOTE: It's ignoring subreg indices on virtual registers.
1153 bool definesRegister(unsigned Reg,
1154 const TargetRegisterInfo *TRI = nullptr) const {
1155 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
1156 }
1157
1158 /// Return true if the MachineInstr modifies (fully define or partially
1159 /// define) the specified register.
1160 /// NOTE: It's ignoring subreg indices on virtual registers.
1161 bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
1162 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
1163 }
1164
1165 /// Returns true if the register is dead in this machine instruction.
1166 /// If TargetRegisterInfo is passed, then it also checks
1167 /// if there is a dead def of a super-register.
1168 bool registerDefIsDead(unsigned Reg,
1169 const TargetRegisterInfo *TRI = nullptr) const {
1170 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
1171 }
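// Usage sketch (editorial example, not part of MachineInstr.h): a write-only
// def, i.e. MI fully defines Reg without also reading it. Passing TRI makes
// both queries consider super-registers, as documented above. The helper name
// is hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
static bool definesWithoutReading(const llvm::MachineInstr &MI, unsigned Reg,
                                  const llvm::TargetRegisterInfo *TRI) {
  return MI.definesRegister(Reg, TRI) && !MI.readsRegister(Reg, TRI);
}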
1172
1173 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1174 /// the given register (not considering sub/super-registers).
1175 bool hasRegisterImplicitUseOperand(unsigned Reg) const;
1176
1177 /// Returns the operand index that is a use of the specific register or -1
1178 /// if it is not found. It further tightens the search criteria to a use
1179 /// that kills the register if isKill is true.
1180 int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
1181 const TargetRegisterInfo *TRI = nullptr) const;
1182
1183 /// Wrapper for findRegisterUseOperandIdx, it returns
1184 /// a pointer to the MachineOperand rather than an index.
1185 MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
1186 const TargetRegisterInfo *TRI = nullptr) {
1187 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
1188 return (Idx == -1) ? nullptr : &getOperand(Idx);
1189 }
1190
1191 const MachineOperand *findRegisterUseOperand(
1192 unsigned Reg, bool isKill = false,
1193 const TargetRegisterInfo *TRI = nullptr) const {
1194 return const_cast<MachineInstr *>(this)->
1195 findRegisterUseOperand(Reg, isKill, TRI);
1196 }
1197
1198 /// Returns the operand index that is a def of the specified register or
1199 /// -1 if it is not found. If isDead is true, defs that are not dead are
1200 /// skipped. If Overlap is true, then it also looks for defs that merely
1201 /// overlap the specified register. If TargetRegisterInfo is non-null,
1202 /// then it also checks if there is a def of a super-register.
1203 /// This may also return a register mask operand when Overlap is true.
1204 int findRegisterDefOperandIdx(unsigned Reg,
1205 bool isDead = false, bool Overlap = false,
1206 const TargetRegisterInfo *TRI = nullptr) const;
1207
1208 /// Wrapper for findRegisterDefOperandIdx, it returns
1209 /// a pointer to the MachineOperand rather than an index.
1210 MachineOperand *
1211 findRegisterDefOperand(unsigned Reg, bool isDead = false,
1212 bool Overlap = false,
1213 const TargetRegisterInfo *TRI = nullptr) {
1214 int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
1215 return (Idx == -1) ? nullptr : &getOperand(Idx);
1216 }
1217
1218 const MachineOperand *
1219 findRegisterDefOperand(unsigned Reg, bool isDead = false,
1220 bool Overlap = false,
1221 const TargetRegisterInfo *TRI = nullptr) const {
1222 return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
1223 Reg, isDead, Overlap, TRI);
1224 }
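// Usage sketch (editorial example, not part of MachineInstr.h): drop a stale
// kill flag on the use of Reg, if MI has one, via the wrapper declared above.
// The helper name is hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
static void clearKillOnUse(llvm::MachineInstr &MI, unsigned Reg,
                           const llvm::TargetRegisterInfo *TRI) {
  if (llvm::MachineOperand *MO =
          MI.findRegisterUseOperand(Reg, /*isKill=*/true, TRI))
    MO->setIsKill(false);
}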
1225
1226 /// Find the index of the first operand in the
1227 /// operand list that is used to represent the predicate. It returns -1 if
1228 /// none is found.
1229 int findFirstPredOperandIdx() const;
1230
1231 /// Find the index of the flag word operand that
1232 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1233 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1234 ///
1235 /// If GroupNo is not NULL, it will receive the number of the operand group
1236 /// containing OpIdx.
1237 ///
1238 /// The flag operand is an immediate that can be decoded with methods like
1239 /// InlineAsm::hasRegClassConstraint().
1240 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
1241
1242 /// Compute the static register class constraint for operand OpIdx.
1243 /// For normal instructions, this is derived from the MCInstrDesc.
1244 /// For inline assembly it is derived from the flag words.
1245 ///
1246 /// Returns NULL if the static register class constraint cannot be
1247 /// determined.
1248 const TargetRegisterClass*
1249 getRegClassConstraint(unsigned OpIdx,
1250 const TargetInstrInfo *TII,
1251 const TargetRegisterInfo *TRI) const;
1252
1253 /// Applies the constraints (def/use) implied by this MI on \p Reg to
1254 /// the given \p CurRC.
1255 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1256 /// instructions inside the bundle will be taken into account. In other words,
1257 /// this method accumulates all the constraints of the operand of this MI and
1258 /// the related bundle if MI is a bundle or inside a bundle.
1259 ///
1260 /// Returns the register class that satisfies both \p CurRC and the
1261 /// constraints set by MI. Returns NULL if such a register class does not
1262 /// exist.
1263 ///
1264 /// \pre CurRC must not be NULL.
1265 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1266 unsigned Reg, const TargetRegisterClass *CurRC,
1267 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1268 bool ExploreBundle = false) const;
1269
1270 /// Applies the constraints (def/use) implied by the \p OpIdx operand
1271 /// to the given \p CurRC.
1272 ///
1273 /// Returns the register class that satisfies both \p CurRC and the
1274 /// constraints set by \p OpIdx MI. Returns NULL if such a register class
1275 /// does not exist.
1276 ///
1277 /// \pre CurRC must not be NULL.
1278 /// \pre The operand at \p OpIdx must be a register.
1279 const TargetRegisterClass *
1280 getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
1281 const TargetInstrInfo *TII,
1282 const TargetRegisterInfo *TRI) const;
1283
1284 /// Add a tie between the register operands at DefIdx and UseIdx.
1285 /// The tie will cause the register allocator to ensure that the two
1286 /// operands are assigned the same physical register.
1287 ///
1288 /// Tied operands are managed automatically for explicit operands in the
1289 /// MCInstrDesc. This method is for exceptional cases like inline asm.
1290 void tieOperands(unsigned DefIdx, unsigned UseIdx);
1291
1292 /// Given the index of a tied register operand, find the
1293 /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
1294 /// index of the tied operand which must exist.
1295 unsigned findTiedOperandIdx(unsigned OpIdx) const;
1296
1297 /// Given the index of a register def operand,
1298 /// check if the register def is tied to a source operand, due to either
1299 /// two-address elimination or inline assembly constraints. Returns the
1300 /// first tied use operand index by reference if UseOpIdx is not null.
1301 bool isRegTiedToUseOperand(unsigned DefOpIdx,
1302 unsigned *UseOpIdx = nullptr) const {
1303 const MachineOperand &MO = getOperand(DefOpIdx);
1304 if (!MO.isReg() || !MO.isDef() || !MO.isTied())
1305 return false;
1306 if (UseOpIdx)
1307 *UseOpIdx = findTiedOperandIdx(DefOpIdx);
1308 return true;
1309 }
1310
1311 /// Return true if the use operand of the specified index is tied to a def
1312 /// operand. It also returns the def operand index by reference if DefOpIdx
1313 /// is not null.
1314 bool isRegTiedToDefOperand(unsigned UseOpIdx,
1315 unsigned *DefOpIdx = nullptr) const {
1316 const MachineOperand &MO = getOperand(UseOpIdx);
1317 if (!MO.isReg() || !MO.isUse() || !MO.isTied())
1318 return false;
1319 if (DefOpIdx)
1320 *DefOpIdx = findTiedOperandIdx(UseOpIdx);
1321 return true;
1322 }
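// Usage sketch (editorial example, not part of MachineInstr.h): given a use
// operand index, return the def operand it is tied to, or null if it is
// untied. The helper name is hypothetical.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
static llvm::MachineOperand *getTiedDef(llvm::MachineInstr &MI,
                                        unsigned UseIdx) {
  unsigned DefIdx;
  if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
    return nullptr;
  return &MI.getOperand(DefIdx);
}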
1323
1324 /// Clears kill flags on all operands.
1325 void clearKillInfo();
1326
1327 /// Replace all occurrences of FromReg with ToReg:SubIdx,
1328 /// properly composing subreg indices where necessary.
1329 void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
1330 const TargetRegisterInfo &RegInfo);
1331
1332 /// We have determined MI kills a register. Look for the
1333 /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
1334 /// add an implicit operand if it's not found. Returns true if the operand
1335 /// exists / is added.
1336 bool addRegisterKilled(unsigned IncomingReg,
1337 const TargetRegisterInfo *RegInfo,
1338 bool AddIfNotFound = false);
1339
1340 /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
1341 /// all aliasing registers.
1342 void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
1343
1344 /// We have determined MI defined a register without a use.
1345 /// Look for the operand that defines it and mark it as IsDead. If
1346 /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
1347 /// true if the operand exists / is added.
1348 bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
1349 bool AddIfNotFound = false);
1350
1351 /// Clear all dead flags on operands defining register @p Reg.
1352 void clearRegisterDeads(unsigned Reg);
1353
1354 /// Mark all subregister defs of register @p Reg with the undef flag.
1355 /// This function is used when we determined to have a subregister def in an
1356 /// otherwise undefined super register.
1357 void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
1358
1359 /// We have determined MI defines a register. Make sure there is an operand
1360 /// defining Reg.
1361 void addRegisterDefined(unsigned Reg,
1362 const TargetRegisterInfo *RegInfo = nullptr);
1363
1364 /// Mark every physreg used by this instruction as
1365 /// dead except those in the UsedRegs list.
1366 ///
1367 /// On instructions with register mask operands, also add implicit-def
1368 /// operands for all registers in UsedRegs.
1369 void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1370 const TargetRegisterInfo &TRI);
1371
1372 /// Return true if it is safe to move this instruction. If
1373 /// SawStore is set to true, it means that there is a store (or call) between
1374 /// the instruction's location and its intended destination.
1375 bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
1376
1377 /// Returns true if this instruction's memory access aliases the memory
1378 /// access of Other.
1379 //
1380 /// Assumes any physical registers used to compute addresses
1381 /// have the same value for both instructions. Returns false if neither
1382 /// instruction writes to memory.
1383 ///
1384 /// @param AA Optional alias analysis, used to compare memory operands.
1385 /// @param Other MachineInstr to check aliasing against.
1386 /// @param UseTBAA Whether to pass TBAA information to alias analysis.
1387 bool mayAlias(AliasAnalysis *AA, const MachineInstr &Other, bool UseTBAA) const;
1388
1389 /// Return true if this instruction may have an ordered
1390 /// or volatile memory reference, or if the information describing the memory
1391 /// reference is not available. Return false if it is known to have no
1392 /// ordered or volatile memory references.
1393 bool hasOrderedMemoryRef() const;
1394
1395 /// Return true if this load instruction never traps and points to a memory
1396 /// location whose value doesn't change during the execution of this function.
1397 ///
1398 /// Examples include loading a value from the constant pool or from the
1399 /// argument area of a function (if it does not change). If the instruction
1400 /// does multiple loads, this returns true only if all of the loads are
1401 /// dereferenceable and invariant.
1402 bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
1403
1404 /// If the specified instruction is a PHI that always merges together the
1405 /// same virtual register, return the register, otherwise return 0.
1406 unsigned isConstantValuePHI() const;
1407
1408 /// Return true if this instruction has side effects that are not modeled
1409 /// by mayLoad / mayStore, etc.
1410 /// For all instructions, the property is encoded in MCInstrDesc::Flags
1411 /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
1412 /// INLINEASM instruction, in which case the side effect property is encoded
1413 /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
1414 ///
1415 bool hasUnmodeledSideEffects() const;
1416
1417 /// Returns true if it is illegal to fold a load across this instruction.
1418 bool isLoadFoldBarrier() const;
1419
1420 /// Return true if all the defs of this instruction are dead.
1421 bool allDefsAreDead() const;
1422
1423 /// Return a valid size if the instruction is a spill instruction.
1424 Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
1425
1426 /// Return a valid size if the instruction is a folded spill instruction.
1427 Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
1428
1429 /// Return a valid size if the instruction is a restore instruction.
1430 Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
1431
1432 /// Return a valid size if the instruction is a folded restore instruction.
1433 Optional<unsigned>
1434 getFoldedRestoreSize(const TargetInstrInfo *TII) const;
1435
1436 /// Copy implicit register operands from specified
1437 /// instruction to this instruction.
1438 void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
1439
1440 /// Debugging support
1441 /// @{
1442 /// Determine the generic type to be printed (if needed) on uses and defs.
1443 LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1444 const MachineRegisterInfo &MRI) const;
1445
1446 /// Return true when an instruction has a tied register that can't be determined
1447 /// by the instruction's descriptor. This is useful for MIR printing, to
1448 /// determine whether we need to print the ties or not.
1449 bool hasComplexRegisterTies() const;
1450
1451 /// Print this MI to \p OS.
1452 /// Don't print information that can be inferred from other instructions if
1453 /// \p IsStandalone is false. It is usually true when only a fragment of the
1454 /// function is printed.
1455 /// If \p SkipOpers is true, only print the defs and the opcode.
1456 /// Otherwise, if \p SkipDebugLoc is true, also print the operands but skip
1457 /// the debug loc. Otherwise, also print the debug loc, with a terminating newline.
1458 /// \p TII is used to print the opcode name. If it's not present, but the
1459 /// MI is in a function, the opcode will be printed using the function's TII.
1460 void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
1461 bool SkipDebugLoc = false, bool AddNewLine = true,
1462 const TargetInstrInfo *TII = nullptr) const;
1463 void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
1464 bool SkipOpers = false, bool SkipDebugLoc = false,
1465 bool AddNewLine = true,
1466 const TargetInstrInfo *TII = nullptr) const;
1467 void dump() const;
1468 /// @}
1469
1470 //===--------------------------------------------------------------------===//
1471 // Accessors used to build up machine instructions.
1472
1473 /// Add the specified operand to the instruction. If it is an implicit
1474 /// operand, it is added to the end of the operand list. If it is an
1475 /// explicit operand it is added at the end of the explicit operand list
1476 /// (before the first implicit operand).
1477 ///
1478 /// MF must be the machine function that was used to allocate this
1479 /// instruction.
1480 ///
1481 /// MachineInstrBuilder provides a more convenient interface for creating
1482 /// instructions and adding operands.
1483 void addOperand(MachineFunction &MF, const MachineOperand &Op);
1484
1485 /// Add an operand without providing an MF reference. This only works for
1486 /// instructions that are inserted in a basic block.
1487 ///
1488 /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
1489 /// preferred.
1490 void addOperand(const MachineOperand &Op);
1491
1492 /// Replace the instruction descriptor (thus opcode) of
1493 /// the current instruction with a new one.
1494 void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
1495
1496 /// Replace current source information with new such.
1497 /// Avoid using this, the constructor argument is preferable.
1498 void setDebugLoc(DebugLoc dl) {
1499 debugLoc = std::move(dl);
1500 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1501 }
1502
1503 /// Erase an operand from an instruction, leaving it with one
1504 /// fewer operand than it started with.
1505 void RemoveOperand(unsigned OpNo);
1506
1507 /// Clear this MachineInstr's memory reference descriptor list. This resets
1508 /// the memrefs to their most conservative state. This should be used only
1509 /// as a last resort since it greatly pessimizes our knowledge of the memory
1510 /// access performed by the instruction.
1511 void dropMemRefs(MachineFunction &MF);
1512
1513 /// Assign this MachineInstr's memory reference descriptor list.
1514 ///
1515 /// Unlike other methods, this *will* allocate them into a new array
1516 /// associated with the provided `MachineFunction`.
1517 void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
1518
1519 /// Add a MachineMemOperand to the machine instruction.
1520 /// This function should be used only occasionally. The setMemRefs function
1521 /// is the primary method for setting up a MachineInstr's MemRefs list.
1522 void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
1523
1524 /// Clone another MachineInstr's memory reference descriptor list and replace
1525 /// ours with it.
1526 ///
1527 /// Note that `*this` may be the incoming MI!
1528 ///
1529 /// Prefer this API whenever possible as it can avoid allocations in common
1530 /// cases.
1531 void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
1532
1533 /// Clone the merged memory reference descriptor list of multiple
1534 /// MachineInstrs and replace ours with it.
1535 ///
1536 /// Note that `*this` may be one of the incoming MIs!
1537 ///
1538 /// Prefer this API whenever possible as it can avoid allocations in common
1539 /// cases.
1540 void cloneMergedMemRefs(MachineFunction &MF,
1541 ArrayRef<const MachineInstr *> MIs);
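Editor's note: a hedged sketch of how the cloning entry points above are typically used when one instruction replaces others. NewMI, OldMI, LoadA, and LoadB are assumed to be provided by the surrounding pass; the helpers are illustrative, not LLVM API.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// NewMI replaces OldMI one-for-one: reuse OldMI's memory operand list so
// aliasing information is preserved without a fresh allocation.
static void copyMemRefsOnReplace(MachineFunction &MF, MachineInstr &NewMI,
                                 const MachineInstr &OldMI) {
  NewMI.cloneMemRefs(MF, OldMI);
}

// NewMI folds two memory instructions (e.g. a load pair): give it the
// union of both descriptor lists rather than dropping them.
static void mergeMemRefsOnFold(MachineFunction &MF, MachineInstr &NewMI,
                               const MachineInstr &LoadA,
                               const MachineInstr &LoadB) {
  NewMI.cloneMergedMemRefs(MF, {&LoadA, &LoadB});
}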
1542
1543 /// Set a symbol that will be emitted just prior to the instruction itself.
1544 ///
1545 /// Setting this to a null pointer will remove any such symbol.
1546 ///
1547 /// FIXME: This is not fully implemented yet.
1548 void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
1549
1550 /// Set a symbol that will be emitted just after the instruction itself.
1551 ///
1552 /// Setting this to a null pointer will remove any such symbol.
1553 ///
1554 /// FIXME: This is not fully implemented yet.
1555 void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
1556
1557 /// Clone another MachineInstr's pre- and post-instruction symbols and
1558 /// replace ours with them.
1559 void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI);
1560
1561 /// Return the MIFlags which represent both MachineInstrs. This
1562 /// should be used when merging two MachineInstrs into one. This routine does
1563 /// not modify the MIFlags of this MachineInstr.
1564 uint16_t mergeFlagsWith(const MachineInstr& Other) const;
1565
1566 static uint16_t copyFlagsFromInstruction(const Instruction &I);
1567
1568 /// Copy all flags from the IR instruction \p I to this MachineInstr's MIFlags.
1569 void copyIRFlags(const Instruction &I);
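Editor's note: a minimal sketch of the flag helper above, assuming the machine instruction MI was selected from the IR instruction I (both names are illustrative). When two MachineInstrs are folded into one, mergeFlagsWith() computes the flag set that holds for the combined instruction.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Propagate IR-level flags (nuw/nsw, exact, fast-math, ...) from the IR
// instruction that MI was selected from into MI's MIFlags.
static void propagateIRFlags(MachineInstr &MI, const Instruction &I) {
  MI.copyIRFlags(I);
}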
1570
1571 /// Break any tie involving OpIdx.
1572 void untieRegOperand(unsigned OpIdx) {
1573 MachineOperand &MO = getOperand(OpIdx);
1574 if (MO.isReg() && MO.isTied()) {
1575 getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
1576 MO.TiedTo = 0;
1577 }
1578 }
1579
1580 /// Add all implicit def and use operands to this instruction.
1581 void addImplicitDefUseOperands(MachineFunction &MF);
1582
1583 /// Scan instructions following MI and collect any matching DBG_VALUEs.
1584 void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
1585
1586 /// Find all DBG_VALUEs immediately following this instruction that point
1587 /// to a register def in this instruction and point them to \p Reg instead.
1588 void changeDebugValuesDefReg(unsigned Reg);
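Editor's note: a hedged example of the debug-value helpers above. NewVReg is assumed to come from the surrounding pass; the helper is illustrative only.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// After rewriting MI so that it defines NewVReg instead of its original
// register, retarget any DBG_VALUEs that immediately follow it.
static void retargetDebugUsers(MachineInstr &MI, unsigned NewVReg) {
  // Optionally inspect the affected DBG_VALUEs first.
  SmallVector<MachineInstr *, 4> DbgUsers;
  MI.collectDebugValues(DbgUsers);

  MI.changeDebugValuesDefReg(NewVReg);
}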
1589
1590private:
1591 /// If this instruction is embedded into a MachineFunction, return the
1592 /// MachineRegisterInfo object for the current function, otherwise
1593 /// return null.
1594 MachineRegisterInfo *getRegInfo();
1595
1596 /// Unlink all of the register operands in this instruction from their
1597 /// respective use lists. This requires that the operands already be on their
1598 /// use lists.
1599 void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
1600
1601 /// Add all of the register operands in this instruction to their
1602 /// respective use lists. This requires that the operands not be on their
1603 /// use lists yet.
1604 void AddRegOperandsToUseLists(MachineRegisterInfo&);
1605
1606 /// Slow path for hasProperty when we're dealing with a bundle.
1607 bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
1608
1609 /// Implements the logic of getRegClassConstraintEffectForVReg for this MI
1610 /// and the given operand index \p OpIdx.
1611 /// If the related operand does not constrain Reg, this returns CurRC.
1612 const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
1613 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
1614 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
1615};
1616
1617/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
1618/// instruction rather than by pointer value.
1619 /// The hashing and equality testing functions ignore definitions, so this is
1620 /// useful for CSE, etc.
1621struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
1622 static inline MachineInstr *getEmptyKey() {
1623 return nullptr;
1624 }
1625
1626 static inline MachineInstr *getTombstoneKey() {
1627 return reinterpret_cast<MachineInstr*>(-1);
1628 }
1629
1630 static unsigned getHashValue(const MachineInstr* const &MI);
1631
1632 static bool isEqual(const MachineInstr* const &LHS,
1633 const MachineInstr* const &RHS) {
1634 if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
1635 LHS == getEmptyKey() || LHS == getTombstoneKey())
1636 return LHS == RHS;
1637 return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
1638 }
1639};
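Editor's note: a sketch of how this trait is typically plugged into a DenseMap for CSE-style value numbering. The map alias and helper below are illustrative assumptions, not LLVM API.

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Instructions that are identical modulo virtual register defs hash and
// compare equal, so they share a slot in this map.
using ExprToValueMap =
    DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait>;

// Return the value number of an equivalent, previously seen instruction,
// or record MI under NextValue and return that.
static unsigned getOrAssignValueNumber(ExprToValueMap &Seen, MachineInstr *MI,
                                       unsigned NextValue) {
  auto Inserted = Seen.insert({MI, NextValue});
  return Inserted.first->second;
}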
1640
1641//===----------------------------------------------------------------------===//
1642// Debugging Support
1643
1644inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
1645 MI.print(OS);
1646 return OS;
1647}
1648
1649} // end namespace llvm
1650
1651#endif // LLVM_CODEGEN_MACHINEINSTR_H