Bug Summary

File: include/llvm/CodeGen/MachineInstr.h
Warning: line 154, column 17
Called C++ object pointer is null
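
The checker's path (steps 1-5 in the listings below) boils down to the pattern sketched here. This is a minimal, self-contained reduction written under the checker's assumptions, not the LLVM sources; `BumpAllocator` and `payload()` are hypothetical stand-ins for `BumpPtrAllocator::Allocate` and `getTrailingObjects()`.

  #include <cstdlib>
  #include <new>

  struct BumpAllocator {
    // Stand-in allocator: the checker has no model of BumpPtrAllocator::Allocate,
    // so it treats the returned pointer as possibly null (as malloc's is here).
    void *Allocate(std::size_t Size, std::size_t /*Align*/) { return std::malloc(Size); }
  };

  struct ExtraInfo {
    unsigned NumMMOs;
    unsigned *payload() { return &NumMMOs; } // stand-in for getTrailingObjects()
  };

  ExtraInfo *create(BumpAllocator &A) {
    // Placement-new on a null pointer skips construction and yields null, so on
    // the path where Allocate() "returns null", Result is null here.
    auto *Result = new (A.Allocate(sizeof(ExtraInfo), alignof(ExtraInfo))) ExtraInfo{0};
    *Result->payload() = 0; // the member call the checker flags as a null dereference
    return Result;
  }

Running clang --analyze over a reduction like this should reproduce the same "Called C++ object pointer is null" diagnostic, since the analyzer models the allocation as possibly returning null.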

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineFunction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp

1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Collect native machine code information for a function. This allows
11// target-specific information about the generated code to be stored with each
12// function.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/CodeGen/MachineFunction.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallString.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/Analysis/ConstantFolding.h"
26#include "llvm/Analysis/EHPersonalities.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineConstantPool.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineInstr.h"
31#include "llvm/CodeGen/MachineJumpTableInfo.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/MachineModuleInfo.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/PseudoSourceValue.h"
36#include "llvm/CodeGen/TargetFrameLowering.h"
37#include "llvm/CodeGen/TargetLowering.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/CodeGen/WasmEHFuncInfo.h"
41#include "llvm/CodeGen/WinEHFuncInfo.h"
42#include "llvm/Config/llvm-config.h"
43#include "llvm/IR/Attributes.h"
44#include "llvm/IR/BasicBlock.h"
45#include "llvm/IR/Constant.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DerivedTypes.h"
48#include "llvm/IR/Function.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/Instruction.h"
51#include "llvm/IR/Instructions.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/ModuleSlotTracker.h"
55#include "llvm/IR/Value.h"
56#include "llvm/MC/MCContext.h"
57#include "llvm/MC/MCSymbol.h"
58#include "llvm/MC/SectionKind.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/CommandLine.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/DOTGraphTraits.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/ErrorHandling.h"
65#include "llvm/Support/GraphWriter.h"
66#include "llvm/Support/raw_ostream.h"
67#include "llvm/Target/TargetMachine.h"
68#include <algorithm>
69#include <cassert>
70#include <cstddef>
71#include <cstdint>
72#include <iterator>
73#include <string>
74#include <utility>
75#include <vector>
76
77using namespace llvm;
78
79#define DEBUG_TYPE"codegen" "codegen"
80
81static cl::opt<unsigned>
82AlignAllFunctions("align-all-functions",
83 cl::desc("Force the alignment of all functions."),
84 cl::init(0), cl::Hidden);
85
86static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
87 using P = MachineFunctionProperties::Property;
88
89 switch(Prop) {
90 case P::FailedISel: return "FailedISel";
91 case P::IsSSA: return "IsSSA";
92 case P::Legalized: return "Legalized";
93 case P::NoPHIs: return "NoPHIs";
94 case P::NoVRegs: return "NoVRegs";
95 case P::RegBankSelected: return "RegBankSelected";
96 case P::Selected: return "Selected";
97 case P::TracksLiveness: return "TracksLiveness";
98 }
99 llvm_unreachable("Invalid machine function property")::llvm::llvm_unreachable_internal("Invalid machine function property"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp"
, 99)
;
100}
101
102// Pin the vtable to this file.
103void MachineFunction::Delegate::anchor() {}
104
105void MachineFunctionProperties::print(raw_ostream &OS) const {
106 const char *Separator = "";
107 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
108 if (!Properties[I])
109 continue;
110 OS << Separator << getPropertyName(static_cast<Property>(I));
111 Separator = ", ";
112 }
113}
114
115//===----------------------------------------------------------------------===//
116// MachineFunction implementation
117//===----------------------------------------------------------------------===//
118
119// Out-of-line virtual method.
120MachineFunctionInfo::~MachineFunctionInfo() = default;
121
122void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
123 MBB->getParent()->DeleteMachineBasicBlock(MBB);
124}
125
126static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
127 const Function &F) {
128 if (F.hasFnAttribute(Attribute::StackAlignment))
129 return F.getFnStackAlignment();
130 return STI->getFrameLowering()->getStackAlignment();
131}
132
133MachineFunction::MachineFunction(const Function &F,
134 const LLVMTargetMachine &Target,
135 const TargetSubtargetInfo &STI,
136 unsigned FunctionNum, MachineModuleInfo &mmi)
137 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
138 FunctionNumber = FunctionNum;
139 init();
140}
141
142void MachineFunction::handleInsertion(const MachineInstr &MI) {
143 if (TheDelegate)
144 TheDelegate->MF_HandleInsertion(MI);
145}
146
147void MachineFunction::handleRemoval(const MachineInstr &MI) {
148 if (TheDelegate)
149 TheDelegate->MF_HandleRemoval(MI);
150}
151
152void MachineFunction::init() {
153 // Assume the function starts in SSA form with correct liveness.
154 Properties.set(MachineFunctionProperties::Property::IsSSA);
155 Properties.set(MachineFunctionProperties::Property::TracksLiveness);
156 if (STI->getRegisterInfo())
157 RegInfo = new (Allocator) MachineRegisterInfo(this);
158 else
159 RegInfo = nullptr;
160
161 MFInfo = nullptr;
162 // We can realign the stack if the target supports it and the user hasn't
163 // explicitly asked us not to.
164 bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
165 !F.hasFnAttribute("no-realign-stack");
166 FrameInfo = new (Allocator) MachineFrameInfo(
167 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
168 /*ForceRealign=*/CanRealignSP &&
169 F.hasFnAttribute(Attribute::StackAlignment));
170
171 if (F.hasFnAttribute(Attribute::StackAlignment))
172 FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
173
174 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
175 Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
176
177 // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
178 // FIXME: Use Function::optForSize().
179 if (!F.hasFnAttribute(Attribute::OptimizeForSize))
180 Alignment = std::max(Alignment,
181 STI->getTargetLowering()->getPrefFunctionAlignment());
182
183 if (AlignAllFunctions)
184 Alignment = AlignAllFunctions;
185
186 JumpTableInfo = nullptr;
187
188 if (isFuncletEHPersonality(classifyEHPersonality(
189 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
190 WinEHInfo = new (Allocator) WinEHFuncInfo();
191 }
192
193 if (isScopedEHPersonality(classifyEHPersonality(
194 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
195 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
196 }
197
198 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
199 "Can't create a MachineFunction using a Module with a "
200 "Target-incompatible DataLayout attached\n");
201
202 PSVManager =
203 llvm::make_unique<PseudoSourceValueManager>(*(getSubtarget().
204 getInstrInfo()));
205}
206
207MachineFunction::~MachineFunction() {
208 clear();
209}
210
211void MachineFunction::clear() {
212 Properties.reset();
213 // Don't call destructors on MachineInstr and MachineOperand. All of their
214 // memory comes from the BumpPtrAllocator which is about to be purged.
215 //
216 // Do call MachineBasicBlock destructors, it contains std::vectors.
217 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
218 I->Insts.clearAndLeakNodesUnsafely();
219 MBBNumbering.clear();
220
221 InstructionRecycler.clear(Allocator);
222 OperandRecycler.clear(Allocator);
223 BasicBlockRecycler.clear(Allocator);
224 CodeViewAnnotations.clear();
225 VariableDbgInfos.clear();
226 if (RegInfo) {
227 RegInfo->~MachineRegisterInfo();
228 Allocator.Deallocate(RegInfo);
229 }
230 if (MFInfo) {
231 MFInfo->~MachineFunctionInfo();
232 Allocator.Deallocate(MFInfo);
233 }
234
235 FrameInfo->~MachineFrameInfo();
236 Allocator.Deallocate(FrameInfo);
237
238 ConstantPool->~MachineConstantPool();
239 Allocator.Deallocate(ConstantPool);
240
241 if (JumpTableInfo) {
242 JumpTableInfo->~MachineJumpTableInfo();
243 Allocator.Deallocate(JumpTableInfo);
244 }
245
246 if (WinEHInfo) {
247 WinEHInfo->~WinEHFuncInfo();
248 Allocator.Deallocate(WinEHInfo);
249 }
250
251 if (WasmEHInfo) {
252 WasmEHInfo->~WasmEHFuncInfo();
253 Allocator.Deallocate(WasmEHInfo);
254 }
255}
256
257const DataLayout &MachineFunction::getDataLayout() const {
258 return F.getParent()->getDataLayout();
259}
260
261/// Get the JumpTableInfo for this function.
262/// If it does not already exist, allocate one.
263MachineJumpTableInfo *MachineFunction::
264getOrCreateJumpTableInfo(unsigned EntryKind) {
265 if (JumpTableInfo) return JumpTableInfo;
266
267 JumpTableInfo = new (Allocator)
268 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
269 return JumpTableInfo;
270}
271
272/// Should we be emitting segmented stack stuff for the function
273bool MachineFunction::shouldSplitStack() const {
274 return getFunction().hasFnAttribute("split-stack");
275}
276
277/// This discards all of the MachineBasicBlock numbers and recomputes them.
278/// This guarantees that the MBB numbers are sequential, dense, and match the
279/// ordering of the blocks within the function. If a specific MachineBasicBlock
280/// is specified, only that block and those after it are renumbered.
281void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
282 if (empty()) { MBBNumbering.clear(); return; }
283 MachineFunction::iterator MBBI, E = end();
284 if (MBB == nullptr)
285 MBBI = begin();
286 else
287 MBBI = MBB->getIterator();
288
289 // Figure out the block number this should have.
290 unsigned BlockNo = 0;
291 if (MBBI != begin())
292 BlockNo = std::prev(MBBI)->getNumber() + 1;
293
294 for (; MBBI != E; ++MBBI, ++BlockNo) {
295 if (MBBI->getNumber() != (int)BlockNo) {
296 // Remove use of the old number.
297 if (MBBI->getNumber() != -1) {
298 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
299 "MBB number mismatch!");
300 MBBNumbering[MBBI->getNumber()] = nullptr;
301 }
302
303 // If BlockNo is already taken, set that block's number to -1.
304 if (MBBNumbering[BlockNo])
305 MBBNumbering[BlockNo]->setNumber(-1);
306
307 MBBNumbering[BlockNo] = &*MBBI;
308 MBBI->setNumber(BlockNo);
309 }
310 }
311
312 // Okay, all the blocks are renumbered. If we have compactified the block
313 // numbering, shrink MBBNumbering now.
314 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
315 MBBNumbering.resize(BlockNo);
316}
317
318/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
319MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
320 const DebugLoc &DL,
321 bool NoImp) {
322 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
323 MachineInstr(*this, MCID, DL, NoImp);
324}
325
326/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
327/// identical in all ways except the instruction has no parent, prev, or next.
328MachineInstr *
329MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
330 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
331 MachineInstr(*this, *Orig);
332}
333
334MachineInstr &MachineFunction::CloneMachineInstrBundle(MachineBasicBlock &MBB,
335 MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) {
336 MachineInstr *FirstClone = nullptr;
337 MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
338 while (true) {
339 MachineInstr *Cloned = CloneMachineInstr(&*I);
340 MBB.insert(InsertBefore, Cloned);
341 if (FirstClone == nullptr) {
342 FirstClone = Cloned;
343 } else {
344 Cloned->bundleWithPred();
345 }
346
347 if (!I->isBundledWithSucc())
348 break;
349 ++I;
350 }
351 return *FirstClone;
352}
353
354/// Delete the given MachineInstr.
355///
356/// This function also serves as the MachineInstr destructor - the real
357/// ~MachineInstr() destructor must be empty.
358void
359MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
360 // Strip it for parts. The operand array and the MI object itself are
361 // independently recyclable.
362 if (MI->Operands)
363 deallocateOperandArray(MI->CapOperands, MI->Operands);
364 // Don't call ~MachineInstr() which must be trivial anyway because
365 // ~MachineFunction drops whole lists of MachineInstrs without calling their
366 // destructors.
367 InstructionRecycler.Deallocate(Allocator, MI);
368}
369
370/// Allocate a new MachineBasicBlock. Use this instead of
371/// `new MachineBasicBlock'.
372MachineBasicBlock *
373MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
374 return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
375 MachineBasicBlock(*this, bb);
376}
377
378/// Delete the given MachineBasicBlock.
379void
380MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
381 assert(MBB->getParent() == this && "MBB parent mismatch!");
382 MBB->~MachineBasicBlock();
383 BasicBlockRecycler.Deallocate(Allocator, MBB);
384}
385
386MachineMemOperand *MachineFunction::getMachineMemOperand(
387 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
388 unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
389 SyncScope::ID SSID, AtomicOrdering Ordering,
390 AtomicOrdering FailureOrdering) {
391 return new (Allocator)
392 MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
393 SSID, Ordering, FailureOrdering);
394}
395
396MachineMemOperand *
397MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
398 int64_t Offset, uint64_t Size) {
399 if (MMO->getValue())
400 return new (Allocator)
401 MachineMemOperand(MachinePointerInfo(MMO->getValue(),
402 MMO->getOffset()+Offset),
403 MMO->getFlags(), Size, MMO->getBaseAlignment(),
404 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
405 MMO->getOrdering(), MMO->getFailureOrdering());
406 return new (Allocator)
407 MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
408 MMO->getOffset()+Offset),
409 MMO->getFlags(), Size, MMO->getBaseAlignment(),
410 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
411 MMO->getOrdering(), MMO->getFailureOrdering());
412}
413
414MachineMemOperand *
415MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
416 const AAMDNodes &AAInfo) {
417 MachinePointerInfo MPI = MMO->getValue() ?
418 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
419 MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
420
421 return new (Allocator)
422 MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
423 MMO->getBaseAlignment(), AAInfo,
424 MMO->getRanges(), MMO->getSyncScopeID(),
425 MMO->getOrdering(), MMO->getFailureOrdering());
426}
427
428MachineInstr::ExtraInfo *
429MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
430 MCSymbol *PreInstrSymbol,
431 MCSymbol *PostInstrSymbol) {
432 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
[Step 1] Calling 'ExtraInfo::create'
433 PostInstrSymbol);
434}
435
436const char *MachineFunction::createExternalSymbolName(StringRef Name) {
437 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
438 llvm::copy(Name, Dest);
439 Dest[Name.size()] = 0;
440 return Dest;
441}
442
443uint32_t *MachineFunction::allocateRegMask() {
444 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
445 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
446 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
447 memset(Mask, 0, Size * sizeof(Mask[0]));
448 return Mask;
449}
450
451#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
452LLVM_DUMP_METHOD void MachineFunction::dump() const {
453 print(dbgs());
454}
455#endif
456
457StringRef MachineFunction::getName() const {
458 return getFunction().getName();
459}
460
461void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
462 OS << "# Machine code for function " << getName() << ": ";
463 getProperties().print(OS);
464 OS << '\n';
465
466 // Print Frame Information
467 FrameInfo->print(*this, OS);
468
469 // Print JumpTable Information
470 if (JumpTableInfo)
471 JumpTableInfo->print(OS);
472
473 // Print Constant Pool
474 ConstantPool->print(OS);
475
476 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
477
478 if (RegInfo && !RegInfo->livein_empty()) {
479 OS << "Function Live Ins: ";
480 for (MachineRegisterInfo::livein_iterator
481 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
482 OS << printReg(I->first, TRI);
483 if (I->second)
484 OS << " in " << printReg(I->second, TRI);
485 if (std::next(I) != E)
486 OS << ", ";
487 }
488 OS << '\n';
489 }
490
491 ModuleSlotTracker MST(getFunction().getParent());
492 MST.incorporateFunction(getFunction());
493 for (const auto &BB : *this) {
494 OS << '\n';
495 // If we print the whole function, print it at its most verbose level.
496 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
497 }
498
499 OS << "\n# End machine code for function " << getName() << ".\n\n";
500}
501
502namespace llvm {
503
504 template<>
505 struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
506 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
507
508 static std::string getGraphName(const MachineFunction *F) {
509 return ("CFG for '" + F->getName() + "' function").str();
510 }
511
512 std::string getNodeLabel(const MachineBasicBlock *Node,
513 const MachineFunction *Graph) {
514 std::string OutStr;
515 {
516 raw_string_ostream OSS(OutStr);
517
518 if (isSimple()) {
519 OSS << printMBBReference(*Node);
520 if (const BasicBlock *BB = Node->getBasicBlock())
521 OSS << ": " << BB->getName();
522 } else
523 Node->print(OSS);
524 }
525
526 if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
527
528 // Process string output to make it nicer...
529 for (unsigned i = 0; i != OutStr.length(); ++i)
530 if (OutStr[i] == '\n') { // Left justify
531 OutStr[i] = '\\';
532 OutStr.insert(OutStr.begin()+i+1, 'l');
533 }
534 return OutStr;
535 }
536 };
537
538} // end namespace llvm
539
540void MachineFunction::viewCFG() const
541{
542#ifndef NDEBUG
543 ViewGraph(this, "mf" + getName());
544#else
545 errs() << "MachineFunction::viewCFG is only available in debug builds on "
546 << "systems with Graphviz or gv!\n";
547#endif // NDEBUG
548}
549
550void MachineFunction::viewCFGOnly() const
551{
552#ifndef NDEBUG
553 ViewGraph(this, "mf" + getName(), true);
554#else
555 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
556 << "systems with Graphviz or gv!\n";
557#endif // NDEBUG
558}
559
560/// Add the specified physical register as a live-in value and
561/// create a corresponding virtual register for it.
562unsigned MachineFunction::addLiveIn(unsigned PReg,
563 const TargetRegisterClass *RC) {
564 MachineRegisterInfo &MRI = getRegInfo();
565 unsigned VReg = MRI.getLiveInVirtReg(PReg);
566 if (VReg) {
567 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
568 (void)VRegRC;
569 // A physical register can be added several times.
570 // Between two calls, the register class of the related virtual register
571 // may have been constrained to match some operation constraints.
572 // In that case, check that the current register class includes the
573 // physical register and is a sub class of the specified RC.
574 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
575 RC->hasSubClassEq(VRegRC))) &&
576 "Register class mismatch!");
577 return VReg;
578 }
579 VReg = MRI.createVirtualRegister(RC);
580 MRI.addLiveIn(PReg, VReg);
581 return VReg;
582}
583
584/// Return the MCSymbol for the specified non-empty jump table.
585/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
586/// normal 'L' label is returned.
587MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
588 bool isLinkerPrivate) const {
589 const DataLayout &DL = getDataLayout();
590 assert(JumpTableInfo && "No jump tables");
591 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
592
593 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
594 : DL.getPrivateGlobalPrefix();
595 SmallString<60> Name;
596 raw_svector_ostream(Name)
597 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
598 return Ctx.getOrCreateSymbol(Name);
599}
600
601/// Return a function-local symbol to represent the PIC base.
602MCSymbol *MachineFunction::getPICBaseSymbol() const {
603 const DataLayout &DL = getDataLayout();
604 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
605 Twine(getFunctionNumber()) + "$pb");
606}
607
608/// \name Exception Handling
609/// \{
610
611LandingPadInfo &
612MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
613 unsigned N = LandingPads.size();
614 for (unsigned i = 0; i < N; ++i) {
615 LandingPadInfo &LP = LandingPads[i];
616 if (LP.LandingPadBlock == LandingPad)
617 return LP;
618 }
619
620 LandingPads.push_back(LandingPadInfo(LandingPad));
621 return LandingPads[N];
622}
623
624void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
625 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
626 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
627 LP.BeginLabels.push_back(BeginLabel);
628 LP.EndLabels.push_back(EndLabel);
629}
630
631MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
632 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
633 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
634 LP.LandingPadLabel = LandingPadLabel;
635
636 const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
637 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
638 if (const auto *PF =
639 dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
640 getMMI().addPersonality(PF);
641
642 if (LPI->isCleanup())
643 addCleanup(LandingPad);
644
645 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
646 // correct, but we need to do it this way because of how the DWARF EH
647 // emitter processes the clauses.
648 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
649 Value *Val = LPI->getClause(I - 1);
650 if (LPI->isCatch(I - 1)) {
651 addCatchTypeInfo(LandingPad,
652 dyn_cast<GlobalValue>(Val->stripPointerCasts()));
653 } else {
654 // Add filters in a list.
655 auto *CVal = cast<Constant>(Val);
656 SmallVector<const GlobalValue *, 4> FilterList;
657 for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
658 II != IE; ++II)
659 FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
660
661 addFilterTypeInfo(LandingPad, FilterList);
662 }
663 }
664
665 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
666 for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
667 Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
668 addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
669 }
670
671 } else {
672 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
673 }
674
675 return LandingPadLabel;
676}
677
678void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
679 ArrayRef<const GlobalValue *> TyInfo) {
680 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
681 for (unsigned N = TyInfo.size(); N; --N)
682 LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
683}
684
685void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
686 ArrayRef<const GlobalValue *> TyInfo) {
687 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
688 std::vector<unsigned> IdsInFilter(TyInfo.size());
689 for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
690 IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
691 LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
692}
693
694void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
695 bool TidyIfNoBeginLabels) {
696 for (unsigned i = 0; i != LandingPads.size(); ) {
697 LandingPadInfo &LandingPad = LandingPads[i];
698 if (LandingPad.LandingPadLabel &&
699 !LandingPad.LandingPadLabel->isDefined() &&
700 (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
701 LandingPad.LandingPadLabel = nullptr;
702
703 // Special case: we *should* emit LPs with null LP MBB. This indicates
704 // "nounwind" case.
705 if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
706 LandingPads.erase(LandingPads.begin() + i);
707 continue;
708 }
709
710 if (TidyIfNoBeginLabels) {
711 for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
712 MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
713 MCSymbol *EndLabel = LandingPad.EndLabels[j];
714 if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
715 (EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
716 continue;
717
718 LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
719 LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
720 --j;
721 --e;
722 }
723
724 // Remove landing pads with no try-ranges.
725 if (LandingPads[i].BeginLabels.empty()) {
726 LandingPads.erase(LandingPads.begin() + i);
727 continue;
728 }
729 }
730
731 // If there is no landing pad, ensure that the list of typeids is empty.
732 // If the only typeid is a cleanup, this is the same as having no typeids.
733 if (!LandingPad.LandingPadBlock ||
734 (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
735 LandingPad.TypeIds.clear();
736 ++i;
737 }
738}
739
740void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
741 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
742 LP.TypeIds.push_back(0);
743}
744
745void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
746 const Function *Filter,
747 const BlockAddress *RecoverBA) {
748 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
749 SEHHandler Handler;
750 Handler.FilterOrFinally = Filter;
751 Handler.RecoverBA = RecoverBA;
752 LP.SEHHandlers.push_back(Handler);
753}
754
755void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
756 const Function *Cleanup) {
757 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
758 SEHHandler Handler;
759 Handler.FilterOrFinally = Cleanup;
760 Handler.RecoverBA = nullptr;
761 LP.SEHHandlers.push_back(Handler);
762}
763
764void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
765 ArrayRef<unsigned> Sites) {
766 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
767}
768
769unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
770 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
771 if (TypeInfos[i] == TI) return i + 1;
772
773 TypeInfos.push_back(TI);
774 return TypeInfos.size();
775}
776
777int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
778 // If the new filter coincides with the tail of an existing filter, then
779 // re-use the existing filter. Folding filters more than this requires
780 // re-ordering filters and/or their elements - probably not worth it.
781 for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
782 E = FilterEnds.end(); I != E; ++I) {
783 unsigned i = *I, j = TyIds.size();
784
785 while (i && j)
786 if (FilterIds[--i] != TyIds[--j])
787 goto try_next;
788
789 if (!j)
790 // The new filter coincides with range [i, end) of the existing filter.
791 return -(1 + i);
792
793try_next:;
794 }
795
796 // Add the new filter.
797 int FilterID = -(1 + FilterIds.size());
798 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
799 FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
800 FilterEnds.push_back(FilterIds.size());
801 FilterIds.push_back(0); // terminator
802 return FilterID;
803}
804
805/// \}
806
807//===----------------------------------------------------------------------===//
808// MachineJumpTableInfo implementation
809//===----------------------------------------------------------------------===//
810
811/// Return the size of each entry in the jump table.
812unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
813 // The size of a jump table entry is 4 bytes unless the entry is just the
814 // address of a block, in which case it is the pointer size.
815 switch (getEntryKind()) {
816 case MachineJumpTableInfo::EK_BlockAddress:
817 return TD.getPointerSize();
818 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
819 return 8;
820 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
821 case MachineJumpTableInfo::EK_LabelDifference32:
822 case MachineJumpTableInfo::EK_Custom32:
823 return 4;
824 case MachineJumpTableInfo::EK_Inline:
825 return 0;
826 }
827 llvm_unreachable("Unknown jump table encoding!")::llvm::llvm_unreachable_internal("Unknown jump table encoding!"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp"
, 827)
;
828}
829
830/// Return the alignment of each entry in the jump table.
831unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
832 // The alignment of a jump table entry is the alignment of int32 unless the
833 // entry is just the address of a block, in which case it is the pointer
834 // alignment.
835 switch (getEntryKind()) {
836 case MachineJumpTableInfo::EK_BlockAddress:
837 return TD.getPointerABIAlignment(0);
838 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
839 return TD.getABIIntegerTypeAlignment(64);
840 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
841 case MachineJumpTableInfo::EK_LabelDifference32:
842 case MachineJumpTableInfo::EK_Custom32:
843 return TD.getABIIntegerTypeAlignment(32);
844 case MachineJumpTableInfo::EK_Inline:
845 return 1;
846 }
847 llvm_unreachable("Unknown jump table encoding!")::llvm::llvm_unreachable_internal("Unknown jump table encoding!"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp"
, 847)
;
848}
849
850/// Create a new jump table entry in the jump table info.
851unsigned MachineJumpTableInfo::createJumpTableIndex(
852 const std::vector<MachineBasicBlock*> &DestBBs) {
853 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
854 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
855 return JumpTables.size()-1;
856}
857
858/// If Old is the target of any jump tables, update the jump tables to branch
859/// to New instead.
860bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
861 MachineBasicBlock *New) {
862 assert(Old != New && "Not making a change?");
863 bool MadeChange = false;
864 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
865 ReplaceMBBInJumpTable(i, Old, New);
866 return MadeChange;
867}
868
869/// If Old is a target of the jump tables, update the jump table to branch to
870/// New instead.
871bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
872 MachineBasicBlock *Old,
873 MachineBasicBlock *New) {
874 assert(Old != New && "Not making a change?");
875 bool MadeChange = false;
876 MachineJumpTableEntry &JTE = JumpTables[Idx];
877 for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
878 if (JTE.MBBs[j] == Old) {
879 JTE.MBBs[j] = New;
880 MadeChange = true;
881 }
882 return MadeChange;
883}
884
885void MachineJumpTableInfo::print(raw_ostream &OS) const {
886 if (JumpTables.empty()) return;
887
888 OS << "Jump Tables:\n";
889
890 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
891 OS << printJumpTableEntryReference(i) << ": ";
892 for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
893 OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
894 }
895
896 OS << '\n';
897}
898
899#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
900LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
901#endif
902
903Printable llvm::printJumpTableEntryReference(unsigned Idx) {
904 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
905}
906
907//===----------------------------------------------------------------------===//
908// MachineConstantPool implementation
909//===----------------------------------------------------------------------===//
910
911void MachineConstantPoolValue::anchor() {}
912
913Type *MachineConstantPoolEntry::getType() const {
914 if (isMachineConstantPoolEntry())
915 return Val.MachineCPVal->getType();
916 return Val.ConstVal->getType();
917}
918
919bool MachineConstantPoolEntry::needsRelocation() const {
920 if (isMachineConstantPoolEntry())
921 return true;
922 return Val.ConstVal->needsRelocation();
923}
924
925SectionKind
926MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
927 if (needsRelocation())
928 return SectionKind::getReadOnlyWithRel();
929 switch (DL->getTypeAllocSize(getType())) {
930 case 4:
931 return SectionKind::getMergeableConst4();
932 case 8:
933 return SectionKind::getMergeableConst8();
934 case 16:
935 return SectionKind::getMergeableConst16();
936 case 32:
937 return SectionKind::getMergeableConst32();
938 default:
939 return SectionKind::getReadOnly();
940 }
941}
942
943MachineConstantPool::~MachineConstantPool() {
944 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
945 // so keep track of which we've deleted to avoid double deletions.
946 DenseSet<MachineConstantPoolValue*> Deleted;
947 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
948 if (Constants[i].isMachineConstantPoolEntry()) {
949 Deleted.insert(Constants[i].Val.MachineCPVal);
950 delete Constants[i].Val.MachineCPVal;
951 }
952 for (DenseSet<MachineConstantPoolValue*>::iterator I =
953 MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
954 I != E; ++I) {
955 if (Deleted.count(*I) == 0)
956 delete *I;
957 }
958}
959
960/// Test whether the given two constants can be allocated the same constant pool
961/// entry.
962static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
963 const DataLayout &DL) {
964 // Handle the trivial case quickly.
965 if (A == B) return true;
966
967 // If they have the same type but weren't the same constant, quickly
968 // reject them.
969 if (A->getType() == B->getType()) return false;
970
971 // We can't handle structs or arrays.
972 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
973 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
974 return false;
975
976 // For now, only support constants with the same size.
977 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
978 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
979 return false;
980
981 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
982
983 // Try constant folding a bitcast of both instructions to an integer. If we
984 // get two identical ConstantInt's, then we are good to share them. We use
985 // the constant folding APIs to do this so that we get the benefit of
986 // DataLayout.
987 if (isa<PointerType>(A->getType()))
988 A = ConstantFoldCastOperand(Instruction::PtrToInt,
989 const_cast<Constant *>(A), IntTy, DL);
990 else if (A->getType() != IntTy)
991 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
992 IntTy, DL);
993 if (isa<PointerType>(B->getType()))
994 B = ConstantFoldCastOperand(Instruction::PtrToInt,
995 const_cast<Constant *>(B), IntTy, DL);
996 else if (B->getType() != IntTy)
997 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
998 IntTy, DL);
999
1000 return A == B;
1001}
1002
1003/// Create a new entry in the constant pool or return an existing one.
1004/// User must specify the log2 of the minimum required alignment for the object.
1005unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
1006 unsigned Alignment) {
1007 assert(Alignment && "Alignment must be specified!")((Alignment && "Alignment must be specified!") ? static_cast
<void> (0) : __assert_fail ("Alignment && \"Alignment must be specified!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp"
, 1007, __PRETTY_FUNCTION__))
;
1008 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1009
1010 // Check to see if we already have this constant.
1011 //
1012 // FIXME, this could be made much more efficient for large constant pools.
1013 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1014 if (!Constants[i].isMachineConstantPoolEntry() &&
1015 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1016 if ((unsigned)Constants[i].getAlignment() < Alignment)
1017 Constants[i].Alignment = Alignment;
1018 return i;
1019 }
1020
1021 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1022 return Constants.size()-1;
1023}
1024
1025unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
1026 unsigned Alignment) {
1027 assert(Alignment && "Alignment must be specified!")((Alignment && "Alignment must be specified!") ? static_cast
<void> (0) : __assert_fail ("Alignment && \"Alignment must be specified!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/CodeGen/MachineFunction.cpp"
, 1027, __PRETTY_FUNCTION__))
;
1028 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1029
1030 // Check to see if we already have this constant.
1031 //
1032 // FIXME, this could be made much more efficient for large constant pools.
1033 int Idx = V->getExistingMachineCPValue(this, Alignment);
1034 if (Idx != -1) {
1035 MachineCPVsSharingEntries.insert(V);
1036 return (unsigned)Idx;
1037 }
1038
1039 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1040 return Constants.size()-1;
1041}
1042
1043void MachineConstantPool::print(raw_ostream &OS) const {
1044 if (Constants.empty()) return;
1045
1046 OS << "Constant Pool:\n";
1047 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1048 OS << " cp#" << i << ": ";
1049 if (Constants[i].isMachineConstantPoolEntry())
1050 Constants[i].Val.MachineCPVal->print(OS);
1051 else
1052 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1053 OS << ", align=" << Constants[i].getAlignment();
1054 OS << "\n";
1055 }
1056}
1057
1058#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1059LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
1060#endif

/build/llvm-toolchain-snapshot-8~svn350071/include/llvm/CodeGen/MachineInstr.h

1//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the declaration of the MachineInstr class, which is the
11// basic representation for all target dependent machine instructions used by
12// the back end.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_MACHINEINSTR_H
17#define LLVM_CODEGEN_MACHINEINSTR_H
18
19#include "llvm/ADT/DenseMapInfo.h"
20#include "llvm/ADT/PointerSumType.h"
21#include "llvm/ADT/ilist.h"
22#include "llvm/ADT/ilist_node.h"
23#include "llvm/ADT/iterator_range.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/CodeGen/MachineMemOperand.h"
26#include "llvm/CodeGen/MachineOperand.h"
27#include "llvm/CodeGen/TargetOpcodes.h"
28#include "llvm/IR/DebugLoc.h"
29#include "llvm/IR/InlineAsm.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCSymbol.h"
32#include "llvm/Support/ArrayRecycler.h"
33#include "llvm/Support/TrailingObjects.h"
34#include <algorithm>
35#include <cassert>
36#include <cstdint>
37#include <utility>
38
39namespace llvm {
40
41template <typename T> class ArrayRef;
42class DIExpression;
43class DILocalVariable;
44class MachineBasicBlock;
45class MachineFunction;
46class MachineMemOperand;
47class MachineRegisterInfo;
48class ModuleSlotTracker;
49class raw_ostream;
50template <typename T> class SmallVectorImpl;
51class SmallBitVector;
52class StringRef;
53class TargetInstrInfo;
54class TargetRegisterClass;
55class TargetRegisterInfo;
56
57//===----------------------------------------------------------------------===//
58/// Representation of each machine instruction.
59///
60/// This class isn't a POD type, but it must have a trivial destructor. When a
61/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
62/// without having their destructor called.
63///
64class MachineInstr
65 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
66 ilist_sentinel_tracking<true>> {
67public:
68 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
69
70 /// Flags to specify different kinds of comments to output in
71 /// assembly code. These flags carry semantic information not
72 /// otherwise easily derivable from the IR text.
73 ///
74 enum CommentFlag {
75 ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
76 NoSchedComment = 0x2,
77 TAsmComments = 0x4 // Target Asm comments should start from this value.
78 };
79
80 enum MIFlag {
81 NoFlags = 0,
82 FrameSetup = 1 << 0, // Instruction is used as a part of
83 // function frame setup code.
84 FrameDestroy = 1 << 1, // Instruction is used as a part of
85 // function frame destruction code.
86 BundledPred = 1 << 2, // Instruction has bundled predecessors.
87 BundledSucc = 1 << 3, // Instruction has bundled successors.
88 FmNoNans = 1 << 4, // Instruction does not support Fast
89 // math nan values.
90 FmNoInfs = 1 << 5, // Instruction does not support Fast
91 // math infinity values.
92 FmNsz = 1 << 6, // Instruction is not required to retain
93 // signed zero values.
94 FmArcp = 1 << 7, // Instruction supports Fast math
95 // reciprocal approximations.
96 FmContract = 1 << 8, // Instruction supports Fast math
97 // contraction operations like fma.
98 FmAfn = 1 << 9, // Instruction may map to Fast math
99 // intrinsic approximation.
100 FmReassoc = 1 << 10, // Instruction supports Fast math
101 // reassociation of operand order.
102 NoUWrap = 1 << 11, // Instruction supports binary operator
103 // no unsigned wrap.
104 NoSWrap = 1 << 12, // Instruction supports binary operator
105 // no signed wrap.
106 IsExact = 1 << 13 // Instruction supports division is
107 // known to be exact.
108 };
109
110private:
111 const MCInstrDesc *MCID; // Instruction descriptor.
112 MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
113
114 // Operands are allocated by an ArrayRecycler.
115 MachineOperand *Operands = nullptr; // Pointer to the first operand.
116 unsigned NumOperands = 0; // Number of operands on instruction.
117 using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
118 OperandCapacity CapOperands; // Capacity of the Operands array.
119
120 uint16_t Flags = 0; // Various bits of additional
121 // information about machine
122 // instruction.
123
124 uint8_t AsmPrinterFlags = 0; // Various bits of information used by
125 // the AsmPrinter to emit helpful
126 // comments. This is *not* semantic
127 // information. Do not use this for
128 // anything other than to convey comment
129 // information to AsmPrinter.
130
131 /// Internal implementation detail class that provides out-of-line storage for
132 /// extra info used by the machine instruction when this info cannot be stored
133 /// in-line within the instruction itself.
134 ///
135 /// This has to be defined eagerly due to the implementation constraints of
136 /// `PointerSumType` where it is used.
137 class ExtraInfo final
138 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
139 public:
140 static ExtraInfo *create(BumpPtrAllocator &Allocator,
141 ArrayRef<MachineMemOperand *> MMOs,
142 MCSymbol *PreInstrSymbol = nullptr,
143 MCSymbol *PostInstrSymbol = nullptr) {
144 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
[Step 2] Assuming the condition is false
145 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
[Step 3] Assuming the condition is false
146 auto *Result = new (Allocator.Allocate(
[Step 4] 'Result' initialized to a null pointer value
147 totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
148 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
149 alignof(ExtraInfo)))
150 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
151
152 // Copy the actual data into the trailing objects.
153 std::copy(MMOs.begin(), MMOs.end(),
154 Result->getTrailingObjects<MachineMemOperand *>());
[Step 5] Called C++ object pointer is null
155
156 if (HasPreInstrSymbol)
157 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
158 if (HasPostInstrSymbol)
159 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
160 PostInstrSymbol;
161
162 return Result;
163 }
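
The warning hinges on the analyzer not knowing that `BumpPtrAllocator::Allocate` never returns null, so the result of the placement new above is treated as possibly null. One way to encode that guarantee at this call site, sketched here only as an illustration and not as the actual upstream change, is to name the allocation and assert it before constructing:

  // Sketch only: naming the allocation and asserting it documents the
  // BumpPtrAllocator guarantee and prunes the null path the checker follows.
  void *Mem = Allocator.Allocate(
      totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
          MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
      alignof(ExtraInfo));
  assert(Mem && "BumpPtrAllocator::Allocate is not expected to return null");
  auto *Result = new (Mem) ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);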
164
165 ArrayRef<MachineMemOperand *> getMMOs() const {
166 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
167 }
168
169 MCSymbol *getPreInstrSymbol() const {
170 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
171 }
172
173 MCSymbol *getPostInstrSymbol() const {
174 return HasPostInstrSymbol
175 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
176 : nullptr;
177 }
178
179 private:
180 friend TrailingObjects;
181
182 // Description of the extra info, used to interpret the actual optional
183 // data appended.
184 //
185 // Note that this is not terribly space optimized. This leaves a great deal
186 // of flexibility to fit more in here later.
187 const int NumMMOs;
188 const bool HasPreInstrSymbol;
189 const bool HasPostInstrSymbol;
190
191 // Implement the `TrailingObjects` internal API.
192 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
193 return NumMMOs;
194 }
195 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
196 return HasPreInstrSymbol + HasPostInstrSymbol;
197 }
198
199 // Just a boring constructor to allow us to initialize the sizes. Always use
200 // the `create` routine above.
201 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
202 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
203 HasPostInstrSymbol(HasPostInstrSymbol) {}
204 };
205
206 /// Enumeration of the kinds of inline extra info available. It is important
207 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
208 /// it accessible as an `ArrayRef`.
209 enum ExtraInfoInlineKinds {
210 EIIK_MMO = 0,
211 EIIK_PreInstrSymbol,
212 EIIK_PostInstrSymbol,
213 EIIK_OutOfLine
214 };
215
216 // We store extra information about the instruction here. The common case is
217 // expected to be nothing or a single pointer (typically a MMO or a symbol).
218 // We work to optimize this common case by storing it inline here rather than
219 // requiring a separate allocation, but we fall back to an allocation when
220 // multiple pointers are needed.
221 PointerSumType<ExtraInfoInlineKinds,
222 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
223 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
224 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
225 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
226 Info;
227
228 DebugLoc debugLoc; // Source line information.
229
230 // Intrusive list support
231 friend struct ilist_traits<MachineInstr>;
232 friend struct ilist_callback_traits<MachineBasicBlock>;
233 void setParent(MachineBasicBlock *P) { Parent = P; }
234
235 /// This constructor creates a copy of the given
236 /// MachineInstr in the given MachineFunction.
237 MachineInstr(MachineFunction &, const MachineInstr &);
238
239 /// This constructor creates a MachineInstr and adds the implicit operands.
240 /// It reserves space for number of operands specified by
241 /// MCInstrDesc. An explicit DebugLoc is supplied.
242 MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
243 bool NoImp = false);
244
245 // MachineInstrs are pool-allocated and owned by MachineFunction.
246 friend class MachineFunction;
247
248public:
249 MachineInstr(const MachineInstr &) = delete;
250 MachineInstr &operator=(const MachineInstr &) = delete;
251 // Use MachineFunction::DeleteMachineInstr() instead.
252 ~MachineInstr() = delete;
253
254 const MachineBasicBlock* getParent() const { return Parent; }
255 MachineBasicBlock* getParent() { return Parent; }
256
257 /// Return the function that contains the basic block that this instruction
258 /// belongs to.
259 ///
260 /// Note: this is undefined behaviour if the instruction does not have a
261 /// parent.
262 const MachineFunction *getMF() const;
263 MachineFunction *getMF() {
264 return const_cast<MachineFunction *>(
265 static_cast<const MachineInstr *>(this)->getMF());
266 }
267
268 /// Return the asm printer flags bitvector.
269 uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
270
271 /// Clear the AsmPrinter bitvector.
272 void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
273
274 /// Return whether an AsmPrinter flag is set.
275 bool getAsmPrinterFlag(CommentFlag Flag) const {
276 return AsmPrinterFlags & Flag;
277 }
278
279 /// Set a flag for the AsmPrinter.
280 void setAsmPrinterFlag(uint8_t Flag) {
281 AsmPrinterFlags |= Flag;
282 }
283
284 /// Clear specific AsmPrinter flags.
285 void clearAsmPrinterFlag(CommentFlag Flag) {
286 AsmPrinterFlags &= ~Flag;
287 }
288
289 /// Return the MI flags bitvector.
290 uint16_t getFlags() const {
291 return Flags;
292 }
293
294 /// Return whether an MI flag is set.
295 bool getFlag(MIFlag Flag) const {
296 return Flags & Flag;
297 }
298
299 /// Set a MI flag.
300 void setFlag(MIFlag Flag) {
301 Flags |= (uint16_t)Flag;
302 }
303
304 void setFlags(unsigned flags) {
305 // Filter out the automatically maintained flags.
306 unsigned Mask = BundledPred | BundledSucc;
307 Flags = (Flags & Mask) | (flags & ~Mask);
308 }
309
310 /// clearFlag - Clear a MI flag.
311 void clearFlag(MIFlag Flag) {
312 Flags &= ~((uint16_t)Flag);
313 }
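
A minimal usage sketch of the flag accessors above (illustrative only; the helper name markFrameSetup is made up, while MachineInstr::FrameSetup is an existing MIFlag value):

  void markFrameSetup(MachineInstr &MI) {
    if (!MI.getFlag(MachineInstr::FrameSetup))
      MI.setFlag(MachineInstr::FrameSetup);
  }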
314
315 /// Return true if MI is in a bundle (but not the first MI in a bundle).
316 ///
317 /// A bundle looks like this before it's finalized:
318 /// ----------------
319 /// | MI |
320 /// ----------------
321 /// |
322 /// ----------------
323 /// | MI * |
324 /// ----------------
325 /// |
326 /// ----------------
327 /// | MI * |
328 /// ----------------
329 /// In this case, the first MI starts a bundle but is not inside a bundle, the
330 /// next 2 MIs are considered "inside" the bundle.
331 ///
332 /// After a bundle is finalized, it looks like this:
333 /// ----------------
334 /// | Bundle |
335 /// ----------------
336 /// |
337 /// ----------------
338 /// | MI * |
339 /// ----------------
340 /// |
341 /// ----------------
342 /// | MI * |
343 /// ----------------
344 /// |
345 /// ----------------
346 /// | MI * |
347 /// ----------------
348 /// The first instruction has the special opcode "BUNDLE". It's not "inside"
349 /// a bundle, but the next three MIs are.
350 bool isInsideBundle() const {
351 return getFlag(BundledPred);
352 }
353
354 /// Return true if this instruction is part of a bundle. This is true
355 /// if either itself or its following instruction is marked "InsideBundle".
356 bool isBundled() const {
357 return isBundledWithPred() || isBundledWithSucc();
358 }
359
360 /// Return true if this instruction is part of a bundle, and it is not the
361 /// first instruction in the bundle.
362 bool isBundledWithPred() const { return getFlag(BundledPred); }
363
364 /// Return true if this instruction is part of a bundle, and it is not the
365 /// last instruction in the bundle.
366 bool isBundledWithSucc() const { return getFlag(BundledSucc); }
367
368 /// Bundle this instruction with its predecessor. This can be an unbundled
369 /// instruction, or it can be the first instruction in a bundle.
370 void bundleWithPred();
371
372 /// Bundle this instruction with its successor. This can be an unbundled
373 /// instruction, or it can be the last instruction in a bundle.
374 void bundleWithSucc();
375
376 /// Break bundle above this instruction.
377 void unbundleFromPred();
378
379 /// Break bundle below this instruction.
380 void unbundleFromSucc();
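
Putting the bundle predicates above together, a hedged sketch of how a pass might recognize the header of a finalized bundle (the helper name isBundleHeader is illustrative):

  bool isBundleHeader(const MachineInstr &MI) {
    // The header is bundled with its successor but not with its predecessor.
    return MI.isBundled() && !MI.isBundledWithPred();
  }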
381
382 /// Returns the debug location id of this MachineInstr.
383 const DebugLoc &getDebugLoc() const { return debugLoc; }
384
385 /// Return the debug variable referenced by
386 /// this DBG_VALUE instruction.
387 const DILocalVariable *getDebugVariable() const;
388
389 /// Return the complex address expression referenced by
390 /// this DBG_VALUE instruction.
391 const DIExpression *getDebugExpression() const;
392
393 /// Return the debug label referenced by
394 /// this DBG_LABEL instruction.
395 const DILabel *getDebugLabel() const;
396
397 /// Emit an error referring to the source location of this instruction.
398 /// This should only be used for inline assembly that is somehow
399 /// impossible to compile. Other errors should have been handled much
400 /// earlier.
401 ///
402 /// If this method returns, the caller should try to recover from the error.
403 void emitError(StringRef Msg) const;
404
405 /// Returns the target instruction descriptor of this MachineInstr.
406 const MCInstrDesc &getDesc() const { return *MCID; }
407
408 /// Returns the opcode of this MachineInstr.
409 unsigned getOpcode() const { return MCID->Opcode; }
410
411 /// Returns the total number of operands.
412 unsigned getNumOperands() const { return NumOperands; }
413
414 const MachineOperand& getOperand(unsigned i) const {
415 assert(i < getNumOperands() && "getOperand() out of range!");
416 return Operands[i];
417 }
418 MachineOperand& getOperand(unsigned i) {
419 assert(i < getNumOperands() && "getOperand() out of range!");
420 return Operands[i];
421 }
422
423 /// Returns the total number of definitions.
424 unsigned getNumDefs() const {
425 return getNumExplicitDefs() + MCID->getNumImplicitDefs();
426 }
427
428 /// Return true if operand \p OpIdx is a subregister index.
429 bool isOperandSubregIdx(unsigned OpIdx) const {
430 assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
431        "Expected MO_Immediate operand type.");
432 if (isExtractSubreg() && OpIdx == 2)
433 return true;
434 if (isInsertSubreg() && OpIdx == 3)
435 return true;
436 if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
437 return true;
438 if (isSubregToReg() && OpIdx == 3)
439 return true;
440 return false;
441 }
442
443 /// Returns the number of non-implicit operands.
444 unsigned getNumExplicitOperands() const;
445
446 /// Returns the number of non-implicit definitions.
447 unsigned getNumExplicitDefs() const;
448
449 /// iterator/begin/end - Iterate over all operands of a machine instruction.
450 using mop_iterator = MachineOperand *;
451 using const_mop_iterator = const MachineOperand *;
452
453 mop_iterator operands_begin() { return Operands; }
454 mop_iterator operands_end() { return Operands + NumOperands; }
455
456 const_mop_iterator operands_begin() const { return Operands; }
457 const_mop_iterator operands_end() const { return Operands + NumOperands; }
458
459 iterator_range<mop_iterator> operands() {
460 return make_range(operands_begin(), operands_end());
461 }
462 iterator_range<const_mop_iterator> operands() const {
463 return make_range(operands_begin(), operands_end());
464 }
465 iterator_range<mop_iterator> explicit_operands() {
466 return make_range(operands_begin(),
467 operands_begin() + getNumExplicitOperands());
468 }
469 iterator_range<const_mop_iterator> explicit_operands() const {
470 return make_range(operands_begin(),
471 operands_begin() + getNumExplicitOperands());
472 }
473 iterator_range<mop_iterator> implicit_operands() {
474 return make_range(explicit_operands().end(), operands_end());
475 }
476 iterator_range<const_mop_iterator> implicit_operands() const {
477 return make_range(explicit_operands().end(), operands_end());
478 }
479 /// Returns a range over all explicit operands that are register definitions.
480 /// Implicit definitions are not included!
481 iterator_range<mop_iterator> defs() {
482 return make_range(operands_begin(),
483 operands_begin() + getNumExplicitDefs());
484 }
485 /// \copydoc defs()
486 iterator_range<const_mop_iterator> defs() const {
487 return make_range(operands_begin(),
488 operands_begin() + getNumExplicitDefs());
489 }
490 /// Returns a range that includes all operands that are register uses.
491 /// This may include unrelated operands which are not register uses.
492 iterator_range<mop_iterator> uses() {
493 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
494 }
495 /// \copydoc uses()
496 iterator_range<const_mop_iterator> uses() const {
497 return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
498 }
499 iterator_range<mop_iterator> explicit_uses() {
500 return make_range(operands_begin() + getNumExplicitDefs(),
501 operands_begin() + getNumExplicitOperands());
502 }
503 iterator_range<const_mop_iterator> explicit_uses() const {
504 return make_range(operands_begin() + getNumExplicitDefs(),
505 operands_begin() + getNumExplicitOperands());
506 }
507
508 /// Returns the number of the operand iterator \p I points to.
509 unsigned getOperandNo(const_mop_iterator I) const {
510 return I - operands_begin();
511 }
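
An illustrative sketch of iterating the operand ranges declared above (the helper name countVirtRegUses is made up; TargetRegisterInfo::isVirtualRegister is the existing static query in this tree):

  unsigned countVirtRegUses(const MachineInstr &MI) {
    unsigned N = 0;
    for (const MachineOperand &MO : MI.uses())
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        ++N;
    return N;
  }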
512
513 /// Access to memory operands of the instruction. If there are none, that does
514 /// not imply anything about whether the function accesses memory. Instead,
515 /// the caller must behave conservatively.
516 ArrayRef<MachineMemOperand *> memoperands() const {
517 if (!Info)
518 return {};
519
520 if (Info.is<EIIK_MMO>())
521 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
522
523 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
524 return EI->getMMOs();
525
526 return {};
527 }
528
529 /// Access to memory operands of the instruction.
530 ///
531 /// If `memoperands_begin() == memoperands_end()`, that does not imply
532 /// anything about whether the function accesses memory. Instead, the caller
533 /// must behave conservatively.
534 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
535
536 /// Access to memory operands of the instruction.
537 ///
538 /// If `memoperands_begin() == memoperands_end()`, that does not imply
539 /// anything about whether the function accesses memory. Instead, the caller
540 /// must behave conservatively.
541 mmo_iterator memoperands_end() const { return memoperands().end(); }
542
543 /// Return true if we don't have any memory operands which describe the
544 /// memory access done by this instruction. If this is true, calling code
545 /// must be conservative.
546 bool memoperands_empty() const { return memoperands().empty(); }
547
548 /// Return true if this instruction has exactly one MachineMemOperand.
549 bool hasOneMemOperand() const { return memoperands().size() == 1; }
550
551 /// Return the number of memory operands.
552 unsigned getNumMemOperands() const { return memoperands().size(); }
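
A hedged sketch of the conservative pattern the comments above call for (the helper name mayBeVolatileAccess is illustrative):

  bool mayBeVolatileAccess(const MachineInstr &MI) {
    if (MI.memoperands_empty())
      return MI.mayLoadOrStore(); // no memory operands: assume the worst
    for (const MachineMemOperand *MMO : MI.memoperands())
      if (MMO->isVolatile())
        return true;
    return false;
  }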
553
554 /// Helper to extract a pre-instruction symbol if one has been added.
555 MCSymbol *getPreInstrSymbol() const {
556 if (!Info)
557 return nullptr;
558 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
559 return S;
560 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
561 return EI->getPreInstrSymbol();
562
563 return nullptr;
564 }
565
566 /// Helper to extract a post-instruction symbol if one has been added.
567 MCSymbol *getPostInstrSymbol() const {
568 if (!Info)
569 return nullptr;
570 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
571 return S;
572 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
573 return EI->getPostInstrSymbol();
574
575 return nullptr;
576 }
577
578 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
579 /// queries but they are bundle aware.
580
581 enum QueryType {
582 IgnoreBundle, // Ignore bundles
583 AnyInBundle, // Return true if any instruction in bundle has property
584 AllInBundle // Return true if all instructions in bundle have property
585 };
586
587 /// Return true if the instruction (or in the case of a bundle,
588 /// the instructions inside the bundle) has the specified property.
589 /// The first argument is the property being queried.
590 /// The second argument indicates whether the query should look inside
591 /// instruction bundles.
592 bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
593 assert(MCFlag < 64 &&
594        "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
595 // Inline the fast path for unbundled or bundle-internal instructions.
596 if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
597 return getDesc().getFlags() & (1ULL << MCFlag);
598
599 // If this is the first instruction in a bundle, take the slow path.
600 return hasPropertyInBundle(1ULL << MCFlag, Type);
601 }
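
Illustrative examples of the bundle-aware queries built on hasProperty (the wrapper names are made up; the predicates themselves are declared below in this class):

  bool bundleMayCall(const MachineInstr &MI) {
    // True if any instruction in the bundle (or the lone MI) is a call.
    return MI.isCall(MachineInstr::AnyInBundle);
  }
  bool bundleIsCheap(const MachineInstr &MI) {
    // True only if every bundled instruction is as cheap as a move.
    return MI.isAsCheapAsAMove(MachineInstr::AllInBundle);
  }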
602
603 /// Return true if this instruction can have a variable number of operands.
604 /// In this case, the variable operands will be after the normal
605 /// operands but before the implicit definitions and uses (if any are
606 /// present).
607 bool isVariadic(QueryType Type = IgnoreBundle) const {
608 return hasProperty(MCID::Variadic, Type);
609 }
610
611 /// Set if this instruction has an optional definition, e.g.
612 /// ARM instructions which can set condition code if 's' bit is set.
613 bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
614 return hasProperty(MCID::HasOptionalDef, Type);
615 }
616
617 /// Return true if this is a pseudo instruction that doesn't
618 /// correspond to a real machine instruction.
619 bool isPseudo(QueryType Type = IgnoreBundle) const {
620 return hasProperty(MCID::Pseudo, Type);
621 }
622
623 bool isReturn(QueryType Type = AnyInBundle) const {
624 return hasProperty(MCID::Return, Type);
625 }
626
627 /// Return true if this is an instruction that marks the end of an EH scope,
628 /// i.e., a catchpad or a cleanuppad instruction.
629 bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
630 return hasProperty(MCID::EHScopeReturn, Type);
631 }
632
633 bool isCall(QueryType Type = AnyInBundle) const {
634 return hasProperty(MCID::Call, Type);
635 }
636
637 /// Returns true if the specified instruction stops control flow
638 /// from executing the instruction immediately following it. Examples include
639 /// unconditional branches and return instructions.
640 bool isBarrier(QueryType Type = AnyInBundle) const {
641 return hasProperty(MCID::Barrier, Type);
642 }
643
644 /// Returns true if this instruction is part of the terminator for a basic block.
645 /// Typically this is things like return and branch instructions.
646 ///
647 /// Various passes use this to insert code into the bottom of a basic block,
648 /// but before control flow occurs.
649 bool isTerminator(QueryType Type = AnyInBundle) const {
650 return hasProperty(MCID::Terminator, Type);
651 }
652
653 /// Returns true if this is a conditional, unconditional, or indirect branch.
654 /// Predicates below can be used to discriminate between
655 /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
656 /// get more information.
657 bool isBranch(QueryType Type = AnyInBundle) const {
658 return hasProperty(MCID::Branch, Type);
659 }
660
661 /// Return true if this is an indirect branch, such as a
662 /// branch through a register.
663 bool isIndirectBranch(QueryType Type = AnyInBundle) const {
664 return hasProperty(MCID::IndirectBranch, Type);
665 }
666
667 /// Return true if this is a branch which may fall
668 /// through to the next instruction or may transfer control flow to some other
669 /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
670 /// information about this branch.
671 bool isConditionalBranch(QueryType Type = AnyInBundle) const {
672 return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
673 }
674
675 /// Return true if this is a branch which always
676 /// transfers control flow to some other block. The
677 /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
678 /// about this branch.
679 bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
680 return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
681 }
682
683 /// Return true if this instruction has a predicate operand that
684 /// controls execution. It may be set to 'always', or may be set to other
685 /// values. There are various methods in TargetInstrInfo that can be used to
686 /// control and modify the predicate in this instruction.
687 bool isPredicable(QueryType Type = AllInBundle) const {
688 // If it's a bundle then all bundled instructions must be predicable for this
689 // to return true.
690 return hasProperty(MCID::Predicable, Type);
691 }
692
693 /// Return true if this instruction is a comparison.
694 bool isCompare(QueryType Type = IgnoreBundle) const {
695 return hasProperty(MCID::Compare, Type);
696 }
697
698 /// Return true if this instruction is a move immediate
699 /// (including conditional moves) instruction.
700 bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
701 return hasProperty(MCID::MoveImm, Type);
702 }
703
704 /// Return true if this instruction is a register move.
705 /// (including moving values from subreg to reg)
706 bool isMoveReg(QueryType Type = IgnoreBundle) const {
707 return hasProperty(MCID::MoveReg, Type);
708 }
709
710 /// Return true if this instruction is a bitcast instruction.
711 bool isBitcast(QueryType Type = IgnoreBundle) const {
712 return hasProperty(MCID::Bitcast, Type);
713 }
714
715 /// Return true if this instruction is a select instruction.
716 bool isSelect(QueryType Type = IgnoreBundle) const {
717 return hasProperty(MCID::Select, Type);
718 }
719
720 /// Return true if this instruction cannot be safely duplicated.
721 /// For example, if the instruction has a unique labels attached
722 /// to it, duplicating it would cause multiple definition errors.
723 bool isNotDuplicable(QueryType Type = AnyInBundle) const {
724 return hasProperty(MCID::NotDuplicable, Type);
725 }
726
727 /// Return true if this instruction is convergent.
728 /// Convergent instructions can not be made control-dependent on any
729 /// additional values.
730 bool isConvergent(QueryType Type = AnyInBundle) const {
731 if (isInlineAsm()) {
732 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
733 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
734 return true;
735 }
736 return hasProperty(MCID::Convergent, Type);
737 }
738
739 /// Returns true if the specified instruction has a delay slot
740 /// which must be filled by the code generator.
741 bool hasDelaySlot(QueryType Type = AnyInBundle) const {
742 return hasProperty(MCID::DelaySlot, Type);
743 }
744
745 /// Return true for instructions that can be folded as
746 /// memory operands in other instructions. The most common use for this
747 /// is instructions that are simple loads from memory that don't modify
748 /// the loaded value in any way, but it can also be used for instructions
749 /// that can be expressed as constant-pool loads, such as V_SETALLONES
750 /// on x86, to allow them to be folded when it is beneficial.
751 /// This should only be set on instructions that return a value in their
752 /// only virtual register definition.
753 bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
754 return hasProperty(MCID::FoldableAsLoad, Type);
755 }
756
757 /// Return true if this instruction behaves
758 /// the same way as the generic REG_SEQUENCE instructions.
759 /// E.g., on ARM,
760 /// dX VMOVDRR rY, rZ
761 /// is equivalent to
762 /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
763 ///
764 /// Note that for the optimizers to be able to take advantage of
765 /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
766 /// overridden accordingly.
767 bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
768 return hasProperty(MCID::RegSequence, Type);
769 }
770
771 /// Return true if this instruction behaves
772 /// the same way as the generic EXTRACT_SUBREG instructions.
773 /// E.g., on ARM,
774 /// rX, rY VMOVRRD dZ
775 /// is equivalent to two EXTRACT_SUBREG:
776 /// rX = EXTRACT_SUBREG dZ, ssub_0
777 /// rY = EXTRACT_SUBREG dZ, ssub_1
778 ///
779 /// Note that for the optimizers to be able to take advantage of
780 /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
781 /// overridden accordingly.
782 bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
783 return hasProperty(MCID::ExtractSubreg, Type);
784 }
785
786 /// Return true if this instruction behaves
787 /// the same way as the generic INSERT_SUBREG instructions.
788 /// E.g., on ARM,
789 /// dX = VSETLNi32 dY, rZ, Imm
790 /// is equivalent to a INSERT_SUBREG:
791 /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
792 ///
793 /// Note that for the optimizers to be able to take advantage of
794 /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
795 /// overridden accordingly.
796 bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
797 return hasProperty(MCID::InsertSubreg, Type);
798 }
799
800 //===--------------------------------------------------------------------===//
801 // Side Effect Analysis
802 //===--------------------------------------------------------------------===//
803
804 /// Return true if this instruction could possibly read memory.
805 /// Instructions with this flag set are not necessarily simple load
806 /// instructions, they may load a value and modify it, for example.
807 bool mayLoad(QueryType Type = AnyInBundle) const {
808 if (isInlineAsm()) {
809 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
810 if (ExtraInfo & InlineAsm::Extra_MayLoad)
811 return true;
812 }
813 return hasProperty(MCID::MayLoad, Type);
814 }
815
816 /// Return true if this instruction could possibly modify memory.
817 /// Instructions with this flag set are not necessarily simple store
818 /// instructions, they may store a modified value based on their operands, or
819 /// may not actually modify anything, for example.
820 bool mayStore(QueryType Type = AnyInBundle) const {
821 if (isInlineAsm()) {
822 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
823 if (ExtraInfo & InlineAsm::Extra_MayStore)
824 return true;
825 }
826 return hasProperty(MCID::MayStore, Type);
827 }
828
829 /// Return true if this instruction could possibly read or modify memory.
830 bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
831 return mayLoad(Type) || mayStore(Type);
832 }
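
A hedged sketch combining these predicates with hasOrderedMemoryRef() and hasUnmodeledSideEffects(), both declared further down, into a conservative "barrier for memory reordering" test (the helper name is illustrative):

  bool blocksMemoryReordering(const MachineInstr &MI) {
    return MI.mayStore() || MI.hasOrderedMemoryRef() ||
           MI.hasUnmodeledSideEffects();
  }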
833
834 //===--------------------------------------------------------------------===//
835 // Flags that indicate whether an instruction can be modified by a method.
836 //===--------------------------------------------------------------------===//
837
838 /// Return true if this may be a 2- or 3-address
839 /// instruction (of the form "X = op Y, Z, ..."), which produces the same
840 /// result if Y and Z are exchanged. If this flag is set, then the
841 /// TargetInstrInfo::commuteInstruction method may be used to hack on the
842 /// instruction.
843 ///
844 /// Note that this flag may be set on instructions that are only commutable
845 /// sometimes. In these cases, the call to commuteInstruction will fail.
846 /// Also note that some instructions require non-trivial modification to
847 /// commute them.
848 bool isCommutable(QueryType Type = IgnoreBundle) const {
849 return hasProperty(MCID::Commutable, Type);
850 }
851
852 /// Return true if this is a 2-address instruction
853 /// which can be changed into a 3-address instruction if needed. Doing this
854 /// transformation can be profitable in the register allocator, because it
855 /// means that the instruction can use a 2-address form if possible, but
856 /// degrade into a less efficient form if the source and dest register cannot
857 /// be assigned to the same register. For example, this allows the x86
858 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
859 /// is the same speed as the shift but has bigger code size.
860 ///
861 /// If this returns true, then the target must implement the
862 /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
863 /// is allowed to fail if the transformation isn't valid for this specific
864 /// instruction (e.g. shl reg, 4 on x86).
865 ///
866 bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
867 return hasProperty(MCID::ConvertibleTo3Addr, Type);
868 }
869
870 /// Return true if this instruction requires
871 /// custom insertion support when the DAG scheduler is inserting it into a
872 /// machine basic block. If this is true for the instruction, it basically
873 /// means that it is a pseudo instruction used at SelectionDAG time that is
874 /// expanded out into magic code by the target when MachineInstrs are formed.
875 ///
876 /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
877 /// is used to insert this into the MachineBasicBlock.
878 bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
879 return hasProperty(MCID::UsesCustomInserter, Type);
880 }
881
882 /// Return true if this instruction requires *adjustment*
883 /// after instruction selection by calling a target hook. For example, this
884 /// can be used to fill in ARM 's' optional operand depending on whether
885 /// the conditional flag register is used.
886 bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
887 return hasProperty(MCID::HasPostISelHook, Type);
888 }
889
890 /// Returns true if this instruction is a candidate for remat.
891 /// This flag is deprecated, please don't use it anymore. If this
892 /// flag is set, the isReallyTriviallyReMaterializable() method is called to
893 /// verify the instruction is really rematable.
894 bool isRematerializable(QueryType Type = AllInBundle) const {
895 // It's only possible to re-mat a bundle if all bundled instructions are
896 // re-materializable.
897 return hasProperty(MCID::Rematerializable, Type);
898 }
899
900 /// Returns true if this instruction has the same cost (or less) than a move
901 /// instruction. This is useful during certain types of optimizations
902 /// (e.g., remat during two-address conversion or machine licm)
903 /// where we would like to remat or hoist the instruction, but not if it costs
904 /// more than moving the instruction into the appropriate register. Note, we
905 /// are not marking copies from and to the same register class with this flag.
906 bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
907 // Only returns true for a bundle if all bundled instructions are cheap.
908 return hasProperty(MCID::CheapAsAMove, Type);
909 }
910
911 /// Returns true if this instruction's source operands
912 /// have special register allocation requirements that are not captured by the
913 /// operand register classes. e.g. ARM::STRD's two source registers must be an
914 /// even / odd pair, ARM::STM registers have to be in ascending order.
915 /// Post-register allocation passes should not attempt to change allocations
916 /// for sources of instructions with this flag.
917 bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
918 return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
919 }
920
921 /// Returns true if this instruction's def operands
922 /// have special register allocation requirements that are not captured by the
923 /// operand register classes. e.g. ARM::LDRD's two def registers must be an
924 /// even / odd pair, ARM::LDM registers have to be in ascending order.
925 /// Post-register allocation passes should not attempt to change allocations
926 /// for definitions of instructions with this flag.
927 bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
928 return hasProperty(MCID::ExtraDefRegAllocReq, Type);
929 }
930
931 enum MICheckType {
932 CheckDefs, // Check all operands for equality
933 CheckKillDead, // Check all operands including kill / dead markers
934 IgnoreDefs, // Ignore all definitions
935 IgnoreVRegDefs // Ignore virtual register definitions
936 };
937
938 /// Return true if this instruction is identical to \p Other.
939 /// Two instructions are identical if they have the same opcode and all their
940 /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
941 /// Note that this means liveness related flags (dead, undef, kill) do not
942 /// affect the notion of identical.
943 bool isIdenticalTo(const MachineInstr &Other,
944 MICheckType Check = CheckDefs) const;
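
An illustrative use of isIdenticalTo with a non-default check kind, mirroring what MachineInstrExpressionTrait at the end of this file does for CSE (the wrapper name sameExpression is made up):

  bool sameExpression(const MachineInstr &A, const MachineInstr &B) {
    return A.isIdenticalTo(B, MachineInstr::IgnoreVRegDefs);
  }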
945
946 /// Unlink 'this' from the containing basic block, and return it without
947 /// deleting it.
948 ///
949 /// This function can not be used on bundled instructions, use
950 /// removeFromBundle() to remove individual instructions from a bundle.
951 MachineInstr *removeFromParent();
952
953 /// Unlink this instruction from its basic block and return it without
954 /// deleting it.
955 ///
956 /// If the instruction is part of a bundle, the other instructions in the
957 /// bundle remain bundled.
958 MachineInstr *removeFromBundle();
959
960 /// Unlink 'this' from the containing basic block and delete it.
961 ///
962 /// If this instruction is the header of a bundle, the whole bundle is erased.
963 /// This function can not be used for instructions inside a bundle, use
964 /// eraseFromBundle() to erase individual bundled instructions.
965 void eraseFromParent();
966
967 /// Unlink 'this' from the containing basic block and delete it.
968 ///
969 /// For all definitions mark their uses in DBG_VALUE nodes
970 /// as undefined. Otherwise like eraseFromParent().
971 void eraseFromParentAndMarkDBGValuesForRemoval();
972
973 /// Unlink 'this' from its basic block and delete it.
974 ///
975 /// If the instruction is part of a bundle, the other instructions in the
976 /// bundle remain bundled.
977 void eraseFromBundle();
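
A hedged sketch of erasing instructions while walking a block with the API above; the iterator is advanced before the erase so it is never invalidated (this assumes the debug instructions being removed are not bundled):

  void eraseDebugInstrs(MachineBasicBlock &MBB) {
    for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
      MachineInstr &MI = *I++;
      if (MI.isDebugInstr())
        MI.eraseFromParent();
    }
  }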
978
979 bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
980 bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
981 bool isAnnotationLabel() const {
982 return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
983 }
984
985 /// Returns true if the MachineInstr represents a label.
986 bool isLabel() const {
987 return isEHLabel() || isGCLabel() || isAnnotationLabel();
988 }
989
990 bool isCFIInstruction() const {
991 return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
992 }
993
994 // True if the instruction represents a position in the function.
995 bool isPosition() const { return isLabel() || isCFIInstruction(); }
996
997 bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
998 bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
999 bool isDebugInstr() const { return isDebugValue() || isDebugLabel(); }
1000
1001 /// A DBG_VALUE is indirect iff the first operand is a register and
1002 /// the second operand is an immediate.
1003 bool isIndirectDebugValue() const {
1004 return isDebugValue()
1005 && getOperand(0).isReg()
1006 && getOperand(1).isImm();
1007 }
1008
1009 bool isPHI() const {
1010 return getOpcode() == TargetOpcode::PHI ||
1011 getOpcode() == TargetOpcode::G_PHI;
1012 }
1013 bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
1014 bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
1015 bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
1016
1017 bool isMSInlineAsm() const {
1018 return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
1019 }
1020
1021 bool isStackAligningInlineAsm() const;
1022 InlineAsm::AsmDialect getInlineAsmDialect() const;
1023
1024 bool isInsertSubreg() const {
1025 return getOpcode() == TargetOpcode::INSERT_SUBREG;
1026 }
1027
1028 bool isSubregToReg() const {
1029 return getOpcode() == TargetOpcode::SUBREG_TO_REG;
1030 }
1031
1032 bool isRegSequence() const {
1033 return getOpcode() == TargetOpcode::REG_SEQUENCE;
1034 }
1035
1036 bool isBundle() const {
1037 return getOpcode() == TargetOpcode::BUNDLE;
1038 }
1039
1040 bool isCopy() const {
1041 return getOpcode() == TargetOpcode::COPY;
1042 }
1043
1044 bool isFullCopy() const {
1045 return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
1046 }
1047
1048 bool isExtractSubreg() const {
1049 return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
1050 }
1051
1052 /// Return true if the instruction behaves like a copy.
1053 /// This does not include native copy instructions.
1054 bool isCopyLike() const {
1055 return isCopy() || isSubregToReg();
1056 }
1057
1058 /// Return true if the instruction is an identity copy.
1059 bool isIdentityCopy() const {
1060 return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
1061 getOperand(0).getSubReg() == getOperand(1).getSubReg();
1062 }
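
Identity copies are no-ops, so a peephole pass might simply delete them using the predicates above (a hedged sketch; kill/dead flags would need to be recomputed separately, and the helper name is illustrative):

  bool tryEraseIdentityCopy(MachineInstr &MI) {
    if (!MI.isIdentityCopy())
      return false;
    MI.eraseFromParent();
    return true;
  }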
1063
1064 /// Return true if this instruction doesn't produce any output in the form of
1065 /// executable instructions.
1066 bool isMetaInstruction() const {
1067 switch (getOpcode()) {
1068 default:
1069 return false;
1070 case TargetOpcode::IMPLICIT_DEF:
1071 case TargetOpcode::KILL:
1072 case TargetOpcode::CFI_INSTRUCTION:
1073 case TargetOpcode::EH_LABEL:
1074 case TargetOpcode::GC_LABEL:
1075 case TargetOpcode::DBG_VALUE:
1076 case TargetOpcode::DBG_LABEL:
1077 case TargetOpcode::LIFETIME_START:
1078 case TargetOpcode::LIFETIME_END:
1079 return true;
1080 }
1081 }
1082
1083 /// Return true if this is a transient instruction that is either very likely
1084 /// to be eliminated during register allocation (such as copy-like
1085 /// instructions), or if this instruction doesn't have an execution-time cost.
1086 bool isTransient() const {
1087 switch (getOpcode()) {
1088 default:
1089 return isMetaInstruction();
1090 // Copy-like instructions are usually eliminated during register allocation.
1091 case TargetOpcode::PHI:
1092 case TargetOpcode::G_PHI:
1093 case TargetOpcode::COPY:
1094 case TargetOpcode::INSERT_SUBREG:
1095 case TargetOpcode::SUBREG_TO_REG:
1096 case TargetOpcode::REG_SEQUENCE:
1097 return true;
1098 }
1099 }
1100
1101 /// Return the number of instructions inside the MI bundle, excluding the
1102 /// bundle header.
1103 ///
1104 /// This is the number of instructions that MachineBasicBlock::iterator
1105 /// skips, 0 for unbundled instructions.
1106 unsigned getBundleSize() const;
1107
1108 /// Return true if the MachineInstr reads the specified register.
1109 /// If TargetRegisterInfo is passed, then it also checks if there
1110 /// is a read of a super-register.
1111 /// This does not count partial redefines of virtual registers as reads:
1112 /// %reg1024:6 = OP.
1113 bool readsRegister(unsigned Reg,
1114 const TargetRegisterInfo *TRI = nullptr) const {
1115 return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
1116 }
1117
1118 /// Return true if the MachineInstr reads the specified virtual register.
1119 /// Take into account that a partial define is a
1120 /// read-modify-write operation.
1121 bool readsVirtualRegister(unsigned Reg) const {
1122 return readsWritesVirtualRegister(Reg).first;
1123 }
1124
1125 /// Return a pair of bools (reads, writes) indicating if this instruction
1126 /// reads or writes Reg. This also considers partial defines.
1127 /// If Ops is not null, all operand indices for Reg are added.
1128 std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
1129 SmallVectorImpl<unsigned> *Ops = nullptr) const;
1130
1131 /// Return true if the MachineInstr kills the specified register.
1132 /// If TargetRegisterInfo is passed, then it also checks if there is
1133 /// a kill of a super-register.
1134 bool killsRegister(unsigned Reg,
1135 const TargetRegisterInfo *TRI = nullptr) const {
1136 return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
1137 }
1138
1139 /// Return true if the MachineInstr fully defines the specified register.
1140 /// If TargetRegisterInfo is passed, then it also checks
1141 /// if there is a def of a super-register.
1142 /// NOTE: It's ignoring subreg indices on virtual registers.
1143 bool definesRegister(unsigned Reg,
1144 const TargetRegisterInfo *TRI = nullptr) const {
1145 return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
1146 }
1147
1148 /// Return true if the MachineInstr modifies (fully define or partially
1149 /// define) the specified register.
1150 /// NOTE: It's ignoring subreg indices on virtual registers.
1151 bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
1152 return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
1153 }
1154
1155 /// Returns true if the register is dead in this machine instruction.
1156 /// If TargetRegisterInfo is passed, then it also checks
1157 /// if there is a dead def of a super-register.
1158 bool registerDefIsDead(unsigned Reg,
1159 const TargetRegisterInfo *TRI = nullptr) const {
1160 return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
1161 }
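
A hedged sketch of the usual backwards-liveness update built on the register queries above (the helper name updateLiveness is made up; TRI may be null to match the exact register only):

  void updateLiveness(const MachineInstr &MI, unsigned Reg,
                      const TargetRegisterInfo *TRI, bool &IsLive) {
    if (MI.modifiesRegister(Reg, TRI))
      IsLive = false;          // (partially) redefined here
    if (MI.readsRegister(Reg, TRI))
      IsLive = true;           // read here, so live above this instruction
  }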
1162
1163 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1164 /// the given register (not considering sub/super-registers).
1165 bool hasRegisterImplicitUseOperand(unsigned Reg) const;
1166
1167 /// Returns the operand index that is a use of the specific register or -1
1168 /// if it is not found. It further tightens the search criteria to a use
1169 /// that kills the register if isKill is true.
1170 int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
1171 const TargetRegisterInfo *TRI = nullptr) const;
1172
1173 /// Wrapper for findRegisterUseOperandIdx, it returns
1174 /// a pointer to the MachineOperand rather than an index.
1175 MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
1176 const TargetRegisterInfo *TRI = nullptr) {
1177 int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
1178 return (Idx == -1) ? nullptr : &getOperand(Idx);
1179 }
1180
1181 const MachineOperand *findRegisterUseOperand(
1182 unsigned Reg, bool isKill = false,
1183 const TargetRegisterInfo *TRI = nullptr) const {
1184 return const_cast<MachineInstr *>(this)->
1185 findRegisterUseOperand(Reg, isKill, TRI);
1186 }
1187
1188 /// Returns the operand index that is a def of the specified register or
1189 /// -1 if it is not found. If isDead is true, defs that are not dead are
1190 /// skipped. If Overlap is true, then it also looks for defs that merely
1191 /// overlap the specified register. If TargetRegisterInfo is non-null,
1192 /// then it also checks if there is a def of a super-register.
1193 /// This may also return a register mask operand when Overlap is true.
1194 int findRegisterDefOperandIdx(unsigned Reg,
1195 bool isDead = false, bool Overlap = false,
1196 const TargetRegisterInfo *TRI = nullptr) const;
1197
1198 /// Wrapper for findRegisterDefOperandIdx, it returns
1199 /// a pointer to the MachineOperand rather than an index.
1200 MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
1201 const TargetRegisterInfo *TRI = nullptr) {
1202 int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
1203 return (Idx == -1) ? nullptr : &getOperand(Idx);
1204 }
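
An illustrative use of the wrapper above to set a kill flag when one is known to be missing (the helper name markKill is made up; MachineOperand::setIsKill is the existing setter):

  void markKill(MachineInstr &MI, unsigned Reg, const TargetRegisterInfo *TRI) {
    if (MachineOperand *MO = MI.findRegisterUseOperand(Reg, /*isKill=*/false, TRI))
      MO->setIsKill(true);
  }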
1205
1206 /// Find the index of the first operand in the
1207 /// operand list that is used to represent the predicate. It returns -1 if
1208 /// none is found.
1209 int findFirstPredOperandIdx() const;
1210
1211 /// Find the index of the flag word operand that
1212 /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
1213 /// getOperand(OpIdx) does not belong to an inline asm operand group.
1214 ///
1215 /// If GroupNo is not NULL, it will receive the number of the operand group
1216 /// containing OpIdx.
1217 ///
1218 /// The flag operand is an immediate that can be decoded with methods like
1219 /// InlineAsm::hasRegClassConstraint().
1220 int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
1221
1222 /// Compute the static register class constraint for operand OpIdx.
1223 /// For normal instructions, this is derived from the MCInstrDesc.
1224 /// For inline assembly it is derived from the flag words.
1225 ///
1226 /// Returns NULL if the static register class constraint cannot be
1227 /// determined.
1228 const TargetRegisterClass*
1229 getRegClassConstraint(unsigned OpIdx,
1230 const TargetInstrInfo *TII,
1231 const TargetRegisterInfo *TRI) const;
1232
1233 /// Applies the constraints (def/use) implied by this MI on \p Reg to
1234 /// the given \p CurRC.
1235 /// If \p ExploreBundle is set and MI is part of a bundle, all the
1236 /// instructions inside the bundle will be taken into account. In other words,
1237 /// this method accumulates all the constraints of the operand of this MI and
1238 /// the related bundle if MI is a bundle or inside a bundle.
1239 ///
1240 /// Returns the register class that satisfies both \p CurRC and the
1241 /// constraints set by MI. Returns NULL if such a register class does not
1242 /// exist.
1243 ///
1244 /// \pre CurRC must not be NULL.
1245 const TargetRegisterClass *getRegClassConstraintEffectForVReg(
1246 unsigned Reg, const TargetRegisterClass *CurRC,
1247 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1248 bool ExploreBundle = false) const;
1249
1250 /// Applies the constraints (def/use) implied by the \p OpIdx operand
1251 /// to the given \p CurRC.
1252 ///
1253 /// Returns the register class that satisfies both \p CurRC and the
1254 /// constraints set by \p OpIdx MI. Returns NULL if such a register class
1255 /// does not exist.
1256 ///
1257 /// \pre CurRC must not be NULL.
1258 /// \pre The operand at \p OpIdx must be a register.
1259 const TargetRegisterClass *
1260 getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
1261 const TargetInstrInfo *TII,
1262 const TargetRegisterInfo *TRI) const;
1263
1264 /// Add a tie between the register operands at DefIdx and UseIdx.
1265 /// The tie will cause the register allocator to ensure that the two
1266 /// operands are assigned the same physical register.
1267 ///
1268 /// Tied operands are managed automatically for explicit operands in the
1269 /// MCInstrDesc. This method is for exceptional cases like inline asm.
1270 void tieOperands(unsigned DefIdx, unsigned UseIdx);
1271
1272 /// Given the index of a tied register operand, find the
1273 /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
1274 /// index of the tied operand which must exist.
1275 unsigned findTiedOperandIdx(unsigned OpIdx) const;
1276
1277 /// Given the index of a register def operand,
1278 /// check if the register def is tied to a source operand, due to either
1279 /// two-address elimination or inline assembly constraints. Returns the
1280 /// first tied use operand index by reference if UseOpIdx is not null.
1281 bool isRegTiedToUseOperand(unsigned DefOpIdx,
1282 unsigned *UseOpIdx = nullptr) const {
1283 const MachineOperand &MO = getOperand(DefOpIdx);
1284 if (!MO.isReg() || !MO.isDef() || !MO.isTied())
1285 return false;
1286 if (UseOpIdx)
1287 *UseOpIdx = findTiedOperandIdx(DefOpIdx);
1288 return true;
1289 }
1290
1291 /// Return true if the use operand of the specified index is tied to a def
1292 /// operand. It also returns the def operand index by reference if DefOpIdx
1293 /// is not null.
1294 bool isRegTiedToDefOperand(unsigned UseOpIdx,
1295 unsigned *DefOpIdx = nullptr) const {
1296 const MachineOperand &MO = getOperand(UseOpIdx);
1297 if (!MO.isReg() || !MO.isUse() || !MO.isTied())
1298 return false;
1299 if (DefOpIdx)
1300 *DefOpIdx = findTiedOperandIdx(UseOpIdx);
1301 return true;
1302 }
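
A small sketch of following a tied use back to its def with the query above, as two-address passes do (the helper name tiedDefIndex is illustrative):

  int tiedDefIndex(const MachineInstr &MI, unsigned UseOpIdx) {
    unsigned DefIdx;
    if (MI.isRegTiedToDefOperand(UseOpIdx, &DefIdx))
      return static_cast<int>(DefIdx);
    return -1;
  }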
1303
1304 /// Clears kill flags on all operands.
1305 void clearKillInfo();
1306
1307 /// Replace all occurrences of FromReg with ToReg:SubIdx,
1308 /// properly composing subreg indices where necessary.
1309 void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
1310 const TargetRegisterInfo &RegInfo);
1311
1312 /// We have determined MI kills a register. Look for the
1313 /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
1314 /// add an implicit operand if it's not found. Returns true if the operand
1315 /// exists / is added.
1316 bool addRegisterKilled(unsigned IncomingReg,
1317 const TargetRegisterInfo *RegInfo,
1318 bool AddIfNotFound = false);
1319
1320 /// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
1321 /// all aliasing registers.
1322 void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
1323
1324 /// We have determined MI defined a register without a use.
1325 /// Look for the operand that defines it and mark it as IsDead. If
1326 /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
1327 /// true if the operand exists / is added.
1328 bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
1329 bool AddIfNotFound = false);
1330
1331 /// Clear all dead flags on operands defining register @p Reg.
1332 void clearRegisterDeads(unsigned Reg);
1333
1334 /// Mark all subregister defs of register @p Reg with the undef flag.
1335 /// This function is used when we determined to have a subregister def in an
1336 /// otherwise undefined super register.
1337 void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
1338
1339 /// We have determined MI defines a register. Make sure there is an operand
1340 /// defining Reg.
1341 void addRegisterDefined(unsigned Reg,
1342 const TargetRegisterInfo *RegInfo = nullptr);
1343
1344 /// Mark every physreg used by this instruction as
1345 /// dead except those in the UsedRegs list.
1346 ///
1347 /// On instructions with register mask operands, also add implicit-def
1348 /// operands for all registers in UsedRegs.
1349 void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1350 const TargetRegisterInfo &TRI);
1351
1352 /// Return true if it is safe to move this instruction. If
1353 /// SawStore is set to true, it means that there is a store (or call) between
1354 /// the instruction's location and its intended destination.
1355 bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
1356
1357 /// Returns true if this instruction's memory access aliases the memory
1358 /// access of Other.
1359 ///
1360 /// Assumes any physical registers used to compute addresses
1361 /// have the same value for both instructions. Returns false if neither
1362 /// instruction writes to memory.
1363 ///
1364 /// @param AA Optional alias analysis, used to compare memory operands.
1365 /// @param Other MachineInstr to check aliasing against.
1366 /// @param UseTBAA Whether to pass TBAA information to alias analysis.
1367 bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
1368
1369 /// Return true if this instruction may have an ordered
1370 /// or volatile memory reference, or if the information describing the memory
1371 /// reference is not available. Return false if it is known to have no
1372 /// ordered or volatile memory references.
1373 bool hasOrderedMemoryRef() const;
1374
1375 /// Return true if this load instruction never traps and points to a memory
1376 /// location whose value doesn't change during the execution of this function.
1377 ///
1378 /// Examples include loading a value from the constant pool or from the
1379 /// argument area of a function (if it does not change). If the instruction
1380 /// does multiple loads, this returns true only if all of the loads are
1381 /// dereferenceable and invariant.
1382 bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
1383
1384 /// If the specified instruction is a PHI that always merges together the
1385 /// same virtual register, return the register, otherwise return 0.
1386 unsigned isConstantValuePHI() const;
1387
1388 /// Return true if this instruction has side effects that are not modeled
1389 /// by mayLoad / mayStore, etc.
1390 /// For all instructions, the property is encoded in MCInstrDesc::Flags
1391 /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
1392 /// INLINEASM instruction, in which case the side effect property is encoded
1393 /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
1394 ///
1395 bool hasUnmodeledSideEffects() const;
1396
1397 /// Returns true if it is illegal to fold a load across this instruction.
1398 bool isLoadFoldBarrier() const;
1399
1400 /// Return true if all the defs of this instruction are dead.
1401 bool allDefsAreDead() const;
1402
1403 /// Copy implicit register operands from the specified
1404 /// instruction to this instruction.
1405 void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
1406
1407 /// Debugging support
1408 /// @{
1409 /// Determine the generic type to be printed (if needed) on uses and defs.
1410 LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1411 const MachineRegisterInfo &MRI) const;
1412
1413 /// Return true when an instruction has a tied register that can't be determined
1414 /// by the instruction's descriptor. This is useful for MIR printing, to
1415 /// determine whether we need to print the ties or not.
1416 bool hasComplexRegisterTies() const;
1417
1418 /// Print this MI to \p OS.
1419 /// Don't print information that can be inferred from other instructions if
1420 /// \p IsStandalone is false. It is usually true when only a fragment of the
1421 /// function is printed.
1422 /// Only print the defs and the opcode if \p SkipOpers is true.
1423 /// Otherwise, also print the operands. The debug location is omitted when
1424 /// \p SkipDebugLoc is true, and otherwise printed with a terminating newline.
1425 /// \p TII is used to print the opcode name. If it's not present, but the
1426 /// MI is in a function, the opcode will be printed using the function's TII.
1427 void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
1428 bool SkipDebugLoc = false, bool AddNewLine = true,
1429 const TargetInstrInfo *TII = nullptr) const;
1430 void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
1431 bool SkipOpers = false, bool SkipDebugLoc = false,
1432 bool AddNewLine = true,
1433 const TargetInstrInfo *TII = nullptr) const;
1434 void dump() const;
1435 /// @}
1436
1437 //===--------------------------------------------------------------------===//
1438 // Accessors used to build up machine instructions.
1439
1440 /// Add the specified operand to the instruction. If it is an implicit
1441 /// operand, it is added to the end of the operand list. If it is an
1442 /// explicit operand it is added at the end of the explicit operand list
1443 /// (before the first implicit operand).
1444 ///
1445 /// MF must be the machine function that was used to allocate this
1446 /// instruction.
1447 ///
1448 /// MachineInstrBuilder provides a more convenient interface for creating
1449 /// instructions and adding operands.
1450 void addOperand(MachineFunction &MF, const MachineOperand &Op);
1451
1452 /// Add an operand without providing an MF reference. This only works for
1453 /// instructions that are inserted in a basic block.
1454 ///
1455 /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
1456 /// preferred.
1457 void addOperand(const MachineOperand &Op);
1458
1459 /// Replace the instruction descriptor (thus opcode) of
1460 /// the current instruction with a new one.
1461 void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
1462
1463 /// Replace current source information with new such.
1464 /// Avoid using this, the constructor argument is preferable.
1465 void setDebugLoc(DebugLoc dl) {
1466 debugLoc = std::move(dl);
1467 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1468 }
1469
1470 /// Erase an operand from an instruction, leaving it with one
1471 /// fewer operand than it started with.
1472 void RemoveOperand(unsigned OpNo);
1473
1474 /// Clear this MachineInstr's memory reference descriptor list. This resets
1475 /// the memrefs to their most conservative state. This should be used only
1476 /// as a last resort since it greatly pessimizes our knowledge of the memory
1477 /// access performed by the instruction.
1478 void dropMemRefs(MachineFunction &MF);
1479
1480 /// Assign this MachineInstr's memory reference descriptor list.
1481 ///
1482 /// Unlike other methods, this *will* allocate them into a new array
1483 /// associated with the provided `MachineFunction`.
1484 void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
1485
1486 /// Add a MachineMemOperand to the machine instruction.
1487 /// This function should be used only occasionally. The setMemRefs function
1488 /// is the primary method for setting up a MachineInstr's MemRefs list.
1489 void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
1490
1491 /// Clone another MachineInstr's memory reference descriptor list and replace
1492 /// ours with it.
1493 ///
1494 /// Note that `*this` may be the incoming MI!
1495 ///
1496 /// Prefer this API whenever possible as it can avoid allocations in common
1497 /// cases.
1498 void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
1499
1500 /// Clone the merge of multiple MachineInstrs' memory reference descriptors
1501 /// list and replace ours with it.
1502 ///
1503 /// Note that `*this` may be one of the incoming MIs!
1504 ///
1505 /// Prefer this API whenever possible as it can avoid allocations in common
1506 /// cases.
1507 void cloneMergedMemRefs(MachineFunction &MF,
1508 ArrayRef<const MachineInstr *> MIs);
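
A hedged sketch of the recommended pattern when two memory instructions are folded into a single new one (the helper name transferMergedMemRefs is illustrative):

  void transferMergedMemRefs(MachineFunction &MF, MachineInstr &New,
                             const MachineInstr &A, const MachineInstr &B) {
    const MachineInstr *Srcs[] = {&A, &B};
    New.cloneMergedMemRefs(MF, Srcs);
  }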
1509
1510 /// Set a symbol that will be emitted just prior to the instruction itself.
1511 ///
1512 /// Setting this to a null pointer will remove any such symbol.
1513 ///
1514 /// FIXME: This is not fully implemented yet.
1515 void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
1516
1517 /// Set a symbol that will be emitted just after the instruction itself.
1518 ///
1519 /// Setting this to a null pointer will remove any such symbol.
1520 ///
1521 /// FIXME: This is not fully implemented yet.
1522 void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
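
An illustrative sketch of attaching a post-instruction label (note the FIXME above; MCContext::createTempSymbol() is an existing helper, while the wrapper name labelAfter is made up):

  void labelAfter(MachineFunction &MF, MachineInstr &MI) {
    MCSymbol *Sym = MF.getContext().createTempSymbol();
    MI.setPostInstrSymbol(MF, Sym);
  }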
1523
1524 /// Return the MIFlags which represent both MachineInstrs. This
1525 /// should be used when merging two MachineInstrs into one. This routine does
1526 /// not modify the MIFlags of this MachineInstr.
1527 uint16_t mergeFlagsWith(const MachineInstr& Other) const;
1528
1529 /// Copy flags from the IR instruction \p I to this MachineInstr's MIFlags.
1530 void copyIRFlags(const Instruction &I);
1531
1532 /// Break any tie involving OpIdx.
1533 void untieRegOperand(unsigned OpIdx) {
1534 MachineOperand &MO = getOperand(OpIdx);
1535 if (MO.isReg() && MO.isTied()) {
1536 getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
1537 MO.TiedTo = 0;
1538 }
1539 }
1540
1541 /// Add all implicit def and use operands to this instruction.
1542 void addImplicitDefUseOperands(MachineFunction &MF);
1543
1544 /// Scan instructions following MI and collect any matching DBG_VALUEs.
1545 void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
1546
1547 /// Find all DBG_VALUEs immediately following this instruction that point
1548 /// to a register def in this instruction and point them to \p Reg instead.
1549 void changeDebugValuesDefReg(unsigned Reg);
1550
1551private:
1552 /// If this instruction is embedded into a MachineFunction, return the
1553 /// MachineRegisterInfo object for the current function, otherwise
1554 /// return null.
1555 MachineRegisterInfo *getRegInfo();
1556
1557 /// Unlink all of the register operands in this instruction from their
1558 /// respective use lists. This requires that the operands already be on their
1559 /// use lists.
1560 void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
1561
1562 /// Add all of the register operands in this instruction to their
1563 /// respective use lists. This requires that the operands not be on their
1564 /// use lists yet.
1565 void AddRegOperandsToUseLists(MachineRegisterInfo&);
1566
1567 /// Slow path for hasProperty when we're dealing with a bundle.
1568 bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
1569
1570 /// Implements the logic of getRegClassConstraintEffectForVReg for
1571 /// this MI and the given operand index \p OpIdx.
1572 /// If the related operand does not constrain Reg, this returns CurRC.
1573 const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
1574 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
1575 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
1576};
1577
1578/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
1579/// instruction rather than by pointer value.
1580/// The hashing and equality testing functions ignore definitions so this is
1581/// useful for CSE, etc.
1582struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
1583 static inline MachineInstr *getEmptyKey() {
1584 return nullptr;
1585 }
1586
1587 static inline MachineInstr *getTombstoneKey() {
1588 return reinterpret_cast<MachineInstr*>(-1);
1589 }
1590
1591 static unsigned getHashValue(const MachineInstr* const &MI);
1592
1593 static bool isEqual(const MachineInstr* const &LHS,
1594 const MachineInstr* const &RHS) {
1595 if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
1596 LHS == getEmptyKey() || LHS == getTombstoneKey())
1597 return LHS == RHS;
1598 return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
1599 }
1600};
1601
1602//===----------------------------------------------------------------------===//
1603// Debugging Support
1604
1605inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
1606 MI.print(OS);
1607 return OS;
1608}
1609
1610} // end namespace llvm
1611
1612#endif // LLVM_CODEGEN_MACHINEINSTR_H