LLVM 23.0.0git
MachineFunction.cpp
Go to the documentation of this file.
1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
43#include "llvm/Config/llvm-config.h"
44#include "llvm/IR/Attributes.h"
45#include "llvm/IR/BasicBlock.h"
46#include "llvm/IR/Constant.h"
47#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GlobalValue.h"
52#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/IR/Module.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/SectionKind.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <utility>
76#include <vector>
77
79
80using namespace llvm;
81
82#define DEBUG_TYPE "codegen"
83
85 "align-all-functions",
86 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
87 "means align on 16B boundaries)."),
89
92
93 // clang-format off
94 switch(Prop) {
95 case P::FailedISel: return "FailedISel";
96 case P::IsSSA: return "IsSSA";
97 case P::Legalized: return "Legalized";
98 case P::NoPHIs: return "NoPHIs";
99 case P::NoVRegs: return "NoVRegs";
100 case P::RegBankSelected: return "RegBankSelected";
101 case P::Selected: return "Selected";
102 case P::TracksLiveness: return "TracksLiveness";
103 case P::TiedOpsRewritten: return "TiedOpsRewritten";
104 case P::FailsVerification: return "FailsVerification";
105 case P::FailedRegAlloc: return "FailedRegAlloc";
106 case P::TracksDebugUserValues: return "TracksDebugUserValues";
107 }
108 // clang-format on
109 llvm_unreachable("Invalid machine function property");
110}
111
113 if (!F.hasFnAttribute(Attribute::SafeStack))
114 return;
115
116 auto *Existing =
117 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
118
119 if (!Existing || Existing->getNumOperands() != 2)
120 return;
121
122 auto *MetadataName = "unsafe-stack-size";
123 if (auto &N = Existing->getOperand(0)) {
124 if (N.equalsStr(MetadataName)) {
125 if (auto &Op = Existing->getOperand(1)) {
126 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
127 FrameInfo.setUnsafeStackSize(Val);
128 }
129 }
130 }
131}
132
133// Pin the vtable to this file.
// Defining one virtual method out-of-line forces the compiler to emit
// Delegate's vtable (and RTTI) in this translation unit only, instead of
// weakly in every TU that uses the class.
134void MachineFunction::Delegate::anchor() {}
135
137 const char *Separator = "";
138 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
139 if (!Properties[I])
140 continue;
141 OS << Separator << getPropertyName(static_cast<Property>(I));
142 Separator = ", ";
143 }
144}
145
146//===----------------------------------------------------------------------===//
147// MachineFunction implementation
148//===----------------------------------------------------------------------===//
149
150// Out-of-line virtual method.
152
154 MBB->getParent()->deleteMachineBasicBlock(MBB);
155}
156
158 const Function &F) {
159 if (auto MA = F.getFnStackAlign())
160 return *MA;
161 return STI.getFrameLowering()->getStackAlign();
162}
163
165 const TargetSubtargetInfo &STI, MCContext &Ctx,
166 unsigned FunctionNum)
167 : F(F), Target(Target), STI(STI), Ctx(Ctx) {
168 FunctionNumber = FunctionNum;
169 init();
170}
171
172void MachineFunction::handleInsertion(MachineInstr &MI) {
173 if (TheDelegate)
174 TheDelegate->MF_HandleInsertion(MI);
175}
176
177void MachineFunction::handleRemoval(MachineInstr &MI) {
178 if (TheDelegate)
179 TheDelegate->MF_HandleRemoval(MI);
180}
181
183 const MCInstrDesc &TID) {
184 if (TheDelegate)
185 TheDelegate->MF_HandleChangeDesc(MI, TID);
186}
187
188void MachineFunction::init() {
189 // Assume the function starts in SSA form with correct liveness.
190 Properties.setIsSSA();
191 Properties.setTracksLiveness();
192 RegInfo = new (Allocator) MachineRegisterInfo(this);
193
194 MFInfo = nullptr;
195
196 // We can realign the stack if the target supports it and the user hasn't
197 // explicitly asked us not to.
198 bool CanRealignSP = STI.getFrameLowering()->isStackRealignable() &&
199 !F.hasFnAttribute("no-realign-stack");
200 bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
201 F.hasFnAttribute("stackrealign");
202 FrameInfo = new (Allocator) MachineFrameInfo(
203 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
204 /*ForcedRealign=*/ForceRealignSP && CanRealignSP);
205
206 setUnsafeStackSize(F, *FrameInfo);
207
208 if (F.hasFnAttribute(Attribute::StackAlignment))
209 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
210
212 Alignment = STI.getTargetLowering()->getMinFunctionAlignment();
213
214 // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
215 // to load a type hash before the function label. Ensure functions are aligned
216 // by a least 4 to avoid unaligned access, which is especially important for
217 // -mno-unaligned-access.
218 if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
219 F.getMetadata(LLVMContext::MD_kcfi_type))
220 Alignment = std::max(Alignment, Align(4));
221
223 Alignment = Align(1ULL << AlignAllFunctions);
224
225 JumpTableInfo = nullptr;
226
228 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
229 WinEHInfo = new (Allocator) WinEHFuncInfo();
230 }
231
233 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
234 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
235 }
236
237 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
238 "Can't create a MachineFunction using a Module with a "
239 "Target-incompatible DataLayout attached\n");
240
241 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
242}
243
245 const TargetSubtargetInfo &STI) {
246 assert(!MFInfo && "MachineFunctionInfo already set");
247 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
248}
249
253
254void MachineFunction::clear() {
255 Properties.reset();
256
257 // Clear JumpTableInfo first. Otherwise, every MBB we delete would do a
258 // linear search over the jump table entries to find and erase itself.
259 if (JumpTableInfo) {
260 JumpTableInfo->~MachineJumpTableInfo();
261 Allocator.Deallocate(JumpTableInfo);
262 JumpTableInfo = nullptr;
263 }
264
265 // Don't call destructors on MachineInstr and MachineOperand. All of their
266 // memory comes from the BumpPtrAllocator which is about to be purged.
267 //
268 // Do call MachineBasicBlock destructors, it contains std::vectors.
269 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
270 I->Insts.clearAndLeakNodesUnsafely();
271 MBBNumbering.clear();
272
273 InstructionRecycler.clear(Allocator);
274 OperandRecycler.clear(Allocator);
275 BasicBlockRecycler.clear(Allocator);
276 CodeViewAnnotations.clear();
278 if (RegInfo) {
279 RegInfo->~MachineRegisterInfo();
280 Allocator.Deallocate(RegInfo);
281 }
282 if (MFInfo) {
283 MFInfo->~MachineFunctionInfo();
284 Allocator.Deallocate(MFInfo);
285 }
286
287 FrameInfo->~MachineFrameInfo();
288 Allocator.Deallocate(FrameInfo);
289
290 ConstantPool->~MachineConstantPool();
291 Allocator.Deallocate(ConstantPool);
292
293 if (WinEHInfo) {
294 WinEHInfo->~WinEHFuncInfo();
295 Allocator.Deallocate(WinEHInfo);
296 }
297
298 if (WasmEHInfo) {
299 WasmEHInfo->~WasmEHFuncInfo();
300 Allocator.Deallocate(WasmEHInfo);
301 }
302}
303
305 return F.getDataLayout();
306}
307
308/// Get the JumpTableInfo for this function.
309/// If it does not already exist, allocate one.
311getOrCreateJumpTableInfo(unsigned EntryKind) {
312 if (JumpTableInfo) return JumpTableInfo;
313
314 JumpTableInfo = new (Allocator)
316 return JumpTableInfo;
317}
318
320 return F.getDenormalMode(FPType);
321}
322
323/// Should we be emitting segmented stack stuff for the function
325 return getFunction().hasFnAttribute("split-stack");
326}
327
329 Align PrefAlignment;
330
331 if (MaybeAlign A = F.getPreferredAlignment())
332 PrefAlignment = *A;
333 else if (!F.hasOptSize())
334 PrefAlignment = STI.getTargetLowering()->getPrefFunctionAlignment();
335 else
336 PrefAlignment = Align(1);
337
338 return std::max(PrefAlignment, getAlignment());
339}
340
341[[nodiscard]] unsigned
343 FrameInstructions.push_back(Inst);
344 return FrameInstructions.size() - 1;
345}
346
347/// This discards all of the MachineBasicBlock numbers and recomputes them.
348/// This guarantees that the MBB numbers are sequential, dense, and match the
349/// ordering of the blocks within the function. If a specific MachineBasicBlock
350/// is specified, only that block and those after it are renumbered.
352 if (empty()) { MBBNumbering.clear(); return; }
354 if (MBB == nullptr)
355 MBBI = begin();
356 else
357 MBBI = MBB->getIterator();
358
359 // Figure out the block number this should have.
360 unsigned BlockNo = 0;
361 if (MBBI != begin())
362 BlockNo = std::prev(MBBI)->getNumber() + 1;
363
364 for (; MBBI != E; ++MBBI, ++BlockNo) {
365 if (MBBI->getNumber() != (int)BlockNo) {
366 // Remove use of the old number.
367 if (MBBI->getNumber() != -1) {
368 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
369 "MBB number mismatch!");
370 MBBNumbering[MBBI->getNumber()] = nullptr;
371 }
372
373 // If BlockNo is already taken, set that block's number to -1.
374 if (MBBNumbering[BlockNo])
375 MBBNumbering[BlockNo]->setNumber(-1);
376
377 MBBNumbering[BlockNo] = &*MBBI;
378 MBBI->setNumber(BlockNo);
379 }
380 }
381
382 // Okay, all the blocks are renumbered. If we have compactified the block
383 // numbering, shrink MBBNumbering now.
384 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
385 MBBNumbering.resize(BlockNo);
386}
387
390 const Align FunctionAlignment = getAlignment();
392 /// Offset - Distance from the beginning of the function to the end
393 /// of the basic block.
394 int64_t Offset = 0;
395
396 for (; MBBI != E; ++MBBI) {
397 const Align Alignment = MBBI->getAlignment();
398 int64_t BlockSize = 0;
399
400 for (auto &MI : *MBBI) {
401 BlockSize += TII.getInstSizeInBytes(MI);
402 }
403
404 int64_t OffsetBB;
405 if (Alignment <= FunctionAlignment) {
406 OffsetBB = alignTo(Offset, Alignment);
407 } else {
408 // The alignment of this MBB is larger than the function's alignment, so
409 // we can't tell whether or not it will insert nops. Assume that it will.
410 OffsetBB = alignTo(Offset, Alignment) + Alignment.value() -
411 FunctionAlignment.value();
412 }
413 Offset = OffsetBB + BlockSize;
414 }
415
416 return Offset;
417}
418
419/// This method iterates over the basic blocks and assigns their IsBeginSection
420/// and IsEndSection fields. This must be called after MBB layout is finalized
421/// and the SectionID's are assigned to MBBs.
424 auto CurrentSectionID = front().getSectionID();
425 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
426 if (MBBI->getSectionID() == CurrentSectionID)
427 continue;
428 MBBI->setIsBeginSection();
429 std::prev(MBBI)->setIsEndSection();
430 CurrentSectionID = MBBI->getSectionID();
431 }
433}
434
435/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
436MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
437 DebugLoc DL,
438 bool NoImplicit) {
439 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
440 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
441}
442
443/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
444/// identical in all ways except the instruction has no parent, prev, or next.
446MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
447 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
448 MachineInstr(*this, *Orig);
449}
450
451MachineInstr &MachineFunction::cloneMachineInstrBundle(
453 const MachineInstr &Orig) {
454 MachineInstr *FirstClone = nullptr;
456 while (true) {
457 MachineInstr *Cloned = CloneMachineInstr(&*I);
458 MBB.insert(InsertBefore, Cloned);
459 if (FirstClone == nullptr) {
460 FirstClone = Cloned;
461 } else {
462 Cloned->bundleWithPred();
463 }
464
465 if (!I->isBundledWithSucc())
466 break;
467 ++I;
468 }
469 // Copy over call info to the cloned instruction if needed. If Orig is in
470 // a bundle, copyAdditionalCallInfo takes care of finding the call instruction
471 // in the bundle.
473 copyAdditionalCallInfo(&Orig, FirstClone);
474 return *FirstClone;
475}
476
477/// Delete the given MachineInstr.
478///
479/// This function also serves as the MachineInstr destructor - the real
480/// ~MachineInstr() destructor must be empty.
481void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
482 // Verify that a call site info is at valid state. This assertion should
483 // be triggered during the implementation of support for the
484 // call site info of a new architecture. If the assertion is triggered,
485 // back trace will tell where to insert a call to updateCallSiteInfo().
486 assert((!MI->isCandidateForAdditionalCallInfo() ||
487 !CallSitesInfo.contains(MI)) &&
488 "Call site info was not updated!");
489 // Verify that the "called globals" info is in a valid state.
490 assert((!MI->isCandidateForAdditionalCallInfo() ||
491 !CalledGlobalsInfo.contains(MI)) &&
492 "Called globals info was not updated!");
493 // Strip it for parts. The operand array and the MI object itself are
494 // independently recyclable.
495 if (MI->Operands)
496 deallocateOperandArray(MI->CapOperands, MI->Operands);
497 // Don't call ~MachineInstr() which must be trivial anyway because
498 // ~MachineFunction drops whole lists of MachineInstrs wihout calling their
499 // destructors.
500 InstructionRecycler.Deallocate(Allocator, MI);
501}
502
503/// Allocate a new MachineBasicBlock. Use this instead of
504/// `new MachineBasicBlock'.
507 std::optional<UniqueBBID> BBID) {
509 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
510 MachineBasicBlock(*this, BB);
511 // Set BBID for `-basic-block-sections=list` and `-basic-block-address-map` to
512 // allow robust mapping of profiles to basic blocks.
513 if (Target.Options.BBAddrMap ||
514 Target.getBBSectionsType() == BasicBlockSection::List)
515 MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
516 return MBB;
517}
518
519/// Delete the given MachineBasicBlock.
521 assert(MBB->getParent() == this && "MBB parent mismatch!");
522 // Clean up any references to MBB in jump tables before deleting it.
523 if (JumpTableInfo)
524 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
525 MBB->~MachineBasicBlock();
526 BasicBlockRecycler.Deallocate(Allocator, MBB);
527}
528
531 Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
532 SyncScope::ID SSID, AtomicOrdering Ordering,
533 AtomicOrdering FailureOrdering) {
534 assert((!Size.hasValue() ||
535 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
536 "Unexpected an unknown size to be represented using "
537 "LocationSize::beforeOrAfter()");
538 return new (Allocator)
539 MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
540 Ordering, FailureOrdering);
541}
542
545 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
546 SyncScope::ID SSID, AtomicOrdering Ordering,
547 AtomicOrdering FailureOrdering) {
548 return new (Allocator)
549 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
550 Ordering, FailureOrdering);
551}
552
555 const MachinePointerInfo &PtrInfo,
557 assert((!Size.hasValue() ||
558 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
559 "Unexpected an unknown size to be represented using "
560 "LocationSize::beforeOrAfter()");
561 return new (Allocator)
562 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
563 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
565}
566
568 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
569 return new (Allocator)
570 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
571 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
573}
574
577 int64_t Offset, LLT Ty) {
578 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
579
580 // If there is no pointer value, the offset isn't tracked so we need to adjust
581 // the base alignment.
582 Align Alignment = PtrInfo.V.isNull()
584 : MMO->getBaseAlign();
585
586 // Do not preserve ranges, since we don't necessarily know what the high bits
587 // are anymore.
588 return new (Allocator) MachineMemOperand(
589 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
590 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
592}
593
596 const AAMDNodes &AAInfo) {
597 MachinePointerInfo MPI = MMO->getValue() ?
598 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
600
601 return new (Allocator) MachineMemOperand(
602 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
603 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
604 MMO->getFailureOrdering());
605}
606
610 return new (Allocator) MachineMemOperand(
611 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
612 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
614}
615
616MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
617 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
618 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
619 uint32_t CFIType, MDNode *MMRAs, Value *DS) {
620 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
621 PostInstrSymbol, HeapAllocMarker,
622 PCSections, CFIType, MMRAs, DS);
623}
624
626 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
627 llvm::copy(Name, Dest);
628 Dest[Name.size()] = 0;
629 return Dest;
630}
631
633 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
634 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
635 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
636 memset(Mask, 0, Size * sizeof(Mask[0]));
637 return Mask;
638}
639
641 int* AllocMask = Allocator.Allocate<int>(Mask.size());
642 copy(Mask, AllocMask);
643 return {AllocMask, Mask.size()};
644}
645
646#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
650#endif
651
655
656void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
657 OS << "# Machine code for function " << getName() << ": ";
658 getProperties().print(OS);
659 OS << '\n';
660
661 // Print Frame Information
662 FrameInfo->print(*this, OS);
663
664 // Print JumpTable Information
665 if (JumpTableInfo)
666 JumpTableInfo->print(OS);
667
668 // Print Constant Pool
669 ConstantPool->print(OS);
670
672
673 if (RegInfo && !RegInfo->livein_empty()) {
674 OS << "Function Live Ins: ";
676 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
677 OS << printReg(I->first, TRI);
678 if (I->second)
679 OS << " in " << printReg(I->second, TRI);
680 if (std::next(I) != E)
681 OS << ", ";
682 }
683 OS << '\n';
684 }
685
688 for (const auto &BB : *this) {
689 OS << '\n';
690 // If we print the whole function, print it at its most verbose level.
691 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
692 }
693
694 OS << "\n# End machine code for function " << getName() << ".\n\n";
695}
696
697/// True if this function needs frame moves for debug or exceptions.
699 // TODO: Ideally, what we'd like is to have a switch that allows emitting
700 // synchronous (precise at call-sites only) CFA into .eh_frame. However, even
701 // under this switch, we'd like .debug_frame to be precise when using -g. At
702 // this moment, there's no way to specify that some CFI directives go into
703 // .eh_frame only, while others go into .debug_frame only.
705 F.needsUnwindTableEntry() ||
706 !F.getParent()->debug_compile_units().empty();
707}
708
710 if (MDNode *Node = CB.getMetadata(llvm::LLVMContext::MD_call_target))
712
713 // Numeric callee_type ids are only for indirect calls.
714 if (!CB.isIndirectCall())
715 return;
716
717 MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
718 if (!CalleeTypeList)
719 return;
720
721 for (const MDOperand &Op : CalleeTypeList->operands()) {
722 MDNode *TypeMD = cast<MDNode>(Op);
723 MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
724 // Compute numeric type id from generalized type id string
725 uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
726 IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
727 CalleeTypeIds.push_back(
728 ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
729 }
730}
731
732template <>
734 : public DefaultDOTGraphTraits {
736
737 static std::string getGraphName(const MachineFunction *F) {
738 return ("CFG for '" + F->getName() + "' function").str();
739 }
740
742 const MachineFunction *Graph) {
743 std::string OutStr;
744 {
745 raw_string_ostream OSS(OutStr);
746
747 if (isSimple()) {
748 OSS << printMBBReference(*Node);
749 if (const BasicBlock *BB = Node->getBasicBlock())
750 OSS << ": " << BB->getName();
751 } else
752 Node->print(OSS);
753 }
754
755 if (OutStr[0] == '\n')
756 OutStr.erase(OutStr.begin());
757
758 // Process string output to make it nicer...
759 for (unsigned i = 0; i != OutStr.length(); ++i)
760 if (OutStr[i] == '\n') { // Left justify
761 OutStr[i] = '\\';
762 OutStr.insert(OutStr.begin() + i + 1, 'l');
763 }
764 return OutStr;
765 }
766};
767
769{
770#ifndef NDEBUG
771 ViewGraph(this, "mf" + getName());
772#else
773 errs() << "MachineFunction::viewCFG is only available in debug builds on "
774 << "systems with Graphviz or gv!\n";
775#endif // NDEBUG
776}
777
779{
780#ifndef NDEBUG
781 ViewGraph(this, "mf" + getName(), true);
782#else
783 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
784 << "systems with Graphviz or gv!\n";
785#endif // NDEBUG
786}
787
788/// Add the specified physical register as a live-in value and
789/// create a corresponding virtual register for it.
791 const TargetRegisterClass *RC) {
793 Register VReg = MRI.getLiveInVirtReg(PReg);
794 if (VReg) {
795 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
796 (void)VRegRC;
797 // A physical register can be added several times.
798 // Between two calls, the register class of the related virtual register
799 // may have been constrained to match some operation constraints.
800 // In that case, check that the current register class includes the
801 // physical register and is a sub class of the specified RC.
802 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
803 RC->hasSubClassEq(VRegRC))) &&
804 "Register class mismatch!");
805 return VReg;
806 }
807 VReg = MRI.createVirtualRegister(RC);
808 MRI.addLiveIn(PReg, VReg);
809 return VReg;
810}
811
812/// Return the MCSymbol for the specified non-empty jump table.
813/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
814/// normal 'L' label is returned.
816 bool isLinkerPrivate) const {
817 const DataLayout &DL = getDataLayout();
818 assert(JumpTableInfo && "No jump tables");
819 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
820
821 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
822 : DL.getInternalSymbolPrefix();
823 SmallString<60> Name;
825 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
826 return Ctx.getOrCreateSymbol(Name);
827}
828
829/// Return a function-local symbol to represent the PIC base.
831 const DataLayout &DL = getDataLayout();
832 return Ctx.getOrCreateSymbol(Twine(DL.getInternalSymbolPrefix()) +
833 Twine(getFunctionNumber()) + "$pb");
834}
835
836/// \name Exception Handling
837/// \{
838
841 unsigned N = LandingPads.size();
842 for (unsigned i = 0; i < N; ++i) {
843 LandingPadInfo &LP = LandingPads[i];
844 if (LP.LandingPadBlock == LandingPad)
845 return LP;
846 }
847
848 LandingPads.push_back(LandingPadInfo(LandingPad));
849 return LandingPads[N];
850}
851
853 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
855 LP.BeginLabels.push_back(BeginLabel);
856 LP.EndLabels.push_back(EndLabel);
857}
858
860 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
862 LP.LandingPadLabel = LandingPadLabel;
863
865 LandingPad->getBasicBlock()->getFirstNonPHIIt();
866 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
867 // If there's no typeid list specified, then "cleanup" is implicit.
868 // Otherwise, id 0 is reserved for the cleanup action.
869 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
870 LP.TypeIds.push_back(0);
871
872 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
873 // correct, but we need to do it this way because of how the DWARF EH
874 // emitter processes the clauses.
875 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
876 Value *Val = LPI->getClause(I - 1);
877 if (LPI->isCatch(I - 1)) {
878 LP.TypeIds.push_back(
880 } else {
881 // Add filters in a list.
882 auto *CVal = cast<Constant>(Val);
883 SmallVector<unsigned, 4> FilterList;
884 for (const Use &U : CVal->operands())
885 FilterList.push_back(
886 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
887
888 LP.TypeIds.push_back(getFilterIDFor(FilterList));
889 }
890 }
891
892 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
893 for (unsigned I = CPI->arg_size(); I != 0; --I) {
894 auto *TypeInfo =
895 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
896 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
897 }
898
899 } else {
900 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
901 }
902
903 return LandingPadLabel;
904}
905
907 ArrayRef<unsigned> Sites) {
908 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
909}
910
912 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
913 if (TypeInfos[i] == TI) return i + 1;
914
915 TypeInfos.push_back(TI);
916 return TypeInfos.size();
917}
918
920 // If the new filter coincides with the tail of an existing filter, then
921 // re-use the existing filter. Folding filters more than this requires
922 // re-ordering filters and/or their elements - probably not worth it.
923 for (unsigned i : FilterEnds) {
924 unsigned j = TyIds.size();
925
926 while (i && j)
927 if (FilterIds[--i] != TyIds[--j])
928 goto try_next;
929
930 if (!j)
931 // The new filter coincides with range [i, end) of the existing filter.
932 return -(1 + i);
933
934try_next:;
935 }
936
937 // Add the new filter.
938 int FilterID = -(1 + FilterIds.size());
939 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
940 llvm::append_range(FilterIds, TyIds);
941 FilterEnds.push_back(FilterIds.size());
942 FilterIds.push_back(0); // terminator
943 return FilterID;
944}
945
947MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
948 assert(MI->isCandidateForAdditionalCallInfo() &&
949 "Call site info refers only to call (MI) candidates");
950
951 if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
952 return CallSitesInfo.end();
953 return CallSitesInfo.find(MI);
954}
955
956/// Return the call machine instruction or find a call within bundle.
958 if (!MI->isBundle())
959 return MI;
960
961 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
962 getBundleEnd(MI->getIterator())))
963 if (BMI.isCandidateForAdditionalCallInfo())
964 return &BMI;
965
966 llvm_unreachable("Unexpected bundle without a call site candidate");
967}
968
970 assert(MI->shouldUpdateAdditionalCallInfo() &&
971 "Call info refers only to call (MI) candidates or "
972 "candidates inside bundles");
973
974 const MachineInstr *CallMI = getCallInstr(MI);
975
976 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
977 if (CSIt != CallSitesInfo.end())
978 CallSitesInfo.erase(CSIt);
979
980 CalledGlobalsInfo.erase(CallMI);
981}
982
984 const MachineInstr *New) {
986 "Call info refers only to call (MI) candidates or "
987 "candidates inside bundles");
988
989 if (!New->isCandidateForAdditionalCallInfo())
990 return eraseAdditionalCallInfo(Old);
991
992 const MachineInstr *OldCallMI = getCallInstr(Old);
993 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
994 if (CSIt != CallSitesInfo.end()) {
995 CallSiteInfo CSInfo = CSIt->second;
996 CallSitesInfo[New] = std::move(CSInfo);
997 }
998
999 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1000 if (CGIt != CalledGlobalsInfo.end()) {
1001 CalledGlobalInfo CGInfo = CGIt->second;
1002 CalledGlobalsInfo[New] = std::move(CGInfo);
1003 }
1004}
1005
1007 const MachineInstr *New) {
1009 "Call info refers only to call (MI) candidates or "
1010 "candidates inside bundles");
1011
1012 if (!New->isCandidateForAdditionalCallInfo())
1013 return eraseAdditionalCallInfo(Old);
1014
1015 const MachineInstr *OldCallMI = getCallInstr(Old);
1016 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1017 if (CSIt != CallSitesInfo.end()) {
1018 CallSiteInfo CSInfo = std::move(CSIt->second);
1019 CallSitesInfo.erase(CSIt);
1020 CallSitesInfo[New] = std::move(CSInfo);
1021 }
1022
1023 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1024 if (CGIt != CalledGlobalsInfo.end()) {
1025 CalledGlobalInfo CGInfo = std::move(CGIt->second);
1026 CalledGlobalsInfo.erase(CGIt);
1027 CalledGlobalsInfo[New] = std::move(CGInfo);
1028 }
1029}
1030
1034
1037 unsigned Subreg) {
1038 // Catch any accidental self-loops.
1039 assert(A.first != B.first);
1040 // Don't allow any substitutions _from_ the memory operand number.
1041 assert(A.second != DebugOperandMemNumber);
1042
1043 DebugValueSubstitutions.push_back({A, B, Subreg});
1044}
1045
1047 MachineInstr &New,
1048 unsigned MaxOperand) {
1049 // If the Old instruction wasn't tracked at all, there is no work to do.
1050 unsigned OldInstrNum = Old.peekDebugInstrNum();
1051 if (!OldInstrNum)
1052 return;
1053
1054 // Iterate over all operands looking for defs to create substitutions for.
1055 // Avoid creating new instr numbers unless we create a new substitution.
1056 // While this has no functional effect, it risks confusing someone reading
1057 // MIR output.
1058 // Examine all the operands, or the first N specified by the caller.
1059 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
1060 for (unsigned int I = 0; I < MaxOperand; ++I) {
1061 const auto &OldMO = Old.getOperand(I);
1062 auto &NewMO = New.getOperand(I);
1063 (void)NewMO;
1064
1065 if (!OldMO.isReg() || !OldMO.isDef())
1066 continue;
1067 assert(NewMO.isDef());
1068
1069 unsigned NewInstrNum = New.getDebugInstrNum();
1070 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1071 std::make_pair(NewInstrNum, I));
1072 }
1073}
1074
1079
1080 // Check whether this copy-like instruction has already been salvaged into
1081 // an operand pair.
1082 Register Dest;
1083 if (auto CopyDstSrc = TII.isCopyLikeInstr(MI)) {
1084 Dest = CopyDstSrc->Destination->getReg();
1085 } else {
1086 assert(MI.isSubregToReg());
1087 Dest = MI.getOperand(0).getReg();
1088 }
1089
1090 auto CacheIt = DbgPHICache.find(Dest);
1091 if (CacheIt != DbgPHICache.end())
1092 return CacheIt->second;
1093
1094 // Calculate the instruction number to use, or install a DBG_PHI.
1095 auto OperandPair = salvageCopySSAImpl(MI);
1096 DbgPHICache.insert({Dest, OperandPair});
1097 return OperandPair;
1098}
1099
1105
1106 // Chase the value read by a copy-like instruction back to the instruction
1107 // that ultimately _defines_ that value. This may pass:
1108 // * Through multiple intermediate copies, including subregister moves /
1109 // copies,
1110 // * Copies from physical registers that must then be traced back to the
1111 // defining instruction,
1112 // * Or, physical registers may be live-in to (only) the entry block, which
1113 // requires a DBG_PHI to be created.
1114 // We can pursue this problem in that order: trace back through copies,
1115 // optionally through a physical register, to a defining instruction. We
1116 // should never move from physreg to vreg. As we're still in SSA form, no need
1117 // to worry about partial definitions of registers.
1118
1119 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1120 // returns the register read and any subregister identifying which part is
1121 // read.
1122 auto GetRegAndSubreg =
1123 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1124 Register NewReg, OldReg;
1125 unsigned SubReg;
1126 if (Cpy.isCopy()) {
1127 OldReg = Cpy.getOperand(0).getReg();
1128 NewReg = Cpy.getOperand(1).getReg();
1129 SubReg = Cpy.getOperand(1).getSubReg();
1130 } else if (Cpy.isSubregToReg()) {
1131 OldReg = Cpy.getOperand(0).getReg();
1132 NewReg = Cpy.getOperand(1).getReg();
1133 SubReg = Cpy.getOperand(2).getImm();
1134 } else {
1135 auto CopyDetails = *TII.isCopyInstr(Cpy);
1136 const MachineOperand &Src = *CopyDetails.Source;
1137 const MachineOperand &Dest = *CopyDetails.Destination;
1138 OldReg = Dest.getReg();
1139 NewReg = Src.getReg();
1140 SubReg = Src.getSubReg();
1141 }
1142
1143 return {NewReg, SubReg};
1144 };
1145
1146 // First seek either the defining instruction, or a copy from a physreg.
1147 // During search, the current state is the current copy instruction, and which
1148 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1149 // deal with those later.
1150 auto State = GetRegAndSubreg(MI);
1151 auto CurInst = MI.getIterator();
1152 SmallVector<unsigned, 4> SubregsSeen;
1153 while (true) {
1154 // If we've found a copy from a physreg, first portion of search is over.
1155 if (!State.first.isVirtual())
1156 break;
1157
1158 // Record any subregister qualifier.
1159 if (State.second)
1160 SubregsSeen.push_back(State.second);
1161
1162 assert(MRI.hasOneDef(State.first));
1163 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1164 CurInst = Inst.getIterator();
1165
1166 // Any non-copy instruction is the defining instruction we're seeking.
1167 if (!Inst.isCopyLike() && !TII.isCopyLikeInstr(Inst))
1168 break;
1169 State = GetRegAndSubreg(Inst);
1170 };
1171
1172 // Helper lambda to apply additional subregister substitutions to a known
1173 // instruction/operand pair. Adds new (fake) substitutions so that we can
1174 // record the subregister. FIXME: this isn't very space efficient if multiple
1175 // values are tracked back through the same copies; cache something later.
1176 auto ApplySubregisters =
1178 for (unsigned Subreg : reverse(SubregsSeen)) {
1179 // Fetch a new instruction number, not attached to an actual instruction.
1180 unsigned NewInstrNumber = getNewDebugInstrNum();
1181 // Add a substitution from the "new" number to the known one, with a
1182 // qualifying subreg.
1183 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1184 // Return the new number; to find the underlying value, consumers need to
1185 // deal with the qualifying subreg.
1186 P = {NewInstrNumber, 0};
1187 }
1188 return P;
1189 };
1190
1191 // If we managed to find the defining instruction after COPYs, return an
1192 // instruction / operand pair after adding subregister qualifiers.
1193 if (State.first.isVirtual()) {
1194 // Virtual register def -- we can just look up where this happens.
1195 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1196 for (auto &MO : Inst->all_defs()) {
1197 if (MO.getReg() != State.first)
1198 continue;
1199 return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
1200 }
1201
1202 llvm_unreachable("Vreg def with no corresponding operand?");
1203 }
1204
1205 // Our search ended in a copy from a physreg: walk back up the function
1206 // looking for whatever defines the physreg.
1207 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1208 State = GetRegAndSubreg(*CurInst);
1209 Register RegToSeek = State.first;
1210
1211 auto RMII = CurInst->getReverseIterator();
1212 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1213 for (auto &ToExamine : PrevInstrs) {
1214 for (auto &MO : ToExamine.all_defs()) {
1215 // Test for operand that defines something aliasing RegToSeek.
1216 if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
1217 continue;
1218
1219 return ApplySubregisters(
1220 {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
1221 }
1222 }
1223
1224 MachineBasicBlock &InsertBB = *CurInst->getParent();
1225
1226 // We reached the start of the block before finding a defining instruction.
1227 // There are numerous scenarios where this can happen:
1228 // * Constant physical registers,
1229 // * Several intrinsics that allow LLVM-IR to read arbitary registers,
1230 // * Arguments in the entry block,
1231 // * Exception handling landing pads.
1232 // Validating all of them is too difficult, so just insert a DBG_PHI reading
1233 // the variable value at this position, rather than checking it makes sense.
1234
1235 // Create DBG_PHI for specified physreg.
1236 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1237 TII.get(TargetOpcode::DBG_PHI));
1238 Builder.addReg(State.first);
1239 unsigned NewNum = getNewDebugInstrNum();
1240 Builder.addImm(NewNum);
1241 return ApplySubregisters({NewNum, 0u});
1242}
1243
1245 auto *TII = getSubtarget().getInstrInfo();
1246
1247 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
1248 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
1249 MI.setDesc(RefII);
1250 MI.setDebugValueUndef();
1251 };
1252
1254 for (auto &MBB : *this) {
1255 for (auto &MI : MBB) {
1256 if (!MI.isDebugRef())
1257 continue;
1258
1259 bool IsValidRef = true;
1260
1261 for (MachineOperand &MO : MI.debug_operands()) {
1262 if (!MO.isReg())
1263 continue;
1264
1265 Register Reg = MO.getReg();
1266
1267 // Some vregs can be deleted as redundant in the meantime. Mark those
1268 // as DBG_VALUE $noreg. Additionally, some normal instructions are
1269 // quickly deleted, leaving dangling references to vregs with no def.
1270 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
1271 IsValidRef = false;
1272 break;
1273 }
1274
1275 assert(Reg.isVirtual());
1276 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1277
1278 // If we've found a copy-like instruction, follow it back to the
1279 // instruction that defines the source value, see salvageCopySSA docs
1280 // for why this is important.
1281 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1282 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
1283 MO.ChangeToDbgInstrRef(Result.first, Result.second);
1284 } else {
1285 // Otherwise, identify the operand number that the VReg refers to.
1286 unsigned OperandIdx = 0;
1287 for (const auto &DefMO : DefMI.operands()) {
1288 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
1289 break;
1290 ++OperandIdx;
1291 }
1292 assert(OperandIdx < DefMI.getNumOperands());
1293
1294 // Morph this instr ref to point at the given instruction and operand.
1295 unsigned ID = DefMI.getDebugInstrNum();
1296 MO.ChangeToDbgInstrRef(ID, OperandIdx);
1297 }
1298 }
1299
1300 if (!IsValidRef)
1301 MakeUndefDbgValue(MI);
1302 }
1303 }
1304}
1305
1307 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1308 // have optimized code inlined into this unoptimized code, however with
1309 // fewer and less aggressive optimizations happening, coverage and accuracy
1310 // should not suffer.
1311 if (getTarget().getOptLevel() == CodeGenOptLevel::None)
1312 return false;
1313
1314 // Don't use instr-ref if this function is marked optnone.
1315 if (F.hasFnAttribute(Attribute::OptimizeNone))
1316 return false;
1317
1318 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1319 return true;
1320
1321 return false;
1322}
1323
1325 return UseDebugInstrRef;
1326}
1327
1331
1332// Use one million as a high / reserved number.
1333const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1334
1335/// \}
1336
1337//===----------------------------------------------------------------------===//
1338// MachineJumpTableInfo implementation
1339//===----------------------------------------------------------------------===//
1340
1342 const std::vector<MachineBasicBlock *> &MBBs)
1344
1345/// Return the size of each entry in the jump table.
1347 // The size of a jump table entry is 4 bytes unless the entry is just the
1348 // address of a block, in which case it is the pointer size.
1349 switch (getEntryKind()) {
1351 return TD.getPointerSize();
1354 return 8;
1358 return 4;
1360 return 0;
1361 }
1362 llvm_unreachable("Unknown jump table encoding!");
1363}
1364
1365/// Return the alignment of each entry in the jump table.
1367 // The alignment of a jump table entry is the alignment of int32 unless the
1368 // entry is just the address of a block, in which case it is the pointer
1369 // alignment.
1370 switch (getEntryKind()) {
1372 return TD.getPointerABIAlignment(0).value();
1375 return TD.getABIIntegerTypeAlignment(64).value();
1379 return TD.getABIIntegerTypeAlignment(32).value();
1381 return 1;
1382 }
1383 llvm_unreachable("Unknown jump table encoding!");
1384}
1385
1386/// Create a new jump table entry in the jump table info.
1388 const std::vector<MachineBasicBlock*> &DestBBs) {
1389 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1390 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1391 return JumpTables.size()-1;
1392}
1393
1395 size_t JTI, MachineFunctionDataHotness Hotness) {
1396 assert(JTI < JumpTables.size() && "Invalid JTI!");
1397 // Record the largest hotness value.
1398 if (Hotness <= JumpTables[JTI].Hotness)
1399 return false;
1400
1401 JumpTables[JTI].Hotness = Hotness;
1402 return true;
1403}
1404
1405/// If Old is the target of any jump tables, update the jump tables to branch
1406/// to New instead.
1408 MachineBasicBlock *New) {
1409 assert(Old != New && "Not making a change?");
1410 bool MadeChange = false;
1411 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1412 ReplaceMBBInJumpTable(i, Old, New);
1413 return MadeChange;
1414}
1415
1416/// If MBB is present in any jump tables, remove it.
1418 bool MadeChange = false;
1419 for (MachineJumpTableEntry &JTE : JumpTables) {
1420 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1421 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1422 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1423 }
1424 return MadeChange;
1425}
1426
1427/// If Old is a target of the jump tables, update the jump table to branch to
1428/// New instead.
1430 MachineBasicBlock *Old,
1431 MachineBasicBlock *New) {
1432 assert(Old != New && "Not making a change?");
1433 bool MadeChange = false;
1434 MachineJumpTableEntry &JTE = JumpTables[Idx];
1435 for (MachineBasicBlock *&MBB : JTE.MBBs)
1436 if (MBB == Old) {
1437 MBB = New;
1438 MadeChange = true;
1439 }
1440 return MadeChange;
1441}
1442
1444 if (JumpTables.empty()) return;
1445
1446 OS << "Jump Tables:\n";
1447
1448 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1449 OS << printJumpTableEntryReference(i) << ':';
1450 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1451 OS << ' ' << printMBBReference(*MBB);
1452 OS << '\n';
1453 }
1454
1455 OS << '\n';
1456}
1457
1458#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1460#endif
1461
1463 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1464}
1465
1466//===----------------------------------------------------------------------===//
1467// MachineConstantPool implementation
1468//===----------------------------------------------------------------------===//
1469
1470void MachineConstantPoolValue::anchor() {}
1471
1473 return DL.getTypeAllocSize(Ty);
1474}
1475
1478 return Val.MachineCPVal->getSizeInBytes(DL);
1479 return DL.getTypeAllocSize(Val.ConstVal->getType());
1480}
1481
1484 return true;
1485 return Val.ConstVal->needsDynamicRelocation();
1486}
1487
1490 if (needsRelocation())
1492 switch (getSizeInBytes(*DL)) {
1493 case 4:
1495 case 8:
1497 case 16:
1499 case 32:
1501 default:
1502 return SectionKind::getReadOnly();
1503 }
1504}
1505
1507 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1508 // so keep track of which we've deleted to avoid double deletions.
1510 for (const MachineConstantPoolEntry &C : Constants)
1511 if (C.isMachineConstantPoolEntry()) {
1512 Deleted.insert(C.Val.MachineCPVal);
1513 delete C.Val.MachineCPVal;
1514 }
1515 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1516 if (Deleted.count(CPV) == 0)
1517 delete CPV;
1518 }
1519}
1520
1521/// Test whether the given two constants can be allocated the same constant pool
1522/// entry referenced by \param A.
1523static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1524 const DataLayout &DL) {
1525 // Handle the trivial case quickly.
1526 if (A == B) return true;
1527
1528 // If they have the same type but weren't the same constant, quickly
1529 // reject them.
1530 if (A->getType() == B->getType()) return false;
1531
1532 // We can't handle structs or arrays.
1533 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1534 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1535 return false;
1536
1537 // For now, only support constants with the same size.
1538 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1539 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1540 return false;
1541
1542 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();
1543
1544 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1545
1546 // Try constant folding a bitcast of both instructions to an integer. If we
1547 // get two identical ConstantInt's, then we are good to share them. We use
1548 // the constant folding APIs to do this so that we get the benefit of
1549 // DataLayout.
1550 if (isa<PointerType>(A->getType()))
1551 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1552 const_cast<Constant *>(A), IntTy, DL);
1553 else if (A->getType() != IntTy)
1554 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1555 IntTy, DL);
1556 if (isa<PointerType>(B->getType()))
1557 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1558 const_cast<Constant *>(B), IntTy, DL);
1559 else if (B->getType() != IntTy)
1560 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1561 IntTy, DL);
1562
1563 if (A != B)
1564 return false;
1565
1566 // Constants only safely match if A doesn't contain undef/poison.
1567 // As we'll be reusing A, it doesn't matter if B contain undef/poison.
1568 // TODO: Handle cases where A and B have the same undef/poison elements.
1569 // TODO: Merge A and B with mismatching undef/poison elements.
1570 return !ContainsUndefOrPoisonA;
1571}
1572
1573/// Create a new entry in the constant pool or return an existing one.
1574/// User must specify the log2 of the minimum required alignment for the object.
1576 Align Alignment) {
1577 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1578
1579 // Check to see if we already have this constant.
1580 //
1581 // FIXME, this could be made much more efficient for large constant pools.
1582 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1583 if (!Constants[i].isMachineConstantPoolEntry() &&
1584 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1585 if (Constants[i].getAlign() < Alignment)
1586 Constants[i].Alignment = Alignment;
1587 return i;
1588 }
1589
1590 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1591 return Constants.size()-1;
1592}
1593
1595 Align Alignment) {
1596 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1597
1598 // Check to see if we already have this constant.
1599 //
1600 // FIXME, this could be made much more efficient for large constant pools.
1601 int Idx = V->getExistingMachineCPValue(this, Alignment);
1602 if (Idx != -1) {
1603 MachineCPVsSharingEntries.insert(V);
1604 return (unsigned)Idx;
1605 }
1606
1607 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1608 return Constants.size()-1;
1609}
1610
1612 if (Constants.empty()) return;
1613
1614 OS << "Constant Pool:\n";
1615 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1616 OS << " cp#" << i << ": ";
1617 if (Constants[i].isMachineConstantPoolEntry())
1618 Constants[i].Val.MachineCPVal->print(OS);
1619 else
1620 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1621 OS << ", align=" << Constants[i].getAlign().value();
1622 OS << "\n";
1623 }
1624}
1625
1626//===----------------------------------------------------------------------===//
1627// Template specialization for MachineFunction implementation of
1628// ProfileSummaryInfo::getEntryCount().
1629//===----------------------------------------------------------------------===//
1630template <>
1631std::optional<Function::ProfileCount>
1632ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
1633 const llvm::MachineFunction *F) const {
1634 return F->getFunction().getEntryCount();
1635}
1636
1637#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1639#endif
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:661
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static cl::opt< unsigned > AlignAllFunctions("align-all-functions", cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " "means align on 16B boundaries)."), cl::init(0), cl::Hidden)
static const MachineInstr * getCallInstr(const MachineInstr *MI)
Return the call machine instruction or find a call within bundle.
static Align getFnStackAlignment(const TargetSubtargetInfo &STI, const Function &F)
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, const DataLayout &DL)
Test whether the given two constants can be allocated the same constant pool entry referenced by.
void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo)
static const char * getPropertyName(MachineFunctionProperties::Property Prop)
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define P(N)
Basic Register Allocator
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file defines the SmallVector class.
static const int BlockSize
Definition TarWriter.cpp:33
This file describes how to lower LLVM code to machine code.
void print(OutputBuffer &OB) const
void clear(AllocatorType &Allocator)
Release all the tracked allocations to the allocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
unsigned size_type
Definition BitVector.h:115
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Align getABIIntegerTypeAlignment(unsigned BitWidth) const
Returns the minimum ABI-required alignment for an integer type of the specified bitwidth.
Definition DataLayout.h:634
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
A debug info location.
Definition DebugLoc.h:123
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
Context object for machine code objects.
Definition MCContext.h:83
Describe properties that are true of each instruction in the target description file.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
A single uniqued string.
Definition Metadata.h:722
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
void setIsEndSection(bool V=true)
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MBBSectionID getSectionID() const
Returns the section ID of this basic block.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsBeginSection(bool V=true)
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool needsRelocation() const
This method classifies the entry according to whether or not it may generate a relocation entry.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
unsigned getSizeInBytes(const DataLayout &DL) const
SectionKind getSectionKind(const DataLayout *DL) const
Abstract base class for all machine specific constantpool value subclasses.
virtual unsigned getSizeInBytes(const DataLayout &DL) const
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
void dump() const
dump - Call print(cerr) to be called from the debugger.
void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about constant pool objects.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI void print(raw_ostream &OS) const
Print the MachineFunctionProperties in human-readable form.
MachineFunctionProperties & reset(Property P)
virtual void MF_HandleRemoval(MachineInstr &MI)=0
Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI)=0
Callback after an insertion. This should not modify the MI directly.
int getFilterIDFor(ArrayRef< unsigned > TyIds)
Return the id of the filter encoded by TyIds. This is function wide.
bool UseDebugInstrRef
Flag for whether this function contains DBG_VALUEs (false) or DBG_INSTR_REF (true).
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
std::pair< unsigned, unsigned > DebugInstrOperandPair
Pair of instruction number and operand number.
unsigned addFrameInst(const MCCFIInstruction &Inst)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
SmallVector< DebugSubstitution, 8 > DebugValueSubstitutions
Debug value substitutions: a collection of DebugSubstitution objects, recording changes in where a va...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
void viewCFGOnly() const
viewCFGOnly - This function is meant for use from the debugger.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New, unsigned MaxOperand=UINT_MAX)
Create substitutions for any tracked values in Old, to point at New.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFunction(Function &F, const TargetMachine &Target, const TargetSubtargetInfo &STI, MCContext &Ctx, unsigned FunctionNum)
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr, Value *DS=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void finalizeDebugInstrRefs()
Finalise any partially emitted debug instructions.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI)
Initialize the target specific MachineFunctionInfo.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef< unsigned > Sites)
Map the landing pad's EH symbol to the call site indexes.
void setUseDebugInstrRef(bool UseInstrRef)
Set whether this function will use instruction referencing or not.
LandingPadInfo & getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad)
Find or create an LandingPadInfo for the specified MachineBasicBlock.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
unsigned DebugInstrNumberingCount
A count of how many instructions in the function have had numbers assigned to them.
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Align getAlignment() const
getAlignment - Return the alignment of the function.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
static const unsigned int DebugOperandMemNumber
A reserved operand number representing the instructions memory operand, for instructions that have a ...
Function & getFunction()
Return the LLVM function that this machine code represents.
Align getPreferredAlignment() const
Returns the preferred alignment which comes from the function attributes (optsize,...
DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI)
const MachineBasicBlock & back() const
BasicBlockListType::iterator iterator
void setDebugInstrNumberingCount(unsigned Num)
Set value of DebugInstrNumberingCount field.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
void viewCFG() const
viewCFG - This function is meant for use from the debugger.
bool shouldUseDebugInstrRef() const
Determine whether, in the current machine configuration, we should use instruction referencing or not...
const MachineFunctionProperties & getProperties() const
Get the function properties.
void eraseAdditionalCallInfo(const MachineInstr *MI)
Following functions update call site info.
void RenumberBlocks(MachineBasicBlock *MBBFrom=nullptr)
RenumberBlocks - This discards all of the MachineBasicBlock numbers and recomputes them.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
int64_t estimateFunctionSizeInBytes()
Return an estimate of the function's code size, taking into account block and function alignment.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void copyAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Copy the call site info from Old to New.
VariableDbgInfoMapTy VariableDbgInfos
void assignBeginEndSections()
Assign IsBeginSection IsEndSection fields for basic blocks in this function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
DebugInstrOperandPair salvageCopySSA(MachineInstr &MI, DenseMap< Register, DebugInstrOperandPair > &DbgPHICache)
Find the underlying defining instruction / operand for a COPY instruction while in SSA form.
Representation of each machine instruction.
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
unsigned getNumOperands() const
Returns the total number of operands.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB)
RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
LLVM_ABI bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTables - If Old is the target of any jump tables, update the jump tables to branch to...
LLVM_ABI void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about jump tables.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
LLVM_ABI unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
LLVM_ABI void dump() const
dump - Call to stderr.
LLVM_ABI bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTable - If Old is a target of the jump tables, update the jump table to branch to New...
LLVM_ABI bool updateJumpTableEntryHotness(size_t JTI, MachineFunctionDataHotness Hotness)
JTEntryKind
JTEntryKind - This enum indicates how each entry of the jump table is represented and emitted.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
LLVM_ABI unsigned getEntryAlignment(const DataLayout &TD) const
getEntryAlignment - Return the alignment of each entry in the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
Flags
Flags values. These may be or'd together.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
std::vector< std::pair< MCRegister, Register > >::const_iterator livein_iterator
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
LLVM_ABI Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
const TargetRegisterInfo * getTargetRegisterInfo() const
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Simple wrapper around std::function<void(raw_ostream&)>.
Definition Printable.h:38
Wrapper class representing virtual and physical registers.
Definition Register.h:20
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
static SectionKind getMergeableConst4()
static SectionKind getReadOnlyWithRel()
static SectionKind getMergeableConst8()
static SectionKind getMergeableConst16()
static SectionKind getReadOnly()
static SectionKind getMergeableConst32()
SlotIndexes pass.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isStackRealignable() const
isStackRealignable - This method returns whether the stack can be realigned.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
Primary interface to the complete machine description for the target machine.
TargetOptions Options
unsigned ForceDwarfFrameSection
Emit DWARF debug frame section.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
uint64_t MD5Hash(const FunctionId &Obj)
Definition FunctionId.h:167
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MachineBasicBlock::instr_iterator getBundleStart(MachineBasicBlock::instr_iterator I)
Returns an iterator to the first instruction in the bundle containing I.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Printable printJumpTableEntryReference(unsigned Idx)
Prints a jump table entry reference.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
MachineFunctionDataHotness
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1885
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
std::string getNodeLabel(const MachineBasicBlock *Node, const MachineFunction *Graph)
static std::string getGraphName(const MachineFunction *F)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Represent subnormal handling kind for floating point instruction inputs and outputs.
This structure is used to retain landing pad info for the current function.
SmallVector< MCSymbol *, 1 > EndLabels
MachineBasicBlock * LandingPadBlock
SmallVector< MCSymbol *, 1 > BeginLabels
std::vector< int > TypeIds
SmallVector< ConstantInt *, 4 > CalleeTypeIds
Callee type ids.
MDNode * CallTarget
'call_target' metadata for the DISubprogram.
MachineJumpTableEntry - One jump table in the jump table info.
LLVM_ABI MachineJumpTableEntry(const std::vector< MachineBasicBlock * > &M)
std::vector< MachineBasicBlock * > MBBs
MBBs - The vector of basic blocks from which to create the jump table.
MachineFunctionDataHotness Hotness
The hotness of MJTE is inferred from the hotness of the source basic block(s) that reference it.
This class contains a discriminated union of information about pointers in memory operands,...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static void deleteNode(NodeTy *V)
Definition ilist.h:42