LLVM 23.0.0git
MachineFunction.cpp
Go to the documentation of this file.
1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
43#include "llvm/Config/llvm-config.h"
44#include "llvm/IR/Attributes.h"
45#include "llvm/IR/BasicBlock.h"
46#include "llvm/IR/Constant.h"
47#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GlobalValue.h"
52#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/IR/Module.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/SectionKind.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <utility>
76#include <vector>
77
79
80using namespace llvm;
81
82#define DEBUG_TYPE "codegen"
83
85 "align-all-functions",
86 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
87 "means align on 16B boundaries)."),
89
92
93 // clang-format off
94 switch(Prop) {
95 case P::FailedISel: return "FailedISel";
96 case P::IsSSA: return "IsSSA";
97 case P::Legalized: return "Legalized";
98 case P::NoPHIs: return "NoPHIs";
99 case P::NoVRegs: return "NoVRegs";
100 case P::RegBankSelected: return "RegBankSelected";
101 case P::Selected: return "Selected";
102 case P::TracksLiveness: return "TracksLiveness";
103 case P::TiedOpsRewritten: return "TiedOpsRewritten";
104 case P::FailsVerification: return "FailsVerification";
105 case P::FailedRegAlloc: return "FailedRegAlloc";
106 case P::TracksDebugUserValues: return "TracksDebugUserValues";
107 }
108 // clang-format on
109 llvm_unreachable("Invalid machine function property");
110}
111
// setUnsafeStackSize (static helper): for a SafeStack function carrying an
// MD_annotation tuple of the form {"unsafe-stack-size", <ConstantInt>},
// record that size on the MachineFrameInfo.
// NOTE(review): doxygen extraction — the signature line (orig. line 112) is
// elided here; presumably `static void setUnsafeStackSize(const Function &F,
// MachineFrameInfo &FrameInfo)` — confirm against upstream.
113 if (!F.hasFnAttribute(Attribute::SafeStack))
114 return;
115
116 auto *Existing =
117 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
118
// Only the exact two-operand {name, value} shape is recognized.
119 if (!Existing || Existing->getNumOperands() != 2)
120 return;
121
122 auto *MetadataName = "unsafe-stack-size";
123 if (auto &N = Existing->getOperand(0)) {
124 if (N.equalsStr(MetadataName)) {
125 if (auto &Op = Existing->getOperand(1)) {
// Extracts the constant payload; mdconst::extract asserts the operand
// really is a ConstantInt.
126 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
127 FrameInfo.setUnsafeStackSize(Val);
128 }
129 }
130 }
131}
132
133// Pin the vtable to this file.
// Out-of-line key function: defining anchor() here makes this translation
// unit the home of Delegate's vtable, avoiding vtable duplication.
134void MachineFunction::Delegate::anchor() {}
135
// Print the set property names as a comma-separated list.
// NOTE(review): doxygen extraction — the signature line (orig. line 136) is
// elided; this is the body of MachineFunctionProperties::print(raw_ostream&).
137 const char *Separator = "";
138 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
139 if (!Properties[I])
140 continue;
// Separator is empty before the first printed property, ", " afterwards.
141 OS << Separator << getPropertyName(static_cast<Property>(I));
142 Separator = ", ";
143 }
144}
145
146//===----------------------------------------------------------------------===//
147// MachineFunction implementation
148//===----------------------------------------------------------------------===//
149
150// Out-of-line virtual method.
152
154 MBB->getParent()->deleteMachineBasicBlock(MBB);
155}
156
158 const Function &F) {
159 if (auto MA = F.getFnStackAlign())
160 return *MA;
161 return STI.getFrameLowering()->getStackAlign();
162}
163
165 const TargetSubtargetInfo &STI, MCContext &Ctx,
166 unsigned FunctionNum)
167 : F(F), Target(Target), STI(STI), Ctx(Ctx) {
168 FunctionNumber = FunctionNum;
169 init();
170}
171
172void MachineFunction::handleInsertion(MachineInstr &MI) {
173 if (TheDelegate)
174 TheDelegate->MF_HandleInsertion(MI);
175}
176
177void MachineFunction::handleRemoval(MachineInstr &MI) {
178 if (TheDelegate)
179 TheDelegate->MF_HandleRemoval(MI);
180}
181
183 const MCInstrDesc &TID) {
184 if (TheDelegate)
185 TheDelegate->MF_HandleChangeDesc(MI, TID);
186}
187
// One-time initialization of a freshly constructed MachineFunction: sets
// initial properties, allocates MachineRegisterInfo/MachineFrameInfo from the
// function's bump allocator, derives alignment, and sets up EH info.
// NOTE(review): doxygen extraction — several guard lines are elided inside
// this body (orig. lines 211, 222, 227, 232); the assignments below them are
// conditional in the real source. Do not edit this block without the
// upstream file.
188void MachineFunction::init() {
189 // Assume the function starts in SSA form with correct liveness.
190 Properties.setIsSSA();
191 Properties.setTracksLiveness();
192 RegInfo = new (Allocator) MachineRegisterInfo(this);
193
194 MFInfo = nullptr;
195
196 // We can realign the stack if the target supports it and the user hasn't
197 // explicitly asked us not to.
198 bool CanRealignSP = STI.getFrameLowering()->isStackRealignable() &&
199 !F.hasFnAttribute("no-realign-stack");
200 bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
201 F.hasFnAttribute("stackrealign");
202 FrameInfo = new (Allocator) MachineFrameInfo(
203 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
204 /*ForcedRealign=*/ForceRealignSP && CanRealignSP);
205
// Propagate the IR-level "unsafe-stack-size" annotation, if present.
206 setUnsafeStackSize(F, *FrameInfo);
207
208 if (F.hasFnAttribute(Attribute::StackAlignment))
209 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
210
// NOTE(review): guard at orig. line 211 elided before this assignment.
212 Alignment = STI.getTargetLowering()->getMinFunctionAlignment();
213
214 // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
215 // to load a type hash before the function label. Ensure functions are aligned
216 // by a least 4 to avoid unaligned access, which is especially important for
217 // -mno-unaligned-access.
218 if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
219 F.getMetadata(LLVMContext::MD_kcfi_type))
220 Alignment = std::max(Alignment, Align(4));
221
// NOTE(review): guard at orig. line 222 (the -align-all-functions option
// check) elided before this assignment.
223 Alignment = Align(1ULL << AlignAllFunctions);
224
225 JumpTableInfo = nullptr;
226
// NOTE(review): personality-classification condition at orig. line 227
// elided; WinEH info is only allocated for matching personalities.
228 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
229 WinEHInfo = new (Allocator) WinEHFuncInfo();
230 }
231
// NOTE(review): condition at orig. line 232 elided (Wasm EH personality).
233 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
234 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
235 }
236
237 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
238 "Can't create a MachineFunction using a Module with a "
239 "Target-incompatible DataLayout attached\n");
240
241 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
242}
243
245 const TargetSubtargetInfo &STI) {
246 assert(!MFInfo && "MachineFunctionInfo already set");
247 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
248}
249
253
// Tear down all per-function state. Most MachineInstr/MachineOperand memory
// lives in the BumpPtrAllocator and is dropped wholesale; only objects with
// real destructors (MBBs, RegInfo, FrameInfo, ...) are destroyed explicitly.
// NOTE(review): doxygen extraction — orig. line 277 is elided between the
// CodeViewAnnotations.clear() and the RegInfo teardown.
254void MachineFunction::clear() {
255 Properties.reset();
256
257 // Clear JumpTableInfo first. Otherwise, every MBB we delete would do a
258 // linear search over the jump table entries to find and erase itself.
259 if (JumpTableInfo) {
260 JumpTableInfo->~MachineJumpTableInfo();
261 Allocator.Deallocate(JumpTableInfo);
262 JumpTableInfo = nullptr;
263 }
264
265 // Don't call destructors on MachineInstr and MachineOperand. All of their
266 // memory comes from the BumpPtrAllocator which is about to be purged.
267 //
268 // Do call MachineBasicBlock destructors, it contains std::vectors.
269 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
270 I->Insts.clearAndLeakNodesUnsafely();
271 MBBNumbering.clear();
272
273 InstructionRecycler.clear(Allocator);
274 OperandRecycler.clear(Allocator);
275 BasicBlockRecycler.clear(Allocator);
276 CodeViewAnnotations.clear();
// Placement-allocated objects: run the destructor, then return the memory
// to the bump allocator explicitly.
278 if (RegInfo) {
279 RegInfo->~MachineRegisterInfo();
280 Allocator.Deallocate(RegInfo);
281 }
282 if (MFInfo) {
283 MFInfo->~MachineFunctionInfo();
284 Allocator.Deallocate(MFInfo);
285 }
286
287 FrameInfo->~MachineFrameInfo();
288 Allocator.Deallocate(FrameInfo);
289
290 ConstantPool->~MachineConstantPool();
291 Allocator.Deallocate(ConstantPool);
292
293 if (WinEHInfo) {
294 WinEHInfo->~WinEHFuncInfo();
295 Allocator.Deallocate(WinEHInfo);
296 }
297
298 if (WasmEHInfo) {
299 WasmEHInfo->~WasmEHFuncInfo();
300 Allocator.Deallocate(WasmEHInfo);
301 }
302}
303
305 return F.getDataLayout();
306}
307
308/// Get the JumpTableInfo for this function.
309/// If it does not already exist, allocate one.
311getOrCreateJumpTableInfo(unsigned EntryKind) {
312 if (JumpTableInfo) return JumpTableInfo;
313
314 JumpTableInfo = new (Allocator)
316 return JumpTableInfo;
317}
318
320 return F.getDenormalMode(FPType);
321}
322
323/// Should we be emitting segmented stack stuff for the function
325 return getFunction().hasFnAttribute("split-stack");
326}
327
329 Align PrefAlignment;
330
331 if (MaybeAlign A = F.getPreferredAlignment())
332 PrefAlignment = *A;
333 else if (!F.hasOptSize())
334 PrefAlignment = STI.getTargetLowering()->getPrefFunctionAlignment();
335 else
336 PrefAlignment = Align(1);
337
338 return std::max(PrefAlignment, getAlignment());
339}
340
341[[nodiscard]] unsigned
343 FrameInstructions.push_back(Inst);
344 return FrameInstructions.size() - 1;
345}
346
347/// This discards all of the MachineBasicBlock numbers and recomputes them.
348/// This guarantees that the MBB numbers are sequential, dense, and match the
349/// ordering of the blocks within the function. If a specific MachineBasicBlock
350/// is specified, only that block and those after it are renumbered.
// NOTE(review): doxygen extraction — the signature (orig. line 351) and the
// iterator declaration (orig. line 353) are elided; MBBI/E are declared there.
352 if (empty()) { MBBNumbering.clear(); return; }
354 if (MBB == nullptr)
355 MBBI = begin();
356 else
357 MBBI = MBB->getIterator();
358
359 // Figure out the block number this should have.
360 unsigned BlockNo = 0;
361 if (MBBI != begin())
362 BlockNo = std::prev(MBBI)->getNumber() + 1;
363
364 for (; MBBI != E; ++MBBI, ++BlockNo) {
365 if (MBBI->getNumber() != (int)BlockNo) {
366 // Remove use of the old number.
367 if (MBBI->getNumber() != -1) {
368 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
369 "MBB number mismatch!");
370 MBBNumbering[MBBI->getNumber()] = nullptr;
371 }
372
373 // If BlockNo is already taken, set that block's number to -1.
374 if (MBBNumbering[BlockNo])
375 MBBNumbering[BlockNo]->setNumber(-1);
376
377 MBBNumbering[BlockNo] = &*MBBI;
378 MBBI->setNumber(BlockNo);
379 }
380 }
381
382 // Okay, all the blocks are renumbered. If we have compactified the block
383 // numbering, shrink MBBNumbering now.
384 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
385 MBBNumbering.resize(BlockNo);
// Bump the epoch so stale MBB-number-keyed caches can detect invalidation.
386 MBBNumberingEpoch++;
387}
388
// Estimate the function's byte size by summing per-instruction sizes and
// accounting for inter-block alignment padding (worst case when a block's
// alignment exceeds the function's own alignment).
// NOTE(review): doxygen extraction — the signature (orig. lines 389-390) and
// the iterator declaration (orig. line 392) are elided; MBBI/E and TII are
// established there.
391 const Align FunctionAlignment = getAlignment();
393 /// Offset - Distance from the beginning of the function to the end
394 /// of the basic block.
395 int64_t Offset = 0;
396
397 for (; MBBI != E; ++MBBI) {
398 const Align Alignment = MBBI->getAlignment();
399 int64_t BlockSize = 0;
400
401 for (auto &MI : *MBBI) {
402 BlockSize += TII.getInstSizeInBytes(MI);
403 }
404
405 int64_t OffsetBB;
406 if (Alignment <= FunctionAlignment) {
// Alignment relative to the function start is known exactly.
407 OffsetBB = alignTo(Offset, Alignment);
408 } else {
409 // The alignment of this MBB is larger than the function's alignment, so
410 // we can't tell whether or not it will insert nops. Assume that it will.
411 OffsetBB = alignTo(Offset, Alignment) + Alignment.value() -
412 FunctionAlignment.value();
413 }
414 Offset = OffsetBB + BlockSize;
415 }
416
417 return Offset;
418}
419
420/// This method iterates over the basic blocks and assigns their IsBeginSection
421/// and IsEndSection fields. This must be called after MBB layout is finalized
422/// and the SectionID's are assigned to MBBs.
// NOTE(review): doxygen extraction — the signature (orig. lines 423-424) and
// the trailing statement at orig. line 433 are elided (the first and last
// blocks' begin/end flags are set on those lines in the real source).
425 auto CurrentSectionID = front().getSectionID();
426 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
// Still in the same section: nothing to mark at this boundary.
427 if (MBBI->getSectionID() == CurrentSectionID)
428 continue;
// Section change: this block starts a section, its predecessor ends one.
429 MBBI->setIsBeginSection();
430 std::prev(MBBI)->setIsEndSection();
431 CurrentSectionID = MBBI->getSectionID();
432 }
434}
435
436/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
437MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
438 DebugLoc DL,
439 bool NoImplicit) {
440 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
441 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
442}
443
444/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
445/// identical in all ways except the instruction has no parent, prev, or next.
447MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
448 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
449 MachineInstr(*this, *Orig);
450}
451
// Clone an entire instruction bundle (or a single unbundled instruction),
// inserting the clones before InsertBefore and re-establishing the bundle
// links between them. Returns the first cloned instruction.
// NOTE(review): doxygen extraction — parts of the signature (orig. line 453),
// the iterator initialization (orig. line 456) and the call-info guard
// (orig. line 473) are elided here.
452MachineInstr &MachineFunction::cloneMachineInstrBundle(
454 const MachineInstr &Orig) {
455 MachineInstr *FirstClone = nullptr;
457 while (true) {
458 MachineInstr *Cloned = CloneMachineInstr(&*I);
459 MBB.insert(InsertBefore, Cloned);
460 if (FirstClone == nullptr) {
461 FirstClone = Cloned;
462 } else {
// Link each subsequent clone to its predecessor to rebuild the bundle.
463 Cloned->bundleWithPred();
464 }
465
466 if (!I->isBundledWithSucc())
467 break;
468 ++I;
469 }
470 // Copy over call info to the cloned instruction if needed. If Orig is in
471 // a bundle, copyAdditionalCallInfo takes care of finding the call instruction
472 // in the bundle.
474 copyAdditionalCallInfo(&Orig, FirstClone);
475 return *FirstClone;
476}
477
478/// Delete the given MachineInstr.
479///
480/// This function also serves as the MachineInstr destructor - the real
481/// ~MachineInstr() destructor must be empty.
482void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
483 // Verify that a call site info is at valid state. This assertion should
484 // be triggered during the implementation of support for the
485 // call site info of a new architecture. If the assertion is triggered,
486 // back trace will tell where to insert a call to updateCallSiteInfo().
487 assert((!MI->isCandidateForAdditionalCallInfo() ||
488 !CallSitesInfo.contains(MI)) &&
489 "Call site info was not updated!");
490 // Verify that the "called globals" info is in a valid state.
491 assert((!MI->isCandidateForAdditionalCallInfo() ||
492 !CalledGlobalsInfo.contains(MI)) &&
493 "Called globals info was not updated!");
494 // Strip it for parts. The operand array and the MI object itself are
495 // independently recyclable.
496 if (MI->Operands)
497 deallocateOperandArray(MI->CapOperands, MI->Operands);
498 // Don't call ~MachineInstr() which must be trivial anyway because
499 // ~MachineFunction drops whole lists of MachineInstrs wihout calling their
500 // destructors.
501 InstructionRecycler.Deallocate(Allocator, MI)
503
504/// Allocate a new MachineBasicBlock. Use this instead of
505/// `new MachineBasicBlock'.
// NOTE(review): doxygen extraction — the signature (orig. lines 506-507) and
// the `MachineBasicBlock *MBB =` line (orig. line 509) are elided.
508 std::optional<UniqueBBID> BBID) {
510 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
511 MachineBasicBlock(*this, BB);
512 // Set BBID for `-basic-block-sections=list` and `-basic-block-address-map` to
513 // allow robust mapping of profiles to basic blocks.
514 if (Target.Options.BBAddrMap ||
515 Target.getBBSectionsType() == BasicBlockSection::List)
// Use the caller-supplied BBID when given; otherwise mint a fresh one.
516 MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
517 return MBB;
518}
519
520/// Delete the given MachineBasicBlock.
// NOTE(review): doxygen extraction — the signature (orig. line 521) is elided.
522 assert(MBB->getParent() == this && "MBB parent mismatch!");
523 // Clean up any references to MBB in jump tables before deleting it.
524 if (JumpTableInfo)
525 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
// Run the real destructor, then return the memory to the recycler.
526 MBB->~MachineBasicBlock();
527 BasicBlockRecycler.Deallocate(Allocator, MBB);
528}
529
532 Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
533 SyncScope::ID SSID, AtomicOrdering Ordering,
534 AtomicOrdering FailureOrdering) {
535 assert((!Size.hasValue() ||
536 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
537 "Unexpected an unknown size to be represented using "
538 "LocationSize::beforeOrAfter()");
539 return new (Allocator)
540 MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
541 Ordering, FailureOrdering);
542}
543
546 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
547 SyncScope::ID SSID, AtomicOrdering Ordering,
548 AtomicOrdering FailureOrdering) {
549 return new (Allocator)
550 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
551 Ordering, FailureOrdering);
552}
553
556 const MachinePointerInfo &PtrInfo,
558 assert((!Size.hasValue() ||
559 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
560 "Unexpected an unknown size to be represented using "
561 "LocationSize::beforeOrAfter()");
562 return new (Allocator)
563 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
564 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
566}
567
569 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
570 return new (Allocator)
571 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
572 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
574}
575
578 int64_t Offset, LLT Ty) {
579 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
580
581 // If there is no pointer value, the offset isn't tracked so we need to adjust
582 // the base alignment.
583 Align Alignment = PtrInfo.V.isNull()
585 : MMO->getBaseAlign();
586
587 // Do not preserve ranges, since we don't necessarily know what the high bits
588 // are anymore.
589 return new (Allocator) MachineMemOperand(
590 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
591 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
593}
594
597 const AAMDNodes &AAInfo) {
598 MachinePointerInfo MPI = MMO->getValue() ?
599 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
601
602 return new (Allocator) MachineMemOperand(
603 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
604 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
605 MMO->getFailureOrdering());
606}
607
611 return new (Allocator) MachineMemOperand(
612 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
613 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
615}
616
617MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
618 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
619 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
620 uint32_t CFIType, MDNode *MMRAs, Value *DS) {
621 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
622 PostInstrSymbol, HeapAllocMarker,
623 PCSections, CFIType, MMRAs, DS);
624}
625
627 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
628 llvm::copy(Name, Dest);
629 Dest[Name.size()] = 0;
630 return Dest;
631}
632
634 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
635 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
636 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
637 memset(Mask, 0, Size * sizeof(Mask[0]));
638 return Mask;
639}
640
642 int* AllocMask = Allocator.Allocate<int>(Mask.size());
643 copy(Mask, AllocMask);
644 return {AllocMask, Mask.size()};
645}
646
647#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
651#endif
652
656
// Print a human-readable dump of the whole machine function: properties,
// frame info, jump tables, constant pool, live-ins, then every basic block.
// NOTE(review): doxygen extraction — orig. lines 672 (TRI initialization),
// 676 (live-in iterator declaration) and 687-688 (ModuleSlotTracker setup)
// are elided from this body.
657void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
658 OS << "# Machine code for function " << getName() << ": ";
659 getProperties().print(OS);
660 OS << '\n';
661
662 // Print Frame Information
663 FrameInfo->print(*this, OS);
664
665 // Print JumpTable Information
666 if (JumpTableInfo)
667 JumpTableInfo->print(OS);
668
669 // Print Constant Pool
670 ConstantPool->print(OS);
671
673
674 if (RegInfo && !RegInfo->livein_empty()) {
675 OS << "Function Live Ins: ";
677 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
// Physical live-in register, optionally followed by the vreg it maps to.
678 OS << printReg(I->first, TRI);
679 if (I->second)
680 OS << " in " << printReg(I->second, TRI);
681 if (std::next(I) != E)
682 OS << ", ";
683 }
684 OS << '\n';
685 }
686
689 for (const auto &BB : *this) {
690 OS << '\n';
691 // If we print the whole function, print it at its most verbose level.
692 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
693 }
694
695 OS << "\n# End machine code for function " << getName() << ".\n\n";
696}
697
698/// True if this function needs frame moves for debug or exceptions.
700 // TODO: Ideally, what we'd like is to have a switch that allows emitting
701 // synchronous (precise at call-sites only) CFA into .eh_frame. However, even
702 // under this switch, we'd like .debug_frame to be precise when using -g. At
703 // this moment, there's no way to specify that some CFI directives go into
704 // .eh_frame only, while others go into .debug_frame only.
706 F.needsUnwindTableEntry() ||
707 !F.getParent()->debug_compile_units().empty();
708}
709
// Collect numeric callee-type ids for a call site into CalleeTypeIds:
// either taken from explicit metadata on the call, or — for indirect calls —
// computed as the MD5 hash of each generalized type-id string attached via
// !callee_type metadata.
// NOTE(review): doxygen extraction — the signature (orig. line 710) and the
// body of the first `if` (orig. line 712) are elided; the enclosing function
// name cannot be confirmed from this view.
711 if (MDNode *Node = CB.getMetadata(llvm::LLVMContext::MD_call_target))
713
714 // Numeric callee_type ids are only for indirect calls.
715 if (!CB.isIndirectCall())
716 return;
717
718 MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
719 if (!CalleeTypeList)
720 return;
721
722 for (const MDOperand &Op : CalleeTypeList->operands()) {
723 MDNode *TypeMD = cast<MDNode>(Op);
724 MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
725 // Compute numeric type id from generalized type id string
726 uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
727 IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
728 CalleeTypeIds.push_back(
729 ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
730 }
731}
732
// GraphViz traits specialization so a MachineFunction's CFG can be rendered
// by ViewGraph (used by viewCFG()/viewCFGOnly() below).
// NOTE(review): doxygen extraction — the struct header line (orig. line 734,
// presumably `struct DOTGraphTraits<const MachineFunction *>`) and the
// constructor line (orig. line 736) are elided.
733template <>
735 : public DefaultDOTGraphTraits {
737
738 static std::string getGraphName(const MachineFunction *F) {
739 return ("CFG for '" + F->getName() + "' function").str();
740 }
741
// Node label: just the MBB reference (plus IR block name) in simple mode,
// or the full block printout otherwise, re-flowed for DOT's \l line breaks.
743 const MachineFunction *Graph) {
744 std::string OutStr;
745 {
746 raw_string_ostream OSS(OutStr);
747
748 if (isSimple()) {
749 OSS << printMBBReference(*Node);
750 if (const BasicBlock *BB = Node->getBasicBlock())
751 OSS << ": " << BB->getName();
752 } else
753 Node->print(OSS);
754 }
755
// NOTE(review): assumes OutStr is non-empty here — both branches above
// print something; confirm if this is ever reachable with an empty string.
756 if (OutStr[0] == '\n')
757 OutStr.erase(OutStr.begin());
758
759 // Process string output to make it nicer...
760 for (unsigned i = 0; i != OutStr.length(); ++i)
761 if (OutStr[i] == '\n') { // Left justify
762 OutStr[i] = '\\';
763 OutStr.insert(OutStr.begin() + i + 1, 'l');
764 }
765 return OutStr;
766 }
767};
768
770{
771#ifndef NDEBUG
772 ViewGraph(this, "mf" + getName());
773#else
774 errs() << "MachineFunction::viewCFG is only available in debug builds on "
775 << "systems with Graphviz or gv!\n";
776#endif // NDEBUG
777}
778
780{
781#ifndef NDEBUG
782 ViewGraph(this, "mf" + getName(), true);
783#else
784 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
785 << "systems with Graphviz or gv!\n";
786#endif // NDEBUG
787}
788
789/// Add the specified physical register as a live-in value and
790/// create a corresponding virtual register for it.
// NOTE(review): doxygen extraction — the first signature line (orig. line 791)
// and the MRI declaration (orig. line 793) are elided.
792 const TargetRegisterClass *RC) {
794 Register VReg = MRI.getLiveInVirtReg(PReg);
795 if (VReg) {
// Already registered: just validate the register class and reuse the vreg.
796 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
797 (void)VRegRC;
798 // A physical register can be added several times.
799 // Between two calls, the register class of the related virtual register
800 // may have been constrained to match some operation constraints.
801 // In that case, check that the current register class includes the
802 // physical register and is a sub class of the specified RC.
803 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
804 RC->hasSubClassEq(VRegRC))) &&
805 "Register class mismatch!");
806 return VReg;
807 }
// First time: create the vreg and record the (physreg -> vreg) live-in pair.
808 VReg = MRI.createVirtualRegister(RC);
809 MRI.addLiveIn(PReg, VReg);
810 return VReg;
811}
812
813/// Return the MCSymbol for the specified non-empty jump table.
814/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
815/// normal 'L' label is returned.
// NOTE(review): doxygen extraction — the first signature line (orig. line 816)
// and the raw_svector_ostream line (orig. line 825) are elided.
817 bool isLinkerPrivate) const {
818 const DataLayout &DL = getDataLayout();
819 assert(JumpTableInfo && "No jump tables");
820 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
821
// Choose the label prefix: linker-private ('l...') vs. assembler-private.
822 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
823 : DL.getPrivateGlobalPrefix();
824 SmallString<60> Name;
826 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
827 return Ctx.getOrCreateSymbol(Name);
828}
829
830/// Return a function-local symbol to represent the PIC base.
// NOTE(review): doxygen extraction — the signature (orig. line 831) is elided.
// The symbol is "<private-prefix><function-number>$pb".
832 const DataLayout &DL = getDataLayout();
833 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
834 Twine(getFunctionNumber()) + "$pb");
835}
836
837/// \name Exception Handling
838/// \{
839
// Find the LandingPadInfo entry for this landing-pad block, creating one at
// the end of LandingPads if none exists yet.
// NOTE(review): doxygen extraction — the signature (orig. lines 840-841) is
// elided. Lookup is a linear scan; fine for the typical small pad count.
842 unsigned N = LandingPads.size();
843 for (unsigned i = 0; i < N; ++i) {
844 LandingPadInfo &LP = LandingPads[i];
845 if (LP.LandingPadBlock == LandingPad)
846 return LP;
847 }
848
// Not found: append a fresh record; N indexes the newly added element.
849 LandingPads.push_back(LandingPadInfo(LandingPad));
850 return LandingPads[N];
851}
852
854 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
856 LP.BeginLabels.push_back(BeginLabel);
857 LP.EndLabels.push_back(EndLabel);
858}
859
// Record EH information for a landing-pad block: create its label and derive
// the catch/filter type ids from the IR landingpad/catchpad at the block
// start. Returns the new landing-pad label.
// NOTE(review): doxygen extraction — the signature (orig. line 860), the LP
// lookup line (orig. line 862), the FirstI declaration start (orig. line 865),
// and orig. lines 879-880 inside the catch branch are elided.
861 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
863 LP.LandingPadLabel = LandingPadLabel;
864
866 LandingPad->getBasicBlock()->getFirstNonPHIIt();
867 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
868 // If there's no typeid list specified, then "cleanup" is implicit.
869 // Otherwise, id 0 is reserved for the cleanup action.
870 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
871 LP.TypeIds.push_back(0);
872
873 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
874 // correct, but we need to do it this way because of how the DWARF EH
875 // emitter processes the clauses.
876 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
877 Value *Val = LPI->getClause(I - 1);
878 if (LPI->isCatch(I - 1)) {
879 LP.TypeIds.push_back(
881 } else {
882 // Add filters in a list.
883 auto *CVal = cast<Constant>(Val);
884 SmallVector<unsigned, 4> FilterList;
885 for (const Use &U : CVal->operands())
886 FilterList.push_back(
887 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
888
// Filters get negative ids (see getFilterIDFor below).
889 LP.TypeIds.push_back(getFilterIDFor(FilterList));
890 }
891 }
892
893 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
// Windows EH catchpad: each argument is a type descriptor (reverse order).
894 for (unsigned I = CPI->arg_size(); I != 0; --I) {
895 auto *TypeInfo =
896 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
897 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
898 }
899
900 } else {
901 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
902 }
903
904 return LandingPadLabel;
905}
906
908 ArrayRef<unsigned> Sites) {
909 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
910}
911
// Return the 1-based type-info id for TI, appending it to TypeInfos on first
// use (ids are 1-based because 0 means "cleanup" in the EH tables).
// NOTE(review): doxygen extraction — the signature (orig. line 912) is elided.
913 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
914 if (TypeInfos[i] == TI) return i + 1;
915
916 TypeInfos.push_back(TI);
917 return TypeInfos.size();
918}
919
// Return the (negative) filter id for the given list of type ids, re-using
// the tail of an existing stored filter when possible. Filter ids are
// encoded as -(1 + offset into FilterIds).
// NOTE(review): doxygen extraction — the signature (orig. line 920) is elided.
// The goto/label pair below implements a labelled-continue over FilterEnds.
921 // If the new filter coincides with the tail of an existing filter, then
922 // re-use the existing filter. Folding filters more than this requires
923 // re-ordering filters and/or their elements - probably not worth it.
924 for (unsigned i : FilterEnds) {
925 unsigned j = TyIds.size();
926
// Walk both sequences backwards from the candidate filter's end.
927 while (i && j)
928 if (FilterIds[--i] != TyIds[--j])
929 goto try_next;
930
931 if (!j)
932 // The new filter coincides with range [i, end) of the existing filter.
933 return -(1 + i);
934
935try_next:;
936 }
937
938 // Add the new filter.
939 int FilterID = -(1 + FilterIds.size());
940 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
941 llvm::append_range(FilterIds, TyIds);
// Remember where this filter ends so later tail-matching can scan it.
942 FilterEnds.push_back(FilterIds.size());
943 FilterIds.push_back(0); // terminator
944 return FilterID;
945}
946
// Look up the call-site-info entry for MI. Returns end() when call site info
// emission is disabled, so callers uniformly treat "disabled" as "not found".
// NOTE(review): doxygen extraction — the return-type line (orig. line 947,
// presumably `MachineFunction::CallSiteInfoMap::iterator`) is elided.
948MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
949 assert(MI->isCandidateForAdditionalCallInfo() &&
950 "Call site info refers only to call (MI) candidates");
951
952 if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
953 return CallSitesInfo.end();
954 return CallSitesInfo.find(MI);
955}
956
957/// Return the call machine instruction or find a call within bundle.
// NOTE(review): doxygen extraction — the signature (orig. line 958) is elided.
959 if (!MI->isBundle())
960 return MI;
961
// Scan the bundle for the (unique) call-info candidate inside it.
962 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
963 getBundleEnd(MI->getIterator())))
964 if (BMI.isCandidateForAdditionalCallInfo())
965 return &BMI;
966
967 llvm_unreachable("Unexpected bundle without a call site candidate");
968}
969
// Drop the call-site and called-globals records attached to MI (or to the
// call inside MI's bundle).
// NOTE(review): doxygen extraction — the signature (orig. line 970) is elided.
971 assert(MI->shouldUpdateAdditionalCallInfo() &&
972 "Call info refers only to call (MI) candidates or "
973 "candidates inside bundles");
974
975 const MachineInstr *CallMI = getCallInstr(MI);
976
977 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
978 if (CSIt != CallSitesInfo.end())
979 CallSitesInfo.erase(CSIt);
980
981 CalledGlobalsInfo.erase(CallMI);
982}
983
// Copy call-site and called-globals info from Old to New, leaving Old's
// entries in place (contrast with moveAdditionalCallInfo below, which erases
// them). Degrades to an erase of Old's info when New is not a candidate.
// NOTE(review): doxygen extraction — the first signature line (orig. line 984)
// and the assert's first line (orig. line 986) are elided.
985 const MachineInstr *New) {
987 "Call info refers only to call (MI) candidates or "
988 "candidates inside bundles");
989
990 if (!New->isCandidateForAdditionalCallInfo())
991 return eraseAdditionalCallInfo(Old);
992
993 const MachineInstr *OldCallMI = getCallInstr(Old);
994 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
995 if (CSIt != CallSitesInfo.end()) {
// Copy (not move-out) so Old's entry stays valid.
996 CallSiteInfo CSInfo = CSIt->second;
997 CallSitesInfo[New] = std::move(CSInfo);
998 }
999
1000 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1001 if (CGIt != CalledGlobalsInfo.end()) {
1002 CalledGlobalInfo CGInfo = CGIt->second;
1003 CalledGlobalsInfo[New] = std::move(CGInfo);
1004 }
1005}
1006
// Move call-site and called-globals info from Old to New, erasing Old's
// entries (contrast with copyAdditionalCallInfo above). Degrades to an erase
// of Old's info when New is not a candidate.
// NOTE(review): doxygen extraction — the first signature line (orig. line
// 1007) and the assert's first line (orig. line 1009) are elided.
1008 const MachineInstr *New) {
1010 "Call info refers only to call (MI) candidates or "
1011 "candidates inside bundles");
1012
1013 if (!New->isCandidateForAdditionalCallInfo())
1014 return eraseAdditionalCallInfo(Old);
1015
1016 const MachineInstr *OldCallMI = getCallInstr(Old);
1017 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1018 if (CSIt != CallSitesInfo.end()) {
// Move out and erase: Old's entry is intentionally destroyed here.
1019 CallSiteInfo CSInfo = std::move(CSIt->second);
1020 CallSitesInfo.erase(CSIt);
1021 CallSitesInfo[New] = std::move(CSInfo);
1022 }
1023
1024 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1025 if (CGIt != CalledGlobalsInfo.end()) {
1026 CalledGlobalInfo CGInfo = std::move(CGIt->second);
1027 CalledGlobalsInfo.erase(CGIt);
1028 CalledGlobalsInfo[New] = std::move(CGInfo);
1029 }
1030}
1031
1035
1038 unsigned Subreg) {
1039 // Catch any accidental self-loops.
1040 assert(A.first != B.first);
1041 // Don't allow any substitutions _from_ the memory operand number.
1042 assert(A.second != DebugOperandMemNumber);
1043
1044 DebugValueSubstitutions.push_back({A, B, Subreg});
1045}
1046
// For each register def of Old (up to MaxOperand operands), record a debug
// value substitution mapping Old's instruction number/operand to New's, so
// debug users keep tracking values after Old is replaced by New.
// NOTE(review): doxygen extraction — the first signature line (orig. line
// 1047) is elided.
1048 MachineInstr &New,
1049 unsigned MaxOperand) {
1050 // If the Old instruction wasn't tracked at all, there is no work to do.
1051 unsigned OldInstrNum = Old.peekDebugInstrNum();
1052 if (!OldInstrNum)
1053 return;
1054
1055 // Iterate over all operands looking for defs to create substitutions for.
1056 // Avoid creating new instr numbers unless we create a new substitution.
1057 // While this has no functional effect, it risks confusing someone reading
1058 // MIR output.
1059 // Examine all the operands, or the first N specified by the caller.
1060 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
1061 for (unsigned int I = 0; I < MaxOperand; ++I) {
1062 const auto &OldMO = Old.getOperand(I);
1063 auto &NewMO = New.getOperand(I);
1064 (void)NewMO;
1065
1066 if (!OldMO.isReg() || !OldMO.isDef())
1067 continue;
// Operand layout of Old and New must agree: same index must be a def.
1068 assert(NewMO.isDef());
1069
// getDebugInstrNum() lazily assigns New a number on first use.
1070 unsigned NewInstrNum = New.getDebugInstrNum();
1071 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1072 std::make_pair(NewInstrNum, I));
1073 }
1074}
1075
// Cached front-end for salvageCopySSAImpl: resolve the destination register
// of a copy-like instruction (or SUBREG_TO_REG) and memoize the resulting
// debug instruction-number/operand pair in DbgPHICache.
// NOTE(review): doxygen extraction — the signature (orig. lines 1076-1079) is
// elided from this view.
1080
1081 // Check whether this copy-like instruction has already been salvaged into
1082 // an operand pair.
1083 Register Dest;
1084 if (auto CopyDstSrc = TII.isCopyLikeInstr(MI)) {
1085 Dest = CopyDstSrc->Destination->getReg();
1086 } else {
// Only SUBREG_TO_REG is expected here besides copy-like instructions.
1087 assert(MI.isSubregToReg());
1088 Dest = MI.getOperand(0).getReg();
1089 }
1090
1091 auto CacheIt = DbgPHICache.find(Dest);
1092 if (CacheIt != DbgPHICache.end())
1093 return CacheIt->second;
1094
1095 // Calculate the instruction number to use, or install a DBG_PHI.
1096 auto OperandPair = salvageCopySSAImpl(MI);
1097 DbgPHICache.insert({Dest, OperandPair});
1098 return OperandPair;
1099}
1100
1104 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
1106
1107 // Chase the value read by a copy-like instruction back to the instruction
1108 // that ultimately _defines_ that value. This may pass:
1109 // * Through multiple intermediate copies, including subregister moves /
1110 // copies,
1111 // * Copies from physical registers that must then be traced back to the
1112 // defining instruction,
1113 // * Or, physical registers may be live-in to (only) the entry block, which
1114 // requires a DBG_PHI to be created.
1115 // We can pursue this problem in that order: trace back through copies,
1116 // optionally through a physical register, to a defining instruction. We
1117 // should never move from physreg to vreg. As we're still in SSA form, no need
1118 // to worry about partial definitions of registers.
1119
1120 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1121 // returns the register read and any subregister identifying which part is
1122 // read.
1123 auto GetRegAndSubreg =
1124 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1125 Register NewReg, OldReg;
1126 unsigned SubReg;
1127 if (Cpy.isCopy()) {
1128 OldReg = Cpy.getOperand(0).getReg();
1129 NewReg = Cpy.getOperand(1).getReg();
1130 SubReg = Cpy.getOperand(1).getSubReg();
1131 } else if (Cpy.isSubregToReg()) {
1132 OldReg = Cpy.getOperand(0).getReg();
1133 NewReg = Cpy.getOperand(1).getReg();
1134 SubReg = Cpy.getOperand(2).getImm();
1135 } else {
1136 auto CopyDetails = *TII.isCopyInstr(Cpy);
1137 const MachineOperand &Src = *CopyDetails.Source;
1138 const MachineOperand &Dest = *CopyDetails.Destination;
1139 OldReg = Dest.getReg();
1140 NewReg = Src.getReg();
1141 SubReg = Src.getSubReg();
1142 }
1143
1144 return {NewReg, SubReg};
1145 };
1146
1147 // First seek either the defining instruction, or a copy from a physreg.
1148 // During search, the current state is the current copy instruction, and which
1149 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1150 // deal with those later.
1151 auto State = GetRegAndSubreg(MI);
1152 auto CurInst = MI.getIterator();
1153 SmallVector<unsigned, 4> SubregsSeen;
1154 while (true) {
1155 // If we've found a copy from a physreg, first portion of search is over.
1156 if (!State.first.isVirtual())
1157 break;
1158
1159 // Record any subregister qualifier.
1160 if (State.second)
1161 SubregsSeen.push_back(State.second);
1162
1163 assert(MRI.hasOneDef(State.first));
1164 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1165 CurInst = Inst.getIterator();
1166
1167 // Any non-copy instruction is the defining instruction we're seeking.
1168 if (!Inst.isCopyLike() && !TII.isCopyLikeInstr(Inst))
1169 break;
1170 State = GetRegAndSubreg(Inst);
1171 };
1172
1173 // Helper lambda to apply additional subregister substitutions to a known
1174 // instruction/operand pair. Adds new (fake) substitutions so that we can
1175 // record the subregister. FIXME: this isn't very space efficient if multiple
1176 // values are tracked back through the same copies; cache something later.
1177 auto ApplySubregisters =
1179 for (unsigned Subreg : reverse(SubregsSeen)) {
1180 // Fetch a new instruction number, not attached to an actual instruction.
1181 unsigned NewInstrNumber = getNewDebugInstrNum();
1182 // Add a substitution from the "new" number to the known one, with a
1183 // qualifying subreg.
1184 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1185 // Return the new number; to find the underlying value, consumers need to
1186 // deal with the qualifying subreg.
1187 P = {NewInstrNumber, 0};
1188 }
1189 return P;
1190 };
1191
1192 // If we managed to find the defining instruction after COPYs, return an
1193 // instruction / operand pair after adding subregister qualifiers.
1194 if (State.first.isVirtual()) {
1195 // Virtual register def -- we can just look up where this happens.
1196 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1197 for (auto &MO : Inst->all_defs()) {
1198 if (MO.getReg() != State.first)
1199 continue;
1200 return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
1201 }
1202
1203 llvm_unreachable("Vreg def with no corresponding operand?");
1204 }
1205
1206 // Our search ended in a copy from a physreg: walk back up the function
1207 // looking for whatever defines the physreg.
1208 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1209 State = GetRegAndSubreg(*CurInst);
1210 Register RegToSeek = State.first;
1211
1212 auto RMII = CurInst->getReverseIterator();
1213 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1214 for (auto &ToExamine : PrevInstrs) {
1215 for (auto &MO : ToExamine.all_defs()) {
1216 // Test for operand that defines something aliasing RegToSeek.
1217 if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
1218 continue;
1219
1220 return ApplySubregisters(
1221 {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
1222 }
1223 }
1224
1225 MachineBasicBlock &InsertBB = *CurInst->getParent();
1226
1227 // We reached the start of the block before finding a defining instruction.
1228 // There are numerous scenarios where this can happen:
1229 // * Constant physical registers,
1230 // * Several intrinsics that allow LLVM-IR to read arbitary registers,
1231 // * Arguments in the entry block,
1232 // * Exception handling landing pads.
1233 // Validating all of them is too difficult, so just insert a DBG_PHI reading
1234 // the variable value at this position, rather than checking it makes sense.
1235
1236 // Create DBG_PHI for specified physreg.
1237 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1238 TII.get(TargetOpcode::DBG_PHI));
1239 Builder.addReg(State.first);
1240 unsigned NewNum = getNewDebugInstrNum();
1241 Builder.addImm(NewNum);
1242 return ApplySubregisters({NewNum, 0u});
1243}
1244
1246 auto *TII = getSubtarget().getInstrInfo();
1247
1248 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
1249 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
1250 MI.setDesc(RefII);
1251 MI.setDebugValueUndef();
1252 };
1253
1255 for (auto &MBB : *this) {
1256 for (auto &MI : MBB) {
1257 if (!MI.isDebugRef())
1258 continue;
1259
1260 bool IsValidRef = true;
1261
1262 for (MachineOperand &MO : MI.debug_operands()) {
1263 if (!MO.isReg())
1264 continue;
1265
1266 Register Reg = MO.getReg();
1267
1268 // Some vregs can be deleted as redundant in the meantime. Mark those
1269 // as DBG_VALUE $noreg. Additionally, some normal instructions are
1270 // quickly deleted, leaving dangling references to vregs with no def.
1271 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
1272 IsValidRef = false;
1273 break;
1274 }
1275
1276 assert(Reg.isVirtual());
1277 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1278
1279 // If we've found a copy-like instruction, follow it back to the
1280 // instruction that defines the source value, see salvageCopySSA docs
1281 // for why this is important.
1282 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1283 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
1284 MO.ChangeToDbgInstrRef(Result.first, Result.second);
1285 } else {
1286 // Otherwise, identify the operand number that the VReg refers to.
1287 unsigned OperandIdx = 0;
1288 for (const auto &DefMO : DefMI.operands()) {
1289 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
1290 break;
1291 ++OperandIdx;
1292 }
1293 assert(OperandIdx < DefMI.getNumOperands());
1294
1295 // Morph this instr ref to point at the given instruction and operand.
1296 unsigned ID = DefMI.getDebugInstrNum();
1297 MO.ChangeToDbgInstrRef(ID, OperandIdx);
1298 }
1299 }
1300
1301 if (!IsValidRef)
1302 MakeUndefDbgValue(MI);
1303 }
1304 }
1305}
1306
1308 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1309 // have optimized code inlined into this unoptimized code, however with
1310 // fewer and less aggressive optimizations happening, coverage and accuracy
1311 // should not suffer.
1312 if (getTarget().getOptLevel() == CodeGenOptLevel::None)
1313 return false;
1314
1315 // Don't use instr-ref if this function is marked optnone.
1316 if (F.hasFnAttribute(Attribute::OptimizeNone))
1317 return false;
1318
1319 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1320 return true;
1321
1322 return false;
1323}
1324
1326 return UseDebugInstrRef;
1327}
1328
1332
1333// Use one million as a high / reserved number.
1334const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1335
1336/// \}
1337
1338//===----------------------------------------------------------------------===//
1339// MachineJumpTableInfo implementation
1340//===----------------------------------------------------------------------===//
1341
1343 const std::vector<MachineBasicBlock *> &MBBs)
1345
1346/// Return the size of each entry in the jump table.
1348 // The size of a jump table entry is 4 bytes unless the entry is just the
1349 // address of a block, in which case it is the pointer size.
1350 switch (getEntryKind()) {
1352 return TD.getPointerSize();
1355 return 8;
1359 return 4;
1361 return 0;
1362 }
1363 llvm_unreachable("Unknown jump table encoding!");
1364}
1365
1366/// Return the alignment of each entry in the jump table.
1368 // The alignment of a jump table entry is the alignment of int32 unless the
1369 // entry is just the address of a block, in which case it is the pointer
1370 // alignment.
1371 switch (getEntryKind()) {
1373 return TD.getPointerABIAlignment(0).value();
1376 return TD.getABIIntegerTypeAlignment(64).value();
1380 return TD.getABIIntegerTypeAlignment(32).value();
1382 return 1;
1383 }
1384 llvm_unreachable("Unknown jump table encoding!");
1385}
1386
1387/// Create a new jump table entry in the jump table info.
1389 const std::vector<MachineBasicBlock*> &DestBBs) {
1390 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1391 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1392 return JumpTables.size()-1;
1393}
1394
1396 size_t JTI, MachineFunctionDataHotness Hotness) {
1397 assert(JTI < JumpTables.size() && "Invalid JTI!");
1398 // Record the largest hotness value.
1399 if (Hotness <= JumpTables[JTI].Hotness)
1400 return false;
1401
1402 JumpTables[JTI].Hotness = Hotness;
1403 return true;
1404}
1405
1406/// If Old is the target of any jump tables, update the jump tables to branch
1407/// to New instead.
1409 MachineBasicBlock *New) {
1410 assert(Old != New && "Not making a change?");
1411 bool MadeChange = false;
1412 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1413 ReplaceMBBInJumpTable(i, Old, New);
1414 return MadeChange;
1415}
1416
1417/// If MBB is present in any jump tables, remove it.
1419 bool MadeChange = false;
1420 for (MachineJumpTableEntry &JTE : JumpTables) {
1421 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1422 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1423 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1424 }
1425 return MadeChange;
1426}
1427
1428/// If Old is a target of the jump tables, update the jump table to branch to
1429/// New instead.
1431 MachineBasicBlock *Old,
1432 MachineBasicBlock *New) {
1433 assert(Old != New && "Not making a change?");
1434 bool MadeChange = false;
1435 MachineJumpTableEntry &JTE = JumpTables[Idx];
1436 for (MachineBasicBlock *&MBB : JTE.MBBs)
1437 if (MBB == Old) {
1438 MBB = New;
1439 MadeChange = true;
1440 }
1441 return MadeChange;
1442}
1443
1445 if (JumpTables.empty()) return;
1446
1447 OS << "Jump Tables:\n";
1448
1449 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1450 OS << printJumpTableEntryReference(i) << ':';
1451 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1452 OS << ' ' << printMBBReference(*MBB);
1453 OS << '\n';
1454 }
1455
1456 OS << '\n';
1457}
1458
1459#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1461#endif
1462
1464 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1465}
1466
1467//===----------------------------------------------------------------------===//
1468// MachineConstantPool implementation
1469//===----------------------------------------------------------------------===//
1470
1471void MachineConstantPoolValue::anchor() {}
1472
1474 return DL.getTypeAllocSize(Ty);
1475}
1476
1479 return Val.MachineCPVal->getSizeInBytes(DL);
1480 return DL.getTypeAllocSize(Val.ConstVal->getType());
1481}
1482
1485 return true;
1486 return Val.ConstVal->needsDynamicRelocation();
1487}
1488
1491 if (needsRelocation())
1493 switch (getSizeInBytes(*DL)) {
1494 case 4:
1496 case 8:
1498 case 16:
1500 case 32:
1502 default:
1503 return SectionKind::getReadOnly();
1504 }
1505}
1506
1508 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1509 // so keep track of which we've deleted to avoid double deletions.
1511 for (const MachineConstantPoolEntry &C : Constants)
1512 if (C.isMachineConstantPoolEntry()) {
1513 Deleted.insert(C.Val.MachineCPVal);
1514 delete C.Val.MachineCPVal;
1515 }
1516 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1517 if (Deleted.count(CPV) == 0)
1518 delete CPV;
1519 }
1520}
1521
1522/// Test whether the given two constants can be allocated the same constant pool
1523/// entry referenced by \param A.
1524static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1525 const DataLayout &DL) {
1526 // Handle the trivial case quickly.
1527 if (A == B) return true;
1528
1529 // If they have the same type but weren't the same constant, quickly
1530 // reject them.
1531 if (A->getType() == B->getType()) return false;
1532
1533 // We can't handle structs or arrays.
1534 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1535 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1536 return false;
1537
1538 // For now, only support constants with the same size.
1539 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1540 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1541 return false;
1542
1543 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();
1544
1545 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1546
1547 // Try constant folding a bitcast of both instructions to an integer. If we
1548 // get two identical ConstantInt's, then we are good to share them. We use
1549 // the constant folding APIs to do this so that we get the benefit of
1550 // DataLayout.
1551 if (isa<PointerType>(A->getType()))
1552 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1553 const_cast<Constant *>(A), IntTy, DL);
1554 else if (A->getType() != IntTy)
1555 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1556 IntTy, DL);
1557 if (isa<PointerType>(B->getType()))
1558 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1559 const_cast<Constant *>(B), IntTy, DL);
1560 else if (B->getType() != IntTy)
1561 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1562 IntTy, DL);
1563
1564 if (A != B)
1565 return false;
1566
1567 // Constants only safely match if A doesn't contain undef/poison.
1568 // As we'll be reusing A, it doesn't matter if B contain undef/poison.
1569 // TODO: Handle cases where A and B have the same undef/poison elements.
1570 // TODO: Merge A and B with mismatching undef/poison elements.
1571 return !ContainsUndefOrPoisonA;
1572}
1573
1574/// Create a new entry in the constant pool or return an existing one.
1575/// User must specify the log2 of the minimum required alignment for the object.
1577 Align Alignment) {
1578 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1579
1580 // Check to see if we already have this constant.
1581 //
1582 // FIXME, this could be made much more efficient for large constant pools.
1583 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1584 if (!Constants[i].isMachineConstantPoolEntry() &&
1585 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1586 if (Constants[i].getAlign() < Alignment)
1587 Constants[i].Alignment = Alignment;
1588 return i;
1589 }
1590
1591 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1592 return Constants.size()-1;
1593}
1594
1596 Align Alignment) {
1597 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1598
1599 // Check to see if we already have this constant.
1600 //
1601 // FIXME, this could be made much more efficient for large constant pools.
1602 int Idx = V->getExistingMachineCPValue(this, Alignment);
1603 if (Idx != -1) {
1604 MachineCPVsSharingEntries.insert(V);
1605 return (unsigned)Idx;
1606 }
1607
1608 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1609 return Constants.size()-1;
1610}
1611
1613 if (Constants.empty()) return;
1614
1615 OS << "Constant Pool:\n";
1616 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1617 OS << " cp#" << i << ": ";
1618 if (Constants[i].isMachineConstantPoolEntry())
1619 Constants[i].Val.MachineCPVal->print(OS);
1620 else
1621 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1622 OS << ", align=" << Constants[i].getAlign().value();
1623 OS << "\n";
1624 }
1625}
1626
1627//===----------------------------------------------------------------------===//
1628// Template specialization for MachineFunction implementation of
1629// ProfileSummaryInfo::getEntryCount().
1630//===----------------------------------------------------------------------===//
1631template <>
1632std::optional<Function::ProfileCount>
1633ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
1634 const llvm::MachineFunction *F) const {
1635 return F->getFunction().getEntryCount();
1636}
1637
1638#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1640#endif
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:661
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static cl::opt< unsigned > AlignAllFunctions("align-all-functions", cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " "means align on 16B boundaries)."), cl::init(0), cl::Hidden)
static const MachineInstr * getCallInstr(const MachineInstr *MI)
Return the call machine instruction or find a call within bundle.
static Align getFnStackAlignment(const TargetSubtargetInfo &STI, const Function &F)
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, const DataLayout &DL)
Test whether the given two constants can be allocated the same constant pool entry referenced by.
void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo)
static const char * getPropertyName(MachineFunctionProperties::Property Prop)
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define P(N)
Basic Register Allocator
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file defines the SmallVector class.
static const int BlockSize
Definition TarWriter.cpp:33
This file describes how to lower LLVM code to machine code.
void print(OutputBuffer &OB) const
void clear(AllocatorType &Allocator)
Release all the tracked allocations to the allocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
unsigned size_type
Definition BitVector.h:115
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Align getABIIntegerTypeAlignment(unsigned BitWidth) const
Returns the minimum ABI-required alignment for an integer type of the specified bitwidth.
Definition DataLayout.h:630
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
A debug info location.
Definition DebugLoc.h:123
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
Context object for machine code objects.
Definition MCContext.h:83
Describe properties that are true of each instruction in the target description file.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
A single uniqued string.
Definition Metadata.h:722
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
void setIsEndSection(bool V=true)
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MBBSectionID getSectionID() const
Returns the section ID of this basic block.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsBeginSection(bool V=true)
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool needsRelocation() const
This method classifies the entry according to whether or not it may generate a relocation entry.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
unsigned getSizeInBytes(const DataLayout &DL) const
SectionKind getSectionKind(const DataLayout *DL) const
Abstract base class for all machine specific constantpool value subclasses.
virtual unsigned getSizeInBytes(const DataLayout &DL) const
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
void dump() const
dump - Call print(cerr) to be called from the debugger.
void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about constant pool objects.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI void print(raw_ostream &OS) const
Print the MachineFunctionProperties in human-readable form.
MachineFunctionProperties & reset(Property P)
virtual void MF_HandleRemoval(MachineInstr &MI)=0
Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI)=0
Callback after an insertion. This should not modify the MI directly.
int getFilterIDFor(ArrayRef< unsigned > TyIds)
Return the id of the filter encoded by TyIds. This is function wide.
bool UseDebugInstrRef
Flag for whether this function contains DBG_VALUEs (false) or DBG_INSTR_REF (true).
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to New.
std::pair< unsigned, unsigned > DebugInstrOperandPair
Pair of instruction number and operand number.
unsigned addFrameInst(const MCCFIInstruction &Inst)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
SmallVector< DebugSubstitution, 8 > DebugValueSubstitutions
Debug value substitutions: a collection of DebugSubstitution objects, recording changes in where a va...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
void viewCFGOnly() const
viewCFGOnly - This function is meant for use from the debugger.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New, unsigned MaxOperand=UINT_MAX)
Create substitutions for any tracked values in Old, to point at New.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFunction(Function &F, const TargetMachine &Target, const TargetSubtargetInfo &STI, MCContext &Ctx, unsigned FunctionNum)
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr, Value *DS=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void finalizeDebugInstrRefs()
Finalise any partially emitted debug instructions.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI)
Initialize the target specific MachineFunctionInfo.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef< unsigned > Sites)
Map the landing pad's EH symbol to the call site indexes.
void setUseDebugInstrRef(bool UseInstrRef)
Set whether this function will use instruction referencing or not.
LandingPadInfo & getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad)
Find or create an LandingPadInfo for the specified MachineBasicBlock.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
unsigned DebugInstrNumberingCount
A count of how many instructions in the function have had numbers assigned to them.
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Align getAlignment() const
getAlignment - Return the alignment of the function.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
static const unsigned int DebugOperandMemNumber
A reserved operand number representing the instructions memory operand, for instructions that have a ...
Function & getFunction()
Return the LLVM function that this machine code represents.
Align getPreferredAlignment() const
Returns the preferred alignment which comes from the function attributes (optsize,...
DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI)
const MachineBasicBlock & back() const
BasicBlockListType::iterator iterator
void setDebugInstrNumberingCount(unsigned Num)
Set value of DebugInstrNumberingCount field.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
void viewCFG() const
viewCFG - This function is meant for use from the debugger.
bool shouldUseDebugInstrRef() const
Determine whether, in the current machine configuration, we should use instruction referencing or not...
const MachineFunctionProperties & getProperties() const
Get the function properties.
void eraseAdditionalCallInfo(const MachineInstr *MI)
Following functions update call site info.
void RenumberBlocks(MachineBasicBlock *MBBFrom=nullptr)
RenumberBlocks - This discards all of the MachineBasicBlock numbers and recomputes them.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
int64_t estimateFunctionSizeInBytes()
Return an estimate of the function's code size, taking into account block and function alignment.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void copyAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Copy the call site info from Old to New.
VariableDbgInfoMapTy VariableDbgInfos
void assignBeginEndSections()
Assign IsBeginSection IsEndSection fields for basic blocks in this function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
DebugInstrOperandPair salvageCopySSA(MachineInstr &MI, DenseMap< Register, DebugInstrOperandPair > &DbgPHICache)
Find the underlying defining instruction / operand for a COPY instruction while in SSA form.
Representation of each machine instruction.
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
unsigned getNumOperands() const
Returns the total number of operands.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB)
RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
LLVM_ABI bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTables - If Old is the target of any jump tables, update the jump tables to branch to...
LLVM_ABI void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about jump tables.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
LLVM_ABI unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
LLVM_ABI void dump() const
dump - Call to stderr.
LLVM_ABI bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTable - If Old is a target of the jump tables, update the jump table to branch to New...
LLVM_ABI bool updateJumpTableEntryHotness(size_t JTI, MachineFunctionDataHotness Hotness)
JTEntryKind
JTEntryKind - This enum indicates how each entry of the jump table is represented and emitted.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
LLVM_ABI unsigned getEntryAlignment(const DataLayout &TD) const
getEntryAlignment - Return the alignment of each entry in the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
Flags
Flags values. These may be or'd together.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
std::vector< std::pair< MCRegister, Register > >::const_iterator livein_iterator
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Simple wrapper around std::function<void(raw_ostream&)>.
Definition Printable.h:38
Wrapper class representing virtual and physical registers.
Definition Register.h:20
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
static SectionKind getMergeableConst4()
static SectionKind getReadOnlyWithRel()
static SectionKind getMergeableConst8()
static SectionKind getMergeableConst16()
static SectionKind getReadOnly()
static SectionKind getMergeableConst32()
SlotIndexes pass.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isStackRealignable() const
isStackRealignable - This method returns whether the stack can be realigned.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
Primary interface to the complete machine description for the target machine.
TargetOptions Options
unsigned ForceDwarfFrameSection
Emit DWARF debug frame section.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:297
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
uint64_t MD5Hash(const FunctionId &Obj)
Definition FunctionId.h:167
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MachineBasicBlock::instr_iterator getBundleStart(MachineBasicBlock::instr_iterator I)
Returns an iterator to the first instruction in the bundle containing I.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Printable printJumpTableEntryReference(unsigned Idx)
Prints a jump table entry reference.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
MachineFunctionDataHotness
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1885
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
std::string getNodeLabel(const MachineBasicBlock *Node, const MachineFunction *Graph)
static std::string getGraphName(const MachineFunction *F)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Represent subnormal handling kind for floating point instruction inputs and outputs.
This structure is used to retain landing pad info for the current function.
SmallVector< MCSymbol *, 1 > EndLabels
MachineBasicBlock * LandingPadBlock
SmallVector< MCSymbol *, 1 > BeginLabels
std::vector< int > TypeIds
SmallVector< ConstantInt *, 4 > CalleeTypeIds
Callee type ids.
MDNode * CallTarget
'call_target' metadata for the DISubprogram.
MachineJumpTableEntry - One jump table in the jump table info.
LLVM_ABI MachineJumpTableEntry(const std::vector< MachineBasicBlock * > &M)
std::vector< MachineBasicBlock * > MBBs
MBBs - The vector of basic blocks from which to create the jump table.
MachineFunctionDataHotness Hotness
The hotness of MJTE is inferred from the hotness of the source basic block(s) that reference it.
This class contains a discriminated union of information about pointers in memory operands,...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static void deleteNode(NodeTy *V)
Definition ilist.h:42