LLVM 23.0.0git
MachineFunction.cpp
Go to the documentation of this file.
1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
43#include "llvm/Config/llvm-config.h"
44#include "llvm/IR/Attributes.h"
45#include "llvm/IR/BasicBlock.h"
46#include "llvm/IR/Constant.h"
47#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GlobalValue.h"
52#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/IR/Module.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/SectionKind.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <utility>
76#include <vector>
77
79
80using namespace llvm;
81
82#define DEBUG_TYPE "codegen"
83
85 "align-all-functions",
86 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
87 "means align on 16B boundaries)."),
89
92
93 // clang-format off
94 switch(Prop) {
95 case P::FailedISel: return "FailedISel";
96 case P::IsSSA: return "IsSSA";
97 case P::Legalized: return "Legalized";
98 case P::NoPHIs: return "NoPHIs";
99 case P::NoVRegs: return "NoVRegs";
100 case P::RegBankSelected: return "RegBankSelected";
101 case P::Selected: return "Selected";
102 case P::TracksLiveness: return "TracksLiveness";
103 case P::TiedOpsRewritten: return "TiedOpsRewritten";
104 case P::FailsVerification: return "FailsVerification";
105 case P::FailedRegAlloc: return "FailedRegAlloc";
106 case P::TracksDebugUserValues: return "TracksDebugUserValues";
107 }
108 // clang-format on
109 llvm_unreachable("Invalid machine function property");
110}
111
// NOTE(review): the signature line (original line 112) is missing from this
// excerpt; presumably a file-local helper of the form
// setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) --
// confirm against the full source.
// From the visible body: for functions carrying the SafeStack attribute,
// read a two-operand !annotation MDTuple of the form
// {"unsafe-stack-size", <constant>} and record the size in FrameInfo.
113 if (!F.hasFnAttribute(Attribute::SafeStack))
114 return;
115
116 auto *Existing =
117 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
118
// Only a tuple of exactly two operands {name, value} is recognized.
119 if (!Existing || Existing->getNumOperands() != 2)
120 return;
121
122 auto *MetadataName = "unsafe-stack-size";
123 if (auto &N = Existing->getOperand(0)) {
124 if (N.equalsStr(MetadataName)) {
125 if (auto &Op = Existing->getOperand(1)) {
// The value operand wraps a ConstantInt; its zero-extended value is the
// unsafe-stack size in bytes.
126 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
127 FrameInfo.setUnsafeStackSize(Val);
128 }
129 }
130 }
131}
132
133// Pin the vtable to this file.
134void MachineFunction::Delegate::anchor() {}
135
137 const char *Separator = "";
138 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
139 if (!Properties[I])
140 continue;
141 OS << Separator << getPropertyName(static_cast<Property>(I));
142 Separator = ", ";
143 }
144}
145
146//===----------------------------------------------------------------------===//
147// MachineFunction implementation
148//===----------------------------------------------------------------------===//
149
150// Out-of-line virtual method.
152
154 MBB->getParent()->deleteMachineBasicBlock(MBB);
155}
156
158 const Function &F) {
159 if (auto MA = F.getFnStackAlign())
160 return *MA;
161 return STI.getFrameLowering()->getStackAlign();
162}
163
165 const TargetSubtargetInfo &STI, MCContext &Ctx,
166 unsigned FunctionNum)
167 : F(F), Target(Target), STI(STI), Ctx(Ctx) {
168 FunctionNumber = FunctionNum;
169 init();
170}
171
172void MachineFunction::handleInsertion(MachineInstr &MI) {
173 if (TheDelegate)
174 TheDelegate->MF_HandleInsertion(MI);
175}
176
177void MachineFunction::handleRemoval(MachineInstr &MI) {
178 if (TheDelegate)
179 TheDelegate->MF_HandleRemoval(MI);
180}
181
183 const MCInstrDesc &TID) {
184 if (TheDelegate)
185 TheDelegate->MF_HandleChangeDesc(MI, TID);
186}
187
// Initialize per-function codegen state: register info, frame info, function
// alignment, jump-table/EH bookkeeping, and the pseudo-source-value manager.
// NOTE(review): this excerpt has several lines dropped by the scrape (original
// lines 211, 222, 227 and 232); see the inline notes below.
188void MachineFunction::init() {
 189  // Assume the function starts in SSA form with correct liveness.
190 Properties.setIsSSA();
191 Properties.setTracksLiveness();
192 RegInfo = new (Allocator) MachineRegisterInfo(this);
193
194 MFInfo = nullptr;
195
196 // We can realign the stack if the target supports it and the user hasn't
197 // explicitly asked us not to.
198 bool CanRealignSP = STI.getFrameLowering()->isStackRealignable() &&
199 !F.hasFnAttribute("no-realign-stack");
200 bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
201 F.hasFnAttribute("stackrealign");
202 FrameInfo = new (Allocator) MachineFrameInfo(
203 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
204 /*ForcedRealign=*/ForceRealignSP && CanRealignSP);
205
206 setUnsafeStackSize(F, *FrameInfo);
207
// An explicit stackalign attribute overrides the computed max alignment.
208 if (F.hasFnAttribute(Attribute::StackAlignment))
209 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
210
// NOTE(review): original line 211 is missing from this excerpt -- confirm
// its contents against the full source before relying on this block.
212 Alignment = STI.getTargetLowering()->getMinFunctionAlignment();
213
214 // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
215 // to load a type hash before the function label. Ensure functions are aligned
216 // by a least 4 to avoid unaligned access, which is especially important for
217 // -mno-unaligned-access.
218 if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
219 F.getMetadata(LLVMContext::MD_kcfi_type))
220 Alignment = std::max(Alignment, Align(4));
221
// NOTE(review): original line 222 is missing -- presumably a guard such as
// `if (AlignAllFunctions)` gating this forced-alignment override; confirm.
223 Alignment = Align(1ULL << AlignAllFunctions);
224
225 JumpTableInfo = nullptr;
226
// NOTE(review): original line 227 (the `if` condition for this branch) is
// missing -- presumably a personality-function classification test that
// decides whether Windows EH info is needed; confirm.
228 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
229 WinEHInfo = new (Allocator) WinEHFuncInfo();
230 }
231
// NOTE(review): original line 232 (the `if` condition) is likewise missing --
// presumably the analogous test for Wasm EH; confirm.
233 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
234 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
235 }
236
237 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
238 "Can't create a MachineFunction using a Module with a "
239 "Target-incompatible DataLayout attached\n");
240
241 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
242}
243
245 const TargetSubtargetInfo &STI) {
246 assert(!MFInfo && "MachineFunctionInfo already set");
247 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
248}
249
253
// Tear down all per-function state. Objects that were placement-new'ed into
// the BumpPtrAllocator are destroyed explicitly and their storage handed back
// to the allocator; the destruction order below is deliberate (see comments).
254void MachineFunction::clear() {
255 Properties.reset();
256
257 // Clear JumpTableInfo first. Otherwise, every MBB we delete would do a
258 // linear search over the jump table entries to find and erase itself.
259 if (JumpTableInfo) {
260 JumpTableInfo->~MachineJumpTableInfo();
261 Allocator.Deallocate(JumpTableInfo);
262 JumpTableInfo = nullptr;
263 }
264
265 // Don't call destructors on MachineInstr and MachineOperand. All of their
266 // memory comes from the BumpPtrAllocator which is about to be purged.
267 //
268 // Do call MachineBasicBlock destructors, it contains std::vectors.
269 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
270 I->Insts.clearAndLeakNodesUnsafely();
271 MBBNumbering.clear();
272
// Return recycled object storage to the allocator in bulk.
273 InstructionRecycler.clear(Allocator);
274 OperandRecycler.clear(Allocator);
275 BasicBlockRecycler.clear(Allocator);
276 CodeViewAnnotations.clear();
278 if (RegInfo) {
279 RegInfo->~MachineRegisterInfo();
280 Allocator.Deallocate(RegInfo);
281 }
282 if (MFInfo) {
283 MFInfo->~MachineFunctionInfo();
284 Allocator.Deallocate(MFInfo);
285 }
286
// FrameInfo and ConstantPool are unconditionally created in init(), so they
// are destroyed without a null check.
287 FrameInfo->~MachineFrameInfo();
288 Allocator.Deallocate(FrameInfo);
289
290 ConstantPool->~MachineConstantPool();
291 Allocator.Deallocate(ConstantPool);
292
293 if (WinEHInfo) {
294 WinEHInfo->~WinEHFuncInfo();
295 Allocator.Deallocate(WinEHInfo);
296 }
297
298 if (WasmEHInfo) {
299 WasmEHInfo->~WasmEHFuncInfo();
300 Allocator.Deallocate(WasmEHInfo);
301 }
302}
303
305 return F.getDataLayout();
306}
307
308/// Get the JumpTableInfo for this function.
309/// If it does not already exist, allocate one.
311getOrCreateJumpTableInfo(unsigned EntryKind) {
312 if (JumpTableInfo) return JumpTableInfo;
313
314 JumpTableInfo = new (Allocator)
316 return JumpTableInfo;
317}
318
320 return F.getDenormalMode(FPType);
321}
322
323/// Should we be emitting segmented stack stuff for the function
325 return getFunction().hasFnAttribute("split-stack");
326}
327
329 Align PrefAlignment;
330
331 if (MaybeAlign A = F.getPreferredAlignment())
332 PrefAlignment = *A;
333 else if (!F.hasOptSize())
334 PrefAlignment = STI.getTargetLowering()->getPrefFunctionAlignment();
335 else
336 PrefAlignment = Align(1);
337
338 return std::max(PrefAlignment, getAlignment());
339}
340
341[[nodiscard]] unsigned
343 FrameInstructions.push_back(Inst);
344 return FrameInstructions.size() - 1;
345}
346
347/// This discards all of the MachineBasicBlock numbers and recomputes them.
348/// This guarantees that the MBB numbers are sequential, dense, and match the
349/// ordering of the blocks within the function. If a specific MachineBasicBlock
350/// is specified, only that block and those after it are renumbered.
// NOTE(review): original lines 351 (the function signature) and 353 (the
// iterator declarations for MBBI/E used below) are missing from this excerpt;
// confirm against the full source.
352 if (empty()) { MBBNumbering.clear(); return; }
// Start renumbering either at the beginning or at the requested block.
354 if (MBB == nullptr)
355 MBBI = begin();
356 else
357 MBBI = MBB->getIterator();
358
359 // Figure out the block number this should have.
360 unsigned BlockNo = 0;
361 if (MBBI != begin())
362 BlockNo = std::prev(MBBI)->getNumber() + 1;
363
364 for (; MBBI != E; ++MBBI, ++BlockNo) {
365 if (MBBI->getNumber() != (int)BlockNo) {
366 // Remove use of the old number.
367 if (MBBI->getNumber() != -1) {
368 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
369 "MBB number mismatch!");
370 MBBNumbering[MBBI->getNumber()] = nullptr;
371 }
372
373 // If BlockNo is already taken, set that block's number to -1.
374 if (MBBNumbering[BlockNo])
375 MBBNumbering[BlockNo]->setNumber(-1);
376
377 MBBNumbering[BlockNo] = &*MBBI;
378 MBBI->setNumber(BlockNo);
379 }
380 }
381
382 // Okay, all the blocks are renumbered. If we have compactified the block
383 // numbering, shrink MBBNumbering now.
384 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
385 MBBNumbering.resize(BlockNo);
// Bump the epoch so cached numberings elsewhere know they are stale.
386 MBBNumberingEpoch++;
387}
388
391 const Align FunctionAlignment = getAlignment();
393 /// Offset - Distance from the beginning of the function to the end
394 /// of the basic block.
395 int64_t Offset = 0;
396
397 for (; MBBI != E; ++MBBI) {
398 const Align Alignment = MBBI->getAlignment();
399 int64_t BlockSize = 0;
400
401 for (auto &MI : *MBBI) {
402 BlockSize += TII.getInstSizeInBytes(MI);
403 }
404
405 int64_t OffsetBB;
406 if (Alignment <= FunctionAlignment) {
407 OffsetBB = alignTo(Offset, Alignment);
408 } else {
409 // The alignment of this MBB is larger than the function's alignment, so
410 // we can't tell whether or not it will insert nops. Assume that it will.
411 OffsetBB = alignTo(Offset, Alignment) + Alignment.value() -
412 FunctionAlignment.value();
413 }
414 Offset = OffsetBB + BlockSize;
415 }
416
417 return Offset;
418}
419
420/// This method iterates over the basic blocks and assigns their IsBeginSection
421/// and IsEndSection fields. This must be called after MBB layout is finalized
422/// and the SectionID's are assigned to MBBs.
// NOTE(review): the function signature (original lines 423-424) and original
// line 433 are missing from this excerpt. Line 433 presumably marks the last
// block / front block as section boundaries -- confirm against the full
// source before treating this block as complete.
425 auto CurrentSectionID = front().getSectionID();
// Walk from the second block; a section boundary exists wherever the
// SectionID changes between adjacent blocks.
426 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
427 if (MBBI->getSectionID() == CurrentSectionID)
428 continue;
429 MBBI->setIsBeginSection();
430 std::prev(MBBI)->setIsEndSection();
431 CurrentSectionID = MBBI->getSectionID();
432 }
434}
435
436/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
437MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
438 DebugLoc DL,
439 bool NoImplicit) {
440 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
441 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
442}
443
444/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
445/// identical in all ways except the instruction has no parent, prev, or next.
447MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
448 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
449 MachineInstr(*this, *Orig);
450}
451
// Clone a (possibly bundled) instruction sequence starting at Orig and insert
// the clones before InsertBefore; returns the first cloned instruction.
// NOTE(review): original lines 453 (remaining signature parameters), 456 (the
// declaration of the iterator `I` used below) and 473 (the guard for the
// copyAdditionalCallInfo call) are missing from this excerpt -- confirm
// against the full source.
452MachineInstr &MachineFunction::cloneMachineInstrBundle(
454 const MachineInstr &Orig) {
455 MachineInstr *FirstClone = nullptr;
457 while (true) {
458 MachineInstr *Cloned = CloneMachineInstr(&*I);
459 MBB.insert(InsertBefore, Cloned);
460 if (FirstClone == nullptr) {
461 FirstClone = Cloned;
462 } else {
// Re-establish the bundle chain among the clones.
463 Cloned->bundleWithPred();
464 }
465
// Stop once the last instruction of the bundle has been cloned.
466 if (!I->isBundledWithSucc())
467 break;
468 ++I;
469 }
470 // Copy over call info to the cloned instruction if needed. If Orig is in
471 // a bundle, copyAdditionalCallInfo takes care of finding the call instruction
472 // in the bundle.
474 copyAdditionalCallInfo(&Orig, FirstClone);
475 return *FirstClone;
476}
477
478/// Delete the given MachineInstr.
479///
480/// This function also serves as the MachineInstr destructor - the real
481/// ~MachineInstr() destructor must be empty.
482void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
483 // Verify that a call site info is at valid state. This assertion should
484 // be triggered during the implementation of support for the
485 // call site info of a new architecture. If the assertion is triggered,
486 // back trace will tell where to insert a call to updateCallSiteInfo().
487 assert((!MI->isCandidateForAdditionalCallInfo() ||
488 !CallSitesInfo.contains(MI)) &&
489 "Call site info was not updated!");
490 // Verify that the "called globals" info is in a valid state.
491 assert((!MI->isCandidateForAdditionalCallInfo() ||
492 !CalledGlobalsInfo.contains(MI)) &&
493 "Called globals info was not updated!");
494 // Strip it for parts. The operand array and the MI object itself are
495 // independently recyclable.
496 if (MI->Operands)
497 deallocateOperandArray(MI->CapOperands, MI->Operands);
498 // Don't call ~MachineInstr() which must be trivial anyway because
499 // ~MachineFunction drops whole lists of MachineInstrs without calling their
500 // destructors.
501 InstructionRecycler.Deallocate(Allocator, MI);
502}
503
504/// Allocate a new MachineBasicBlock. Use this instead of
505/// `new MachineBasicBlock'.
508 std::optional<UniqueBBID> BBID) {
510 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
511 MachineBasicBlock(*this, BB);
512 // Set BBID for `-basic-block-sections=list` and `-basic-block-address-map` to
513 // allow robust mapping of profiles to basic blocks.
514 if (Target.Options.BBAddrMap ||
515 Target.getBBSectionsType() == BasicBlockSection::List)
516 MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
517 return MBB;
518}
519
520/// Delete the given MachineBasicBlock.
522 assert(MBB->getParent() == this && "MBB parent mismatch!");
523 // Clean up any references to MBB in jump tables before deleting it.
524 if (JumpTableInfo)
525 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
526 MBB->~MachineBasicBlock();
527 BasicBlockRecycler.Deallocate(Allocator, MBB);
528}
529
532 Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
533 SyncScope::ID SSID, AtomicOrdering Ordering,
534 AtomicOrdering FailureOrdering) {
535 assert((!Size.hasValue() ||
536 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
537 "Unexpected an unknown size to be represented using "
538 "LocationSize::beforeOrAfter()");
539 return new (Allocator)
540 MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
541 Ordering, FailureOrdering);
542}
543
546 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
547 SyncScope::ID SSID, AtomicOrdering Ordering,
548 AtomicOrdering FailureOrdering) {
549 return new (Allocator)
550 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
551 Ordering, FailureOrdering);
552}
553
556 const MachinePointerInfo &PtrInfo,
558 assert((!Size.hasValue() ||
559 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
560 "Unexpected an unknown size to be represented using "
561 "LocationSize::beforeOrAfter()");
562 return new (Allocator)
563 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
564 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
566}
567
569 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
570 return new (Allocator)
571 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
572 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
574}
575
578 int64_t Offset, LLT Ty) {
579 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
580
581 // If there is no pointer value, the offset isn't tracked so we need to adjust
582 // the base alignment.
583 Align Alignment = PtrInfo.V.isNull()
585 : MMO->getBaseAlign();
586
587 // Do not preserve ranges, since we don't necessarily know what the high bits
588 // are anymore.
589 return new (Allocator) MachineMemOperand(
590 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
591 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
593}
594
597 const AAMDNodes &AAInfo) {
598 MachinePointerInfo MPI = MMO->getValue() ?
599 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
601
602 return new (Allocator) MachineMemOperand(
603 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
604 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
605 MMO->getFailureOrdering());
606}
607
611 return new (Allocator) MachineMemOperand(
612 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
613 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
615}
616
617MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
618 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
619 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
620 uint32_t CFIType, MDNode *MMRAs, Value *DS) {
621 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
622 PostInstrSymbol, HeapAllocMarker,
623 PCSections, CFIType, MMRAs, DS);
624}
625
627 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
628 llvm::copy(Name, Dest);
629 Dest[Name.size()] = 0;
630 return Dest;
631}
632
634 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
635 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
636 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
637 memset(Mask, 0, Size * sizeof(Mask[0]));
638 return Mask;
639}
640
642 int* AllocMask = Allocator.Allocate<int>(Mask.size());
643 copy(Mask, AllocMask);
644 return {AllocMask, Mask.size()};
645}
646
647#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
651#endif
652
656
657void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
658 OS << "# Machine code for function " << getName() << ": ";
659 getProperties().print(OS);
660 OS << '\n';
661
662 // Print Frame Information
663 FrameInfo->print(*this, OS);
664
665 // Print JumpTable Information
666 if (JumpTableInfo)
667 JumpTableInfo->print(OS);
668
669 // Print Constant Pool
670 ConstantPool->print(OS);
671
673
674 if (RegInfo && !RegInfo->livein_empty()) {
675 OS << "Function Live Ins: ";
677 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
678 OS << printReg(I->first, TRI);
679 if (I->second)
680 OS << " in " << printReg(I->second, TRI);
681 if (std::next(I) != E)
682 OS << ", ";
683 }
684 OS << '\n';
685 }
686
689 for (const auto &BB : *this) {
690 OS << '\n';
691 // If we print the whole function, print it at its most verbose level.
692 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
693 }
694
695 OS << "\n# End machine code for function " << getName() << ".\n\n";
696}
697
698/// True if this function needs frame moves for debug or exceptions.
700 // TODO: Ideally, what we'd like is to have a switch that allows emitting
701 // synchronous (precise at call-sites only) CFA into .eh_frame. However, even
702 // under this switch, we'd like .debug_frame to be precise when using -g. At
703 // this moment, there's no way to specify that some CFI directives go into
704 // .eh_frame only, while others go into .debug_frame only.
706 F.needsUnwindTableEntry() ||
707 !F.getParent()->debug_compile_units().empty();
708}
709
711 // Numeric callee_type ids are only for indirect calls.
712 if (!CB.isIndirectCall())
713 return;
714
715 MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
716 if (!CalleeTypeList)
717 return;
718
719 for (const MDOperand &Op : CalleeTypeList->operands()) {
720 MDNode *TypeMD = cast<MDNode>(Op);
721 MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
722 // Compute numeric type id from generalized type id string
723 uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
724 IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
725 CalleeTypeIds.push_back(
726 ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
727 }
728}
729
730template <>
732 : public DefaultDOTGraphTraits {
734
735 static std::string getGraphName(const MachineFunction *F) {
736 return ("CFG for '" + F->getName() + "' function").str();
737 }
738
740 const MachineFunction *Graph) {
741 std::string OutStr;
742 {
743 raw_string_ostream OSS(OutStr);
744
745 if (isSimple()) {
746 OSS << printMBBReference(*Node);
747 if (const BasicBlock *BB = Node->getBasicBlock())
748 OSS << ": " << BB->getName();
749 } else
750 Node->print(OSS);
751 }
752
753 if (OutStr[0] == '\n')
754 OutStr.erase(OutStr.begin());
755
756 // Process string output to make it nicer...
757 for (unsigned i = 0; i != OutStr.length(); ++i)
758 if (OutStr[i] == '\n') { // Left justify
759 OutStr[i] = '\\';
760 OutStr.insert(OutStr.begin() + i + 1, 'l');
761 }
762 return OutStr;
763 }
764};
765
767{
768#ifndef NDEBUG
769 ViewGraph(this, "mf" + getName());
770#else
771 errs() << "MachineFunction::viewCFG is only available in debug builds on "
772 << "systems with Graphviz or gv!\n";
773#endif // NDEBUG
774}
775
777{
778#ifndef NDEBUG
779 ViewGraph(this, "mf" + getName(), true);
780#else
781 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
782 << "systems with Graphviz or gv!\n";
783#endif // NDEBUG
784}
785
786/// Add the specified physical register as a live-in value and
787/// create a corresponding virtual register for it.
789 const TargetRegisterClass *RC) {
791 Register VReg = MRI.getLiveInVirtReg(PReg);
792 if (VReg) {
793 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
794 (void)VRegRC;
795 // A physical register can be added several times.
796 // Between two calls, the register class of the related virtual register
797 // may have been constrained to match some operation constraints.
798 // In that case, check that the current register class includes the
799 // physical register and is a sub class of the specified RC.
800 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
801 RC->hasSubClassEq(VRegRC))) &&
802 "Register class mismatch!");
803 return VReg;
804 }
805 VReg = MRI.createVirtualRegister(RC);
806 MRI.addLiveIn(PReg, VReg);
807 return VReg;
808}
809
810/// Return the MCSymbol for the specified non-empty jump table.
811/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
812/// normal 'L' label is returned.
814 bool isLinkerPrivate) const {
815 const DataLayout &DL = getDataLayout();
816 assert(JumpTableInfo && "No jump tables");
817 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
818
819 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
820 : DL.getPrivateGlobalPrefix();
821 SmallString<60> Name;
823 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
824 return Ctx.getOrCreateSymbol(Name);
825}
826
827/// Return a function-local symbol to represent the PIC base.
829 const DataLayout &DL = getDataLayout();
830 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
831 Twine(getFunctionNumber()) + "$pb");
832}
833
834/// \name Exception Handling
835/// \{
836
// NOTE(review): the signature (original lines 837-838) is missing from this
// excerpt; from the visible body this looks up the LandingPadInfo entry for a
// landing-pad block, appending a fresh entry if none exists -- confirm.
839 unsigned N = LandingPads.size();
// Linear search: the number of landing pads per function is expected small.
840 for (unsigned i = 0; i < N; ++i) {
841 LandingPadInfo &LP = LandingPads[i];
842 if (LP.LandingPadBlock == LandingPad)
843 return LP;
844 }
845
// Not found: create a new record. Index N is valid because push_back grew
// the vector by exactly one element.
846 LandingPads.push_back(LandingPadInfo(LandingPad));
847 return LandingPads[N];
848}
849
851 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
853 LP.BeginLabels.push_back(BeginLabel);
854 LP.EndLabels.push_back(EndLabel);
855}
856
858 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
860 LP.LandingPadLabel = LandingPadLabel;
861
863 LandingPad->getBasicBlock()->getFirstNonPHIIt();
864 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
865 // If there's no typeid list specified, then "cleanup" is implicit.
866 // Otherwise, id 0 is reserved for the cleanup action.
867 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
868 LP.TypeIds.push_back(0);
869
870 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
871 // correct, but we need to do it this way because of how the DWARF EH
872 // emitter processes the clauses.
873 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
874 Value *Val = LPI->getClause(I - 1);
875 if (LPI->isCatch(I - 1)) {
876 LP.TypeIds.push_back(
878 } else {
879 // Add filters in a list.
880 auto *CVal = cast<Constant>(Val);
881 SmallVector<unsigned, 4> FilterList;
882 for (const Use &U : CVal->operands())
883 FilterList.push_back(
884 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
885
886 LP.TypeIds.push_back(getFilterIDFor(FilterList));
887 }
888 }
889
890 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
891 for (unsigned I = CPI->arg_size(); I != 0; --I) {
892 auto *TypeInfo =
893 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
894 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
895 }
896
897 } else {
898 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
899 }
900
901 return LandingPadLabel;
902}
903
905 ArrayRef<unsigned> Sites) {
906 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
907}
908
910 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
911 if (TypeInfos[i] == TI) return i + 1;
912
913 TypeInfos.push_back(TI);
914 return TypeInfos.size();
915}
916
// NOTE(review): the signature (original line 917) is missing from this
// excerpt; from the body this returns a (negative) filter ID for a type-id
// list, reusing an existing filter when possible -- confirm.
918 // If the new filter coincides with the tail of an existing filter, then
919 // re-use the existing filter. Folding filters more than this requires
920 // re-ordering filters and/or their elements - probably not worth it.
921 for (unsigned i : FilterEnds) {
922 unsigned j = TyIds.size();
923
// Compare the candidate list against the existing filter from the back.
924 while (i && j)
925 if (FilterIds[--i] != TyIds[--j])
926 goto try_next;
927
928 if (!j)
929 // The new filter coincides with range [i, end) of the existing filter.
930 return -(1 + i);
931
932try_next:;
933 }
934
935 // Add the new filter.
// Filter IDs are encoded as negative numbers: -(1 + start index).
936 int FilterID = -(1 + FilterIds.size());
937 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
938 llvm::append_range(FilterIds, TyIds);
// Remember where this filter ends, then append the 0 terminator.
939 FilterEnds.push_back(FilterIds.size());
940 FilterIds.push_back(0); // terminator
941 return FilterID;
942}
943
945MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
946 assert(MI->isCandidateForAdditionalCallInfo() &&
947 "Call site info refers only to call (MI) candidates");
948
949 if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
950 return CallSitesInfo.end();
951 return CallSitesInfo.find(MI);
952}
953
954/// Return the call machine instruction or find a call within bundle.
956 if (!MI->isBundle())
957 return MI;
958
959 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
960 getBundleEnd(MI->getIterator())))
961 if (BMI.isCandidateForAdditionalCallInfo())
962 return &BMI;
963
964 llvm_unreachable("Unexpected bundle without a call site candidate");
965}
966
968 assert(MI->shouldUpdateAdditionalCallInfo() &&
969 "Call info refers only to call (MI) candidates or "
970 "candidates inside bundles");
971
972 const MachineInstr *CallMI = getCallInstr(MI);
973
974 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
975 if (CSIt != CallSitesInfo.end())
976 CallSitesInfo.erase(CSIt);
977
978 CalledGlobalsInfo.erase(CallMI);
979}
980
982 const MachineInstr *New) {
984 "Call info refers only to call (MI) candidates or "
985 "candidates inside bundles");
986
987 if (!New->isCandidateForAdditionalCallInfo())
988 return eraseAdditionalCallInfo(Old);
989
990 const MachineInstr *OldCallMI = getCallInstr(Old);
991 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
992 if (CSIt != CallSitesInfo.end()) {
993 CallSiteInfo CSInfo = CSIt->second;
994 CallSitesInfo[New] = std::move(CSInfo);
995 }
996
997 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
998 if (CGIt != CalledGlobalsInfo.end()) {
999 CalledGlobalInfo CGInfo = CGIt->second;
1000 CalledGlobalsInfo[New] = std::move(CGInfo);
1001 }
1002}
1003
1005 const MachineInstr *New) {
1007 "Call info refers only to call (MI) candidates or "
1008 "candidates inside bundles");
1009
1010 if (!New->isCandidateForAdditionalCallInfo())
1011 return eraseAdditionalCallInfo(Old);
1012
1013 const MachineInstr *OldCallMI = getCallInstr(Old);
1014 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1015 if (CSIt != CallSitesInfo.end()) {
1016 CallSiteInfo CSInfo = std::move(CSIt->second);
1017 CallSitesInfo.erase(CSIt);
1018 CallSitesInfo[New] = std::move(CSInfo);
1019 }
1020
1021 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1022 if (CGIt != CalledGlobalsInfo.end()) {
1023 CalledGlobalInfo CGInfo = std::move(CGIt->second);
1024 CalledGlobalsInfo.erase(CGIt);
1025 CalledGlobalsInfo[New] = std::move(CGInfo);
1026 }
1027}
1028
1032
1035 unsigned Subreg) {
1036 // Catch any accidental self-loops.
1037 assert(A.first != B.first);
1038 // Don't allow any substitutions _from_ the memory operand number.
1039 assert(A.second != DebugOperandMemNumber);
1040
1041 DebugValueSubstitutions.push_back({A, B, Subreg});
1042}
1043
// Create debug-value substitutions mapping each register def of Old to the
// corresponding operand of New, so debug users keep tracking values after the
// instruction is replaced.
// NOTE(review): the first signature line (original line 1044, presumably
// naming Old and the MachineFunction method) is missing from this excerpt --
// confirm against the full source.
1045 MachineInstr &New,
1046 unsigned MaxOperand) {
1047 // If the Old instruction wasn't tracked at all, there is no work to do.
1048 unsigned OldInstrNum = Old.peekDebugInstrNum();
1049 if (!OldInstrNum)
1050 return;
1051
1052 // Iterate over all operands looking for defs to create substitutions for.
1053 // Avoid creating new instr numbers unless we create a new substitution.
1054 // While this has no functional effect, it risks confusing someone reading
1055 // MIR output.
1056 // Examine all the operands, or the first N specified by the caller.
1057 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
1058 for (unsigned int I = 0; I < MaxOperand; ++I) {
1059 const auto &OldMO = Old.getOperand(I);
1060 auto &NewMO = New.getOperand(I);
// NewMO is only consulted by the assert below; silence unused warnings in
// release builds.
1061 (void)NewMO;
1062
1063 if (!OldMO.isReg() || !OldMO.isDef())
1064 continue;
1065 assert(NewMO.isDef());
1066
// getDebugInstrNum() lazily assigns New an instruction number on first use.
1067 unsigned NewInstrNum = New.getDebugInstrNum();
1068 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1069 std::make_pair(NewInstrNum, I));
1070 }
1071}
1072
1077
1078 // Check whether this copy-like instruction has already been salvaged into
1079 // an operand pair.
1080 Register Dest;
1081 if (auto CopyDstSrc = TII.isCopyLikeInstr(MI)) {
1082 Dest = CopyDstSrc->Destination->getReg();
1083 } else {
1084 assert(MI.isSubregToReg());
1085 Dest = MI.getOperand(0).getReg();
1086 }
1087
1088 auto CacheIt = DbgPHICache.find(Dest);
1089 if (CacheIt != DbgPHICache.end())
1090 return CacheIt->second;
1091
1092 // Calculate the instruction number to use, or install a DBG_PHI.
1093 auto OperandPair = salvageCopySSAImpl(MI);
1094 DbgPHICache.insert({Dest, OperandPair});
1095 return OperandPair;
1096}
1097
1101 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
1103
1104 // Chase the value read by a copy-like instruction back to the instruction
1105 // that ultimately _defines_ that value. This may pass:
1106 // * Through multiple intermediate copies, including subregister moves /
1107 // copies,
1108 // * Copies from physical registers that must then be traced back to the
1109 // defining instruction,
1110 // * Or, physical registers may be live-in to (only) the entry block, which
1111 // requires a DBG_PHI to be created.
1112 // We can pursue this problem in that order: trace back through copies,
1113 // optionally through a physical register, to a defining instruction. We
1114 // should never move from physreg to vreg. As we're still in SSA form, no need
1115 // to worry about partial definitions of registers.
1116
1117 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1118 // returns the register read and any subregister identifying which part is
1119 // read.
1120 auto GetRegAndSubreg =
1121 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1122 Register NewReg, OldReg;
1123 unsigned SubReg;
1124 if (Cpy.isCopy()) {
1125 OldReg = Cpy.getOperand(0).getReg();
1126 NewReg = Cpy.getOperand(1).getReg();
1127 SubReg = Cpy.getOperand(1).getSubReg();
1128 } else if (Cpy.isSubregToReg()) {
1129 OldReg = Cpy.getOperand(0).getReg();
1130 NewReg = Cpy.getOperand(1).getReg();
1131 SubReg = Cpy.getOperand(2).getImm();
1132 } else {
1133 auto CopyDetails = *TII.isCopyInstr(Cpy);
1134 const MachineOperand &Src = *CopyDetails.Source;
1135 const MachineOperand &Dest = *CopyDetails.Destination;
1136 OldReg = Dest.getReg();
1137 NewReg = Src.getReg();
1138 SubReg = Src.getSubReg();
1139 }
1140
1141 return {NewReg, SubReg};
1142 };
1143
1144 // First seek either the defining instruction, or a copy from a physreg.
1145 // During search, the current state is the current copy instruction, and which
1146 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1147 // deal with those later.
1148 auto State = GetRegAndSubreg(MI);
1149 auto CurInst = MI.getIterator();
1150 SmallVector<unsigned, 4> SubregsSeen;
1151 while (true) {
1152 // If we've found a copy from a physreg, first portion of search is over.
1153 if (!State.first.isVirtual())
1154 break;
1155
1156 // Record any subregister qualifier.
1157 if (State.second)
1158 SubregsSeen.push_back(State.second);
1159
1160 assert(MRI.hasOneDef(State.first));
1161 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1162 CurInst = Inst.getIterator();
1163
1164 // Any non-copy instruction is the defining instruction we're seeking.
1165 if (!Inst.isCopyLike() && !TII.isCopyLikeInstr(Inst))
1166 break;
1167 State = GetRegAndSubreg(Inst);
1168 };
1169
1170 // Helper lambda to apply additional subregister substitutions to a known
1171 // instruction/operand pair. Adds new (fake) substitutions so that we can
1172 // record the subregister. FIXME: this isn't very space efficient if multiple
1173 // values are tracked back through the same copies; cache something later.
1174 auto ApplySubregisters =
1176 for (unsigned Subreg : reverse(SubregsSeen)) {
1177 // Fetch a new instruction number, not attached to an actual instruction.
1178 unsigned NewInstrNumber = getNewDebugInstrNum();
1179 // Add a substitution from the "new" number to the known one, with a
1180 // qualifying subreg.
1181 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1182 // Return the new number; to find the underlying value, consumers need to
1183 // deal with the qualifying subreg.
1184 P = {NewInstrNumber, 0};
1185 }
1186 return P;
1187 };
1188
1189 // If we managed to find the defining instruction after COPYs, return an
1190 // instruction / operand pair after adding subregister qualifiers.
1191 if (State.first.isVirtual()) {
1192 // Virtual register def -- we can just look up where this happens.
1193 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1194 for (auto &MO : Inst->all_defs()) {
1195 if (MO.getReg() != State.first)
1196 continue;
1197 return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
1198 }
1199
1200 llvm_unreachable("Vreg def with no corresponding operand?");
1201 }
1202
1203 // Our search ended in a copy from a physreg: walk back up the function
1204 // looking for whatever defines the physreg.
1205 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1206 State = GetRegAndSubreg(*CurInst);
1207 Register RegToSeek = State.first;
1208
1209 auto RMII = CurInst->getReverseIterator();
1210 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1211 for (auto &ToExamine : PrevInstrs) {
1212 for (auto &MO : ToExamine.all_defs()) {
1213 // Test for operand that defines something aliasing RegToSeek.
1214 if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
1215 continue;
1216
1217 return ApplySubregisters(
1218 {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
1219 }
1220 }
1221
1222 MachineBasicBlock &InsertBB = *CurInst->getParent();
1223
1224 // We reached the start of the block before finding a defining instruction.
1225 // There are numerous scenarios where this can happen:
1226 // * Constant physical registers,
1227 // * Several intrinsics that allow LLVM-IR to read arbitary registers,
1228 // * Arguments in the entry block,
1229 // * Exception handling landing pads.
1230 // Validating all of them is too difficult, so just insert a DBG_PHI reading
1231 // the variable value at this position, rather than checking it makes sense.
1232
1233 // Create DBG_PHI for specified physreg.
1234 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1235 TII.get(TargetOpcode::DBG_PHI));
1236 Builder.addReg(State.first);
1237 unsigned NewNum = getNewDebugInstrNum();
1238 Builder.addImm(NewNum);
1239 return ApplySubregisters({NewNum, 0u});
1240}
1241
1243 auto *TII = getSubtarget().getInstrInfo();
1244
1245 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
1246 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
1247 MI.setDesc(RefII);
1248 MI.setDebugValueUndef();
1249 };
1250
1252 for (auto &MBB : *this) {
1253 for (auto &MI : MBB) {
1254 if (!MI.isDebugRef())
1255 continue;
1256
1257 bool IsValidRef = true;
1258
1259 for (MachineOperand &MO : MI.debug_operands()) {
1260 if (!MO.isReg())
1261 continue;
1262
1263 Register Reg = MO.getReg();
1264
1265 // Some vregs can be deleted as redundant in the meantime. Mark those
1266 // as DBG_VALUE $noreg. Additionally, some normal instructions are
1267 // quickly deleted, leaving dangling references to vregs with no def.
1268 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
1269 IsValidRef = false;
1270 break;
1271 }
1272
1273 assert(Reg.isVirtual());
1274 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1275
1276 // If we've found a copy-like instruction, follow it back to the
1277 // instruction that defines the source value, see salvageCopySSA docs
1278 // for why this is important.
1279 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1280 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
1281 MO.ChangeToDbgInstrRef(Result.first, Result.second);
1282 } else {
1283 // Otherwise, identify the operand number that the VReg refers to.
1284 unsigned OperandIdx = 0;
1285 for (const auto &DefMO : DefMI.operands()) {
1286 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
1287 break;
1288 ++OperandIdx;
1289 }
1290 assert(OperandIdx < DefMI.getNumOperands());
1291
1292 // Morph this instr ref to point at the given instruction and operand.
1293 unsigned ID = DefMI.getDebugInstrNum();
1294 MO.ChangeToDbgInstrRef(ID, OperandIdx);
1295 }
1296 }
1297
1298 if (!IsValidRef)
1299 MakeUndefDbgValue(MI);
1300 }
1301 }
1302}
1303
1305 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1306 // have optimized code inlined into this unoptimized code, however with
1307 // fewer and less aggressive optimizations happening, coverage and accuracy
1308 // should not suffer.
1309 if (getTarget().getOptLevel() == CodeGenOptLevel::None)
1310 return false;
1311
1312 // Don't use instr-ref if this function is marked optnone.
1313 if (F.hasFnAttribute(Attribute::OptimizeNone))
1314 return false;
1315
1316 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1317 return true;
1318
1319 return false;
1320}
1321
1323 return UseDebugInstrRef;
1324}
1325
1329
1330 // Use one million as a high / reserved number.
// DebugOperandMemNumber is a reserved operand index used by debug instruction
// references to denote an instruction's memory operand rather than a genuine
// def operand; one million is chosen so it cannot collide with any real
// operand number.
1331 const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1332
1333/// \}
1334
1335//===----------------------------------------------------------------------===//
1336// MachineJumpTableInfo implementation
1337//===----------------------------------------------------------------------===//
1338
1340 const std::vector<MachineBasicBlock *> &MBBs)
1342
1343/// Return the size of each entry in the jump table.
1345 // The size of a jump table entry is 4 bytes unless the entry is just the
1346 // address of a block, in which case it is the pointer size.
1347 switch (getEntryKind()) {
1349 return TD.getPointerSize();
1352 return 8;
1356 return 4;
1358 return 0;
1359 }
1360 llvm_unreachable("Unknown jump table encoding!");
1361}
1362
1363/// Return the alignment of each entry in the jump table.
1365 // The alignment of a jump table entry is the alignment of int32 unless the
1366 // entry is just the address of a block, in which case it is the pointer
1367 // alignment.
1368 switch (getEntryKind()) {
1370 return TD.getPointerABIAlignment(0).value();
1373 return TD.getABIIntegerTypeAlignment(64).value();
1377 return TD.getABIIntegerTypeAlignment(32).value();
1379 return 1;
1380 }
1381 llvm_unreachable("Unknown jump table encoding!");
1382}
1383
1384/// Create a new jump table entry in the jump table info.
1386 const std::vector<MachineBasicBlock*> &DestBBs) {
1387 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1388 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1389 return JumpTables.size()-1;
1390}
1391
1393 size_t JTI, MachineFunctionDataHotness Hotness) {
1394 assert(JTI < JumpTables.size() && "Invalid JTI!");
1395 // Record the largest hotness value.
1396 if (Hotness <= JumpTables[JTI].Hotness)
1397 return false;
1398
1399 JumpTables[JTI].Hotness = Hotness;
1400 return true;
1401}
1402
1403/// If Old is the target of any jump tables, update the jump tables to branch
1404/// to New instead.
1406 MachineBasicBlock *New) {
1407 assert(Old != New && "Not making a change?");
1408 bool MadeChange = false;
1409 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1410 ReplaceMBBInJumpTable(i, Old, New);
1411 return MadeChange;
1412}
1413
1414/// If MBB is present in any jump tables, remove it.
1416 bool MadeChange = false;
1417 for (MachineJumpTableEntry &JTE : JumpTables) {
1418 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1419 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1420 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1421 }
1422 return MadeChange;
1423}
1424
1425/// If Old is a target of the jump tables, update the jump table to branch to
1426/// New instead.
1428 MachineBasicBlock *Old,
1429 MachineBasicBlock *New) {
1430 assert(Old != New && "Not making a change?");
1431 bool MadeChange = false;
1432 MachineJumpTableEntry &JTE = JumpTables[Idx];
1433 for (MachineBasicBlock *&MBB : JTE.MBBs)
1434 if (MBB == Old) {
1435 MBB = New;
1436 MadeChange = true;
1437 }
1438 return MadeChange;
1439}
1440
1442 if (JumpTables.empty()) return;
1443
1444 OS << "Jump Tables:\n";
1445
1446 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1447 OS << printJumpTableEntryReference(i) << ':';
1448 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1449 OS << ' ' << printMBBReference(*MBB);
1450 OS << '\n';
1451 }
1452
1453 OS << '\n';
1454}
1455
1456#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1458#endif
1459
1461 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1462}
1463
1464//===----------------------------------------------------------------------===//
1465// MachineConstantPool implementation
1466//===----------------------------------------------------------------------===//
1467
// Out-of-line definition of the empty virtual anchor() method; per the LLVM
// coding-standards "anchor" idiom this pins MachineConstantPoolValue's vtable
// emission to this translation unit.
1468 void MachineConstantPoolValue::anchor() {}
1469
1471 return DL.getTypeAllocSize(Ty);
1472}
1473
1476 return Val.MachineCPVal->getSizeInBytes(DL);
1477 return DL.getTypeAllocSize(Val.ConstVal->getType());
1478}
1479
1482 return true;
1483 return Val.ConstVal->needsDynamicRelocation();
1484}
1485
1488 if (needsRelocation())
1490 switch (getSizeInBytes(*DL)) {
1491 case 4:
1493 case 8:
1495 case 16:
1497 case 32:
1499 default:
1500 return SectionKind::getReadOnly();
1501 }
1502}
1503
1505 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1506 // so keep track of which we've deleted to avoid double deletions.
1508 for (const MachineConstantPoolEntry &C : Constants)
1509 if (C.isMachineConstantPoolEntry()) {
1510 Deleted.insert(C.Val.MachineCPVal);
1511 delete C.Val.MachineCPVal;
1512 }
1513 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1514 if (Deleted.count(CPV) == 0)
1515 delete CPV;
1516 }
1517}
1518
1519/// Test whether the given two constants can be allocated the same constant pool
1520/// entry referenced by \param A.
1521static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1522 const DataLayout &DL) {
1523 // Handle the trivial case quickly.
1524 if (A == B) return true;
1525
1526 // If they have the same type but weren't the same constant, quickly
1527 // reject them.
1528 if (A->getType() == B->getType()) return false;
1529
1530 // We can't handle structs or arrays.
1531 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1532 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1533 return false;
1534
1535 // For now, only support constants with the same size.
1536 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1537 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1538 return false;
1539
1540 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();
1541
1542 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1543
1544 // Try constant folding a bitcast of both instructions to an integer. If we
1545 // get two identical ConstantInt's, then we are good to share them. We use
1546 // the constant folding APIs to do this so that we get the benefit of
1547 // DataLayout.
1548 if (isa<PointerType>(A->getType()))
1549 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1550 const_cast<Constant *>(A), IntTy, DL);
1551 else if (A->getType() != IntTy)
1552 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1553 IntTy, DL);
1554 if (isa<PointerType>(B->getType()))
1555 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1556 const_cast<Constant *>(B), IntTy, DL);
1557 else if (B->getType() != IntTy)
1558 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1559 IntTy, DL);
1560
1561 if (A != B)
1562 return false;
1563
1564 // Constants only safely match if A doesn't contain undef/poison.
1565 // As we'll be reusing A, it doesn't matter if B contain undef/poison.
1566 // TODO: Handle cases where A and B have the same undef/poison elements.
1567 // TODO: Merge A and B with mismatching undef/poison elements.
1568 return !ContainsUndefOrPoisonA;
1569}
1570
1571/// Create a new entry in the constant pool or return an existing one.
1572/// User must specify the log2 of the minimum required alignment for the object.
1574 Align Alignment) {
1575 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1576
1577 // Check to see if we already have this constant.
1578 //
1579 // FIXME, this could be made much more efficient for large constant pools.
1580 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1581 if (!Constants[i].isMachineConstantPoolEntry() &&
1582 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1583 if (Constants[i].getAlign() < Alignment)
1584 Constants[i].Alignment = Alignment;
1585 return i;
1586 }
1587
1588 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1589 return Constants.size()-1;
1590}
1591
1593 Align Alignment) {
1594 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1595
1596 // Check to see if we already have this constant.
1597 //
1598 // FIXME, this could be made much more efficient for large constant pools.
1599 int Idx = V->getExistingMachineCPValue(this, Alignment);
1600 if (Idx != -1) {
1601 MachineCPVsSharingEntries.insert(V);
1602 return (unsigned)Idx;
1603 }
1604
1605 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1606 return Constants.size()-1;
1607}
1608
1610 if (Constants.empty()) return;
1611
1612 OS << "Constant Pool:\n";
1613 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1614 OS << " cp#" << i << ": ";
1615 if (Constants[i].isMachineConstantPoolEntry())
1616 Constants[i].Val.MachineCPVal->print(OS);
1617 else
1618 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1619 OS << ", align=" << Constants[i].getAlign().value();
1620 OS << "\n";
1621 }
1622}
1623
1624//===----------------------------------------------------------------------===//
1625// Template specialization for MachineFunction implementation of
1626// ProfileSummaryInfo::getEntryCount().
1627//===----------------------------------------------------------------------===//
1628template <>
1629std::optional<Function::ProfileCount>
1630ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
1631 const llvm::MachineFunction *F) const {
1632 return F->getFunction().getEntryCount();
1633}
1634
1635#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1637#endif
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:661
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static cl::opt< unsigned > AlignAllFunctions("align-all-functions", cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " "means align on 16B boundaries)."), cl::init(0), cl::Hidden)
static const MachineInstr * getCallInstr(const MachineInstr *MI)
Return the call machine instruction or find a call within bundle.
static Align getFnStackAlignment(const TargetSubtargetInfo &STI, const Function &F)
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, const DataLayout &DL)
Test whether the given two constants can be allocated the same constant pool entry referenced by.
void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo)
static const char * getPropertyName(MachineFunctionProperties::Property Prop)
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define P(N)
Basic Register Allocator
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file defines the SmallVector class.
static const int BlockSize
Definition TarWriter.cpp:33
This file describes how to lower LLVM code to machine code.
void print(OutputBuffer &OB) const
void clear(AllocatorType &Allocator)
Release all the tracked allocations to the allocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
unsigned size_type
Definition BitVector.h:115
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Align getABIIntegerTypeAlignment(unsigned BitWidth) const
Returns the minimum ABI-required alignment for an integer type of the specified bitwidth.
Definition DataLayout.h:630
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
A debug info location.
Definition DebugLoc.h:123
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
Context object for machine code objects.
Definition MCContext.h:83
Describe properties that are true of each instruction in the target description file.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
A single uniqued string.
Definition Metadata.h:722
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
void setIsEndSection(bool V=true)
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MBBSectionID getSectionID() const
Returns the section ID of this basic block.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsBeginSection(bool V=true)
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool needsRelocation() const
This method classifies the entry according to whether or not it may generate a relocation entry.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
unsigned getSizeInBytes(const DataLayout &DL) const
SectionKind getSectionKind(const DataLayout *DL) const
Abstract base class for all machine specific constantpool value subclasses.
virtual unsigned getSizeInBytes(const DataLayout &DL) const
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
void dump() const
dump - Call print(cerr) to be called from the debugger.
void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about constant pool objects.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI void print(raw_ostream &OS) const
Print the MachineFunctionProperties in human-readable form.
MachineFunctionProperties & reset(Property P)
virtual void MF_HandleRemoval(MachineInstr &MI)=0
Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI)=0
Callback after an insertion. This should not modify the MI directly.
int getFilterIDFor(ArrayRef< unsigned > TyIds)
Return the id of the filter encoded by TyIds. This is function wide.
bool UseDebugInstrRef
Flag for whether this function contains DBG_VALUEs (false) or DBG_INSTR_REF (true).
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
std::pair< unsigned, unsigned > DebugInstrOperandPair
Pair of instruction number and operand number.
unsigned addFrameInst(const MCCFIInstruction &Inst)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
SmallVector< DebugSubstitution, 8 > DebugValueSubstitutions
Debug value substitutions: a collection of DebugSubstitution objects, recording changes in where a va...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
void viewCFGOnly() const
viewCFGOnly - This function is meant for use from the debugger.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New, unsigned MaxOperand=UINT_MAX)
Create substitutions for any tracked values in Old, to point at New.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFunction(Function &F, const TargetMachine &Target, const TargetSubtargetInfo &STI, MCContext &Ctx, unsigned FunctionNum)
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr, Value *DS=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void finalizeDebugInstrRefs()
Finalise any partially emitted debug instructions.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI)
Initialize the target specific MachineFunctionInfo.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef< unsigned > Sites)
Map the landing pad's EH symbol to the call site indexes.
void setUseDebugInstrRef(bool UseInstrRef)
Set whether this function will use instruction referencing or not.
LandingPadInfo & getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad)
Find or create an LandingPadInfo for the specified MachineBasicBlock.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
unsigned DebugInstrNumberingCount
A count of how many instructions in the function have had numbers assigned to them.
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Align getAlignment() const
getAlignment - Return the alignment of the function.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
static const unsigned int DebugOperandMemNumber
A reserved operand number representing the instructions memory operand, for instructions that have a ...
Function & getFunction()
Return the LLVM function that this machine code represents.
Align getPreferredAlignment() const
Returns the preferred alignment which comes from the function attributes (optsize,...
DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI)
const MachineBasicBlock & back() const
BasicBlockListType::iterator iterator
void setDebugInstrNumberingCount(unsigned Num)
Set value of DebugInstrNumberingCount field.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
void viewCFG() const
viewCFG - This function is meant for use from the debugger.
bool shouldUseDebugInstrRef() const
Determine whether, in the current machine configuration, we should use instruction referencing or not...
const MachineFunctionProperties & getProperties() const
Get the function properties.
void eraseAdditionalCallInfo(const MachineInstr *MI)
Following functions update call site info.
void RenumberBlocks(MachineBasicBlock *MBBFrom=nullptr)
RenumberBlocks - This discards all of the MachineBasicBlock numbers and recomputes them.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
int64_t estimateFunctionSizeInBytes()
Return an estimate of the function's code size, taking into account block and function alignment.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void copyAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Copy the call site info from Old to New.
VariableDbgInfoMapTy VariableDbgInfos
void assignBeginEndSections()
Assign IsBeginSection and IsEndSection fields for basic blocks in this function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
DebugInstrOperandPair salvageCopySSA(MachineInstr &MI, DenseMap< Register, DebugInstrOperandPair > &DbgPHICache)
Find the underlying defining instruction / operand for a COPY instruction while in SSA form.
Representation of each machine instruction.
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
unsigned getNumOperands() const
Returns the total number of operands.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB)
RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
LLVM_ABI bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTables - If Old is the target of any jump tables, update the jump tables to branch to...
LLVM_ABI void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about jump tables.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
LLVM_ABI unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
LLVM_ABI void dump() const
dump - Call to stderr.
LLVM_ABI bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTable - If Old is a target of the jump tables, update the jump table to branch to New...
LLVM_ABI bool updateJumpTableEntryHotness(size_t JTI, MachineFunctionDataHotness Hotness)
JTEntryKind
JTEntryKind - This enum indicates how each entry of the jump table is represented and emitted.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
LLVM_ABI unsigned getEntryAlignment(const DataLayout &TD) const
getEntryAlignment - Return the alignment of each entry in the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
Flags
Flags values. These may be or'd together.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
std::vector< std::pair< MCRegister, Register > >::const_iterator livein_iterator
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Simple wrapper around std::function<void(raw_ostream&)>.
Definition Printable.h:38
Wrapper class representing virtual and physical registers.
Definition Register.h:20
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
static SectionKind getMergeableConst4()
static SectionKind getReadOnlyWithRel()
static SectionKind getMergeableConst8()
static SectionKind getMergeableConst16()
static SectionKind getReadOnly()
static SectionKind getMergeableConst32()
SlotIndexes pass.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isStackRealignable() const
isStackRealignable - This method returns whether the stack can be realigned.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
Primary interface to the complete machine description for the target machine.
TargetOptions Options
unsigned ForceDwarfFrameSection
Emit DWARF debug frame section.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:297
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
uint64_t MD5Hash(const FunctionId &Obj)
Definition FunctionId.h:167
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MachineBasicBlock::instr_iterator getBundleStart(MachineBasicBlock::instr_iterator I)
Returns an iterator to the first instruction in the bundle containing I.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Printable printJumpTableEntryReference(unsigned Idx)
Prints a jump table entry reference.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
MachineFunctionDataHotness
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1885
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
std::string getNodeLabel(const MachineBasicBlock *Node, const MachineFunction *Graph)
static std::string getGraphName(const MachineFunction *F)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Represent subnormal handling kind for floating point instruction inputs and outputs.
This structure is used to retain landing pad info for the current function.
SmallVector< MCSymbol *, 1 > EndLabels
MachineBasicBlock * LandingPadBlock
SmallVector< MCSymbol *, 1 > BeginLabels
std::vector< int > TypeIds
SmallVector< ConstantInt *, 4 > CalleeTypeIds
Callee type ids.
MachineJumpTableEntry - One jump table in the jump table info.
LLVM_ABI MachineJumpTableEntry(const std::vector< MachineBasicBlock * > &M)
std::vector< MachineBasicBlock * > MBBs
MBBs - The vector of basic blocks from which to create the jump table.
MachineFunctionDataHotness Hotness
The hotness of MJTE is inferred from the hotness of the source basic block(s) that reference it.
This class contains a discriminated union of information about pointers in memory operands,...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static void deleteNode(NodeTy *V)
Definition ilist.h:42