MachineFunction.cpp
1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
43#include "llvm/Config/llvm-config.h"
44#include "llvm/IR/Attributes.h"
45#include "llvm/IR/BasicBlock.h"
46#include "llvm/IR/Constant.h"
47#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GlobalValue.h"
52#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/IR/Module.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/SectionKind.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <utility>
76#include <vector>
77
79
80using namespace llvm;
81
82#define DEBUG_TYPE "codegen"
83
84static cl::opt<unsigned> AlignAllFunctions(
85 "align-all-functions",
86 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
87 "means align on 16B boundaries)."),
88 cl::init(0), cl::Hidden);
89
90static const char *
91getPropertyName(MachineFunctionProperties::Property Prop) {
92 using P = MachineFunctionProperties::Property;
93 // clang-format off
94 switch(Prop) {
95 case P::FailedISel: return "FailedISel";
96 case P::IsSSA: return "IsSSA";
97 case P::Legalized: return "Legalized";
98 case P::NoPHIs: return "NoPHIs";
99 case P::NoVRegs: return "NoVRegs";
100 case P::RegBankSelected: return "RegBankSelected";
101 case P::Selected: return "Selected";
102 case P::TracksLiveness: return "TracksLiveness";
103 case P::TiedOpsRewritten: return "TiedOpsRewritten";
104 case P::FailsVerification: return "FailsVerification";
105 case P::FailedRegAlloc: return "FailedRegAlloc";
106 case P::TracksDebugUserValues: return "TracksDebugUserValues";
107 }
108 // clang-format on
109 llvm_unreachable("Invalid machine function property");
110}
111
112static void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) {
113 if (!F.hasFnAttribute(Attribute::SafeStack))
114 return;
115
116 auto *Existing =
117 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
118
119 if (!Existing || Existing->getNumOperands() != 2)
120 return;
121
122 auto *MetadataName = "unsafe-stack-size";
123 if (auto &N = Existing->getOperand(0)) {
124 if (N.equalsStr(MetadataName)) {
125 if (auto &Op = Existing->getOperand(1)) {
126 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
127 FrameInfo.setUnsafeStackSize(Val);
128 }
129 }
130 }
131}
132
133// Pin the vtable to this file.
134void MachineFunction::Delegate::anchor() {}
135
136void MachineFunctionProperties::print(raw_ostream &OS) const {
137 const char *Separator = "";
138 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
139 if (!Properties[I])
140 continue;
141 OS << Separator << getPropertyName(static_cast<Property>(I));
142 Separator = ", ";
143 }
144}
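// Illustrative note (editor's addition): a freshly initialized MachineFunction
// has IsSSA and TracksLiveness set (see init() below), so printing its
// properties at that point emits "IsSSA, TracksLiveness".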
145
146//===----------------------------------------------------------------------===//
147// MachineFunction implementation
148//===----------------------------------------------------------------------===//
149
150// Out-of-line virtual method.
151MachineFunctionInfo::~MachineFunctionInfo() = default;
152
153void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
154 MBB->getParent()->deleteMachineBasicBlock(MBB);
155}
156
157static Align getFnStackAlignment(const TargetSubtargetInfo &STI,
158 const Function &F) {
159 if (auto MA = F.getFnStackAlign())
160 return *MA;
161 return STI.getFrameLowering()->getStackAlign();
162}
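// Usage sketch (editor's addition, illustrative only): for IR such as
// \code
//   define void @f() alignstack(32) { ... }
// \endcode
// F.getFnStackAlign() yields Align(32), which is returned directly; without
// the attribute the subtarget's TargetFrameLowering::getStackAlign() is used.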
163
164MachineFunction::MachineFunction(Function &F, const TargetMachine &Target,
165 const TargetSubtargetInfo &STI, MCContext &Ctx,
166 unsigned FunctionNum)
167 : F(F), Target(Target), STI(STI), Ctx(Ctx) {
168 FunctionNumber = FunctionNum;
169 init();
170}
171
172void MachineFunction::handleInsertion(MachineInstr &MI) {
173 if (TheDelegate)
174 TheDelegate->MF_HandleInsertion(MI);
175}
176
177void MachineFunction::handleRemoval(MachineInstr &MI) {
178 if (TheDelegate)
179 TheDelegate->MF_HandleRemoval(MI);
180}
181
182void MachineFunction::handleChangeDesc(MachineInstr &MI,
183 const MCInstrDesc &TID) {
184 if (TheDelegate)
185 TheDelegate->MF_HandleChangeDesc(MI, TID);
186}
187
188void MachineFunction::init() {
189 // Assume the function starts in SSA form with correct liveness.
190 Properties.setIsSSA();
191 Properties.setTracksLiveness();
192 RegInfo = new (Allocator) MachineRegisterInfo(this);
193
194 MFInfo = nullptr;
195
196 // We can realign the stack if the target supports it and the user hasn't
197 // explicitly asked us not to.
198 bool CanRealignSP = STI.getFrameLowering()->isStackRealignable() &&
199 !F.hasFnAttribute("no-realign-stack");
200 bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
201 F.hasFnAttribute("stackrealign");
202 FrameInfo = new (Allocator) MachineFrameInfo(
203 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
204 /*ForcedRealign=*/ForceRealignSP && CanRealignSP);
205
206 setUnsafeStackSize(F, *FrameInfo);
207
208 if (F.hasFnAttribute(Attribute::StackAlignment))
209 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
210
211 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
212 Alignment = STI.getTargetLowering()->getMinFunctionAlignment();
213
214 // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
215 if (!F.hasOptSize())
216 Alignment = std::max(Alignment,
217 STI.getTargetLowering()->getPrefFunctionAlignment());
218
219 // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
220 // to load a type hash before the function label. Ensure functions are aligned
221 // by at least 4 to avoid unaligned access, which is especially important for
222 // -mno-unaligned-access.
223 if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
224 F.getMetadata(LLVMContext::MD_kcfi_type))
225 Alignment = std::max(Alignment, Align(4));
226
227 if (AlignAllFunctions)
228 Alignment = Align(1ULL << AlignAllFunctions);
229
230 JumpTableInfo = nullptr;
231
232 if (isFuncletEHPersonality(classifyEHPersonality(
233 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
234 WinEHInfo = new (Allocator) WinEHFuncInfo();
235 }
236
237 if (isScopedEHPersonality(classifyEHPersonality(
238 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
239 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
240 }
241
242 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
243 "Can't create a MachineFunction using a Module with a "
244 "Target-incompatible DataLayout attached\n");
245
246 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
247}
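// Worked example (editor's addition): passing -align-all-functions=4 makes the
// override above compute Align(1ULL << 4) == Align(16), i.e. every function is
// forced onto a 16-byte boundary, matching the option's description.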
248
249void MachineFunction::initTargetMachineFunctionInfo(
250 const TargetSubtargetInfo &STI) {
251 assert(!MFInfo && "MachineFunctionInfo already set");
252 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
253}
254
255MachineFunction::~MachineFunction() {
256 clear();
257}
258
259void MachineFunction::clear() {
260 Properties.reset();
261
262 // Clear JumpTableInfo first. Otherwise, every MBB we delete would do a
263 // linear search over the jump table entries to find and erase itself.
264 if (JumpTableInfo) {
265 JumpTableInfo->~MachineJumpTableInfo();
266 Allocator.Deallocate(JumpTableInfo);
267 JumpTableInfo = nullptr;
268 }
269
270 // Don't call destructors on MachineInstr and MachineOperand. All of their
271 // memory comes from the BumpPtrAllocator which is about to be purged.
272 //
273 // Do call MachineBasicBlock destructors, it contains std::vectors.
274 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
275 I->Insts.clearAndLeakNodesUnsafely();
276 MBBNumbering.clear();
277
278 InstructionRecycler.clear(Allocator);
279 OperandRecycler.clear(Allocator);
280 BasicBlockRecycler.clear(Allocator);
281 CodeViewAnnotations.clear();
283 if (RegInfo) {
284 RegInfo->~MachineRegisterInfo();
285 Allocator.Deallocate(RegInfo);
286 }
287 if (MFInfo) {
288 MFInfo->~MachineFunctionInfo();
289 Allocator.Deallocate(MFInfo);
290 }
291
292 FrameInfo->~MachineFrameInfo();
293 Allocator.Deallocate(FrameInfo);
294
295 ConstantPool->~MachineConstantPool();
296 Allocator.Deallocate(ConstantPool);
297
298 if (WinEHInfo) {
299 WinEHInfo->~WinEHFuncInfo();
300 Allocator.Deallocate(WinEHInfo);
301 }
302
303 if (WasmEHInfo) {
304 WasmEHInfo->~WasmEHFuncInfo();
305 Allocator.Deallocate(WasmEHInfo);
306 }
307}
308
309const DataLayout &MachineFunction::getDataLayout() const {
310 return F.getDataLayout();
311}
312
313/// Get the JumpTableInfo for this function.
314/// If it does not already exist, allocate one.
315MachineJumpTableInfo *MachineFunction::
316getOrCreateJumpTableInfo(unsigned EntryKind) {
317 if (JumpTableInfo) return JumpTableInfo;
318
319 JumpTableInfo = new (Allocator)
320 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
321 return JumpTableInfo;
322}
323
324DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
325 return F.getDenormalMode(FPType);
326}
327
328/// Should we be emitting segmented stack stuff for the function
329bool MachineFunction::shouldSplitStack() const {
330 return getFunction().hasFnAttribute("split-stack");
331}
332
333[[nodiscard]] unsigned
334MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
335 FrameInstructions.push_back(Inst);
336 return FrameInstructions.size() - 1;
337}
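// Usage sketch (editor's addition): target frame lowering typically records a
// CFI directive here and then references the returned index from a
// CFI_INSTRUCTION; the variable names below follow that common pattern and are
// not taken from this file.
// \code
//   unsigned CFIIndex = MF.addFrameInst(
//       MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize));
//   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
//       .addCFIIndex(CFIIndex);
// \endcode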
338
339/// This discards all of the MachineBasicBlock numbers and recomputes them.
340/// This guarantees that the MBB numbers are sequential, dense, and match the
341/// ordering of the blocks within the function. If a specific MachineBasicBlock
342/// is specified, only that block and those after it are renumbered.
343void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
344 if (empty()) { MBBNumbering.clear(); return; }
345 MachineFunction::iterator MBBI, E = end();
346 if (MBB == nullptr)
347 MBBI = begin();
348 else
349 MBBI = MBB->getIterator();
350
351 // Figure out the block number this should have.
352 unsigned BlockNo = 0;
353 if (MBBI != begin())
354 BlockNo = std::prev(MBBI)->getNumber() + 1;
355
356 for (; MBBI != E; ++MBBI, ++BlockNo) {
357 if (MBBI->getNumber() != (int)BlockNo) {
358 // Remove use of the old number.
359 if (MBBI->getNumber() != -1) {
360 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
361 "MBB number mismatch!");
362 MBBNumbering[MBBI->getNumber()] = nullptr;
363 }
364
365 // If BlockNo is already taken, set that block's number to -1.
366 if (MBBNumbering[BlockNo])
367 MBBNumbering[BlockNo]->setNumber(-1);
368
369 MBBNumbering[BlockNo] = &*MBBI;
370 MBBI->setNumber(BlockNo);
371 }
372 }
373
374 // Okay, all the blocks are renumbered. If we have compactified the block
375 // numbering, shrink MBBNumbering now.
376 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
377 MBBNumbering.resize(BlockNo);
378 MBBNumberingEpoch++;
379}
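// Usage sketch (editor's addition):
// \code
//   MF.RenumberBlocks();         // renumber every block so numbers stay dense
//   MF.RenumberBlocks(&SomeMBB); // renumber only SomeMBB and the blocks after it
// \endcode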
380
381int64_t MachineFunction::estimateFunctionSizeInBytes() {
382 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
383 const Align FunctionAlignment = getAlignment();
384 MachineFunction::const_iterator MBBI = begin(), E = end();
385 /// Offset - Distance from the beginning of the function to the end
386 /// of the basic block.
387 int64_t Offset = 0;
388
389 for (; MBBI != E; ++MBBI) {
390 const Align Alignment = MBBI->getAlignment();
391 int64_t BlockSize = 0;
392
393 for (auto &MI : *MBBI) {
394 BlockSize += TII.getInstSizeInBytes(MI);
395 }
396
397 int64_t OffsetBB;
398 if (Alignment <= FunctionAlignment) {
399 OffsetBB = alignTo(Offset, Alignment);
400 } else {
401 // The alignment of this MBB is larger than the function's alignment, so
402 // we can't tell whether or not it will insert nops. Assume that it will.
403 OffsetBB = alignTo(Offset, Alignment) + Alignment.value() -
404 FunctionAlignment.value();
405 }
406 Offset = OffsetBB + BlockSize;
407 }
408
409 return Offset;
410}
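// Worked example (editor's addition): with a function alignment of 16, a block
// whose alignment is 32 and whose predecessor ends at offset 40 is accounted
// for pessimistically as alignTo(40, 32) + 32 - 16 = 64 + 16 = 80 bytes before
// its own instructions are added, since the function's final placement is
// unknown at this point.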
411
412/// This method iterates over the basic blocks and assigns their IsBeginSection
413/// and IsEndSection fields. This must be called after MBB layout is finalized
414/// and the SectionID's are assigned to MBBs.
415void MachineFunction::assignBeginEndSections() {
416 front().setIsBeginSection();
417 auto CurrentSectionID = front().getSectionID();
418 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
419 if (MBBI->getSectionID() == CurrentSectionID)
420 continue;
421 MBBI->setIsBeginSection();
422 std::prev(MBBI)->setIsEndSection();
423 CurrentSectionID = MBBI->getSectionID();
424 }
425 back().setIsEndSection();
426}
427
428/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
429MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
430 DebugLoc DL,
431 bool NoImplicit) {
432 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
433 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
434}
435
436/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
437/// identical in all ways except the instruction has no parent, prev, or next.
439MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
440 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
441 MachineInstr(*this, *Orig);
442}
443
444MachineInstr &MachineFunction::cloneMachineInstrBundle(
445 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
446 const MachineInstr &Orig) {
447 MachineInstr *FirstClone = nullptr;
448 MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
449 while (true) {
450 MachineInstr *Cloned = CloneMachineInstr(&*I);
451 MBB.insert(InsertBefore, Cloned);
452 if (FirstClone == nullptr) {
453 FirstClone = Cloned;
454 } else {
455 Cloned->bundleWithPred();
456 }
457
458 if (!I->isBundledWithSucc())
459 break;
460 ++I;
461 }
462 // Copy over call info to the cloned instruction if needed. If Orig is in
463 // a bundle, copyAdditionalCallInfo takes care of finding the call instruction
464 // in the bundle.
466 copyAdditionalCallInfo(&Orig, FirstClone);
467 return *FirstClone;
468}
469
470/// Delete the given MachineInstr.
471///
472/// This function also serves as the MachineInstr destructor - the real
473/// ~MachineInstr() destructor must be empty.
474void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
475 // Verify that call site info is in a valid state. This assertion may fire
476 // while support for call site info is being implemented for a new
477 // architecture. If it does, the backtrace will show where to insert a call
478 // to updateCallSiteInfo().
479 assert((!MI->isCandidateForAdditionalCallInfo() ||
480 !CallSitesInfo.contains(MI)) &&
481 "Call site info was not updated!");
482 // Verify that the "called globals" info is in a valid state.
483 assert((!MI->isCandidateForAdditionalCallInfo() ||
484 !CalledGlobalsInfo.contains(MI)) &&
485 "Called globals info was not updated!");
486 // Strip it for parts. The operand array and the MI object itself are
487 // independently recyclable.
488 if (MI->Operands)
489 deallocateOperandArray(MI->CapOperands, MI->Operands);
490 // Don't call ~MachineInstr() which must be trivial anyway because
491 // ~MachineFunction drops whole lists of MachineInstrs without calling their
492 // destructors.
493 InstructionRecycler.Deallocate(Allocator, MI);
494}
495
496/// Allocate a new MachineBasicBlock. Use this instead of
497/// `new MachineBasicBlock'.
498MachineBasicBlock *
499MachineFunction::CreateMachineBasicBlock(const BasicBlock *BB,
500 std::optional<UniqueBBID> BBID) {
501 MachineBasicBlock *MBB =
502 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
503 MachineBasicBlock(*this, BB);
504 // Set BBID for `-basic-block-sections=list` and `-basic-block-address-map` to
505 // allow robust mapping of profiles to basic blocks.
506 if (Target.Options.BBAddrMap ||
507 Target.getBBSectionsType() == BasicBlockSection::List)
508 MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
509 return MBB;
510}
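// Usage sketch (editor's addition): callers allocate the block through the
// function and then link it into the block list themselves; LLVMBB and
// InsertPt are placeholders for the originating IR block and insertion point.
// \code
//   MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(LLVMBB);
//   MF.insert(InsertPt, NewMBB);
// \endcode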
511
512/// Delete the given MachineBasicBlock.
513void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
514 assert(MBB->getParent() == this && "MBB parent mismatch!");
515 // Clean up any references to MBB in jump tables before deleting it.
516 if (JumpTableInfo)
517 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
518 MBB->~MachineBasicBlock();
519 BasicBlockRecycler.Deallocate(Allocator, MBB);
520}
521
524 Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
525 SyncScope::ID SSID, AtomicOrdering Ordering,
526 AtomicOrdering FailureOrdering) {
527 assert((!Size.hasValue() ||
528 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
529 "Unexpected an unknown size to be represented using "
530 "LocationSize::beforeOrAfter()");
531 return new (Allocator)
532 MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
533 Ordering, FailureOrdering);
534}
535
538 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
539 SyncScope::ID SSID, AtomicOrdering Ordering,
540 AtomicOrdering FailureOrdering) {
541 return new (Allocator)
542 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
543 Ordering, FailureOrdering);
544}
545
548 const MachinePointerInfo &PtrInfo,
550 assert((!Size.hasValue() ||
551 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
552 "Unexpected an unknown size to be represented using "
553 "LocationSize::beforeOrAfter()");
554 return new (Allocator)
555 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
556 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
558}
559
561 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
562 return new (Allocator)
563 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
564 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
566}
567
570 int64_t Offset, LLT Ty) {
571 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
572
573 // If there is no pointer value, the offset isn't tracked so we need to adjust
574 // the base alignment.
575 Align Alignment = PtrInfo.V.isNull()
577 : MMO->getBaseAlign();
578
579 // Do not preserve ranges, since we don't necessarily know what the high bits
580 // are anymore.
581 return new (Allocator) MachineMemOperand(
582 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
583 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
585}
586
589 const AAMDNodes &AAInfo) {
590 MachinePointerInfo MPI = MMO->getValue() ?
591 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
593
594 return new (Allocator) MachineMemOperand(
595 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
596 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
597 MMO->getFailureOrdering());
598}
599
603 return new (Allocator) MachineMemOperand(
604 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
605 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
607}
608
609MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
610 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
611 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
612 uint32_t CFIType, MDNode *MMRAs) {
613 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
614 PostInstrSymbol, HeapAllocMarker,
615 PCSections, CFIType, MMRAs);
616}
617
619 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
620 llvm::copy(Name, Dest);
621 Dest[Name.size()] = 0;
622 return Dest;
623}
624
626 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
627 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
628 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
629 memset(Mask, 0, Size * sizeof(Mask[0]));
630 return Mask;
631}
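// Note (editor's addition): the mask holds one bit per physical register packed
// into uint32_t words, so MachineOperand::getRegMaskSize(NumRegs) is
// (NumRegs + 31) / 32; e.g. a target with 200 registers gets 7 zeroed words.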
632
634 int* AllocMask = Allocator.Allocate<int>(Mask.size());
635 copy(Mask, AllocMask);
636 return {AllocMask, Mask.size()};
637}
638
639#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
643#endif
644
648
649void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
650 OS << "# Machine code for function " << getName() << ": ";
651 getProperties().print(OS);
652 OS << '\n';
653
654 // Print Frame Information
655 FrameInfo->print(*this, OS);
656
657 // Print JumpTable Information
658 if (JumpTableInfo)
659 JumpTableInfo->print(OS);
660
661 // Print Constant Pool
662 ConstantPool->print(OS);
663
664 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
665
666 if (RegInfo && !RegInfo->livein_empty()) {
667 OS << "Function Live Ins: ";
668 for (MachineRegisterInfo::livein_iterator
669 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
670 OS << printReg(I->first, TRI);
671 if (I->second)
672 OS << " in " << printReg(I->second, TRI);
673 if (std::next(I) != E)
674 OS << ", ";
675 }
676 OS << '\n';
677 }
678
679 ModuleSlotTracker MST(getFunction().getParent());
680 MST.incorporateFunction(getFunction());
681 for (const auto &BB : *this) {
682 OS << '\n';
683 // If we print the whole function, print it at its most verbose level.
684 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
685 }
686
687 OS << "\n# End machine code for function " << getName() << ".\n\n";
688}
689
690/// True if this function needs frame moves for debug or exceptions.
691bool MachineFunction::needsFrameMoves() const {
692 // TODO: Ideally, what we'd like is to have a switch that allows emitting
693 // synchronous (precise at call-sites only) CFA into .eh_frame. However, even
694 // under this switch, we'd like .debug_frame to be precise when using -g. At
695 // this moment, there's no way to specify that some CFI directives go into
696 // .eh_frame only, while others go into .debug_frame only.
698 F.needsUnwindTableEntry() ||
699 !F.getParent()->debug_compile_units().empty();
700}
701
703 // Numeric callee_type ids are only for indirect calls.
704 if (!CB.isIndirectCall())
705 return;
706
707 MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
708 if (!CalleeTypeList)
709 return;
710
711 for (const MDOperand &Op : CalleeTypeList->operands()) {
712 MDNode *TypeMD = cast<MDNode>(Op);
713 MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
714 // Compute numeric type id from generalized type id string
715 uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
716 IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
717 CalleeTypeIds.push_back(
718 ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
719 }
720}
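// Illustrative sketch (editor's addition): for an indirect call carrying
// callee_type metadata shaped roughly like
// \code
//   call void %fp(i32 %x), !callee_type !0
//   !0 = !{!1}
//   !1 = !{i64 0, !"_ZTSFviE.generalized"}
// \endcode
// each generalized type-id string is hashed with MD5Hash() into the 64-bit
// numeric type id pushed onto CalleeTypeIds above.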
721
722template <>
723struct DOTGraphTraits<const MachineFunction *>
724 : public DefaultDOTGraphTraits {
726
727 static std::string getGraphName(const MachineFunction *F) {
728 return ("CFG for '" + F->getName() + "' function").str();
729 }
730
731 static std::string getNodeLabel(const MachineBasicBlock *Node,
732 const MachineFunction *Graph) {
733 std::string OutStr;
734 {
735 raw_string_ostream OSS(OutStr);
736
737 if (isSimple()) {
738 OSS << printMBBReference(*Node);
739 if (const BasicBlock *BB = Node->getBasicBlock())
740 OSS << ": " << BB->getName();
741 } else
742 Node->print(OSS);
743 }
744
745 if (OutStr[0] == '\n')
746 OutStr.erase(OutStr.begin());
747
748 // Process string output to make it nicer...
749 for (unsigned i = 0; i != OutStr.length(); ++i)
750 if (OutStr[i] == '\n') { // Left justify
751 OutStr[i] = '\\';
752 OutStr.insert(OutStr.begin() + i + 1, 'l');
753 }
754 return OutStr;
755 }
756};
757
758void MachineFunction::viewCFG() const
759{
760#ifndef NDEBUG
761 ViewGraph(this, "mf" + getName());
762#else
763 errs() << "MachineFunction::viewCFG is only available in debug builds on "
764 << "systems with Graphviz or gv!\n";
765#endif // NDEBUG
766}
767
768void MachineFunction::viewCFGOnly() const
769{
770#ifndef NDEBUG
771 ViewGraph(this, "mf" + getName(), true);
772#else
773 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
774 << "systems with Graphviz or gv!\n";
775#endif // NDEBUG
776}
777
778/// Add the specified physical register as a live-in value and
779/// create a corresponding virtual register for it.
780Register MachineFunction::addLiveIn(MCRegister PReg,
781 const TargetRegisterClass *RC) {
782 MachineRegisterInfo &MRI = getRegInfo();
783 Register VReg = MRI.getLiveInVirtReg(PReg);
784 if (VReg) {
785 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
786 (void)VRegRC;
787 // A physical register can be added several times.
788 // Between two calls, the register class of the related virtual register
789 // may have been constrained to match some operation constraints.
790 // In that case, check that the current register class includes the
791 // physical register and is a sub class of the specified RC.
792 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
793 RC->hasSubClassEq(VRegRC))) &&
794 "Register class mismatch!");
795 return VReg;
796 }
797 VReg = MRI.createVirtualRegister(RC);
798 MRI.addLiveIn(PReg, VReg);
799 return VReg;
800}
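// Usage sketch (editor's addition): targets typically call this from argument
// lowering and then read the value through the returned vreg; DAG, Chain, DL,
// VT, ArgPhysReg and TargetRC are placeholders from that context.
// \code
//   Register VReg = MF.addLiveIn(ArgPhysReg, &TargetRC);
//   SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VT);
// \endcode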
801
802/// Return the MCSymbol for the specified non-empty jump table.
803/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
804/// normal 'L' label is returned.
805MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
806 bool isLinkerPrivate) const {
807 const DataLayout &DL = getDataLayout();
808 assert(JumpTableInfo && "No jump tables");
809 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
810
811 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
812 : DL.getPrivateGlobalPrefix();
813 SmallString<60> Name;
814 raw_svector_ostream(Name)
815 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
816 return Ctx.getOrCreateSymbol(Name);
817}
818
819/// Return a function-local symbol to represent the PIC base.
820MCSymbol *MachineFunction::getPICBaseSymbol() const {
821 const DataLayout &DL = getDataLayout();
822 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
823 Twine(getFunctionNumber()) + "$pb");
824}
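// Example (editor's addition): with a private-global prefix of "L" and
// function number 3, the symbol created here is named "L3$pb".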
825
826/// \name Exception Handling
827/// \{
828
829LandingPadInfo &
830MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
831 unsigned N = LandingPads.size();
832 for (unsigned i = 0; i < N; ++i) {
833 LandingPadInfo &LP = LandingPads[i];
834 if (LP.LandingPadBlock == LandingPad)
835 return LP;
836 }
837
838 LandingPads.push_back(LandingPadInfo(LandingPad));
839 return LandingPads[N];
840}
841
842void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
843 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
844 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
845 LP.BeginLabels.push_back(BeginLabel);
846 LP.EndLabels.push_back(EndLabel);
847}
848
849MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
850 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
851 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
852 LP.LandingPadLabel = LandingPadLabel;
853
855 LandingPad->getBasicBlock()->getFirstNonPHIIt();
856 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
857 // If there's no typeid list specified, then "cleanup" is implicit.
858 // Otherwise, id 0 is reserved for the cleanup action.
859 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
860 LP.TypeIds.push_back(0);
861
862 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
863 // correct, but we need to do it this way because of how the DWARF EH
864 // emitter processes the clauses.
865 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
866 Value *Val = LPI->getClause(I - 1);
867 if (LPI->isCatch(I - 1)) {
868 LP.TypeIds.push_back(
870 } else {
871 // Add filters in a list.
872 auto *CVal = cast<Constant>(Val);
873 SmallVector<unsigned, 4> FilterList;
874 for (const Use &U : CVal->operands())
875 FilterList.push_back(
876 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
877
878 LP.TypeIds.push_back(getFilterIDFor(FilterList));
879 }
880 }
881
882 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
883 for (unsigned I = CPI->arg_size(); I != 0; --I) {
884 auto *TypeInfo =
885 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
886 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
887 }
888
889 } else {
890 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
891 }
892
893 return LandingPadLabel;
894}
895
896void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
897 ArrayRef<unsigned> Sites) {
898 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
899}
900
901unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
902 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
903 if (TypeInfos[i] == TI) return i + 1;
904
905 TypeInfos.push_back(TI);
906 return TypeInfos.size();
907}
908
909int MachineFunction::getFilterIDFor(ArrayRef<unsigned> TyIds) {
910 // If the new filter coincides with the tail of an existing filter, then
911 // re-use the existing filter. Folding filters more than this requires
912 // re-ordering filters and/or their elements - probably not worth it.
913 for (unsigned i : FilterEnds) {
914 unsigned j = TyIds.size();
915
916 while (i && j)
917 if (FilterIds[--i] != TyIds[--j])
918 goto try_next;
919
920 if (!j)
921 // The new filter coincides with range [i, end) of the existing filter.
922 return -(1 + i);
923
924try_next:;
925 }
926
927 // Add the new filter.
928 int FilterID = -(1 + FilterIds.size());
929 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
930 llvm::append_range(FilterIds, TyIds);
931 FilterEnds.push_back(FilterIds.size());
932 FilterIds.push_back(0); // terminator
933 return FilterID;
934}
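// Worked example (editor's addition): the first filter {T1, T2} is appended as
// FilterIds = [T1, T2, 0] with FilterEnds = [2] and returns -(1 + 0) = -1; a
// later filter {T3} that shares no tail is appended after the terminator and
// returns -(1 + 3) = -4.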
935
936MachineFunction::CallSiteInfoMap::iterator
937MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
938 assert(MI->isCandidateForAdditionalCallInfo() &&
939 "Call site info refers only to call (MI) candidates");
940
941 if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
942 return CallSitesInfo.end();
943 return CallSitesInfo.find(MI);
944}
945
946/// Return the call machine instruction, or find a call within the bundle.
947static const MachineInstr *getCallInstr(const MachineInstr *MI) {
948 if (!MI->isBundle())
949 return MI;
950
951 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
952 getBundleEnd(MI->getIterator())))
953 if (BMI.isCandidateForAdditionalCallInfo())
954 return &BMI;
955
956 llvm_unreachable("Unexpected bundle without a call site candidate");
957}
958
959void MachineFunction::eraseAdditionalCallInfo(const MachineInstr *MI) {
960 assert(MI->shouldUpdateAdditionalCallInfo() &&
961 "Call info refers only to call (MI) candidates or "
962 "candidates inside bundles");
963
964 const MachineInstr *CallMI = getCallInstr(MI);
965
966 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
967 if (CSIt != CallSitesInfo.end())
968 CallSitesInfo.erase(CSIt);
969
970 CalledGlobalsInfo.erase(CallMI);
971}
972
973void MachineFunction::copyAdditionalCallInfo(const MachineInstr *Old,
974 const MachineInstr *New) {
976 "Call info refers only to call (MI) candidates or "
977 "candidates inside bundles");
978
979 if (!New->isCandidateForAdditionalCallInfo())
980 return eraseAdditionalCallInfo(Old);
981
982 const MachineInstr *OldCallMI = getCallInstr(Old);
983 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
984 if (CSIt != CallSitesInfo.end()) {
985 CallSiteInfo CSInfo = CSIt->second;
986 CallSitesInfo[New] = std::move(CSInfo);
987 }
988
989 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
990 if (CGIt != CalledGlobalsInfo.end()) {
991 CalledGlobalInfo CGInfo = CGIt->second;
992 CalledGlobalsInfo[New] = std::move(CGInfo);
993 }
994}
995
996void MachineFunction::moveAdditionalCallInfo(const MachineInstr *Old,
997 const MachineInstr *New) {
999 "Call info refers only to call (MI) candidates or "
1000 "candidates inside bundles");
1001
1002 if (!New->isCandidateForAdditionalCallInfo())
1003 return eraseAdditionalCallInfo(Old);
1004
1005 const MachineInstr *OldCallMI = getCallInstr(Old);
1006 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1007 if (CSIt != CallSitesInfo.end()) {
1008 CallSiteInfo CSInfo = std::move(CSIt->second);
1009 CallSitesInfo.erase(CSIt);
1010 CallSitesInfo[New] = std::move(CSInfo);
1011 }
1012
1013 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1014 if (CGIt != CalledGlobalsInfo.end()) {
1015 CalledGlobalInfo CGInfo = std::move(CGIt->second);
1016 CalledGlobalsInfo.erase(CGIt);
1017 CalledGlobalsInfo[New] = std::move(CGInfo);
1018 }
1019}
1020
1024
1025void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
1026 DebugInstrOperandPair B,
1027 unsigned Subreg) {
1028 // Catch any accidental self-loops.
1029 assert(A.first != B.first);
1030 // Don't allow any substitutions _from_ the memory operand number.
1031 assert(A.second != DebugOperandMemNumber);
1032
1033 DebugValueSubstitutions.push_back({A, B, Subreg});
1034}
1035
1036void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
1037 MachineInstr &New,
1038 unsigned MaxOperand) {
1039 // If the Old instruction wasn't tracked at all, there is no work to do.
1040 unsigned OldInstrNum = Old.peekDebugInstrNum();
1041 if (!OldInstrNum)
1042 return;
1043
1044 // Iterate over all operands looking for defs to create substitutions for.
1045 // Avoid creating new instr numbers unless we create a new substitution.
1046 // While this has no functional effect, it risks confusing someone reading
1047 // MIR output.
1048 // Examine all the operands, or the first N specified by the caller.
1049 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
1050 for (unsigned int I = 0; I < MaxOperand; ++I) {
1051 const auto &OldMO = Old.getOperand(I);
1052 auto &NewMO = New.getOperand(I);
1053 (void)NewMO;
1054
1055 if (!OldMO.isReg() || !OldMO.isDef())
1056 continue;
1057 assert(NewMO.isDef());
1058
1059 unsigned NewInstrNum = New.getDebugInstrNum();
1060 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1061 std::make_pair(NewInstrNum, I));
1062 }
1063}
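// Usage sketch (editor's addition): a pass that replaces an instruction can
// keep instruction-referencing debug info intact by forwarding the old defs
// before deleting the original; OldMI and NewMI are placeholders for the
// original instruction and a replacement whose defs occupy the same slots.
// \code
//   MF.substituteDebugValuesForInst(OldMI, NewMI);
//   OldMI.eraseFromParent();
// \endcode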
1064
1065auto MachineFunction::salvageCopySSA(
1066 MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair> &DbgPHICache)
1067 -> DebugInstrOperandPair {
1068 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
1069
1070 // Check whether this copy-like instruction has already been salvaged into
1071 // an operand pair.
1072 Register Dest;
1073 if (auto CopyDstSrc = TII.isCopyLikeInstr(MI)) {
1074 Dest = CopyDstSrc->Destination->getReg();
1075 } else {
1076 assert(MI.isSubregToReg());
1077 Dest = MI.getOperand(0).getReg();
1078 }
1079
1080 auto CacheIt = DbgPHICache.find(Dest);
1081 if (CacheIt != DbgPHICache.end())
1082 return CacheIt->second;
1083
1084 // Calculate the instruction number to use, or install a DBG_PHI.
1085 auto OperandPair = salvageCopySSAImpl(MI);
1086 DbgPHICache.insert({Dest, OperandPair});
1087 return OperandPair;
1088}
1089
1090auto MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
1091 -> DebugInstrOperandPair {
1092 MachineRegisterInfo &MRI = getRegInfo();
1093 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
1094 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
1095
1096 // Chase the value read by a copy-like instruction back to the instruction
1097 // that ultimately _defines_ that value. This may pass:
1098 // * Through multiple intermediate copies, including subregister moves /
1099 // copies,
1100 // * Copies from physical registers that must then be traced back to the
1101 // defining instruction,
1102 // * Or, physical registers may be live-in to (only) the entry block, which
1103 // requires a DBG_PHI to be created.
1104 // We can pursue this problem in that order: trace back through copies,
1105 // optionally through a physical register, to a defining instruction. We
1106 // should never move from physreg to vreg. As we're still in SSA form, no need
1107 // to worry about partial definitions of registers.
1108
1109 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1110 // returns the register read and any subregister identifying which part is
1111 // read.
1112 auto GetRegAndSubreg =
1113 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1114 Register NewReg, OldReg;
1115 unsigned SubReg;
1116 if (Cpy.isCopy()) {
1117 OldReg = Cpy.getOperand(0).getReg();
1118 NewReg = Cpy.getOperand(1).getReg();
1119 SubReg = Cpy.getOperand(1).getSubReg();
1120 } else if (Cpy.isSubregToReg()) {
1121 OldReg = Cpy.getOperand(0).getReg();
1122 NewReg = Cpy.getOperand(2).getReg();
1123 SubReg = Cpy.getOperand(3).getImm();
1124 } else {
1125 auto CopyDetails = *TII.isCopyInstr(Cpy);
1126 const MachineOperand &Src = *CopyDetails.Source;
1127 const MachineOperand &Dest = *CopyDetails.Destination;
1128 OldReg = Dest.getReg();
1129 NewReg = Src.getReg();
1130 SubReg = Src.getSubReg();
1131 }
1132
1133 return {NewReg, SubReg};
1134 };
1135
1136 // First seek either the defining instruction, or a copy from a physreg.
1137 // During search, the current state is the current copy instruction, and which
1138 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1139 // deal with those later.
1140 auto State = GetRegAndSubreg(MI);
1141 auto CurInst = MI.getIterator();
1142 SmallVector<unsigned, 4> SubregsSeen;
1143 while (true) {
1144 // If we've found a copy from a physreg, first portion of search is over.
1145 if (!State.first.isVirtual())
1146 break;
1147
1148 // Record any subregister qualifier.
1149 if (State.second)
1150 SubregsSeen.push_back(State.second);
1151
1152 assert(MRI.hasOneDef(State.first));
1153 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1154 CurInst = Inst.getIterator();
1155
1156 // Any non-copy instruction is the defining instruction we're seeking.
1157 if (!Inst.isCopyLike() && !TII.isCopyLikeInstr(Inst))
1158 break;
1159 State = GetRegAndSubreg(Inst);
1160 };
1161
1162 // Helper lambda to apply additional subregister substitutions to a known
1163 // instruction/operand pair. Adds new (fake) substitutions so that we can
1164 // record the subregister. FIXME: this isn't very space efficient if multiple
1165 // values are tracked back through the same copies; cache something later.
1166 auto ApplySubregisters =
1168 for (unsigned Subreg : reverse(SubregsSeen)) {
1169 // Fetch a new instruction number, not attached to an actual instruction.
1170 unsigned NewInstrNumber = getNewDebugInstrNum();
1171 // Add a substitution from the "new" number to the known one, with a
1172 // qualifying subreg.
1173 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1174 // Return the new number; to find the underlying value, consumers need to
1175 // deal with the qualifying subreg.
1176 P = {NewInstrNumber, 0};
1177 }
1178 return P;
1179 };
1180
1181 // If we managed to find the defining instruction after COPYs, return an
1182 // instruction / operand pair after adding subregister qualifiers.
1183 if (State.first.isVirtual()) {
1184 // Virtual register def -- we can just look up where this happens.
1185 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1186 for (auto &MO : Inst->all_defs()) {
1187 if (MO.getReg() != State.first)
1188 continue;
1189 return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
1190 }
1191
1192 llvm_unreachable("Vreg def with no corresponding operand?");
1193 }
1194
1195 // Our search ended in a copy from a physreg: walk back up the function
1196 // looking for whatever defines the physreg.
1197 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1198 State = GetRegAndSubreg(*CurInst);
1199 Register RegToSeek = State.first;
1200
1201 auto RMII = CurInst->getReverseIterator();
1202 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1203 for (auto &ToExamine : PrevInstrs) {
1204 for (auto &MO : ToExamine.all_defs()) {
1205 // Test for operand that defines something aliasing RegToSeek.
1206 if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
1207 continue;
1208
1209 return ApplySubregisters(
1210 {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
1211 }
1212 }
1213
1214 MachineBasicBlock &InsertBB = *CurInst->getParent();
1215
1216 // We reached the start of the block before finding a defining instruction.
1217 // There are numerous scenarios where this can happen:
1218 // * Constant physical registers,
1219 // * Several intrinsics that allow LLVM-IR to read arbitrary registers,
1220 // * Arguments in the entry block,
1221 // * Exception handling landing pads.
1222 // Validating all of them is too difficult, so just insert a DBG_PHI reading
1223 // the variable value at this position, rather than checking it makes sense.
1224
1225 // Create DBG_PHI for specified physreg.
1226 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1227 TII.get(TargetOpcode::DBG_PHI));
1228 Builder.addReg(State.first);
1229 unsigned NewNum = getNewDebugInstrNum();
1230 Builder.addImm(NewNum);
1231 return ApplySubregisters({NewNum, 0u});
1232}
1233
1234void MachineFunction::finalizeDebugInstrRefs() {
1235 auto *TII = getSubtarget().getInstrInfo();
1236
1237 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
1238 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
1239 MI.setDesc(RefII);
1240 MI.setDebugValueUndef();
1241 };
1242
1243 DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;
1244 for (auto &MBB : *this) {
1245 for (auto &MI : MBB) {
1246 if (!MI.isDebugRef())
1247 continue;
1248
1249 bool IsValidRef = true;
1250
1251 for (MachineOperand &MO : MI.debug_operands()) {
1252 if (!MO.isReg())
1253 continue;
1254
1255 Register Reg = MO.getReg();
1256
1257 // Some vregs can be deleted as redundant in the meantime. Mark those
1258 // as DBG_VALUE $noreg. Additionally, some normal instructions are
1259 // quickly deleted, leaving dangling references to vregs with no def.
1260 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
1261 IsValidRef = false;
1262 break;
1263 }
1264
1265 assert(Reg.isVirtual());
1266 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1267
1268 // If we've found a copy-like instruction, follow it back to the
1269 // instruction that defines the source value, see salvageCopySSA docs
1270 // for why this is important.
1271 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1272 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
1273 MO.ChangeToDbgInstrRef(Result.first, Result.second);
1274 } else {
1275 // Otherwise, identify the operand number that the VReg refers to.
1276 unsigned OperandIdx = 0;
1277 for (const auto &DefMO : DefMI.operands()) {
1278 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
1279 break;
1280 ++OperandIdx;
1281 }
1282 assert(OperandIdx < DefMI.getNumOperands());
1283
1284 // Morph this instr ref to point at the given instruction and operand.
1285 unsigned ID = DefMI.getDebugInstrNum();
1286 MO.ChangeToDbgInstrRef(ID, OperandIdx);
1287 }
1288 }
1289
1290 if (!IsValidRef)
1291 MakeUndefDbgValue(MI);
1292 }
1293 }
1294}
1295
1296bool MachineFunction::shouldUseDebugInstrRef() const {
1297 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1298 // have optimized code inlined into this unoptimized code, however with
1299 // fewer and less aggressive optimizations happening, coverage and accuracy
1300 // should not suffer.
1301 if (getTarget().getOptLevel() == CodeGenOptLevel::None)
1302 return false;
1303
1304 // Don't use instr-ref if this function is marked optnone.
1305 if (F.hasFnAttribute(Attribute::OptimizeNone))
1306 return false;
1307
1308 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1309 return true;
1310
1311 return false;
1312}
1313
1314bool MachineFunction::useDebugInstrRef() const {
1315 return UseDebugInstrRef;
1316}
1317
1321
1322// Use one million as a high / reserved number.
1323const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1324
1325/// \}
1326
1327//===----------------------------------------------------------------------===//
1328// MachineJumpTableInfo implementation
1329//===----------------------------------------------------------------------===//
1330
1332 const std::vector<MachineBasicBlock *> &MBBs)
1334
1335/// Return the size of each entry in the jump table.
1336unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
1337 // The size of a jump table entry is 4 bytes unless the entry is just the
1338 // address of a block, in which case it is the pointer size.
1339 switch (getEntryKind()) {
1341 return TD.getPointerSize();
1344 return 8;
1348 return 4;
1350 return 0;
1351 }
1352 llvm_unreachable("Unknown jump table encoding!");
1353}
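// Editor's note: the case labels elided above follow the usual mapping:
// EK_BlockAddress uses the pointer size, EK_GPRel64BlockAddress and
// EK_LabelDifference64 are 8 bytes, EK_GPRel32BlockAddress,
// EK_LabelDifference32 and EK_Custom32 are 4 bytes, and EK_Inline is 0.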
1354
1355/// Return the alignment of each entry in the jump table.
1356unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
1357 // The alignment of a jump table entry is the alignment of int32 unless the
1358 // entry is just the address of a block, in which case it is the pointer
1359 // alignment.
1360 switch (getEntryKind()) {
1362 return TD.getPointerABIAlignment(0).value();
1365 return TD.getABIIntegerTypeAlignment(64).value();
1369 return TD.getABIIntegerTypeAlignment(32).value();
1371 return 1;
1372 }
1373 llvm_unreachable("Unknown jump table encoding!");
1374}
1375
1376/// Create a new jump table entry in the jump table info.
1377unsigned MachineJumpTableInfo::createJumpTableIndex(
1378 const std::vector<MachineBasicBlock*> &DestBBs) {
1379 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1380 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1381 return JumpTables.size()-1;
1382}
1383
1384bool MachineJumpTableInfo::updateJumpTableEntryHotness(
1385 size_t JTI, MachineFunctionDataHotness Hotness) {
1386 assert(JTI < JumpTables.size() && "Invalid JTI!");
1387 // Record the largest hotness value.
1388 if (Hotness <= JumpTables[JTI].Hotness)
1389 return false;
1390
1391 JumpTables[JTI].Hotness = Hotness;
1392 return true;
1393}
1394
1395/// If Old is the target of any jump tables, update the jump tables to branch
1396/// to New instead.
1397bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
1398 MachineBasicBlock *New) {
1399 assert(Old != New && "Not making a change?");
1400 bool MadeChange = false;
1401 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1402 ReplaceMBBInJumpTable(i, Old, New);
1403 return MadeChange;
1404}
1405
1406/// If MBB is present in any jump tables, remove it.
1407bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
1408 bool MadeChange = false;
1409 for (MachineJumpTableEntry &JTE : JumpTables) {
1410 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1411 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1412 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1413 }
1414 return MadeChange;
1415}
1416
1417/// If Old is a target of the jump tables, update the jump table to branch to
1418/// New instead.
1419bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
1420 MachineBasicBlock *Old,
1421 MachineBasicBlock *New) {
1422 assert(Old != New && "Not making a change?");
1423 bool MadeChange = false;
1424 MachineJumpTableEntry &JTE = JumpTables[Idx];
1425 for (MachineBasicBlock *&MBB : JTE.MBBs)
1426 if (MBB == Old) {
1427 MBB = New;
1428 MadeChange = true;
1429 }
1430 return MadeChange;
1431}
1432
1433void MachineJumpTableInfo::print(raw_ostream &OS) const {
1434 if (JumpTables.empty()) return;
1435
1436 OS << "Jump Tables:\n";
1437
1438 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1439 OS << printJumpTableEntryReference(i) << ':';
1440 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1441 OS << ' ' << printMBBReference(*MBB);
1442 if (i != e)
1443 OS << '\n';
1444 }
1445
1446 OS << '\n';
1447}
1448
1449#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1451#endif
1452
1453Printable llvm::printJumpTableEntryReference(unsigned Idx) {
1454 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1455}
1456
1457//===----------------------------------------------------------------------===//
1458// MachineConstantPool implementation
1459//===----------------------------------------------------------------------===//
1460
1461void MachineConstantPoolValue::anchor() {}
1462
1463unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
1464 return DL.getTypeAllocSize(Ty);
1465}
1466
1469 return Val.MachineCPVal->getSizeInBytes(DL);
1470 return DL.getTypeAllocSize(Val.ConstVal->getType());
1471}
1472
1475 return true;
1476 return Val.ConstVal->needsDynamicRelocation();
1477}
1478
1481 if (needsRelocation())
1483 switch (getSizeInBytes(*DL)) {
1484 case 4:
1486 case 8:
1488 case 16:
1490 case 32:
1492 default:
1493 return SectionKind::getReadOnly();
1494 }
1495}
1496
1497MachineConstantPool::~MachineConstantPool() {
1498 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1499 // so keep track of which we've deleted to avoid double deletions.
1500 DenseSet<MachineConstantPoolValue *> Deleted;
1501 for (const MachineConstantPoolEntry &C : Constants)
1502 if (C.isMachineConstantPoolEntry()) {
1503 Deleted.insert(C.Val.MachineCPVal);
1504 delete C.Val.MachineCPVal;
1505 }
1506 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1507 if (Deleted.count(CPV) == 0)
1508 delete CPV;
1509 }
1510}
1511
1512/// Test whether the given two constants can be allocated the same constant pool
1513/// entry referenced by \param A.
1514static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1515 const DataLayout &DL) {
1516 // Handle the trivial case quickly.
1517 if (A == B) return true;
1518
1519 // If they have the same type but weren't the same constant, quickly
1520 // reject them.
1521 if (A->getType() == B->getType()) return false;
1522
1523 // We can't handle structs or arrays.
1524 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1525 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1526 return false;
1527
1528 // For now, only support constants with the same size.
1529 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1530 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1531 return false;
1532
1533 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();
1534
1535 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1536
1537 // Try constant folding a bitcast of both instructions to an integer. If we
1538 // get two identical ConstantInt's, then we are good to share them. We use
1539 // the constant folding APIs to do this so that we get the benefit of
1540 // DataLayout.
1541 if (isa<PointerType>(A->getType()))
1542 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1543 const_cast<Constant *>(A), IntTy, DL);
1544 else if (A->getType() != IntTy)
1545 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1546 IntTy, DL);
1547 if (isa<PointerType>(B->getType()))
1548 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1549 const_cast<Constant *>(B), IntTy, DL);
1550 else if (B->getType() != IntTy)
1551 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1552 IntTy, DL);
1553
1554 if (A != B)
1555 return false;
1556
1557 // Constants only safely match if A doesn't contain undef/poison.
1558 // As we'll be reusing A, it doesn't matter if B contain undef/poison.
1559 // TODO: Handle cases where A and B have the same undef/poison elements.
1560 // TODO: Merge A and B with mismatching undef/poison elements.
1561 return !ContainsUndefOrPoisonA;
1562}
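// Worked example (editor's addition): float 1.0 and i32 1065353216 have
// different types but the same 4-byte store size, and both constant-fold to
// the integer 0x3F800000, so they may share one constant-pool slot (as long as
// the existing entry contains no undef or poison elements).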
1563
1564/// Create a new entry in the constant pool or return an existing one.
1565/// User must specify the log2 of the minimum required alignment for the object.
1566unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
1567 Align Alignment) {
1568 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1569
1570 // Check to see if we already have this constant.
1571 //
1572 // FIXME, this could be made much more efficient for large constant pools.
1573 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1574 if (!Constants[i].isMachineConstantPoolEntry() &&
1575 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1576 if (Constants[i].getAlign() < Alignment)
1577 Constants[i].Alignment = Alignment;
1578 return i;
1579 }
1580
1581 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1582 return Constants.size()-1;
1583}
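// Usage sketch (editor's addition): instruction selection usually requests a
// slot and then references it through a constant-pool-index operand; C, MIB
// and the alignment are placeholders.
// \code
//   MachineConstantPool &MCP = *MF.getConstantPool();
//   unsigned Idx = MCP.getConstantPoolIndex(C, Align(8));
//   MIB.addConstantPoolIndex(Idx);
// \endcode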
1584
1585unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
1586 Align Alignment) {
1587 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1588
1589 // Check to see if we already have this constant.
1590 //
1591 // FIXME, this could be made much more efficient for large constant pools.
1592 int Idx = V->getExistingMachineCPValue(this, Alignment);
1593 if (Idx != -1) {
1594 MachineCPVsSharingEntries.insert(V);
1595 return (unsigned)Idx;
1596 }
1597
1598 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1599 return Constants.size()-1;
1600}
1601
1602void MachineConstantPool::print(raw_ostream &OS) const {
1603 if (Constants.empty()) return;
1604
1605 OS << "Constant Pool:\n";
1606 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1607 OS << " cp#" << i << ": ";
1608 if (Constants[i].isMachineConstantPoolEntry())
1609 Constants[i].Val.MachineCPVal->print(OS);
1610 else
1611 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1612 OS << ", align=" << Constants[i].getAlign().value();
1613 OS << "\n";
1614 }
1615}
1616
1617//===----------------------------------------------------------------------===//
1618// Template specialization for MachineFunction implementation of
1619// ProfileSummaryInfo::getEntryCount().
1620//===----------------------------------------------------------------------===//
1621template <>
1622std::optional<Function::ProfileCount>
1623ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
1624 const llvm::MachineFunction *F) const {
1625 return F->getFunction().getEntryCount();
1626}
1627
1628#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1630#endif
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:638
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static cl::opt< unsigned > AlignAllFunctions("align-all-functions", cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " "means align on 16B boundaries)."), cl::init(0), cl::Hidden)
static const MachineInstr * getCallInstr(const MachineInstr *MI)
Return the call machine instruction or find a call within bundle.
static Align getFnStackAlignment(const TargetSubtargetInfo &STI, const Function &F)
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, const DataLayout &DL)
Test whether the given two constants can be allocated the same constant pool entry referenced by.
void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo)
static const char * getPropertyName(MachineFunctionProperties::Property Prop)
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define P(N)
Basic Register Allocator
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file defines the SmallVector class.
static const int BlockSize
Definition TarWriter.cpp:33
This file describes how to lower LLVM code to machine code.
void print(OutputBuffer &OB) const
void clear(AllocatorType &Allocator)
Release all the tracked allocations to the allocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
iterator end() const
Definition ArrayRef.h:136
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
iterator begin() const
Definition ArrayRef.h:135
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
unsigned size_type
Definition BitVector.h:115
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Align getABIIntegerTypeAlignment(unsigned BitWidth) const
Returns the minimum ABI-required alignment for an integer type of the specified bitwidth.
Definition DataLayout.h:619
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
A debug info location.
Definition DebugLoc.h:124
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
Context object for machine code objects.
Definition MCContext.h:83
Describe properties that are true of each instruction in the target description file.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
Tracking metadata reference owned by Metadata.
Definition Metadata.h:900
A single uniqued string.
Definition Metadata.h:721
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
void setIsEndSection(bool V=true)
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MBBSectionID getSectionID() const
Returns the section ID of this basic block.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsBeginSection(bool V=true)
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool needsRelocation() const
This method classifies the entry according to whether or not it may generate a relocation entry.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
unsigned getSizeInBytes(const DataLayout &DL) const
SectionKind getSectionKind(const DataLayout *DL) const
Abstract base class for all machine specific constantpool value subclasses.
virtual unsigned getSizeInBytes(const DataLayout &DL) const
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
void dump() const
dump - Call print(cerr) to be called from the debugger.
void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about constant pool objects.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setUnsafeStackSize(uint64_t Size)
LLVM_ABI void print(raw_ostream &OS) const
Print the MachineFunctionProperties in human-readable form.
MachineFunctionProperties & reset(Property P)
virtual void MF_HandleRemoval(MachineInstr &MI)=0
Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI)=0
Callback after an insertion. This should not modify the MI directly.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
int getFilterIDFor(ArrayRef< unsigned > TyIds)
Return the id of the filter encoded by TyIds. This is function wide.
bool UseDebugInstrRef
Flag for whether this function contains DBG_VALUEs (false) or DBG_INSTR_REF (true).
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to New call site info.
std::pair< unsigned, unsigned > DebugInstrOperandPair
Pair of instruction number and operand number.
unsigned addFrameInst(const MCCFIInstruction &Inst)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
SmallVector< DebugSubstitution, 8 > DebugValueSubstitutions
Debug value substitutions: a collection of DebugSubstitution objects, recording changes in where a va...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
void viewCFGOnly() const
viewCFGOnly - This function is meant for use from the debugger.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New, unsigned MaxOperand=UINT_MAX)
Create substitutions for any tracked values in Old, to point at New.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it does not already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution from one <instr,operand> value to a different, new value.
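A hedged sketch of how such a substitution is typically recorded, assuming Old is being replaced by New and both define the tracked value in their operand 0:
  // Number both instructions (a number is assigned on first query), then
  // redirect debug users of Old's operand 0 to New's operand 0.
  unsigned OldNum = Old.getDebugInstrNum();
  unsigned NewNum = New.getDebugInstrNum();
  MF.makeDebugValueSubstitution({OldNum, 0}, {NewNum, 0});
substituteDebugValuesForInst, listed above, wraps this pattern for every matching operand of a pair of instructions.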
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
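As a sketch only, assuming Ptr is the IR Value being accessed and the access is an untyped 8-byte load:
  // Allocate a memory operand describing the load; it can then be attached to
  // an instruction, e.g. via MachineInstrBuilder::addMemOperand(MMO).
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(Ptr), MachineMemOperand::MOLoad,
      LLT::scalar(64), Align(8));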
MachineFunction(Function &F, const TargetMachine &Target, const TargetSubtargetInfo &STI, MCContext &Ctx, unsigned FunctionNum)
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void finalizeDebugInstrRefs()
Finalise any partially emitted debug instructions.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Deallocate an array of MachineOperands and recycle the memory.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI)
Initialize the target specific MachineFunctionInfo.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
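An illustrative sketch of filling in such a mask (a set bit conventionally means the register is preserved across the call; PhysReg is a placeholder MCRegister):
  uint32_t *Mask = MF.allocateRegMask();   // returned zero-initialized: all registers clobbered
  unsigned R = PhysReg.id();
  Mask[R / 32] |= 1u << (R % 32);          // mark PhysReg as preserved
The mask can then be attached to a call instruction, e.g. with MachineInstrBuilder::addRegMask(Mask).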
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef< unsigned > Sites)
Map the landing pad's EH symbol to the call site indexes.
void setUseDebugInstrRef(bool UseInstrRef)
Set whether this function will use instruction referencing or not.
LandingPadInfo & getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad)
Find or create a LandingPadInfo for the specified MachineBasicBlock.
unsigned size() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
unsigned DebugInstrNumberingCount
A count of how many instructions in the function have had numbers assigned to them.
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Align getAlignment() const
getAlignment - Return the alignment of the function.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
static const unsigned int DebugOperandMemNumber
A reserved operand number representing the instruction's memory operand, for instructions that have a ...
Function & getFunction()
Return the LLVM function that this machine code represents.
DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI)
const MachineBasicBlock & back() const
BasicBlockListType::iterator iterator
void setDebugInstrNumberingCount(unsigned Num)
Set value of DebugInstrNumberingCount field.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
void viewCFG() const
viewCFG - This function is meant for use from the debugger.
bool shouldUseDebugInstrRef() const
Determine whether, in the current machine configuration, we should use instruction referencing or not...
const MachineFunctionProperties & getProperties() const
Get the function properties.
void eraseAdditionalCallInfo(const MachineInstr *MI)
The following functions update call site info.
void RenumberBlocks(MachineBasicBlock *MBBFrom=nullptr)
RenumberBlocks - This discards all of the MachineBasicBlock numbers and recomputes them.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
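A sketch of the usual lowering pattern for an incoming argument, assuming PhysReg and RC come from the target's calling convention:
  // Register the physical register as a live-in and copy it into the returned
  // virtual register at the top of the entry block.
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  Register VReg = MF.addLiveIn(PhysReg, RC);
  MachineBasicBlock &Entry = MF.front();
  BuildMI(Entry, Entry.begin(), DebugLoc(), TII.get(TargetOpcode::COPY), VReg)
      .addReg(PhysReg);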
int64_t estimateFunctionSizeInBytes()
Return an estimate of the function's code size, taking into account block and function alignment.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
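A minimal usage sketch: create a block that is not tied to any IR basic block and append it to the function (the block is not part of the function until explicitly inserted):
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock();
  MF.push_back(NewMBB);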
void copyAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Copy the call site info from Old to New.
VariableDbgInfoMapTy VariableDbgInfos
void assignBeginEndSections()
Assign the IsBeginSection and IsEndSection fields for basic blocks in this function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
DebugInstrOperandPair salvageCopySSA(MachineInstr &MI, DenseMap< Register, DebugInstrOperandPair > &DbgPHICache)
Find the underlying defining instruction / operand for a COPY instruction while in SSA form.
Representation of each machine instruction.
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
unsigned getNumOperands() const
Returns the total number of operands.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB)
RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
LLVM_ABI bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTables - If Old is the target of any jump tables, update the jump tables to branch to...
LLVM_ABI void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about jump tables.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
LLVM_ABI unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
LLVM_ABI void dump() const
dump - Print to stderr.
LLVM_ABI bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTable - If Old is a target of the jump tables, update the jump table to branch to New...
LLVM_ABI bool updateJumpTableEntryHotness(size_t JTI, MachineFunctionDataHotness Hotness)
JTEntryKind
JTEntryKind - This enum indicates how each entry of the jump table is represented and emitted.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
LLVM_ABI unsigned getEntryAlignment(const DataLayout &TD) const
getEntryAlignment - Return the alignment of each entry in the jump table.
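To illustrate how these pieces fit together, a sketch in which Default, CaseA and CaseB are placeholder MachineBasicBlocks of MF:
  // Create (or fetch) the jump table info with a block-address entry kind,
  // then register the destination list and keep the resulting index.
  MachineJumpTableInfo *JTI =
      MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_BlockAddress);
  std::vector<MachineBasicBlock *> Dests = {CaseA, CaseB, Default};
  unsigned JTIdx = JTI->createJumpTableIndex(Dests);
The index is later referenced from the lowered branch, e.g. via MachineOperand::CreateJTI(JTIdx).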
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
Flags
Flags values. These may be or'd together.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
std::vector< std::pair< MCRegister, Register > >::const_iterator livein_iterator
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Simple wrapper around std::function<void(raw_ostream&)>.
Definition Printable.h:38
Wrapper class representing virtual and physical registers.
Definition Register.h:19
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
static SectionKind getMergeableConst4()
static SectionKind getReadOnlyWithRel()
static SectionKind getMergeableConst8()
static SectionKind getMergeableConst16()
static SectionKind getReadOnly()
static SectionKind getMergeableConst32()
SlotIndexes pass.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isStackRealignable() const
isStackRealignable - This method returns whether the stack can be realigned.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Align getPrefFunctionAlignment() const
Return the preferred function alignment.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
Primary interface to the complete machine description for the target machine.
TargetOptions Options
unsigned ForceDwarfFrameSection
Emit DWARF debug frame section.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:298
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
uint64_t MD5Hash(const FunctionId &Obj)
Definition FunctionId.h:167
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
MachineBasicBlock::instr_iterator getBundleStart(MachineBasicBlock::instr_iterator I)
Returns an iterator to the first instruction in the bundle containing I.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI Printable printJumpTableEntryReference(unsigned Idx)
Prints a jump table entry reference.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
MachineFunctionDataHotness
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1835
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
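Two illustrative evaluations of these helpers:
  uint64_t Padded = alignTo(10, Align(8));      // 16: 10 rounded up to a multiple of 8
  Align Known = commonAlignment(Align(16), 8);  // Align(8): alignment still guaranteed after an offset of 8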
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
std::string getNodeLabel(const MachineBasicBlock *Node, const MachineFunction *Graph)
static std::string getGraphName(const MachineFunction *F)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Represent subnormal handling kind for floating point instruction inputs and outputs.
This structure is used to retain landing pad info for the current function.
SmallVector< MCSymbol *, 1 > EndLabels
MachineBasicBlock * LandingPadBlock
SmallVector< MCSymbol *, 1 > BeginLabels
std::vector< int > TypeIds
SmallVector< ConstantInt *, 4 > CalleeTypeIds
Callee type ids.
MachineJumpTableEntry - One jump table in the jump table info.
LLVM_ABI MachineJumpTableEntry(const std::vector< MachineBasicBlock * > &M)
std::vector< MachineBasicBlock * > MBBs
MBBs - The vector of basic blocks from which to create the jump table.
MachineFunctionDataHotness Hotness
The hotness of MJTE is inferred from the hotness of the source basic block(s) that reference it.
This class contains a discriminated union of information about pointers in memory operands,...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static void deleteNode(NodeTy *V)
Definition ilist.h:42