LLVM 20.0.0git
HexagonFrameLowering.cpp
Go to the documentation of this file.
1//===- HexagonFrameLowering.cpp - Define frame lowering -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//
8//===----------------------------------------------------------------------===//
9
11#include "HexagonBlockRanges.h"
12#include "HexagonInstrInfo.h"
14#include "HexagonRegisterInfo.h"
15#include "HexagonSubtarget.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Function.h"
43#include "llvm/MC/MCDwarf.h"
45#include "llvm/Pass.h"
49#include "llvm/Support/Debug.h"
55#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <iterator>
59#include <limits>
60#include <map>
61#include <optional>
62#include <utility>
63#include <vector>
64
65#define DEBUG_TYPE "hexagon-pei"
66
67// Hexagon stack frame layout as defined by the ABI:
68//
69// Incoming arguments
70// passed via stack
71// |
72// |
73// SP during function's FP during function's |
74// +-- runtime (top of stack) runtime (bottom) --+ |
75// | | |
76// --++---------------------+------------------+-----------------++-+-------
77// | parameter area for | variable-size | fixed-size |LR| arg
78// | called functions | local objects | local objects |FP|
79// --+----------------------+------------------+-----------------+--+-------
80// <- size known -> <- size unknown -> <- size known ->
81//
82// Low address High address
83//
84// <--- stack growth
85//
86//
87// - In any circumstances, the outgoing function arguments are always accessi-
88// ble using the SP, and the incoming arguments are accessible using the FP.
89// - If the local objects are not aligned, they can always be accessed using
90// the FP.
91// - If there are no variable-sized objects, the local objects can always be
92// accessed using the SP, regardless whether they are aligned or not. (The
93// alignment padding will be at the bottom of the stack (highest address),
94// and so the offset with respect to the SP will be known at the compile-
95// -time.)
96//
// The only complication occurs if there are both local aligned objects and
98// dynamically allocated (variable-sized) objects. The alignment pad will be
99// placed between the FP and the local objects, thus preventing the use of the
100// FP to access the local objects. At the same time, the variable-sized objects
101// will be between the SP and the local objects, thus introducing an unknown
102// distance from the SP to the locals.
103//
104// To avoid this problem, a new register is created that holds the aligned
105// address of the bottom of the stack, referred in the sources as AP (aligned
106// pointer). The AP will be equal to "FP-p", where "p" is the smallest pad
107// that aligns AP to the required boundary (a maximum of the alignments of
108// all stack objects, fixed- and variable-sized). All local objects[1] will
109// then use AP as the base pointer.
110// [1] The exception is with "fixed" stack objects. "Fixed" stack objects get
111// their name from being allocated at fixed locations on the stack, relative
112// to the FP. In the presence of dynamic allocation and local alignment, such
113// objects can only be accessed through the FP.
114//
115// Illustration of the AP:
116// FP --+
117// |
118// ---------------+---------------------+-----+-----------------------++-+--
119// Rest of the | Local stack objects | Pad | Fixed stack objects |LR|
120// stack frame | (aligned) | | (CSR, spills, etc.) |FP|
121// ---------------+---------------------+-----+-----------------+-----+--+--
122// |<-- Multiple of the -->|
123// stack alignment +-- AP
124//
125// The AP is set up at the beginning of the function. Since it is not a dedi-
126// cated (reserved) register, it needs to be kept live throughout the function
127// to be available as the base register for local object accesses.
// Normally, the address of a stack object is obtained by a pseudo-instruction
129// PS_fi. To access local objects with the AP register present, a different
130// pseudo-instruction needs to be used: PS_fia. The PS_fia takes one extra
131// argument compared to PS_fi: the first input register is the AP register.
132// This keeps the register live between its definition and its uses.
133
134// The AP register is originally set up using pseudo-instruction PS_aligna:
135// AP = PS_aligna A
136// where
137// A - required stack alignment
138// The alignment value must be the maximum of all alignments required by
139// any stack object.
140
141// The dynamic allocation uses a pseudo-instruction PS_alloca:
142// Rd = PS_alloca Rs, A
143// where
144// Rd - address of the allocated space
145// Rs - minimum size (the actual allocated can be larger to accommodate
146// alignment)
147// A - required alignment
148
149using namespace llvm;
150
151static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
152 cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));
153
155 NumberScavengerSlots("number-scavenger-slots", cl::Hidden,
156 cl::desc("Set the number of scavenger slots"),
157 cl::init(2));
158
159static cl::opt<int>
160 SpillFuncThreshold("spill-func-threshold", cl::Hidden,
161 cl::desc("Specify O2(not Os) spill func threshold"),
162 cl::init(6));
163
164static cl::opt<int>
165 SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden,
166 cl::desc("Specify Os spill func threshold"),
167 cl::init(1));
168
170 "enable-stackovf-sanitizer", cl::Hidden,
171 cl::desc("Enable runtime checks for stack overflow."), cl::init(false));
172
173static cl::opt<bool>
174 EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden,
175 cl::desc("Enable stack frame shrink wrapping"));
176
178 ShrinkLimit("shrink-frame-limit",
179 cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden,
180 cl::desc("Max count of stack frame shrink-wraps"));
181
182static cl::opt<bool>
183 EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden,
184 cl::desc("Enable long calls for save-restore stubs."),
185 cl::init(false));
186
187static cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true),
188 cl::Hidden, cl::desc("Refrain from using FP whenever possible"));
189
190static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
191 cl::init(true), cl::desc("Optimize spill slots"));
192
193#ifndef NDEBUG
195 cl::init(std::numeric_limits<unsigned>::max()));
196static unsigned SpillOptCount = 0;
197#endif
198
199namespace llvm {
200
203
204} // end namespace llvm
205
206namespace {
207
208 class HexagonCallFrameInformation : public MachineFunctionPass {
209 public:
210 static char ID;
211
212 HexagonCallFrameInformation() : MachineFunctionPass(ID) {
215 }
216
217 bool runOnMachineFunction(MachineFunction &MF) override;
218
221 MachineFunctionProperties::Property::NoVRegs);
222 }
223 };
224
225 char HexagonCallFrameInformation::ID = 0;
226
227} // end anonymous namespace
228
229bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
230 auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
231 bool NeedCFI = MF.needsFrameMoves();
232
233 if (!NeedCFI)
234 return false;
235 HFI.insertCFIInstructions(MF);
236 return true;
237}
238
239INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi",
240 "Hexagon call frame information", false, false)
241
243 return new HexagonCallFrameInformation();
244}
245
246/// Map a register pair Reg to the subregister that has the greater "number",
247/// i.e. D3 (aka R7:6) will be mapped to R7, etc.
249 const TargetRegisterInfo &TRI,
250 bool hireg = true) {
251 if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
252 return Reg;
253
254 Register RegNo = 0;
255 for (MCPhysReg SubReg : TRI.subregs(Reg)) {
256 if (hireg) {
257 if (SubReg > RegNo)
258 RegNo = SubReg;
259 } else {
260 if (!RegNo || SubReg < RegNo)
261 RegNo = SubReg;
262 }
263 }
264 return RegNo;
265}
266
267/// Returns the callee saved register with the largest id in the vector.
269 const TargetRegisterInfo &TRI) {
270 static_assert(Hexagon::R1 > 0,
271 "Assume physical registers are encoded as positive integers");
272 if (CSI.empty())
273 return 0;
274
275 Register Max = getMax32BitSubRegister(CSI[0].getReg(), TRI);
276 for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
278 if (Reg > Max)
279 Max = Reg;
280 }
281 return Max;
282}
283
284/// Checks if the basic block contains any instruction that needs a stack
285/// frame to be already in place.
286static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
287 const HexagonRegisterInfo &HRI) {
288 for (const MachineInstr &MI : MBB) {
289 if (MI.isCall())
290 return true;
291 unsigned Opc = MI.getOpcode();
292 switch (Opc) {
293 case Hexagon::PS_alloca:
294 case Hexagon::PS_aligna:
295 return true;
296 default:
297 break;
298 }
299 // Check individual operands.
300 for (const MachineOperand &MO : MI.operands()) {
301 // While the presence of a frame index does not prove that a stack
302 // frame will be required, all frame indexes should be within alloc-
303 // frame/deallocframe. Otherwise, the code that translates a frame
304 // index into an offset would have to be aware of the placement of
305 // the frame creation/destruction instructions.
306 if (MO.isFI())
307 return true;
308 if (MO.isReg()) {
309 Register R = MO.getReg();
310 // Debug instructions may refer to $noreg.
311 if (!R)
312 continue;
313 // Virtual registers will need scavenging, which then may require
314 // a stack slot.
315 if (R.isVirtual())
316 return true;
317 for (MCPhysReg S : HRI.subregs_inclusive(R))
318 if (CSR[S])
319 return true;
320 continue;
321 }
322 if (MO.isRegMask()) {
323 // A regmask would normally have all callee-saved registers marked
324 // as preserved, so this check would not be needed, but in case of
325 // ever having other regmasks (for other calling conventions),
326 // make sure they would be processed correctly.
327 const uint32_t *BM = MO.getRegMask();
328 for (int x = CSR.find_first(); x >= 0; x = CSR.find_next(x)) {
329 unsigned R = x;
330 // If this regmask does not preserve a CSR, a frame will be needed.
331 if (!(BM[R/32] & (1u << (R%32))))
332 return true;
333 }
334 }
335 }
336 }
337 return false;
338}
339
/// Returns true if MBB has a machine instruction that indicates a tail
/// call in the block.
342static bool hasTailCall(const MachineBasicBlock &MBB) {
344 if (I == MBB.end())
345 return false;
346 unsigned RetOpc = I->getOpcode();
347 return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
348}
349
350/// Returns true if MBB contains an instruction that returns.
351static bool hasReturn(const MachineBasicBlock &MBB) {
352 for (const MachineInstr &MI : MBB.terminators())
353 if (MI.isReturn())
354 return true;
355 return false;
356}
357
358/// Returns the "return" instruction from this block, or nullptr if there
359/// isn't any.
361 for (auto &I : MBB)
362 if (I.isReturn())
363 return &I;
364 return nullptr;
365}
366
367static bool isRestoreCall(unsigned Opc) {
368 switch (Opc) {
369 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
370 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
371 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
372 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
373 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
374 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
375 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
376 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
377 return true;
378 }
379 return false;
380}
381
382static inline bool isOptNone(const MachineFunction &MF) {
383 return MF.getFunction().hasOptNone() ||
384 MF.getTarget().getOptLevel() == CodeGenOptLevel::None;
385}
386
387static inline bool isOptSize(const MachineFunction &MF) {
388 const Function &F = MF.getFunction();
389 return F.hasOptSize() && !F.hasMinSize();
390}
391
392static inline bool isMinSize(const MachineFunction &MF) {
393 return MF.getFunction().hasMinSize();
394}
395
396/// Implements shrink-wrapping of the stack frame. By default, stack frame
397/// is created in the function entry block, and is cleaned up in every block
398/// that returns. This function finds alternate blocks: one for the frame
399/// setup (prolog) and one for the cleanup (epilog).
400void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
401 MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB) const {
402 static unsigned ShrinkCounter = 0;
403
405 MF.getFunction().isVarArg())
406 return;
407 if (ShrinkLimit.getPosition()) {
408 if (ShrinkCounter >= ShrinkLimit)
409 return;
410 ShrinkCounter++;
411 }
412
413 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
414
416 MDT.calculate(MF);
418 MPT.recalculate(MF);
419
420 using UnsignedMap = DenseMap<unsigned, unsigned>;
422
423 UnsignedMap RPO;
424 RPOTType RPOT(&MF);
425 unsigned RPON = 0;
426 for (auto &I : RPOT)
427 RPO[I->getNumber()] = RPON++;
428
429 // Don't process functions that have loops, at least for now. Placement
430 // of prolog and epilog must take loop structure into account. For simpli-
431 // city don't do it right now.
432 for (auto &I : MF) {
433 unsigned BN = RPO[I.getNumber()];
434 for (MachineBasicBlock *Succ : I.successors())
435 // If found a back-edge, return.
436 if (RPO[Succ->getNumber()] <= BN)
437 return;
438 }
439
440 // Collect the set of blocks that need a stack frame to execute. Scan
441 // each block for uses/defs of callee-saved registers, calls, etc.
443 BitVector CSR(Hexagon::NUM_TARGET_REGS);
444 for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
445 for (MCPhysReg S : HRI.subregs_inclusive(*P))
446 CSR[S] = true;
447
448 for (auto &I : MF)
449 if (needsStackFrame(I, CSR, HRI))
450 SFBlocks.push_back(&I);
451
452 LLVM_DEBUG({
453 dbgs() << "Blocks needing SF: {";
454 for (auto &B : SFBlocks)
455 dbgs() << " " << printMBBReference(*B);
456 dbgs() << " }\n";
457 });
458 // No frame needed?
459 if (SFBlocks.empty())
460 return;
461
462 // Pick a common dominator and a common post-dominator.
463 MachineBasicBlock *DomB = SFBlocks[0];
464 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
465 DomB = MDT.findNearestCommonDominator(DomB, SFBlocks[i]);
466 if (!DomB)
467 break;
468 }
469 MachineBasicBlock *PDomB = SFBlocks[0];
470 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
471 PDomB = MPT.findNearestCommonDominator(PDomB, SFBlocks[i]);
472 if (!PDomB)
473 break;
474 }
475 LLVM_DEBUG({
476 dbgs() << "Computed dom block: ";
477 if (DomB)
478 dbgs() << printMBBReference(*DomB);
479 else
480 dbgs() << "<null>";
481 dbgs() << ", computed pdom block: ";
482 if (PDomB)
483 dbgs() << printMBBReference(*PDomB);
484 else
485 dbgs() << "<null>";
486 dbgs() << "\n";
487 });
488 if (!DomB || !PDomB)
489 return;
490
491 // Make sure that DomB dominates PDomB and PDomB post-dominates DomB.
492 if (!MDT.dominates(DomB, PDomB)) {
493 LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
494 return;
495 }
496 if (!MPT.dominates(PDomB, DomB)) {
497 LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
498 return;
499 }
500
501 // Finally, everything seems right.
502 PrologB = DomB;
503 EpilogB = PDomB;
504}
505
506/// Perform most of the PEI work here:
507/// - saving/restoring of the callee-saved registers,
508/// - stack frame creation and destruction.
509/// Normally, this work is distributed among various functions, but doing it
510/// in one place allows shrink-wrapping of the stack frame.
512 MachineBasicBlock &MBB) const {
513 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
514
515 MachineFrameInfo &MFI = MF.getFrameInfo();
516 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
517
518 MachineBasicBlock *PrologB = &MF.front(), *EpilogB = nullptr;
520 findShrunkPrologEpilog(MF, PrologB, EpilogB);
521
522 bool PrologueStubs = false;
523 insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
524 insertPrologueInBlock(*PrologB, PrologueStubs);
525 updateEntryPaths(MF, *PrologB);
526
527 if (EpilogB) {
528 insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
529 insertEpilogueInBlock(*EpilogB);
530 } else {
531 for (auto &B : MF)
532 if (B.isReturnBlock())
533 insertCSRRestoresInBlock(B, CSI, HRI);
534
535 for (auto &B : MF)
536 if (B.isReturnBlock())
537 insertEpilogueInBlock(B);
538
539 for (auto &B : MF) {
540 if (B.empty())
541 continue;
542 MachineInstr *RetI = getReturn(B);
543 if (!RetI || isRestoreCall(RetI->getOpcode()))
544 continue;
545 for (auto &R : CSI)
546 RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
547 }
548 }
549
550 if (EpilogB) {
551 // If there is an epilog block, it may not have a return instruction.
552 // In such case, we need to add the callee-saved registers as live-ins
553 // in all blocks on all paths from the epilog to any return block.
554 unsigned MaxBN = MF.getNumBlockIDs();
555 BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
556 updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
557 }
558}
559
560/// Returns true if the target can safely skip saving callee-saved registers
561/// for noreturn nounwind functions.
563 const MachineFunction &MF) const {
564 const auto &F = MF.getFunction();
565 assert(F.hasFnAttribute(Attribute::NoReturn) &&
566 F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
567 !F.getFunction().hasFnAttribute(Attribute::UWTable));
568 (void)F;
569
570 // No need to save callee saved registers if the function does not return.
571 return MF.getSubtarget<HexagonSubtarget>().noreturnStackElim();
572}
573
574// Helper function used to determine when to eliminate the stack frame for
575// functions marked as noreturn and when the noreturn-stack-elim options are
576// specified. When both these conditions are true, then a FP may not be needed
577// if the function makes a call. It is very similar to enableCalleeSaveSkip,
578// but it used to check if the allocframe can be eliminated as well.
579static bool enableAllocFrameElim(const MachineFunction &MF) {
580 const auto &F = MF.getFunction();
581 const auto &MFI = MF.getFrameInfo();
582 const auto &HST = MF.getSubtarget<HexagonSubtarget>();
583 assert(!MFI.hasVarSizedObjects() &&
584 !HST.getRegisterInfo()->hasStackRealignment(MF));
585 return F.hasFnAttribute(Attribute::NoReturn) &&
586 F.hasFnAttribute(Attribute::NoUnwind) &&
587 !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
588 MFI.getStackSize() == 0;
589}
590
591void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
592 bool PrologueStubs) const {
594 MachineFrameInfo &MFI = MF.getFrameInfo();
595 auto &HST = MF.getSubtarget<HexagonSubtarget>();
596 auto &HII = *HST.getInstrInfo();
597 auto &HRI = *HST.getRegisterInfo();
598
599 Align MaxAlign = std::max(MFI.getMaxAlign(), getStackAlign());
600
601 // Calculate the total stack frame size.
602 // Get the number of bytes to allocate from the FrameInfo.
603 unsigned FrameSize = MFI.getStackSize();
604 // Round up the max call frame size to the max alignment on the stack.
605 unsigned MaxCFA = alignTo(MFI.getMaxCallFrameSize(), MaxAlign);
606 MFI.setMaxCallFrameSize(MaxCFA);
607
608 FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
609 MFI.setStackSize(FrameSize);
610
611 bool AlignStack = (MaxAlign > getStackAlign());
612
613 // Get the number of bytes to allocate from the FrameInfo.
614 unsigned NumBytes = MFI.getStackSize();
615 Register SP = HRI.getStackRegister();
616 unsigned MaxCF = MFI.getMaxCallFrameSize();
618
620 for (auto &MBB : MF)
621 for (auto &MI : MBB)
622 if (MI.getOpcode() == Hexagon::PS_alloca)
623 AdjustRegs.push_back(&MI);
624
625 for (auto *MI : AdjustRegs) {
626 assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
627 expandAlloca(MI, HII, SP, MaxCF);
628 MI->eraseFromParent();
629 }
630
631 DebugLoc dl = MBB.findDebugLoc(InsertPt);
632
633 if (MF.getFunction().isVarArg() &&
634 MF.getSubtarget<HexagonSubtarget>().isEnvironmentMusl()) {
635 // Calculate the size of register saved area.
636 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
637 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0)
638 ? NumVarArgRegs * 4
639 : NumVarArgRegs * 4 + 4;
640 if (RegisterSavedAreaSizePlusPadding > 0) {
641 // Decrement the stack pointer by size of register saved area plus
642 // padding if any.
643 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
644 .addReg(SP)
645 .addImm(-RegisterSavedAreaSizePlusPadding)
647
648 int NumBytes = 0;
649 // Copy all the named arguments below register saved area.
650 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
651 for (int i = HMFI.getFirstNamedArgFrameIndex(),
652 e = HMFI.getLastNamedArgFrameIndex(); i >= e; --i) {
653 uint64_t ObjSize = MFI.getObjectSize(i);
654 Align ObjAlign = MFI.getObjectAlign(i);
655
656 // Determine the kind of load/store that should be used.
657 unsigned LDOpc, STOpc;
658 uint64_t OpcodeChecker = ObjAlign.value();
659
660 // Handle cases where alignment of an object is > its size.
661 if (ObjAlign > ObjSize) {
662 if (ObjSize <= 1)
663 OpcodeChecker = 1;
664 else if (ObjSize <= 2)
665 OpcodeChecker = 2;
666 else if (ObjSize <= 4)
667 OpcodeChecker = 4;
668 else if (ObjSize > 4)
669 OpcodeChecker = 8;
670 }
671
672 switch (OpcodeChecker) {
673 case 1:
674 LDOpc = Hexagon::L2_loadrb_io;
675 STOpc = Hexagon::S2_storerb_io;
676 break;
677 case 2:
678 LDOpc = Hexagon::L2_loadrh_io;
679 STOpc = Hexagon::S2_storerh_io;
680 break;
681 case 4:
682 LDOpc = Hexagon::L2_loadri_io;
683 STOpc = Hexagon::S2_storeri_io;
684 break;
685 case 8:
686 default:
687 LDOpc = Hexagon::L2_loadrd_io;
688 STOpc = Hexagon::S2_storerd_io;
689 break;
690 }
691
692 Register RegUsed = LDOpc == Hexagon::L2_loadrd_io ? Hexagon::D3
693 : Hexagon::R6;
694 int LoadStoreCount = ObjSize / OpcodeChecker;
695
696 if (ObjSize % OpcodeChecker)
697 ++LoadStoreCount;
698
699 // Get the start location of the load. NumBytes is basically the
700 // offset from the stack pointer of previous function, which would be
701 // the caller in this case, as this function has variable argument
702 // list.
703 if (NumBytes != 0)
704 NumBytes = alignTo(NumBytes, ObjAlign);
705
706 int Count = 0;
707 while (Count < LoadStoreCount) {
708 // Load the value of the named argument on stack.
709 BuildMI(MBB, InsertPt, dl, HII.get(LDOpc), RegUsed)
710 .addReg(SP)
711 .addImm(RegisterSavedAreaSizePlusPadding +
712 ObjAlign.value() * Count + NumBytes)
714
715 // Store it below the register saved area plus padding.
716 BuildMI(MBB, InsertPt, dl, HII.get(STOpc))
717 .addReg(SP)
718 .addImm(ObjAlign.value() * Count + NumBytes)
719 .addReg(RegUsed)
721
722 Count++;
723 }
724 NumBytes += MFI.getObjectSize(i);
725 }
726
727 // Make NumBytes 8 byte aligned
728 NumBytes = alignTo(NumBytes, 8);
729
730 // If the number of registers having variable arguments is odd,
731 // leave 4 bytes of padding to get to the location where first
732 // variable argument which was passed through register was copied.
733 NumBytes = (NumVarArgRegs % 2 == 0) ? NumBytes : NumBytes + 4;
734
735 for (int j = FirstVarArgSavedReg, i = 0; j < 6; ++j, ++i) {
736 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_storeri_io))
737 .addReg(SP)
738 .addImm(NumBytes + 4 * i)
739 .addReg(Hexagon::R0 + j)
741 }
742 }
743 }
744
745 if (hasFP(MF)) {
746 insertAllocframe(MBB, InsertPt, NumBytes);
747 if (AlignStack) {
748 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
749 .addReg(SP)
750 .addImm(-int64_t(MaxAlign.value()));
751 }
752 // If the stack-checking is enabled, and we spilled the callee-saved
753 // registers inline (i.e. did not use a spill function), then call
754 // the stack checker directly.
755 if (EnableStackOVFSanitizer && !PrologueStubs)
756 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
757 .addExternalSymbol("__runtime_stack_check");
758 } else if (NumBytes > 0) {
759 assert(alignTo(NumBytes, 8) == NumBytes);
760 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
761 .addReg(SP)
762 .addImm(-int(NumBytes));
763 }
764}
765
766void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
768 auto &HST = MF.getSubtarget<HexagonSubtarget>();
769 auto &HII = *HST.getInstrInfo();
770 auto &HRI = *HST.getRegisterInfo();
771 Register SP = HRI.getStackRegister();
772
774 DebugLoc dl = MBB.findDebugLoc(InsertPt);
775
776 if (!hasFP(MF)) {
777 MachineFrameInfo &MFI = MF.getFrameInfo();
778 unsigned NumBytes = MFI.getStackSize();
779 if (MF.getFunction().isVarArg() &&
780 MF.getSubtarget<HexagonSubtarget>().isEnvironmentMusl()) {
781 // On Hexagon Linux, deallocate the stack for the register saved area.
782 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
783 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
784 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
785 NumBytes += RegisterSavedAreaSizePlusPadding;
786 }
787 if (NumBytes) {
788 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
789 .addReg(SP)
790 .addImm(NumBytes);
791 }
792 return;
793 }
794
795 MachineInstr *RetI = getReturn(MBB);
796 unsigned RetOpc = RetI ? RetI->getOpcode() : 0;
797
798 // Handle EH_RETURN.
799 if (RetOpc == Hexagon::EH_RETURN_JMPR) {
800 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
801 .addDef(Hexagon::D15)
802 .addReg(Hexagon::R30);
803 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
804 .addReg(SP)
805 .addReg(Hexagon::R28);
806 return;
807 }
808
809 // Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
810 // frame instruction if we encounter it.
811 if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
812 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
813 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
814 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
816 ++It;
817 // Delete all instructions after the RESTORE (except labels).
818 while (It != MBB.end()) {
819 if (!It->isLabel())
820 It = MBB.erase(It);
821 else
822 ++It;
823 }
824 return;
825 }
826
827 // It is possible that the restoring code is a call to a library function.
828 // All of the restore* functions include "deallocframe", so we need to make
829 // sure that we don't add an extra one.
830 bool NeedsDeallocframe = true;
831 if (!MBB.empty() && InsertPt != MBB.begin()) {
832 MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
833 unsigned COpc = PrevIt->getOpcode();
834 if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
835 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
836 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
837 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
838 COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
839 NeedsDeallocframe = false;
840 }
841
843 !MF.getFunction().isVarArg()) {
844 if (!NeedsDeallocframe)
845 return;
846 // If the returning instruction is PS_jmpret, replace it with
847 // dealloc_return, otherwise just add deallocframe. The function
848 // could be returning via a tail call.
849 if (RetOpc != Hexagon::PS_jmpret || DisableDeallocRet) {
850 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
851 .addDef(Hexagon::D15)
852 .addReg(Hexagon::R30);
853 return;
854 }
855 unsigned NewOpc = Hexagon::L4_return;
856 MachineInstr *NewI = BuildMI(MBB, RetI, dl, HII.get(NewOpc))
857 .addDef(Hexagon::D15)
858 .addReg(Hexagon::R30);
859 // Transfer the function live-out registers.
860 NewI->copyImplicitOps(MF, *RetI);
861 MBB.erase(RetI);
862 } else {
863 // L2_deallocframe instruction after it.
864 // Calculate the size of register saved area.
865 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
866 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
867 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
868
871 : std::prev(Term);
872 if (I == MBB.end() ||
873 (I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT &&
874 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC &&
875 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 &&
876 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC))
877 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
878 .addDef(Hexagon::D15)
879 .addReg(Hexagon::R30);
880 if (RegisterSavedAreaSizePlusPadding != 0)
881 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
882 .addReg(SP)
883 .addImm(RegisterSavedAreaSizePlusPadding);
884 }
885}
886
887void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB,
888 MachineBasicBlock::iterator InsertPt, unsigned NumBytes) const {
890 auto &HST = MF.getSubtarget<HexagonSubtarget>();
891 auto &HII = *HST.getInstrInfo();
892 auto &HRI = *HST.getRegisterInfo();
893
894 // Check for overflow.
895 // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
896 const unsigned int ALLOCFRAME_MAX = 16384;
897
898 // Create a dummy memory operand to avoid allocframe from being treated as
899 // a volatile memory reference.
902
903 DebugLoc dl = MBB.findDebugLoc(InsertPt);
904 Register SP = HRI.getStackRegister();
905
906 if (NumBytes >= ALLOCFRAME_MAX) {
907 // Emit allocframe(#0).
908 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
909 .addDef(SP)
910 .addReg(SP)
911 .addImm(0)
912 .addMemOperand(MMO);
913
914 // Subtract the size from the stack pointer.
915 Register SP = HRI.getStackRegister();
916 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
917 .addReg(SP)
918 .addImm(-int(NumBytes));
919 } else {
920 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
921 .addDef(SP)
922 .addReg(SP)
923 .addImm(NumBytes)
924 .addMemOperand(MMO);
925 }
926}
927
928void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
929 MachineBasicBlock &SaveB) const {
930 SetVector<unsigned> Worklist;
931
932 MachineBasicBlock &EntryB = MF.front();
933 Worklist.insert(EntryB.getNumber());
934
935 unsigned SaveN = SaveB.getNumber();
936 auto &CSI = MF.getFrameInfo().getCalleeSavedInfo();
937
938 for (unsigned i = 0; i < Worklist.size(); ++i) {
939 unsigned BN = Worklist[i];
941 for (auto &R : CSI)
942 if (!MBB.isLiveIn(R.getReg()))
943 MBB.addLiveIn(R.getReg());
944 if (BN != SaveN)
945 for (auto &SB : MBB.successors())
946 Worklist.insert(SB->getNumber());
947 }
948}
949
/// DFS from the restore block RestoreB: mark callee-saved registers live-in
/// on every block that lies on a path from RestoreB to a return, and add
/// implicit CSR uses to reached return instructions.
/// State per block number BN:
///   Path[BN]  - block is on the current DFS stack (cycle guard),
///   DoneT[BN] - block is known to reach an exit (and got its live-ins),
///   DoneF[BN] - block is known NOT to reach an exit.
/// Returns true if an exit (return) is reachable from MBB.
bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
      MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
      BitVector &Path) const {
  assert(MBB.getNumber() >= 0);
  unsigned BN = MBB.getNumber();
  // On the current DFS path (back-edge) or already proven dead-end: no exit.
  if (Path[BN] || DoneF[BN])
    return false;
  // Already proven to reach an exit: nothing more to do.
  if (DoneT[BN])
    return true;

  auto &CSI = MBB.getParent()->getFrameInfo().getCalleeSavedInfo();

  Path[BN] = true;
  bool ReachedExit = false;
  for (auto &SB : MBB.successors())
    ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (!MBB.empty() && MBB.back().isReturn()) {
    // Add implicit uses of all callee-saved registers to the reached
    // return instructions. This is to prevent the anti-dependency breaker
    // from renaming these registers.
    MachineInstr &RetI = MBB.back();
    if (!isRestoreCall(RetI.getOpcode()))
      for (auto &R : CSI)
        RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    ReachedExit = true;
  }

  // We don't want to add unnecessary live-ins to the restore block: since
  // the callee-saved registers are being defined in it, the entry of the
  // restore block cannot be on the path from the definitions to any exit.
  if (ReachedExit && &MBB != &RestoreB) {
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    DoneT[BN] = true;
  }
  if (!ReachedExit)
    DoneF[BN] = true;

  // Pop this block off the DFS path before returning to the caller.
  Path[BN] = false;
  return ReachedExit;
}
993
// Locate the point in block B where the CFI instructions for the prolog
// should be inserted, or std::nullopt if B contains no allocframe.
// NOTE(review): the doxygen extraction dropped original line 995, which
// presumably read `findCFILocation(MachineBasicBlock &B) {` — confirm
// against the repository source.
994static std::optional<MachineBasicBlock::iterator>
996 // The CFI instructions need to be inserted right after allocframe.
997 // An exception to this is a situation where allocframe is bundled
998 // with a call: then the CFI instructions need to be inserted before
999 // the packet with the allocframe+call (in case the call throws an
1000 // exception).
1001 auto End = B.instr_end();
1002
1003 for (MachineInstr &I : B) {
1004 MachineBasicBlock::iterator It = I.getIterator();
 // Standalone (unbundled) allocframe: CFI goes right after it.
1005 if (!I.isBundle()) {
1006 if (I.getOpcode() == Hexagon::S2_allocframe)
1007 return std::next(It);
1008 continue;
1009 }
1010 // I is a bundle.
1011 bool HasCall = false, HasAllocFrame = false;
1012 auto T = It.getInstrIterator();
 // Scan the instructions inside the bundle (the BUNDLE header itself is
 // skipped by the pre-increment).
1013 while (++T != End && T->isBundled()) {
1014 if (T->getOpcode() == Hexagon::S2_allocframe)
1015 HasAllocFrame = true;
1016 else if (T->isCall())
1017 HasCall = true;
1018 }
 // allocframe bundled with a call: insert before the whole packet so the
 // CFI is in effect if the call throws; otherwise insert after it.
1019 if (HasAllocFrame)
1020 return HasCall ? It : std::next(It);
1021 }
1022 return std::nullopt;
1023}
1024
// For every block that contains an allocframe, emit the CFI instructions at
// the location chosen by findCFILocation.
// NOTE(review): the doxygen extraction dropped original line 1025, which
// presumably was the signature
// `void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {`
// — confirm against the repository source.
1026 for (auto &B : MF)
1027 if (auto At = findCFILocation(B))
1028 insertCFIInstructionsAt(B, *At);
1029}
1030
// Emit the DWARF CFI instructions describing the prolog at position At in
// MBB: the CFA definition (relative to FP when an allocframe exists), the
// save locations of LR/FP, and one cfi_offset per callee-saved register
// (double registers are split into their 32-bit halves).
1031void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
1032 MachineBasicBlock::iterator At) const {
1033 MachineFunction &MF = *MBB.getParent();
1034 MachineFrameInfo &MFI = MF.getFrameInfo();
1035 auto &HST = MF.getSubtarget<HexagonSubtarget>();
1036 auto &HII = *HST.getInstrInfo();
1037 auto &HRI = *HST.getRegisterInfo();
1038
1039 // If CFI instructions have debug information attached, something goes
1040 // wrong with the final assembly generation: the prolog_end is placed
1041 // in a wrong location.
1042 DebugLoc DL;
1043 const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
1044
1045 MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
1046 bool HasFP = hasFP(MF);
1047
1048 if (HasFP) {
1049 unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
1050 unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);
1051
1052 // Define CFA via an offset from the value of FP.
1053 //
1054 // -8 -4 0 (SP)
1055 // --+----+----+---------------------
1056 // | FP | LR | increasing addresses -->
1057 // --+----+----+---------------------
1058 // | +-- Old SP (before allocframe)
1059 // +-- New FP (after allocframe)
1060 //
1061 // MCCFIInstruction::cfiDefCfa adds the offset from the register.
1062 // MCCFIInstruction::createOffset takes the offset without sign change.
 // CFA = FP + 8, i.e. the SP value before the allocframe.
1063 auto DefCfa = MCCFIInstruction::cfiDefCfa(FrameLabel, DwFPReg, 8);
1064 BuildMI(MBB, At, DL, CFID)
1065 .addCFIIndex(MF.addFrameInst(DefCfa));
1066 // R31 (return addr) = CFA - 4
1067 auto OffR31 = MCCFIInstruction::createOffset(FrameLabel, DwRAReg, -4);
1068 BuildMI(MBB, At, DL, CFID)
1069 .addCFIIndex(MF.addFrameInst(OffR31));
1070 // R30 (frame ptr) = CFA - 8
1071 auto OffR30 = MCCFIInstruction::createOffset(FrameLabel, DwFPReg, -8);
1072 BuildMI(MBB, At, DL, CFID)
1073 .addCFIIndex(MF.addFrameInst(OffR30));
1074 }
1075
 // Registers that may need a cfi_offset record, terminated by NoRegister.
 // Only those actually present in the callee-saved info are emitted.
1076 static Register RegsToMove[] = {
1077 Hexagon::R1, Hexagon::R0, Hexagon::R3, Hexagon::R2,
1078 Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
1079 Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
1080 Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
1081 Hexagon::D0, Hexagon::D1, Hexagon::D8, Hexagon::D9,
1082 Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
1083 Hexagon::NoRegister
1084 };
1085
1086 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
1087
1088 for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
1089 Register Reg = RegsToMove[i];
1090 auto IfR = [Reg] (const CalleeSavedInfo &C) -> bool {
1091 return C.getReg() == Reg;
1092 };
1093 auto F = find_if(CSI, IfR);
 // Skip registers that are not saved by this function.
1094 if (F == CSI.end())
1095 continue;
1096
1097 int64_t Offset;
1098 if (HasFP) {
1099 // If the function has a frame pointer (i.e. has an allocframe),
1100 // then the CFA has been defined in terms of FP. Any offsets in
1101 // the following CFI instructions have to be defined relative
1102 // to FP, which points to the bottom of the stack frame.
1103 // The function getFrameIndexReference can still choose to use SP
1104 // for the offset calculation, so we cannot simply call it here.
1105 // Instead, get the offset (relative to the FP) directly.
1106 Offset = MFI.getObjectOffset(F->getFrameIdx());
1107 } else {
1108 Register FrameReg;
1109 Offset =
1110 getFrameIndexReference(MF, F->getFrameIdx(), FrameReg).getFixed();
1111 }
1112 // Subtract 8 to make room for R30 and R31, which are added above.
1113 Offset -= 8;
1114
 // 32-bit register: a single cfi_offset suffices.
1115 if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
1116 unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
1117 auto OffReg = MCCFIInstruction::createOffset(FrameLabel, DwarfReg,
1118 Offset);
1119 BuildMI(MBB, At, DL, CFID)
1120 .addCFIIndex(MF.addFrameInst(OffReg));
1121 } else {
1122 // Split the double regs into subregs, and generate appropriate
1123 // cfi_offsets.
1124 // The only reason, we are split double regs is, llvm-mc does not
1125 // understand paired registers for cfi_offset.
1126 // Eg .cfi_offset r1:0, -64
1127
1128 Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
1129 Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
1130 unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
1131 unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
 // Little-endian pair layout: low half at Offset, high half 4 above.
1132 auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg,
1133 Offset+4);
1134 BuildMI(MBB, At, DL, CFID)
1135 .addCFIIndex(MF.addFrameInst(OffHi));
1136 auto OffLo = MCCFIInstruction::createOffset(FrameLabel, LoDwarfReg,
1137 Offset);
1138 BuildMI(MBB, At, DL, CFID)
1139 .addCFIIndex(MF.addFrameInst(OffLo));
1140 }
1141 }
1142}
1143
// Decide whether this function needs a frame pointer (i.e. an allocframe).
// NOTE(review): the doxygen extraction dropped several lines from this
// function: the signature (original line 1144, presumably
// `bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {`),
// and the guard conditions on original lines 1158 and 1174 — each of the
// bare `return true;` statements below originally sat under a dropped
// `if (...)` line.  Confirm against the repository source.
1145 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
1146 return false;
1147
1148 auto &MFI = MF.getFrameInfo();
1149 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1150 bool HasExtraAlign = HRI.hasStackRealignment(MF);
1151 bool HasAlloca = MFI.hasVarSizedObjects();
1152
1153 // Insert ALLOCFRAME if we need to or at -O0 for the debugger. Think
1154 // that this shouldn't be required, but doing so now because gcc does and
1155 // gdb can't break at the start of the function without it. Will remove if
1156 // this turns out to be a gdb bug.
1157 //
 // (dropped guard — presumably the -O0 check mentioned above)
1159 return true;
1160
1161 // By default we want to use SP (since it's always there). FP requires
1162 // some setup (i.e. ALLOCFRAME).
1163 // Both, alloca and stack alignment modify the stack pointer by an
1164 // undetermined value, so we need to save it at the entry to the function
1165 // (i.e. use allocframe).
1166 if (HasAlloca || HasExtraAlign)
1167 return true;
1168
1169 if (MFI.getStackSize() > 0) {
1170 // If FP-elimination is disabled, we have to use FP at this point.
1171 const TargetMachine &TM = MF.getTarget();
1172 if (TM.Options.DisableFramePointerElim(MF) || !EliminateFramePointer)
1173 return true;
 // (dropped guard on original line 1174 — presumably the stack-overflow
 // sanitizer check)
1175 return true;
1176 }
1177
 // A call (unless allocframe elimination is allowed) or a clobbered LR
 // forces an allocframe so the return address is preserved.
1178 const auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1179 if ((MFI.hasCalls() && !enableAllocFrameElim(MF)) || HMFI.hasClobberLR())
1180 return true;
1181
1182 return false;
1183}
1184
1190
1191static const char *getSpillFunctionFor(Register MaxReg, SpillKind SpillType,
1192 bool Stkchk = false) {
1193 const char * V4SpillToMemoryFunctions[] = {
1194 "__save_r16_through_r17",
1195 "__save_r16_through_r19",
1196 "__save_r16_through_r21",
1197 "__save_r16_through_r23",
1198 "__save_r16_through_r25",
1199 "__save_r16_through_r27" };
1200
1201 const char * V4SpillToMemoryStkchkFunctions[] = {
1202 "__save_r16_through_r17_stkchk",
1203 "__save_r16_through_r19_stkchk",
1204 "__save_r16_through_r21_stkchk",
1205 "__save_r16_through_r23_stkchk",
1206 "__save_r16_through_r25_stkchk",
1207 "__save_r16_through_r27_stkchk" };
1208
1209 const char * V4SpillFromMemoryFunctions[] = {
1210 "__restore_r16_through_r17_and_deallocframe",
1211 "__restore_r16_through_r19_and_deallocframe",
1212 "__restore_r16_through_r21_and_deallocframe",
1213 "__restore_r16_through_r23_and_deallocframe",
1214 "__restore_r16_through_r25_and_deallocframe",
1215 "__restore_r16_through_r27_and_deallocframe" };
1216
1217 const char * V4SpillFromMemoryTailcallFunctions[] = {
1218 "__restore_r16_through_r17_and_deallocframe_before_tailcall",
1219 "__restore_r16_through_r19_and_deallocframe_before_tailcall",
1220 "__restore_r16_through_r21_and_deallocframe_before_tailcall",
1221 "__restore_r16_through_r23_and_deallocframe_before_tailcall",
1222 "__restore_r16_through_r25_and_deallocframe_before_tailcall",
1223 "__restore_r16_through_r27_and_deallocframe_before_tailcall"
1224 };
1225
1226 const char **SpillFunc = nullptr;
1227
1228 switch(SpillType) {
1229 case SK_ToMem:
1230 SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
1231 : V4SpillToMemoryFunctions;
1232 break;
1233 case SK_FromMem:
1234 SpillFunc = V4SpillFromMemoryFunctions;
1235 break;
1236 case SK_FromMemTailcall:
1237 SpillFunc = V4SpillFromMemoryTailcallFunctions;
1238 break;
1239 }
1240 assert(SpillFunc && "Unknown spill kind");
1241
1242 // Spill all callee-saved registers up to the highest register used.
1243 switch (MaxReg) {
1244 case Hexagon::R17:
1245 return SpillFunc[0];
1246 case Hexagon::R19:
1247 return SpillFunc[1];
1248 case Hexagon::R21:
1249 return SpillFunc[2];
1250 case Hexagon::R23:
1251 return SpillFunc[3];
1252 case Hexagon::R25:
1253 return SpillFunc[4];
1254 case Hexagon::R27:
1255 return SpillFunc[5];
1256 default:
1257 llvm_unreachable("Unhandled maximum callee save register");
1258 }
1259 return nullptr;
1260}
1261
// Compute the base register (SP, FP, or the aligned-stack base AP) and the
// fixed offset through which frame index FI should be accessed.
// NOTE(review): the doxygen extraction dropped the first signature lines
// (original 1262-1263, presumably
// `StackOffset HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,`)
// — confirm against the repository source.
1264 Register &FrameReg) const {
1265 auto &MFI = MF.getFrameInfo();
1266 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1267
1268 int Offset = MFI.getObjectOffset(FI);
1269 bool HasAlloca = MFI.hasVarSizedObjects();
1270 bool HasExtraAlign = HRI.hasStackRealignment(MF);
1271 bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOptLevel::None;
1272
1273 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1274 unsigned FrameSize = MFI.getStackSize();
1275 Register SP = HRI.getStackRegister();
1276 Register FP = HRI.getFrameRegister();
1277 Register AP = HMFI.getStackAlignBaseReg();
1278 // It may happen that AP will be absent even HasAlloca && HasExtraAlign
1279 // is true. HasExtraAlign may be set because of vector spills, without
1280 // aligned locals or aligned outgoing function arguments. Since vector
1281 // spills will ultimately be "unaligned", it is safe to use FP as the
1282 // base register.
1283 // In fact, in such a scenario the stack is actually not required to be
1284 // aligned, although it may end up being aligned anyway, since this
1285 // particular case is not easily detectable. The alignment will be
1286 // unnecessary, but not incorrect.
1287 // Unfortunately there is no quick way to verify that the above is
1288 // indeed the case (and that it's not a result of an error), so just
1289 // assume that missing AP will be replaced by FP.
1290 // (A better fix would be to rematerialize AP from FP and always align
1291 // vector spills.)
1292 bool UseFP = false, UseAP = false; // Default: use SP (except at -O0).
1293 // Use FP at -O0, except when there are objects with extra alignment.
1294 // That additional alignment requirement may cause a pad to be inserted,
1295 // which will make it impossible to use FP to access objects located
1296 // past the pad.
1297 if (NoOpt && !HasExtraAlign)
1298 UseFP = true;
1299 if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) {
1300 // Fixed and preallocated objects will be located before any padding
1301 // so FP must be used to access them.
1302 UseFP |= (HasAlloca || HasExtraAlign);
1303 } else {
 // Non-fixed objects: with an alloca the SP offset is not static, so use
 // AP when the stack is realigned, FP otherwise.
1304 if (HasAlloca) {
1305 if (HasExtraAlign)
1306 UseAP = true;
1307 else
1308 UseFP = true;
1309 }
1310 }
1311
1312 // If FP was picked, then there had better be FP.
1313 bool HasFP = hasFP(MF);
1314 assert((HasFP || !UseFP) && "This function must have frame pointer");
1315
1316 // Having FP implies allocframe. Allocframe will store extra 8 bytes:
1317 // FP/LR. If the base register is used to access an object across these
1318 // 8 bytes, then the offset will need to be adjusted by 8.
1319 //
1320 // After allocframe:
1321 // HexagonISelLowering adds 8 to ---+
1322 // the offsets of all stack-based |
1323 // arguments (*) |
1324 // |
1325 // getObjectOffset < 0 0 8 getObjectOffset >= 8
1326 // ------------------------+-----+------------------------> increasing
1327 // <local objects> |FP/LR| <input arguments> addresses
1328 // -----------------+------+-----+------------------------>
1329 // | |
1330 // SP/AP point --+ +-- FP points here (**)
1331 // somewhere on
1332 // this side of FP/LR
1333 //
1334 // (*) See LowerFormalArguments. The FP/LR is assumed to be present.
1335 // (**) *FP == old-FP. FP+0..7 are the bytes of FP/LR.
1336
1337 // The lowering assumes that FP/LR is present, and so the offsets of
1338 // the formal arguments start at 8. If FP/LR is not there we need to
1339 // reduce the offset by 8.
1340 if (Offset > 0 && !HasFP)
1341 Offset -= 8;
1342
1343 if (UseFP)
1344 FrameReg = FP;
1345 else if (UseAP)
1346 FrameReg = AP;
1347 else
1348 FrameReg = SP;
1349
1350 // Calculate the actual offset in the instruction. If there is no FP
1351 // (in other words, no allocframe), then SP will not be adjusted (i.e.
1352 // there will be no SP -= FrameSize), so the frame size should not be
1353 // added to the calculated offset.
1354 int RealOffset = Offset;
1355 if (!UseFP && !UseAP)
1356 RealOffset = FrameSize+Offset;
1357 return StackOffset::getFixed(RealOffset);
1358}
1359
// Emit the callee-saved register spills at the top of MBB: either a single
// call to a runtime save routine (when useSpillFunction decides it is
// profitable), or one storeRegToStackSlot per register.  PrologueStubs is
// set when a save-routine call was emitted.
// NOTE(review): the doxygen extraction dropped original line 1366, which
// presumably defined the insertion iterator `MI` (likely `MBB.begin()`)
// used below — confirm against the repository source.
1360bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
1361 const CSIVect &CSI, const HexagonRegisterInfo &HRI,
1362 bool &PrologueStubs) const {
1363 if (CSI.empty())
1364 return true;
1365
1367 PrologueStubs = false;
1368 MachineFunction &MF = *MBB.getParent();
1369 auto &HST = MF.getSubtarget<HexagonSubtarget>();
1370 auto &HII = *HST.getInstrInfo();
1371
1372 if (useSpillFunction(MF, CSI)) {
1373 PrologueStubs = true;
1374 Register MaxReg = getMaxCalleeSavedReg(CSI, HRI);
1375 bool StkOvrFlowEnabled = EnableStackOVFSanitizer;
1376 const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem,
1377 StkOvrFlowEnabled);
1378 auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
1379 bool IsPIC = HTM.isPositionIndependent();
1380 bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;
1381
1382 // Call spill function.
1383 DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
1384 unsigned SpillOpc;
 // Pick the pseudo-opcode variant: stack-check x long-call x PIC.
1385 if (StkOvrFlowEnabled) {
1386 if (LongCalls)
1387 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
1388 : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
1389 else
1390 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
1391 : Hexagon::SAVE_REGISTERS_CALL_V4STK;
1392 } else {
1393 if (LongCalls)
1394 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
1395 : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
1396 else
1397 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
1398 : Hexagon::SAVE_REGISTERS_CALL_V4;
1399 }
1400
1401 MachineInstr *SaveRegsCall =
1402 BuildMI(MBB, MI, DL, HII.get(SpillOpc))
1403 .addExternalSymbol(SpillFun);
1404
1405 // Add callee-saved registers as use.
1406 addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
1407 // Add live in registers.
1408 for (const CalleeSavedInfo &I : CSI)
1409 MBB.addLiveIn(I.getReg());
1410 return true;
1411 }
1412
 // No save routine: spill each register individually to its stack slot.
1413 for (const CalleeSavedInfo &I : CSI) {
1414 Register Reg = I.getReg();
1415 // Add live in registers. We treat eh_return callee saved register r0 - r3
1416 // specially. They are not really callee saved registers as they are not
1417 // supposed to be killed.
1418 bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
1419 int FI = I.getFrameIdx();
1420 const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
1421 HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());
1422 if (IsKill)
1423 MBB.addLiveIn(Reg);
1424 }
1425 return true;
1426}
1427
// Emit the callee-saved register restores in the epilog block MBB: either a
// call to a runtime restore+deallocframe routine, or one loadRegFromStackSlot
// per register.
// NOTE(review): the doxygen extraction dropped original lines 1433, 1441 and
// 1464.  1433 presumably defined the insertion iterator `MI`, 1441 the
// `SpillKind Kind` selected from `HasTC`, and 1464 the terminator iterator
// `It` used in the return-block branch — confirm against the repository
// source.
1428bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
1429 const CSIVect &CSI, const HexagonRegisterInfo &HRI) const {
1430 if (CSI.empty())
1431 return false;
1432
1434 MachineFunction &MF = *MBB.getParent();
1435 auto &HST = MF.getSubtarget<HexagonSubtarget>();
1436 auto &HII = *HST.getInstrInfo();
1437
1438 if (useRestoreFunction(MF, CSI)) {
 // A tail call (or no return at all) needs the "before_tailcall" variant
 // of the restore routine; a plain return uses the ret+jmp variant.
1439 bool HasTC = hasTailCall(MBB) || !hasReturn(MBB);
1440 Register MaxR = getMaxCalleeSavedReg(CSI, HRI);
1442 const char *RestoreFn = getSpillFunctionFor(MaxR, Kind);
1443 auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
1444 bool IsPIC = HTM.isPositionIndependent();
1445 bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;
1446
1447 // Call spill function.
1448 DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc()
1449 : MBB.findDebugLoc(MBB.end());
1450 MachineInstr *DeallocCall = nullptr;
1451
1452 if (HasTC) {
1453 unsigned RetOpc;
1454 if (LongCalls)
1455 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
1456 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
1457 else
1458 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
1459 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
1460 DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
1461 .addExternalSymbol(RestoreFn);
1462 } else {
1463 // The block has a return.
1465 assert(It->isReturn() && std::next(It) == MBB.end());
1466 unsigned RetOpc;
1467 if (LongCalls)
1468 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
1469 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
1470 else
1471 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
1472 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
1473 DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
1474 .addExternalSymbol(RestoreFn);
1475 // Transfer the function live-out registers.
1476 DeallocCall->copyImplicitOps(MF, *It);
1477 }
 // Mark the restored registers as defs so later passes see them written.
1478 addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
1479 return true;
1480 }
1481
 // No restore routine: reload each register individually.
1482 for (const CalleeSavedInfo &I : CSI) {
1483 Register Reg = I.getReg();
1484 const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
1485 int FI = I.getFrameIdx();
1486 HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());
1487 }
1488
1489 return true;
1490}
1491
// Call-frame pseudo elimination: ADJCALLSTACKDOWN/UP need no SP adjustment
// on Hexagon (outgoing arguments live in the reserved parameter area), so
// the pseudo is simply erased.
// NOTE(review): the doxygen extraction dropped the signature (original lines
// 1492-1494, presumably
// `MachineBasicBlock::iterator HexagonFrameLowering::eliminateCallFramePseudoInstr(...)`)
// — confirm against the repository source.
1495 MachineInstr &MI = *I;
1496 unsigned Opc = MI.getOpcode();
1497 (void)Opc; // Silence compiler warning.
1498 assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
1499 "Cannot handle this call frame pseudo instruction");
1500 return MBB.erase(I);
1502
// Late frame hook: when the function both reallocates the stack (alloca)
// and over-aligns it, record the physical aligned-stack base register (AP)
// so spill slots can be addressed through it / FP.
// NOTE(review): the doxygen extraction dropped the first signature line
// (original 1503, presumably
// `void HexagonFrameLowering::processFunctionBeforeFrameFinalized(`)
// — confirm against the repository source.
1504 MachineFunction &MF, RegScavenger *RS) const {
1505 // If this function has uses aligned stack and also has variable sized stack
1506 // objects, then we need to map all spill slots to fixed positions, so that
1507 // they can be accessed through FP. Otherwise they would have to be accessed
1508 // via AP, which may not be available at the particular place in the program.
1509 MachineFrameInfo &MFI = MF.getFrameInfo();
1510 bool HasAlloca = MFI.hasVarSizedObjects();
1511 bool NeedsAlign = (MFI.getMaxAlign() > getStackAlign());
1512
1513 if (!HasAlloca || !NeedsAlign)
1514 return;
1515
1516 // Set the physical aligned-stack base address register.
1517 Register AP = 0;
 // The aligna pseudo-instruction, if present, defines AP.
1518 if (const MachineInstr *AI = getAlignaInstr(MF))
1519 AP = AI->getOperand(0).getReg();
1520 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1521 assert(!AP.isValid() || AP.isPhysical());
1522 HMFI.setStackAlignBaseReg(AP);
1523}
1524
1525/// Returns true if there are no caller-saved registers available in class RC.
// NOTE(review): the doxygen extraction dropped original lines 1526 and 1528
// — presumably the first signature line
// (`static bool needToReserveScavengingSpillSlots(MachineFunction &MF,`) and
// the declaration of `MRI` (likely `MF.getRegInfo()`) used by the lambda
// below.  Confirm against the repository source.
1527 const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) {
1529
 // A register counts as used if it or any register aliasing it is used.
1530 auto IsUsed = [&HRI,&MRI] (Register Reg) -> bool {
1531 for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI)
1532 if (MRI.isPhysRegUsed(*AI))
1533 return true;
1534 return false;
1535 };
1536
1537 // Check for an unused caller-saved register. Callee-saved registers
1538 // have become pristine by now.
1539 for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P)
1540 if (!IsUsed(*P))
1541 return false;
1542
1543 // All caller-saved registers are used.
1544 return true;
1545}
1546
1547#ifndef NDEBUG
// Debug-only helper: print the registers in the bit vector as a
// brace-enclosed list, e.g. "{ r16 r17 }".
// NOTE(review): the doxygen extraction dropped the signature (original line
// 1548, presumably
// `static void dump_registers(BitVector Regs, const TargetRegisterInfo &TRI) {`)
// — confirm against the repository source.
1549 dbgs() << '{';
1550 for (int x = Regs.find_first(); x >= 0; x = Regs.find_next(x)) {
1551 Register R = x;
1552 dbgs() << ' ' << printReg(R, &TRI);
1553 }
1554 dbgs() << " }";
1555}
1556#endif
1557
// Rebuild CSI with a canonical set of callee-saved registers and assign each
// one a fixed stack slot.  The set is normalized so each saved register is
// maximal with respect to the sub-/super-register relation (e.g. D8 is saved
// as a pair instead of R16 and R17 separately) — see steps (1)-(5) below.
// NOTE(review): the doxygen extraction dropped original lines 1558 and 1659
// — presumably the first signature line
// (`bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,`)
// and a `using SpillSlot = ...;` alias used by the fixed-slot loop.  Confirm
// against the repository source.
1559 const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
1560 LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
1561 MachineFrameInfo &MFI = MF.getFrameInfo();
1562 BitVector SRegs(Hexagon::NUM_TARGET_REGS);
1563
1564 // Generate a set of unique, callee-saved registers (SRegs), where each
1565 // register in the set is maximal in terms of sub-/super-register relation,
1566 // i.e. for each R in SRegs, no proper super-register of R is also in SRegs.
1567
1568 // (1) For each callee-saved register, add that register and all of its
1569 // sub-registers to SRegs.
1570 LLVM_DEBUG(dbgs() << "Initial CS registers: {");
1571 for (const CalleeSavedInfo &I : CSI) {
1572 Register R = I.getReg();
1573 LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
1574 for (MCPhysReg SR : TRI->subregs_inclusive(R))
1575 SRegs[SR] = true;
1576 }
1577 LLVM_DEBUG(dbgs() << " }\n");
1578 LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI);
1579 dbgs() << "\n");
1580
1581 // (2) For each reserved register, remove that register and all of its
1582 // sub- and super-registers from SRegs.
1583 BitVector Reserved = TRI->getReservedRegs(MF);
1584 // Unreserve the stack align register: it is reserved for this function
1585 // only, it still needs to be saved/restored.
1586 Register AP =
1587 MF.getInfo<HexagonMachineFunctionInfo>()->getStackAlignBaseReg();
1588 if (AP.isValid()) {
1589 Reserved[AP] = false;
1590 // Unreserve super-regs if no other subregisters are reserved.
1591 for (MCPhysReg SP : TRI->superregs(AP)) {
1592 bool HasResSub = false;
1593 for (MCPhysReg SB : TRI->subregs(SP)) {
1594 if (!Reserved[SB])
1595 continue;
1596 HasResSub = true;
1597 break;
1598 }
1599 if (!HasResSub)
1600 Reserved[SP] = false;
1601 }
1602 }
1603
1604 for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
1605 Register R = x;
1606 for (MCPhysReg SR : TRI->superregs_inclusive(R))
1607 SRegs[SR] = false;
1608 }
1609 LLVM_DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI);
1610 dbgs() << "\n");
1611 LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI);
1612 dbgs() << "\n");
1613
1614 // (3) Collect all registers that have at least one sub-register in SRegs,
1615 // and also have no sub-registers that are reserved. These will be the can-
1616 // didates for saving as a whole instead of their individual sub-registers.
1617 // (Saving R17:16 instead of R16 is fine, but only if R17 was not reserved.)
1618 BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
1619 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1620 Register R = x;
1621 for (MCPhysReg SR : TRI->superregs(R))
1622 TmpSup[SR] = true;
1623 }
1624 for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
1625 Register R = x;
1626 for (MCPhysReg SR : TRI->subregs_inclusive(R)) {
1627 if (!Reserved[SR])
1628 continue;
1629 TmpSup[R] = false;
1630 break;
1631 }
1632 }
1633 LLVM_DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI);
1634 dbgs() << "\n");
1635
1636 // (4) Include all super-registers found in (3) into SRegs.
1637 SRegs |= TmpSup;
1638 LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI);
1639 dbgs() << "\n");
1640
1641 // (5) For each register R in SRegs, if any super-register of R is in SRegs,
1642 // remove R from SRegs.
1643 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1644 Register R = x;
1645 for (MCPhysReg SR : TRI->superregs(R)) {
1646 if (!SRegs[SR])
1647 continue;
1648 SRegs[R] = false;
1649 break;
1650 }
1651 }
1652 LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI);
1653 dbgs() << "\n");
1654
1655 // Now, for each register that has a fixed stack slot, create the stack
1656 // object for it.
1657 CSI.clear();
1658
1660
1661 unsigned NumFixed;
1662 int64_t MinOffset = 0; // CS offsets are negative.
1663 const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed);
1664 for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
1665 if (!SRegs[S->Reg])
1666 continue;
1667 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
1668 int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
1669 MinOffset = std::min(MinOffset, S->Offset);
1670 CSI.push_back(CalleeSavedInfo(S->Reg, FI));
1671 SRegs[S->Reg] = false;
1672 }
1673
1674 // There can be some registers that don't have fixed slots. For example,
1675 // we need to store R0-R3 in functions with exception handling. For each
1676 // such register, create a non-fixed stack object.
1677 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1678 Register R = x;
1679 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
1680 unsigned Size = TRI->getSpillSize(*RC);
1681 int64_t Off = MinOffset - Size;
1682 Align Alignment = std::min(TRI->getSpillAlign(*RC), getStackAlign());
 // Round the (negative) offset down to the required alignment.
1683 Off &= -Alignment.value();
1684 int FI = MFI.CreateFixedSpillStackObject(Size, Off);
1685 MinOffset = std::min(MinOffset, Off);
1686 CSI.push_back(CalleeSavedInfo(R, FI));
1687 SRegs[R] = false;
1688 }
1689
1690 LLVM_DEBUG({
1691 dbgs() << "CS information: {";
1692 for (const CalleeSavedInfo &I : CSI) {
1693 int FI = I.getFrameIdx();
1694 int Off = MFI.getObjectOffset(FI);
1695 dbgs() << ' ' << printReg(I.getReg(), TRI) << ":fi#" << FI << ":sp";
1696 if (Off >= 0)
1697 dbgs() << '+';
1698 dbgs() << Off;
1699 }
1700 dbgs() << " }\n";
1701 });
1702
1703#ifndef NDEBUG
1704 // Verify that all registers were handled.
1705 bool MissedReg = false;
1706 for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1707 Register R = x;
1708 dbgs() << printReg(R, TRI) << ' ';
1709 MissedReg = true;
1710 }
1711 if (MissedReg)
1712 llvm_unreachable("...there are unhandled callee-saved registers!");
1713#endif
1714
1715 return true;
1716}
1717
// Expand a COPY between two modifier (control) registers into two copies
// through a scratch integer register, since a direct CR-to-CR copy is not
// available.  Returns false (leaving the instruction alone) for any other
// register classes.
// NOTE(review): the doxygen extraction dropped original line 1719, which
// presumably declared the `It`/`MRI` parameters
// (`MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,`) — confirm
// against the repository source.
1718bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B,
1720 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1721 MachineInstr *MI = &*It;
1722 DebugLoc DL = MI->getDebugLoc();
1723 Register DstR = MI->getOperand(0).getReg();
1724 Register SrcR = MI->getOperand(1).getReg();
1725 if (!Hexagon::ModRegsRegClass.contains(DstR) ||
1726 !Hexagon::ModRegsRegClass.contains(SrcR))
1727 return false;
1728
 // SrcR -> TmpR (int reg) -> DstR; TmpR is reported via NewRegs so the
 // caller can add it to the set of registers needing spill slots.
1729 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1730 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
1731 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
1732 .addReg(TmpR, RegState::Kill);
1733
1734 NewRegs.push_back(TmpR);
1735 B.erase(It);
1736 return true;
1737}
1738
// Expand a pseudo store of a predicate/modifier register to a stack slot:
// transfer the value into a scratch integer register, then store that with a
// plain S2_storeri_io.  Returns false if the operand is not a frame index.
// NOTE(review): the doxygen extraction dropped original line 1740, which
// presumably declared the `It`/`MRI` parameters — confirm against the
// repository source.
1739bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
1741 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1742 MachineInstr *MI = &*It;
1743 if (!MI->getOperand(0).isFI())
1744 return false;
1745
1746 DebugLoc DL = MI->getDebugLoc();
1747 unsigned Opc = MI->getOpcode();
1748 Register SrcR = MI->getOperand(2).getReg();
1749 bool IsKill = MI->getOperand(2).isKill();
1750 int FI = MI->getOperand(0).getIndex();
1751
1752 // TmpR = C2_tfrpr SrcR if SrcR is a predicate register
1753 // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register
1754 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1755 unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1756 : Hexagon::A2_tfrcrr;
1757 BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
1758 .addReg(SrcR, getKillRegState(IsKill));
1759
1760 // S2_storeri_io FI, 0, TmpR
1761 BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1762 .addFrameIndex(FI)
1763 .addImm(0)
1764 .addReg(TmpR, RegState::Kill)
1765 .cloneMemRefs(*MI);
1766
1767 NewRegs.push_back(TmpR);
1768 B.erase(It);
1769 return true;
1770}
1771
// Expand a pseudo load of a predicate/modifier register from a stack slot:
// load into a scratch integer register with L2_loadri_io, then transfer it
// into the destination control register.  Mirror image of expandStoreInt.
// NOTE(review): the doxygen extraction dropped original line 1773, which
// presumably declared the `It`/`MRI` parameters — confirm against the
// repository source.
1772bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
1774 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1775 MachineInstr *MI = &*It;
1776 if (!MI->getOperand(1).isFI())
1777 return false;
1778
1779 DebugLoc DL = MI->getDebugLoc();
1780 unsigned Opc = MI->getOpcode();
1781 Register DstR = MI->getOperand(0).getReg();
1782 int FI = MI->getOperand(1).getIndex();
1783
1784 // TmpR = L2_loadri_io FI, 0
1785 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1786 BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1787 .addFrameIndex(FI)
1788 .addImm(0)
1789 .cloneMemRefs(*MI);
1790
1791 // DstR = C2_tfrrp TmpR if DstR is a predicate register
1792 // DstR = A2_tfrrcr TmpR if DstR is a modifier register
1793 unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1794 : Hexagon::A2_tfrrcr;
1795 BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
1796 .addReg(TmpR, RegState::Kill);
1797
1798 NewRegs.push_back(TmpR);
1799 B.erase(It);
1800 return true;
1801}
1802
// Expand a pseudo store of an HVX predicate (Q) register: expand it to a
// full vector via V6_vandqrt (using the 0x01010101 mask constant), then
// store that vector through the regular vector spill path.
// NOTE(review): the doxygen extraction dropped original line 1804, which
// presumably declared the `It`/`MRI` parameters — confirm against the
// repository source.
1803bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
1805 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1806 MachineInstr *MI = &*It;
1807 if (!MI->getOperand(0).isFI())
1808 return false;
1809
1810 DebugLoc DL = MI->getDebugLoc();
1811 Register SrcR = MI->getOperand(2).getReg();
1812 bool IsKill = MI->getOperand(2).isKill();
1813 int FI = MI->getOperand(0).getIndex();
1814 auto *RC = &Hexagon::HvxVRRegClass;
1815
1816 // Insert transfer to general vector register.
1817 // TmpR0 = A2_tfrsi 0x01010101
1818 // TmpR1 = V6_vandqrt Qx, TmpR0
1819 // store FI, 0, TmpR1
1820 Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1821 Register TmpR1 = MRI.createVirtualRegister(RC);
1822
1823 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1824 .addImm(0x01010101);
1825
1826 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
1827 .addReg(SrcR, getKillRegState(IsKill))
1828 .addReg(TmpR0, RegState::Kill);
1829
 // storeRegToStackSlot emits a vector-store pseudo just before It; it is
 // immediately expanded to real instructions by expandStoreVec.
1830 auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
1831 HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI, Register());
1832 expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
1833
1834 NewRegs.push_back(TmpR0);
1835 NewRegs.push_back(TmpR1);
1836 B.erase(It);
1837 return true;
1838}
1839
// Expand a pseudo load of an HVX predicate (Q) register: reload the full
// vector from the slot, then compress it back into a predicate with
// V6_vandvrt (using the 0x01010101 mask).  Mirror image of
// expandStoreVecPred.
// NOTE(review): the doxygen extraction dropped original line 1841, which
// presumably declared the `It`/`MRI` parameters — confirm against the
// repository source.
1840bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
1842 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1843 MachineInstr *MI = &*It;
1844 if (!MI->getOperand(1).isFI())
1845 return false;
1846
1847 DebugLoc DL = MI->getDebugLoc();
1848 Register DstR = MI->getOperand(0).getReg();
1849 int FI = MI->getOperand(1).getIndex();
1850 auto *RC = &Hexagon::HvxVRRegClass;
1851
1852 // TmpR0 = A2_tfrsi 0x01010101
1853 // TmpR1 = load FI, 0
1854 // DstR = V6_vandvrt TmpR1, TmpR0
1855 Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1856 Register TmpR1 = MRI.createVirtualRegister(RC);
1857
1858 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1859 .addImm(0x01010101);
 // loadRegFromStackSlot emits a vector-load pseudo just before It; it is
 // immediately expanded to real instructions by expandLoadVec.
1860 MachineFunction &MF = *B.getParent();
1861 auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1862 HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI, Register());
1863 expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
1864
1865 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
1866 .addReg(TmpR1, RegState::Kill)
1867 .addReg(TmpR0, RegState::Kill);
1868
1869 NewRegs.push_back(TmpR0);
1870 NewRegs.push_back(TmpR1);
1871 B.erase(It);
1872 return true;
1873}
1874
// Expand a pseudo store of an HVX vector pair into two single-vector stores
// (aligned or unaligned depending on the slot's alignment).  Liveness is
// tracked up to the store so that an undefined half of the pair is simply
// not stored.
// NOTE(review): the doxygen extraction dropped original lines 1876 and 1891
// — presumably the `It`/`MRI` parameter line and the declaration of the
// `Clobbers` vector used with LivePhysRegs::stepForward.  Confirm against
// the repository source.
1875bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
1877 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1878 MachineFunction &MF = *B.getParent();
1879 auto &MFI = MF.getFrameInfo();
1880 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1881 MachineInstr *MI = &*It;
1882 if (!MI->getOperand(0).isFI())
1883 return false;
1884
1885 // It is possible that the double vector being stored is only partially
1886 // defined. From the point of view of the liveness tracking, it is ok to
1887 // store it as a whole, but if we break it up we may end up storing a
1888 // register that is entirely undefined.
1889 LivePhysRegs LPR(HRI);
1890 LPR.addLiveIns(B);
 // Step liveness forward from the block entry to the store.
1892 for (auto R = B.begin(); R != It; ++R) {
1893 Clobbers.clear();
1894 LPR.stepForward(*R, Clobbers);
1895 }
1896
1897 DebugLoc DL = MI->getDebugLoc();
1898 Register SrcR = MI->getOperand(2).getReg();
1899 Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
1900 Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
1901 bool IsKill = MI->getOperand(2).isKill();
1902 int FI = MI->getOperand(0).getIndex();
1903
1904 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1905 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1906 Align HasAlign = MFI.getObjectAlign(FI);
1907 unsigned StoreOpc;
1908
1909 // Store low part.
1910 if (LPR.contains(SrcLo)) {
1911 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1912 : Hexagon::V6_vS32Ub_ai;
1913 BuildMI(B, It, DL, HII.get(StoreOpc))
1914 .addFrameIndex(FI)
1915 .addImm(0)
1916 .addReg(SrcLo, getKillRegState(IsKill))
1917 .cloneMemRefs(*MI);
1918 }
1919
1920 // Store high part.
1921 if (LPR.contains(SrcHi)) {
1922 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1923 : Hexagon::V6_vS32Ub_ai;
1924 BuildMI(B, It, DL, HII.get(StoreOpc))
1925 .addFrameIndex(FI)
1926 .addImm(Size)
1927 .addReg(SrcHi, getKillRegState(IsKill))
1928 .cloneMemRefs(*MI);
1929 }
1930
1931 B.erase(It);
1932 return true;
1933}
1934
// Expand the PS_vloadrw_ai pseudo: load a double (register-pair) HVX vector
// from a stack slot as two single-vector loads into the low and high
// subregisters (offsets 0 and Size). Returns true if expanded.
// NOTE(review): the extraction dropped original line 1936 — the It/MRI
// signature parameters.
1935bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
1937 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1938 MachineFunction &MF = *B.getParent();
1939 auto &MFI = MF.getFrameInfo();
1940 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1941 MachineInstr *MI = &*It;
1942 if (!MI->getOperand(1).isFI())
1943 return false;
1944
1945 DebugLoc DL = MI->getDebugLoc();
1946 Register DstR = MI->getOperand(0).getReg();
1947 Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1948 Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1949 int FI = MI->getOperand(1).getIndex();
1950
1951 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1952 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1953 Align HasAlign = MFI.getObjectAlign(FI);
1954 unsigned LoadOpc;
1955
// Aligned load form only when the slot alignment suffices; else unaligned.
1956 // Load low part.
1957 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1958 : Hexagon::V6_vL32Ub_ai;
1959 BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1960 .addFrameIndex(FI)
1961 .addImm(0)
1962 .cloneMemRefs(*MI);
1963
1964 // Load high part.
1965 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1966 : Hexagon::V6_vL32Ub_ai;
1967 BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1968 .addFrameIndex(FI)
1969 .addImm(Size)
1970 .cloneMemRefs(*MI);
1971
1972 B.erase(It);
1973 return true;
1974}
1975
// Expand a single-vector HVX store-to-stack pseudo into a real V6_vS32b_ai
// (aligned) or V6_vS32Ub_ai (unaligned) store. Returns true if expanded.
// NOTE(review): the extraction dropped original line 1977 — the It/MRI
// signature parameters.
1976bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
1978 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
1979 MachineFunction &MF = *B.getParent();
1980 auto &MFI = MF.getFrameInfo();
1981 MachineInstr *MI = &*It;
1982 if (!MI->getOperand(0).isFI())
1983 return false;
1984
1985 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1986 DebugLoc DL = MI->getDebugLoc();
1987 Register SrcR = MI->getOperand(2).getReg();
1988 bool IsKill = MI->getOperand(2).isKill();
1989 int FI = MI->getOperand(0).getIndex();
1990
// Pick the aligned opcode only if the slot meets the spill alignment.
1991 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1992 Align HasAlign = MFI.getObjectAlign(FI);
1993 unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1994 : Hexagon::V6_vS32Ub_ai;
1995 BuildMI(B, It, DL, HII.get(StoreOpc))
1996 .addFrameIndex(FI)
1997 .addImm(0)
1998 .addReg(SrcR, getKillRegState(IsKill))
1999 .cloneMemRefs(*MI);
2000
2001 B.erase(It);
2002 return true;
2003}
2004
// Expand a single-vector HVX load-from-stack pseudo into a real V6_vL32b_ai
// (aligned) or V6_vL32Ub_ai (unaligned) load. Returns true if expanded.
// NOTE(review): the extraction dropped original line 2006 — the It/MRI
// signature parameters.
2005bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
2007 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs) const {
2008 MachineFunction &MF = *B.getParent();
2009 auto &MFI = MF.getFrameInfo();
2010 MachineInstr *MI = &*It;
2011 if (!MI->getOperand(1).isFI())
2012 return false;
2013
2014 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
2015 DebugLoc DL = MI->getDebugLoc();
2016 Register DstR = MI->getOperand(0).getReg();
2017 int FI = MI->getOperand(1).getIndex();
2018
// Pick the aligned opcode only if the slot meets the spill alignment.
2019 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
2020 Align HasAlign = MFI.getObjectAlign(FI);
2021 unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
2022 : Hexagon::V6_vL32Ub_ai;
2023 BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
2024 .addFrameIndex(FI)
2025 .addImm(0)
2026 .cloneMemRefs(*MI);
2027
2028 B.erase(It);
2029 return true;
2030}
2031
// Walk every instruction in the function and expand spill/reload pseudos
// (and COPYs of special registers) into real instruction sequences via the
// expand* helpers above. New virtual registers created by the expansions
// are appended to NewRegs. Returns true if anything changed.
// NOTE(review): the extraction dropped original lines 2035 (the local MRI
// binding) and 2040 (the declaration of the NextI iterator).
2032bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
2033 SmallVectorImpl<Register> &NewRegs) const {
2034 auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
2036 bool Changed = false;
2037
2038 for (auto &B : MF) {
2039 // Traverse the basic block.
// NextI is captured before any expansion so erasing I stays safe.
2041 for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
2042 MachineInstr *MI = &*I;
2043 NextI = std::next(I);
2044 unsigned Opc = MI->getOpcode();
2045
2046 switch (Opc) {
2047 case TargetOpcode::COPY:
2048 Changed |= expandCopy(B, I, MRI, HII, NewRegs);
2049 break;
2050 case Hexagon::STriw_pred:
2051 case Hexagon::STriw_ctr:
2052 Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
2053 break;
2054 case Hexagon::LDriw_pred:
2055 case Hexagon::LDriw_ctr:
2056 Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
2057 break;
2058 case Hexagon::PS_vstorerq_ai:
2059 Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
2060 break;
2061 case Hexagon::PS_vloadrq_ai:
2062 Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
2063 break;
2064 case Hexagon::PS_vloadrw_ai:
2065 Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
2066 break;
2067 case Hexagon::PS_vstorerw_ai:
2068 Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
2069 break;
2070 }
2071 }
2072 }
2073
2074 return Changed;
2075}
2076
// determineCalleeSaves: decide which callee-saved registers to spill, expand
// spill pseudos, optionally optimize spill slots, and reserve scavenging
// spill slots when register scavenging might itself need to spill.
// NOTE(review): the extraction dropped several original lines here: 2077
// (the function's first signature line), 2086 (presumably the EH-return
// condition guarding the save-all loop — verify against upstream), 2091
// (the NewRegs vector declaration), 2100-2101 (MRI and SpillRCs
// declarations), 2115 (Num assignment for IntRegs), and 2130 (the call to
// the base-class TargetFrameLowering::determineCalleeSaves).
2078 BitVector &SavedRegs,
2079 RegScavenger *RS) const {
2080 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
2081
2082 SavedRegs.resize(HRI.getNumRegs());
2083
2084 // If we have a function containing __builtin_eh_return we want to spill and
2085 // restore all callee saved registers. Pretend that they are used.
2087 for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
2088 SavedRegs.set(*R);
2089
2090 // Replace predicate register pseudo spill code.
2092 expandSpillMacros(MF, NewRegs);
2093 if (OptimizeSpillSlots && !isOptNone(MF))
2094 optimizeSpillSlots(MF, NewRegs);
2095
2096 // We need to reserve a spill slot if scavenging could potentially require
2097 // spilling a scavenged register.
2098 if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
2099 MachineFrameInfo &MFI = MF.getFrameInfo();
2102 // Reserve an int register in any case, because it could be used to hold
2103 // the stack offset in case it does not fit into a spill instruction.
2104 SpillRCs.insert(&Hexagon::IntRegsRegClass);
2105
// Collect the register class of every virtual register the expansions made.
2106 for (Register VR : NewRegs)
2107 SpillRCs.insert(MRI.getRegClass(VR));
2108
2109 for (const auto *RC : SpillRCs) {
2110 if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
2111 continue;
2112 unsigned Num = 1;
2113 switch (RC->getID()) {
2114 case Hexagon::IntRegsRegClassID:
2116 break;
2117 case Hexagon::HvxQRRegClassID:
2118 Num = 2; // Vector predicate spills also need a vector register.
2119 break;
2120 }
// Create Num scavenging slots with this class's spill size/alignment.
2121 unsigned S = HRI.getSpillSize(*RC);
2122 Align A = HRI.getSpillAlign(*RC);
2123 for (unsigned i = 0; i < Num; i++) {
2124 int NewFI = MFI.CreateSpillStackObject(S, A);
2125 RS->addScavengingFrameIndex(NewFI);
2126 }
2127 }
2128 }
2129
2131}
2132
// findPhysReg: find a physical register from class RC that is dead (all of
// its subregisters are dead) across the given frame-index live range FIR,
// according to the per-register dead-range map DeadMap. Returns 0 when no
// such register exists.
// NOTE(review): the extraction dropped original lines 2134-2136 — the
// FIR/IndexMap/DeadMap parameters of the signature.
2133Register HexagonFrameLowering::findPhysReg(MachineFunction &MF,
2137 const TargetRegisterClass *RC) const {
2138 auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
2139 auto &MRI = MF.getRegInfo();
2140
// A register is dead for our purposes if one of its recorded dead ranges
// fully contains the range FIR we want to reuse it over.
2141 auto isDead = [&FIR,&DeadMap] (Register Reg) -> bool {
2142 auto F = DeadMap.find({Reg,0});
2143 if (F == DeadMap.end())
2144 return false;
2145 for (auto &DR : F->second)
2146 if (DR.contains(FIR))
2147 return true;
2148 return false;
2149 };
2150
// Scan the allocation order; require every subregister to be dead.
2151 for (Register Reg : RC->getRawAllocationOrder(MF)) {
2152 bool Dead = true;
2153 for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) {
2154 if (isDead(R.Reg))
2155 continue;
2156 Dead = false;
2157 break;
2158 }
2159 if (Dead)
2160 return Reg;
2161 }
2162 return 0;
2163}
2164
// optimizeSpillSlots: replace store/load pairs through a stack slot with a
// copy through a physical register that is dead over the slot's live range.
// Phase 1 scans all blocks collecting, per frame index, the store-to-load
// ranges plus the common register class and access size; slots with mixed
// classes/sizes, non-base+imm addressing, volatile/atomic accesses, or
// non-zero offsets are marked "bad" and skipped. Phase 2 picks a dead
// physical register for each range and rewrites the store into a copy-in
// and each load into a copy-out (or a sign/zero extend for extending
// loads), erasing the memory accesses.
// NOTE(review): the extraction dropped original lines 2388 (the IM binding
// for the block's instruction-index map), 2392 (part of a debug print),
// 2410 (the SrcRR RegisterRef declaration), and 2421 (the SpillOptCount
// limit check guarding the early return).
2165void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
2166 SmallVectorImpl<Register> &VRegs) const {
2167 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2168 auto &HII = *HST.getInstrInfo();
2169 auto &HRI = *HST.getRegisterInfo();
2170 auto &MRI = MF.getRegInfo();
2171 HexagonBlockRanges HBR(MF);
2172
2173 using BlockIndexMap =
2174 std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
2175 using BlockRangeMap =
2176 std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
2177 using IndexType = HexagonBlockRanges::IndexType;
2178
// Per-frame-index accumulator: ranges per block, access size, reg class.
2179 struct SlotInfo {
2180 BlockRangeMap Map;
2181 unsigned Size = 0;
2182 const TargetRegisterClass *RC = nullptr;
2183
2184 SlotInfo() = default;
2185 };
2186
2187 BlockIndexMap BlockIndexes;
2188 SmallSet<int,4> BadFIs;
2189 std::map<int,SlotInfo> FIRangeMap;
2190
2191 // Accumulate register classes: get a common class for a pre-existing
2192 // class HaveRC and a new class NewRC. Return nullptr if a common class
2193 // cannot be found, otherwise return the resulting class. If HaveRC is
2194 // nullptr, assume that it is still unset.
2195 auto getCommonRC =
2196 [](const TargetRegisterClass *HaveRC,
2197 const TargetRegisterClass *NewRC) -> const TargetRegisterClass * {
2198 if (HaveRC == nullptr || HaveRC == NewRC)
2199 return NewRC;
2200 // Different classes, both non-null. Pick the more general one.
2201 if (HaveRC->hasSubClassEq(NewRC))
2202 return HaveRC;
2203 if (NewRC->hasSubClassEq(HaveRC))
2204 return NewRC;
2205 return nullptr;
2206 };
2207
2208 // Scan all blocks in the function. Check all occurrences of frame indexes,
2209 // and collect relevant information.
2210 for (auto &B : MF) {
2211 std::map<int,IndexType> LastStore, LastLoad;
2212 // Emplace appears not to be supported in gcc 4.7.2-4.
2213 //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B));
2214 auto P = BlockIndexes.insert(
2215 std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
2216 auto &IndexMap = P.first->second;
2217 LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
2218 << IndexMap << '\n');
2219
2220 for (auto &In : B) {
2221 int LFI, SFI;
2222 bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2223 bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2224 if (Load && Store) {
2225 // If it's both a load and a store, then we won't handle it.
2226 BadFIs.insert(LFI);
2227 BadFIs.insert(SFI);
2228 continue;
2229 }
2230 // Check for register classes of the register used as the source for
2231 // the store, and the register used as the destination for the load.
2232 // Also, only accept base+imm_offset addressing modes. Other addressing
2233 // modes can have side-effects (post-increments, etc.). For stack
2234 // slots they are very unlikely, so there is not much loss due to
2235 // this restriction.
2236 if (Load || Store) {
2237 int TFI = Load ? LFI : SFI;
2238 unsigned AM = HII.getAddrMode(In);
2239 SlotInfo &SI = FIRangeMap[TFI];
2240 bool Bad = (AM != HexagonII::BaseImmOffset);
2241 if (!Bad) {
2242 // If the addressing mode is ok, check the register class.
2243 unsigned OpNum = Load ? 0 : 2;
2244 auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
2245 RC = getCommonRC(SI.RC, RC);
2246 if (RC == nullptr)
2247 Bad = true;
2248 else
2249 SI.RC = RC;
2250 }
2251 if (!Bad) {
2252 // Check sizes.
2253 unsigned S = HII.getMemAccessSize(In);
2254 if (SI.Size != 0 && SI.Size != S)
2255 Bad = true;
2256 else
2257 SI.Size = S;
2258 }
2259 if (!Bad) {
// Volatile or atomic accesses must not be eliminated.
2260 for (auto *Mo : In.memoperands()) {
2261 if (!Mo->isVolatile() && !Mo->isAtomic())
2262 continue;
2263 Bad = true;
2264 break;
2265 }
2266 }
2267 if (Bad)
2268 BadFIs.insert(TFI);
2269 }
2270
2271 // Locate uses of frame indices.
2272 for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
2273 const MachineOperand &Op = In.getOperand(i);
2274 if (!Op.isFI())
2275 continue;
2276 int FI = Op.getIndex();
2277 // Make sure that the following operand is an immediate and that
2278 // it is 0. This is the offset in the stack object.
2279 if (i+1 >= n || !In.getOperand(i+1).isImm() ||
2280 In.getOperand(i+1).getImm() != 0)
2281 BadFIs.insert(FI);
2282 if (BadFIs.count(FI))
2283 continue;
2284
// Build store-to-last-load ranges per FI within this block.
2285 IndexType Index = IndexMap.getIndex(&In);
2286 if (Load) {
2287 if (LastStore[FI] == IndexType::None)
2288 LastStore[FI] = IndexType::Entry;
2289 LastLoad[FI] = Index;
2290 } else if (Store) {
2291 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2292 if (LastStore[FI] != IndexType::None)
2293 RL.add(LastStore[FI], LastLoad[FI], false, false);
2294 else if (LastLoad[FI] != IndexType::None)
2295 RL.add(IndexType::Entry, LastLoad[FI], false, false);
2296 LastLoad[FI] = IndexType::None;
2297 LastStore[FI] = Index;
2298 } else {
2299 BadFIs.insert(FI);
2300 }
2301 }
2302 }
2303
// Close out any ranges still open at the end of the block.
2304 for (auto &I : LastLoad) {
2305 IndexType LL = I.second;
2306 if (LL == IndexType::None)
2307 continue;
2308 auto &RL = FIRangeMap[I.first].Map[&B];
2309 IndexType &LS = LastStore[I.first];
2310 if (LS != IndexType::None)
2311 RL.add(LS, LL, false, false);
2312 else
2313 RL.add(IndexType::Entry, LL, false, false);
2314 LS = IndexType::None;
2315 }
2316 for (auto &I : LastStore) {
2317 IndexType LS = I.second;
2318 if (LS == IndexType::None)
2319 continue;
2320 auto &RL = FIRangeMap[I.first].Map[&B];
2321 RL.add(LS, IndexType::None, false, false);
2322 }
2323 }
2324
2325 LLVM_DEBUG({
2326 for (auto &P : FIRangeMap) {
2327 dbgs() << "fi#" << P.first;
2328 if (BadFIs.count(P.first))
2329 dbgs() << " (bad)";
2330 dbgs() << " RC: ";
2331 if (P.second.RC != nullptr)
2332 dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
2333 else
2334 dbgs() << "<null>\n";
2335 for (auto &R : P.second.Map)
2336 dbgs() << " " << printMBBReference(*R.first) << " { " << R.second
2337 << "}\n";
2338 }
2339 });
2340
2341 // When a slot is loaded from in a block without being stored to in the
2342 // same block, it is live-on-entry to this block. To avoid CFG analysis,
2343 // consider this slot to be live-on-exit from all blocks.
2344 SmallSet<int,4> LoxFIs;
2345
2346 std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2347
2348 for (auto &P : FIRangeMap) {
2349 // P = pair(FI, map: BB->RangeList)
2350 if (BadFIs.count(P.first))
2351 continue;
2352 for (auto &B : MF) {
2353 auto F = P.second.Map.find(&B);
2354 // F = pair(BB, RangeList)
2355 if (F == P.second.Map.end() || F->second.empty())
2356 continue;
2357 HexagonBlockRanges::IndexRange &IR = F->second.front();
2358 if (IR.start() == IndexType::Entry)
2359 LoxFIs.insert(P.first);
2360 BlockFIMap[&B].push_back(P.first);
2361 }
2362 }
2363
2364 LLVM_DEBUG({
2365 dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
2366 for (auto &P : BlockFIMap) {
2367 auto &FIs = P.second;
2368 if (FIs.empty())
2369 continue;
2370 dbgs() << " " << printMBBReference(*P.first) << ": {";
2371 for (auto I : FIs) {
2372 dbgs() << " fi#" << I;
2373 if (LoxFIs.count(I))
2374 dbgs() << '*';
2375 }
2376 dbgs() << " }\n";
2377 }
2378 });
2379
2380#ifndef NDEBUG
2381 bool HasOptLimit = SpillOptMax.getPosition();
2382#endif
2383
2384 // eliminate loads, when all loads eliminated, eliminate all stores.
2385 for (auto &B : MF) {
2386 auto F = BlockIndexes.find(&B);
2387 assert(F != BlockIndexes.end());
2389 HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
2390 HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
2391 LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
2393
2394 for (auto FI : BlockFIMap[&B]) {
2395 if (BadFIs.count(FI))
2396 continue;
2397 LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');
2398 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2399 for (auto &Range : RL) {
2400 LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');
// Only ranges bounded by actual instructions (a store and a load).
2401 if (!IndexType::isInstr(Range.start()) ||
2402 !IndexType::isInstr(Range.end()))
2403 continue;
2404 MachineInstr &SI = *IM.getInstr(Range.start());
2405 MachineInstr &EI = *IM.getInstr(Range.end());
2406 assert(SI.mayStore() && "Unexpected start instruction");
2407 assert(EI.mayLoad() && "Unexpected end instruction");
2408 MachineOperand &SrcOp = SI.getOperand(2);
2409
2411 SrcOp.getSubReg() };
2412 auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
2413 // The this-> is needed to unconfuse MSVC.
2414 Register FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
2415 LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
2416 << '\n');
2417 if (FoundR == 0)
2418 continue;
2419#ifndef NDEBUG
2420 if (HasOptLimit) {
2422 return;
2423 SpillOptCount++;
2424 }
2425#endif
2426
2427 // Generate the copy-in: "FoundR = COPY SrcR" at the store location.
2428 MachineBasicBlock::iterator StartIt = SI.getIterator(), NextIt;
2429 MachineInstr *CopyIn = nullptr;
2430 if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
2431 const DebugLoc &DL = SI.getDebugLoc();
2432 CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
2433 .add(SrcOp);
2434 }
2435
2436 ++StartIt;
2437 // Check if this is a last store and the FI is live-on-exit.
2438 if (LoxFIs.count(FI) && (&Range == &RL.back())) {
2439 // Update store's source register.
2440 if (unsigned SR = SrcOp.getSubReg())
2441 SrcOp.setReg(HRI.getSubReg(FoundR, SR));
2442 else
2443 SrcOp.setReg(FoundR);
2444 SrcOp.setSubReg(0);
2445 // We are keeping this register live.
2446 SrcOp.setIsKill(false);
2447 } else {
2448 B.erase(&SI);
2449 IM.replaceInstr(&SI, CopyIn);
2450 }
2451
// Replace every load of this FI in the range with a copy (or extend).
2452 auto EndIt = std::next(EI.getIterator());
2453 for (auto It = StartIt; It != EndIt; It = NextIt) {
2454 MachineInstr &MI = *It;
2455 NextIt = std::next(It);
2456 int TFI;
2457 if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
2458 continue;
2459 Register DstR = MI.getOperand(0).getReg();
2460 assert(MI.getOperand(0).getSubReg() == 0);
2461 MachineInstr *CopyOut = nullptr;
2462 if (DstR != FoundR) {
2463 DebugLoc DL = MI.getDebugLoc();
2464 unsigned MemSize = HII.getMemAccessSize(MI);
2465 assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
2466 unsigned CopyOpc = TargetOpcode::COPY;
// An extending load must be replaced by the matching extend op.
2467 if (HII.isSignExtendingLoad(MI))
2468 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2469 else if (HII.isZeroExtendingLoad(MI))
2470 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2471 CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
2472 .addReg(FoundR, getKillRegState(&MI == &EI));
2473 }
2474 IM.replaceInstr(&MI, CopyOut);
2475 B.erase(It);
2476 }
2477
2478 // Update the dead map.
2479 HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
2480 for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI))
2481 DM[RR].subtract(Range);
2482 } // for Range in range list
2483 }
2484 }
2485}
2486
// expandAlloca: lower the "Rd = alloca Rs, #A" pseudo AI into explicit
// stack-pointer arithmetic. SP is the stack-pointer register (r29), A is
// the requested alignment (operand 2), and CF is the size of the fixed
// part of the frame that must remain below the returned pointer. The
// emission order of the instructions below is significant.
2487void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2488 const HexagonInstrInfo &HII, Register SP, unsigned CF) const {
2489 MachineBasicBlock &MB = *AI->getParent();
2490 DebugLoc DL = AI->getDebugLoc();
2491 unsigned A = AI->getOperand(2).getImm();
2492
2493 // Have
2494 // Rd = alloca Rs, #A
2495 //
2496 // If Rs and Rd are different registers, use this sequence:
2497 // Rd = sub(r29, Rs)
2498 // r29 = sub(r29, Rs)
2499 // Rd = and(Rd, #-A) ; if necessary
2500 // r29 = and(r29, #-A) ; if necessary
2501 // Rd = add(Rd, #CF) ; CF size aligned to at most A
2502 // otherwise, do
2503 // Rd = sub(r29, Rs)
2504 // Rd = and(Rd, #-A) ; if necessary
2505 // r29 = Rd
2506 // Rd = add(Rd, #CF) ; CF size aligned to at most A
2507
2508 MachineOperand &RdOp = AI->getOperand(0);
2509 MachineOperand &RsOp = AI->getOperand(1);
2510 Register Rd = RdOp.getReg(), Rs = RsOp.getReg();
2511
2512 // Rd = sub(r29, Rs)
2513 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
2514 .addReg(SP)
2515 .addReg(Rs);
2516 if (Rs != Rd) {
2517 // r29 = sub(r29, Rs)
2518 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
2519 .addReg(SP)
2520 .addReg(Rs);
2521 }
// Alignments of at most 8 are guaranteed by the ABI stack alignment, so
// the masking step is only needed for larger requests.
2522 if (A > 8) {
2523 // Rd = and(Rd, #-A)
2524 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
2525 .addReg(Rd)
2526 .addImm(-int64_t(A));
2527 if (Rs != Rd)
2528 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
2529 .addReg(SP)
2530 .addImm(-int64_t(A));
2531 }
2532 if (Rs == Rd) {
2533 // r29 = Rd
2534 BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
2535 .addReg(Rd);
2536 }
2537 if (CF > 0) {
2538 // Rd = add(Rd, #CF)
2539 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
2540 .addReg(Rd)
2541 .addImm(CF);
2542 }
2543}
2544
// Returns true when the function will need a PS_aligna instruction, i.e.
// whenever it has variable-sized (alloca) stack objects.
// NOTE(review): the extraction dropped original line 2545 — the function's
// signature. From the body and comment this is presumably
// HexagonFrameLowering::needsAligna(const MachineFunction &MF) — verify
// against the upstream source.
2546 const MachineFrameInfo &MFI = MF.getFrameInfo();
2547 if (!MFI.hasVarSizedObjects())
2548 return false;
2549 // Do not check for max stack object alignment here, because the stack
2550 // may not be complete yet. Assume that we will need PS_aligna if there
2551 // are variable-sized objects.
2552 return true;
2553}
2554
// getAlignaInstr: return the first PS_aligna instruction in the function,
// or nullptr if there is none.
// NOTE(review): the extraction dropped original line 2555 — the first line
// of the signature (the return type and function name).
2556 const MachineFunction &MF) const {
2557 for (auto &B : MF)
2558 for (auto &I : B)
2559 if (I.getOpcode() == Hexagon::PS_aligna)
2560 return &I;
2561 return nullptr;
2562}
2563
2564/// Adds all callee-saved registers as implicit uses or defs to the
2565/// instruction.
2566void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
2567 const CSIVect &CSI, bool IsDef, bool IsKill) const {
2568 // Add the callee-saved registers as implicit uses.
2569 for (auto &R : CSI)
2570 MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill));
2571}
2572
2573/// Determine whether the callee-saved register saves and restores should
2574/// be generated via inline code. If this function returns "true", inline
2575/// code will be generated. If this function returns "false", additional
2576/// checks are performed, which may still lead to the inline code.
// NOTE(review): the extraction dropped original lines 2579, 2581 and 2586 —
// the conditions guarding the first two "return true" statements and the
// inner condition under the !isOptSize/!isMinSize check. Verify against the
// upstream source before relying on the control flow shown here.
2577bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
2578 const CSIVect &CSI) const {
2580 return true;
2582 return true;
2583 if (!hasFP(MF))
2584 return true;
2585 if (!isOptSize(MF) && !isMinSize(MF))
2587 return true;
2588
2589 // Check if CSI only has double registers, and if the registers form
2590 // a contiguous block starting from D8.
2591 BitVector Regs(Hexagon::NUM_TARGET_REGS);
2592 for (const CalleeSavedInfo &I : CSI) {
2593 Register R = I.getReg();
// Any non-double register forces inline save/restore code.
2594 if (!Hexagon::DoubleRegsRegClass.contains(R))
2595 return true;
2596 Regs[R] = true;
2597 }
2598 int F = Regs.find_first();
2599 if (F != Hexagon::D8)
2600 return true;
// Walk the set bits; any gap in the D8,D9,... sequence means inline.
2601 while (F >= 0) {
2602 int N = Regs.find_next(F);
2603 if (N >= 0 && N != F+1)
2604 return true;
2605 F = N;
2606 }
2607
2608 return false;
2609}
2610
// useSpillFunction: decide whether to save callee-saved registers by
// calling a runtime spill function instead of inline stores. Only done
// when inlining is not forced and the register count exceeds the
// size-dependent threshold.
// NOTE(review): the extraction dropped original line 2620 — the ':' arm of
// the conditional (presumably SpillFuncThreshold for non-Os builds).
2611bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,
2612 const CSIVect &CSI) const {
2613 if (shouldInlineCSR(MF, CSI))
2614 return false;
2615 unsigned NumCSI = CSI.size();
2616 if (NumCSI <= 1)
2617 return false;
2618
2619 unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs
2621 return Threshold < NumCSI;
2622}
2623
// useRestoreFunction: decide whether to restore callee-saved registers via
// a runtime restore function. More aggressive than useSpillFunction at
// -Oz, since the restore stubs also handle the return/tail-call epilogue.
// NOTE(review): the extraction dropped original line 2641 — the ':' arm of
// the conditional (presumably SpillFuncThreshold for non-Os builds).
2624bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
2625 const CSIVect &CSI) const {
2626 if (shouldInlineCSR(MF, CSI))
2627 return false;
2628 // The restore functions do a bit more than just restoring registers.
2629 // The non-returning versions will go back directly to the caller's
2630 // caller, others will clean up the stack frame in preparation for
2631 // a tail call. Using them can still save code size even if only one
2632 // register is getting restores. Make the decision based on -Oz:
2633 // using -Os will use inline restore for a single register.
2634 if (isMinSize(MF))
2635 return true;
2636 unsigned NumCSI = CSI.size();
2637 if (NumCSI <= 1)
2638 return false;
2639
2640 unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs-1
2642 return Threshold < NumCSI;
2643}
2644
2645bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
2646 unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF);
2647 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2648 // A fairly simplistic guess as to whether a potential load/store to a
2649 // stack location could require an extra register.
2650 if (HST.useHVXOps() && StackSize > 256)
2651 return true;
2652
2653 // Check if the function has store-immediate instructions that access
2654 // the stack. Since the offset field is not extendable, if the stack
2655 // size exceeds the offset limit (6 bits, shifted), the stores will
2656 // require a new base register.
2657 bool HasImmStack = false;
2658 unsigned MinLS = ~0u; // Log_2 of the memory access size.
2659
2660 for (const MachineBasicBlock &B : MF) {
2661 for (const MachineInstr &MI : B) {
2662 unsigned LS = 0;
2663 switch (MI.getOpcode()) {
2664 case Hexagon::S4_storeirit_io:
2665 case Hexagon::S4_storeirif_io:
2666 case Hexagon::S4_storeiri_io:
2667 ++LS;
2668 [[fallthrough]];
2669 case Hexagon::S4_storeirht_io:
2670 case Hexagon::S4_storeirhf_io:
2671 case Hexagon::S4_storeirh_io:
2672 ++LS;
2673 [[fallthrough]];
2674 case Hexagon::S4_storeirbt_io:
2675 case Hexagon::S4_storeirbf_io:
2676 case Hexagon::S4_storeirb_io:
2677 if (MI.getOperand(0).isFI())
2678 HasImmStack = true;
2679 MinLS = std::min(MinLS, LS);
2680 break;
2681 }
2682 }
2683 }
2684
2685 if (HasImmStack)
2686 return !isUInt<6>(StackSize >> MinLS);
2687
2688 return false;
2689}
2690
2691namespace {
2692// Struct used by orderFrameObjects to help sort the stack objects.
2693struct HexagonFrameSortingObject {
2694 bool IsValid = false;
2695 unsigned Index = 0; // Index of Object into MFI list.
2696 unsigned Size = 0;
2697 Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
2698};
2699
2700struct HexagonFrameSortingComparator {
2701 inline bool operator()(const HexagonFrameSortingObject &A,
2702 const HexagonFrameSortingObject &B) const {
2703 return std::make_tuple(!A.IsValid, A.ObjectAlignment, A.Size) <
2704 std::make_tuple(!B.IsValid, B.ObjectAlignment, B.Size);
2705 }
2706};
2707} // namespace
2708
2709// Sort objects on the stack by alignment value and then by size to minimize
2710// padding.
// NOTE(review): the extraction dropped original lines 2711 (the first line
// of the signature: return type and function name, presumably
// HexagonFrameLowering::orderFrameObjects) and 2721 (the declaration of the
// SortingObjects container sized by MFI.getObjectIndexEnd()).
2712 const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
2713
2714 if (ObjectsToAllocate.empty())
2715 return;
2716
2717 const MachineFrameInfo &MFI = MF.getFrameInfo();
2718 int NObjects = ObjectsToAllocate.size();
2719
2720 // Create an array of all MFI objects.
2722 MFI.getObjectIndexEnd());
2723
// Populate entries only for the indices present in ObjectsToAllocate
// (which is assumed to be sorted; j tracks progress through it).
2724 for (int i = 0, j = 0, e = MFI.getObjectIndexEnd(); i < e && j != NObjects;
2725 ++i) {
2726 if (i != ObjectsToAllocate[j])
2727 continue;
2728 j++;
2729
2730 // A variable size object has size equal to 0. Since Hexagon sets
2731 // getUseLocalStackAllocationBlock() to true, a local block is allocated
2732 // earlier. This case is not handled here for now.
2733 int Size = MFI.getObjectSize(i);
2734 if (Size == 0)
2735 return;
2736
2737 SortingObjects[i].IsValid = true;
2738 SortingObjects[i].Index = i;
2739 SortingObjects[i].Size = Size;
2740 SortingObjects[i].ObjectAlignment = MFI.getObjectAlign(i);
2741 }
2742
2743 // Sort objects by alignment and then by size.
2744 llvm::stable_sort(SortingObjects, HexagonFrameSortingComparator());
2745
2746 // Modify the original list to represent the final order.
// Write the sorted (valid) entries back in reverse into ObjectsToAllocate.
2747 int i = NObjects;
2748 for (auto &Obj : SortingObjects) {
2749 if (i == 0)
2750 break;
2751 ObjectsToAllocate[--i] = Obj.Index;
2752 }
2753}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")
This file defines the DenseMap class.
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static MachineInstr * getReturn(MachineBasicBlock &MBB)
Returns the "return" instruction from this block, or nullptr if there isn't any.
static cl::opt< unsigned > ShrinkLimit("shrink-frame-limit", cl::init(std::numeric_limits< unsigned >::max()), cl::Hidden, cl::desc("Max count of stack frame shrink-wraps"))
static bool isOptNone(const MachineFunction &MF)
static cl::opt< int > SpillFuncThreshold("spill-func-threshold", cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6))
static std::optional< MachineBasicBlock::iterator > findCFILocation(MachineBasicBlock &B)
static cl::opt< bool > EliminateFramePointer("hexagon-fp-elim", cl::init(true), cl::Hidden, cl::desc("Refrain from using FP whenever possible"))
@ SK_FromMemTailcall
static bool enableAllocFrameElim(const MachineFunction &MF)
static const char * getSpillFunctionFor(Register MaxReg, SpillKind SpillType, bool Stkchk=false)
static bool hasReturn(const MachineBasicBlock &MBB)
Returns true if MBB contains an instruction that returns.
static cl::opt< bool > EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), cl::init(false))
static bool needToReserveScavengingSpillSlots(MachineFunction &MF, const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC)
Returns true if there are no caller-saved registers available in class RC.
static bool isOptSize(const MachineFunction &MF)
static Register getMax32BitSubRegister(Register Reg, const TargetRegisterInfo &TRI, bool hireg=true)
Map a register pair Reg to the subregister that has the greater "number", i.e.
static cl::opt< int > SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden, cl::desc("Specify Os spill func threshold"), cl::init(1))
static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, const HexagonRegisterInfo &HRI)
Checks if the basic block contains any instruction that needs a stack frame to be already in place.
static cl::opt< bool > DisableDeallocRet("disable-hexagon-dealloc-ret", cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"))
static cl::opt< bool > EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"))
static bool hasTailCall(const MachineBasicBlock &MBB)
Returns true if MBB has a machine instructions that indicates a tail call in the block.
static cl::opt< unsigned > NumberScavengerSlots("number-scavenger-slots", cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2))
static Register getMaxCalleeSavedReg(ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo &TRI)
Returns the callee saved register with the largest id in the vector.
static bool isMinSize(const MachineFunction &MF)
static cl::opt< unsigned > SpillOptMax("spill-opt-max", cl::Hidden, cl::init(std::numeric_limits< unsigned >::max()))
static unsigned SpillOptCount
static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI)
static bool isRestoreCall(unsigned Opc)
static cl::opt< bool > OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, cl::init(true), cl::desc("Optimize spill slots"))
static cl::opt< bool > EnableStackOVFSanitizer("enable-stackovf-sanitizer", cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), cl::init(false))
IRTranslator LLVM IR MI
Legalize the Machine IR a function's Machine IR
Definition: Legalizer.cpp:81
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallSet class.
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
int find_first() const
find_first - Returns the index of the first set bit, -1 if none of the bits are set.
Definition: BitVector.h:300
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
BitVector & set()
Definition: BitVector.h:351
int find_next(unsigned Prev) const
find_next - Returns the index of the next set bit following the "Prev" bit.
Definition: BitVector.h:308
The CalleeSavedInfo class tracks the information needed to locate where a callee saved register is in t...
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:702
bool hasOptNone() const
Do not optimize this function (-O0).
Definition: Function.h:699
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:232
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:743
void replaceInstr(MachineInstr *OldMI, MachineInstr *NewMI)
IndexType getIndex(MachineInstr *MI) const
MachineInstr * getInstr(IndexType Idx) const
void add(IndexType Start, IndexType End, bool Fixed, bool TiedEnd)
const MachineInstr * getAlignaInstr(const MachineFunction &MF) const
void insertCFIInstructions(MachineFunction &MF) const
bool enableCalleeSaveSkip(const MachineFunction &MF) const override
Returns true if the target can safely skip saving callee-saved registers for noreturn nounwind functi...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
Perform most of the PEI work here:
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS=nullptr) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
const SpillSlot * getCalleeSavedSpillSlots(unsigned &NumEntries) const override
getCalleeSavedSpillSlots - This method returns a pointer to an array of pairs, that contains an entry...
bool needsAligna(const MachineFunction &MF) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
Hexagon target-specific information for each MachineFunction.
bool isEHReturnCalleeSaveReg(Register Reg) const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const MCPhysReg * getCallerSavedRegs(const MachineFunction *MF, const TargetRegisterClass *RC) const
const HexagonInstrInfo * getInstrInfo() const override
bool isEnvironmentMusl() const
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:52
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
Definition: MCDwarf.h:558
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
Definition: MCDwarf.h:600
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Definition: MCContext.cpp:346
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
MCRegAliasIterator enumerates all registers aliasing Reg.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< iterator > terminators()
iterator_range< succ_iterator > successors()
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
MachineBasicBlock * findNearestCommonDominator(MachineBasicBlock *A, MachineBasicBlock *B)
findNearestCommonDominator - Find nearest common dominator basic block for basic block A and B.
void calculate(MachineFunction &F)
bool dominates(const MachineDomTreeNode *A, const MachineDomTreeNode *B) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
bool isObjectPreAllocated(int ObjectIdx) const
Return true if the object was pre-allocated into the local block.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool hasCalls() const
Return true if the current function has any function calls.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID's allocated.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:940
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:498
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...
MachineBasicBlock * findNearestCommonDominator(ArrayRef< MachineBasicBlock * > Blocks) const
Returns the nearest common dominator of the given blocks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
A vector that has set insertion semantics.
Definition: SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Register getReg() const
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
int64_t getFixed() const
Returns the fixed component of the stack.
Definition: TypeSize.h:49
static StackOffset getFixed(int64_t Fixed)
Definition: TypeSize.h:42
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
bool isPositionIndependent() const
unsigned getID() const
Return the register class ID number.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
ArrayRef< MCPhysReg > getRawAllocationOrder(const MachineFunction &MF) const
Returns the preferred order for allocating registers from this register class in MF.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Dead
Unused definition.
@ Kill
The last use of a register.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
constexpr double e
Definition: MathExtras.h:47
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
void stable_sort(R &&Range)
Definition: STLExtras.h:2020
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
FunctionPass * createHexagonCallFrameInformation()
void initializeHexagonCallFrameInformationPass(PassRegistry &)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
static RegisterSet expandToSubRegs(RegisterRef R, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI)
std::map< RegisterRef, RangeList > RegToRangeMap
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.