LLVM  8.0.0svn
HexagonFrameLowering.cpp
Go to the documentation of this file.
1 //===- HexagonFrameLowering.cpp - Define frame lowering -------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //
9 //===----------------------------------------------------------------------===//
10 
11 #include "HexagonFrameLowering.h"
12 #include "HexagonBlockRanges.h"
13 #include "HexagonInstrInfo.h"
15 #include "HexagonRegisterInfo.h"
16 #include "HexagonSubtarget.h"
17 #include "HexagonTargetMachine.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/Optional.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
42 #include "llvm/IR/Attributes.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/MC/MCDwarf.h"
46 #include "llvm/MC/MCRegisterInfo.h"
47 #include "llvm/Pass.h"
48 #include "llvm/Support/CodeGen.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/Debug.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstdint>
60 #include <iterator>
61 #include <limits>
62 #include <map>
63 #include <utility>
64 #include <vector>
65 
66 #define DEBUG_TYPE "hexagon-pei"
67 
68 // Hexagon stack frame layout as defined by the ABI:
69 //
70 // Incoming arguments
71 // passed via stack
72 // |
73 // |
74 // SP during function's FP during function's |
75 // +-- runtime (top of stack) runtime (bottom) --+ |
76 // | | |
77 // --++---------------------+------------------+-----------------++-+-------
78 // | parameter area for | variable-size | fixed-size |LR| arg
79 // | called functions | local objects | local objects |FP|
80 // --+----------------------+------------------+-----------------+--+-------
81 // <- size known -> <- size unknown -> <- size known ->
82 //
83 // Low address High address
84 //
85 // <--- stack growth
86 //
87 //
88 // - In any circumstances, the outgoing function arguments are always accessi-
89 // ble using the SP, and the incoming arguments are accessible using the FP.
90 // - If the local objects are not aligned, they can always be accessed using
91 // the FP.
92 // - If there are no variable-sized objects, the local objects can always be
93 // accessed using the SP, regardless whether they are aligned or not. (The
94 // alignment padding will be at the bottom of the stack (highest address),
95 // and so the offset with respect to the SP will be known at the compile-
96 // -time.)
97 //
98 // The only complication occurs if there are both, local aligned objects, and
99 // dynamically allocated (variable-sized) objects. The alignment pad will be
100 // placed between the FP and the local objects, thus preventing the use of the
101 // FP to access the local objects. At the same time, the variable-sized objects
102 // will be between the SP and the local objects, thus introducing an unknown
103 // distance from the SP to the locals.
104 //
105 // To avoid this problem, a new register is created that holds the aligned
106 // address of the bottom of the stack, referred in the sources as AP (aligned
107 // pointer). The AP will be equal to "FP-p", where "p" is the smallest pad
108 // that aligns AP to the required boundary (a maximum of the alignments of
109 // all stack objects, fixed- and variable-sized). All local objects[1] will
110 // then use AP as the base pointer.
111 // [1] The exception is with "fixed" stack objects. "Fixed" stack objects get
112 // their name from being allocated at fixed locations on the stack, relative
113 // to the FP. In the presence of dynamic allocation and local alignment, such
114 // objects can only be accessed through the FP.
115 //
116 // Illustration of the AP:
117 // FP --+
118 // |
119 // ---------------+---------------------+-----+-----------------------++-+--
120 // Rest of the | Local stack objects | Pad | Fixed stack objects |LR|
121 // stack frame | (aligned) | | (CSR, spills, etc.) |FP|
122 // ---------------+---------------------+-----+-----------------+-----+--+--
123 // |<-- Multiple of the -->|
124 // stack alignment +-- AP
125 //
126 // The AP is set up at the beginning of the function. Since it is not a dedi-
127 // cated (reserved) register, it needs to be kept live throughout the function
128 // to be available as the base register for local object accesses.
129 // Normally, an address of a stack objects is obtained by a pseudo-instruction
130 // PS_fi. To access local objects with the AP register present, a different
131 // pseudo-instruction needs to be used: PS_fia. The PS_fia takes one extra
132 // argument compared to PS_fi: the first input register is the AP register.
133 // This keeps the register live between its definition and its uses.
134 
135 // The AP register is originally set up using pseudo-instruction PS_aligna:
136 // AP = PS_aligna A
137 // where
138 // A - required stack alignment
139 // The alignment value must be the maximum of all alignments required by
140 // any stack object.
141 
142 // The dynamic allocation uses a pseudo-instruction PS_alloca:
143 // Rd = PS_alloca Rs, A
144 // where
145 // Rd - address of the allocated space
146 // Rs - minimum size (the actual allocated can be larger to accommodate
147 // alignment)
148 // A - required alignment
149 
150 using namespace llvm;
151 
152 static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
153  cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));
154 
155 static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
156  cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2),
158 
159 static cl::opt<int> SpillFuncThreshold("spill-func-threshold",
160  cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"),
162 
163 static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os",
164  cl::Hidden, cl::desc("Specify Os spill func threshold"),
166 
167 static cl::opt<bool> EnableStackOVFSanitizer("enable-stackovf-sanitizer",
168  cl::Hidden, cl::desc("Enable runtime checks for stack overflow."),
169  cl::init(false), cl::ZeroOrMore);
170 
171 static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
173  cl::desc("Enable stack frame shrink wrapping"));
174 
175 static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
177  cl::desc("Max count of stack frame shrink-wraps"));
178 
179 static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
180  cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
181  cl::init(false), cl::ZeroOrMore);
182 
183 static cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true),
184  cl::Hidden, cl::desc("Refrain from using FP whenever possible"));
185 
186 static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
187  cl::init(true), cl::desc("Optimize spill slots"));
188 
189 #ifndef NDEBUG
190 static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
192 static unsigned SpillOptCount = 0;
193 #endif
194 
195 namespace llvm {
196 
199 
200 } // end namespace llvm
201 
202 namespace {
203 
204  class HexagonCallFrameInformation : public MachineFunctionPass {
205  public:
206  static char ID;
207 
208  HexagonCallFrameInformation() : MachineFunctionPass(ID) {
211  }
212 
213  bool runOnMachineFunction(MachineFunction &MF) override;
214 
215  MachineFunctionProperties getRequiredProperties() const override {
218  }
219  };
220 
222 
223 } // end anonymous namespace
224 
225 bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
226  auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
227  bool NeedCFI = MF.getMMI().hasDebugInfo() ||
229 
230  if (!NeedCFI)
231  return false;
232  HFI.insertCFIInstructions(MF);
233  return true;
234 }
235 
236 INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi",
237  "Hexagon call frame information", false, false)
238 
240  return new HexagonCallFrameInformation();
241 }
242 
243 /// Map a register pair Reg to the subregister that has the greater "number",
244 /// i.e. D3 (aka R7:6) will be mapped to R7, etc.
245 static unsigned getMax32BitSubRegister(unsigned Reg,
246  const TargetRegisterInfo &TRI,
247  bool hireg = true) {
248  if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
249  return Reg;
250 
251  unsigned RegNo = 0;
252  for (MCSubRegIterator SubRegs(Reg, &TRI); SubRegs.isValid(); ++SubRegs) {
253  if (hireg) {
254  if (*SubRegs > RegNo)
255  RegNo = *SubRegs;
256  } else {
257  if (!RegNo || *SubRegs < RegNo)
258  RegNo = *SubRegs;
259  }
260  }
261  return RegNo;
262 }
263 
264 /// Returns the callee saved register with the largest id in the vector.
265 static unsigned getMaxCalleeSavedReg(const std::vector<CalleeSavedInfo> &CSI,
266  const TargetRegisterInfo &TRI) {
267  static_assert(Hexagon::R1 > 0,
268  "Assume physical registers are encoded as positive integers");
269  if (CSI.empty())
270  return 0;
271 
272  unsigned Max = getMax32BitSubRegister(CSI[0].getReg(), TRI);
273  for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
274  unsigned Reg = getMax32BitSubRegister(CSI[I].getReg(), TRI);
275  if (Reg > Max)
276  Max = Reg;
277  }
278  return Max;
279 }
280 
281 /// Checks if the basic block contains any instruction that needs a stack
282 /// frame to be already in place.
283 static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
284  const HexagonRegisterInfo &HRI) {
285  for (auto &I : MBB) {
286  const MachineInstr *MI = &I;
287  if (MI->isCall())
288  return true;
289  unsigned Opc = MI->getOpcode();
290  switch (Opc) {
291  case Hexagon::PS_alloca:
292  case Hexagon::PS_aligna:
293  return true;
294  default:
295  break;
296  }
297  // Check individual operands.
298  for (const MachineOperand &MO : MI->operands()) {
299  // While the presence of a frame index does not prove that a stack
300  // frame will be required, all frame indexes should be within alloc-
301  // frame/deallocframe. Otherwise, the code that translates a frame
302  // index into an offset would have to be aware of the placement of
303  // the frame creation/destruction instructions.
304  if (MO.isFI())
305  return true;
306  if (MO.isReg()) {
307  unsigned R = MO.getReg();
308  // Virtual registers will need scavenging, which then may require
309  // a stack slot.
311  return true;
312  for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S)
313  if (CSR[*S])
314  return true;
315  continue;
316  }
317  if (MO.isRegMask()) {
318  // A regmask would normally have all callee-saved registers marked
319  // as preserved, so this check would not be needed, but in case of
320  // ever having other regmasks (for other calling conventions),
321  // make sure they would be processed correctly.
322  const uint32_t *BM = MO.getRegMask();
323  for (int x = CSR.find_first(); x >= 0; x = CSR.find_next(x)) {
324  unsigned R = x;
325  // If this regmask does not preserve a CSR, a frame will be needed.
326  if (!(BM[R/32] & (1u << (R%32))))
327  return true;
328  }
329  }
330  }
331  }
332  return false;
333 }
334 
335  /// Returns true if MBB has a machine instructions that indicates a tail call
336  /// in the block.
337 static bool hasTailCall(const MachineBasicBlock &MBB) {
339  if (I == MBB.end())
340  return false;
341  unsigned RetOpc = I->getOpcode();
342  return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
343 }
344 
345 /// Returns true if MBB contains an instruction that returns.
346 static bool hasReturn(const MachineBasicBlock &MBB) {
347  for (auto I = MBB.getFirstTerminator(), E = MBB.end(); I != E; ++I)
348  if (I->isReturn())
349  return true;
350  return false;
351 }
352 
353 /// Returns the "return" instruction from this block, or nullptr if there
354 /// isn't any.
356  for (auto &I : MBB)
357  if (I.isReturn())
358  return &I;
359  return nullptr;
360 }
361 
362 static bool isRestoreCall(unsigned Opc) {
363  switch (Opc) {
364  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
365  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
366  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
367  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
368  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
369  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
370  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
371  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
372  return true;
373  }
374  return false;
375 }
376 
377 static inline bool isOptNone(const MachineFunction &MF) {
378  return MF.getFunction().hasFnAttribute(Attribute::OptimizeNone) ||
380 }
381 
382 static inline bool isOptSize(const MachineFunction &MF) {
383  const Function &F = MF.getFunction();
384  return F.optForSize() && !F.optForMinSize();
385 }
386 
387 static inline bool isMinSize(const MachineFunction &MF) {
388  return MF.getFunction().optForMinSize();
389 }
390 
391 /// Implements shrink-wrapping of the stack frame. By default, stack frame
392 /// is created in the function entry block, and is cleaned up in every block
393 /// that returns. This function finds alternate blocks: one for the frame
394 /// setup (prolog) and one for the cleanup (epilog).
395 void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
396  MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB) const {
397  static unsigned ShrinkCounter = 0;
398 
399  if (ShrinkLimit.getPosition()) {
400  if (ShrinkCounter >= ShrinkLimit)
401  return;
402  ShrinkCounter++;
403  }
404 
405  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
406 
408  MDT.runOnMachineFunction(MF);
410  MPT.runOnMachineFunction(MF);
411 
412  using UnsignedMap = DenseMap<unsigned, unsigned>;
414 
415  UnsignedMap RPO;
416  RPOTType RPOT(&MF);
417  unsigned RPON = 0;
418  for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
419  RPO[(*I)->getNumber()] = RPON++;
420 
421  // Don't process functions that have loops, at least for now. Placement
422  // of prolog and epilog must take loop structure into account. For simpli-
423  // city don't do it right now.
424  for (auto &I : MF) {
425  unsigned BN = RPO[I.getNumber()];
426  for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) {
427  // If found a back-edge, return.
428  if (RPO[(*SI)->getNumber()] <= BN)
429  return;
430  }
431  }
432 
433  // Collect the set of blocks that need a stack frame to execute. Scan
434  // each block for uses/defs of callee-saved registers, calls, etc.
436  BitVector CSR(Hexagon::NUM_TARGET_REGS);
437  for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
438  for (MCSubRegIterator S(*P, &HRI, true); S.isValid(); ++S)
439  CSR[*S] = true;
440 
441  for (auto &I : MF)
442  if (needsStackFrame(I, CSR, HRI))
443  SFBlocks.push_back(&I);
444 
445  LLVM_DEBUG({
446  dbgs() << "Blocks needing SF: {";
447  for (auto &B : SFBlocks)
448  dbgs() << " " << printMBBReference(*B);
449  dbgs() << " }\n";
450  });
451  // No frame needed?
452  if (SFBlocks.empty())
453  return;
454 
455  // Pick a common dominator and a common post-dominator.
456  MachineBasicBlock *DomB = SFBlocks[0];
457  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
458  DomB = MDT.findNearestCommonDominator(DomB, SFBlocks[i]);
459  if (!DomB)
460  break;
461  }
462  MachineBasicBlock *PDomB = SFBlocks[0];
463  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
464  PDomB = MPT.findNearestCommonDominator(PDomB, SFBlocks[i]);
465  if (!PDomB)
466  break;
467  }
468  LLVM_DEBUG({
469  dbgs() << "Computed dom block: ";
470  if (DomB)
471  dbgs() << printMBBReference(*DomB);
472  else
473  dbgs() << "<null>";
474  dbgs() << ", computed pdom block: ";
475  if (PDomB)
476  dbgs() << printMBBReference(*PDomB);
477  else
478  dbgs() << "<null>";
479  dbgs() << "\n";
480  });
481  if (!DomB || !PDomB)
482  return;
483 
484  // Make sure that DomB dominates PDomB and PDomB post-dominates DomB.
485  if (!MDT.dominates(DomB, PDomB)) {
486  LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
487  return;
488  }
489  if (!MPT.dominates(PDomB, DomB)) {
490  LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
491  return;
492  }
493 
494  // Finally, everything seems right.
495  PrologB = DomB;
496  EpilogB = PDomB;
497 }
498 
499 /// Perform most of the PEI work here:
500 /// - saving/restoring of the callee-saved registers,
501 /// - stack frame creation and destruction.
502 /// Normally, this work is distributed among various functions, but doing it
503 /// in one place allows shrink-wrapping of the stack frame.
505  MachineBasicBlock &MBB) const {
506  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
507 
508  MachineFrameInfo &MFI = MF.getFrameInfo();
509  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
510 
511  MachineBasicBlock *PrologB = &MF.front(), *EpilogB = nullptr;
513  findShrunkPrologEpilog(MF, PrologB, EpilogB);
514 
515  bool PrologueStubs = false;
516  insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
517  insertPrologueInBlock(*PrologB, PrologueStubs);
518  updateEntryPaths(MF, *PrologB);
519 
520  if (EpilogB) {
521  insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
522  insertEpilogueInBlock(*EpilogB);
523  } else {
524  for (auto &B : MF)
525  if (B.isReturnBlock())
526  insertCSRRestoresInBlock(B, CSI, HRI);
527 
528  for (auto &B : MF)
529  if (B.isReturnBlock())
530  insertEpilogueInBlock(B);
531 
532  for (auto &B : MF) {
533  if (B.empty())
534  continue;
535  MachineInstr *RetI = getReturn(B);
536  if (!RetI || isRestoreCall(RetI->getOpcode()))
537  continue;
538  for (auto &R : CSI)
539  RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
540  }
541  }
542 
543  if (EpilogB) {
544  // If there is an epilog block, it may not have a return instruction.
545  // In such case, we need to add the callee-saved registers as live-ins
546  // in all blocks on all paths from the epilog to any return block.
547  unsigned MaxBN = MF.getNumBlockIDs();
548  BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
549  updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
550  }
551 }
552 
553 /// Returns true if the target can safely skip saving callee-saved registers
554 /// for noreturn nounwind functions.
556  const MachineFunction &MF) const {
557  const auto &F = MF.getFunction();
558  assert(F.hasFnAttribute(Attribute::NoReturn) &&
559  F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
560  !F.getFunction().hasFnAttribute(Attribute::UWTable));
561  (void)F;
562 
563  // No need to save callee saved registers if the function does not return.
564  return MF.getSubtarget<HexagonSubtarget>().noreturnStackElim();
565 }
566 
567 // Helper function used to determine when to eliminate the stack frame for
568 // functions marked as noreturn and when the noreturn-stack-elim options are
569 // specified. When both these conditions are true, then a FP may not be needed
570 // if the function makes a call. It is very similar to enableCalleeSaveSkip,
571 // but it used to check if the allocframe can be eliminated as well.
572 static bool enableAllocFrameElim(const MachineFunction &MF) {
573  const auto &F = MF.getFunction();
574  const auto &MFI = MF.getFrameInfo();
575  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
576  assert(!MFI.hasVarSizedObjects() &&
577  !HST.getRegisterInfo()->needsStackRealignment(MF));
578  return F.hasFnAttribute(Attribute::NoReturn) &&
579  F.hasFnAttribute(Attribute::NoUnwind) &&
580  !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
581  MFI.getStackSize() == 0;
582 }
583 
584 void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
585  bool PrologueStubs) const {
586  MachineFunction &MF = *MBB.getParent();
587  MachineFrameInfo &MFI = MF.getFrameInfo();
588  auto &HST = MF.getSubtarget<HexagonSubtarget>();
589  auto &HII = *HST.getInstrInfo();
590  auto &HRI = *HST.getRegisterInfo();
591 
592  unsigned MaxAlign = std::max(MFI.getMaxAlignment(), getStackAlignment());
593 
594  // Calculate the total stack frame size.
595  // Get the number of bytes to allocate from the FrameInfo.
596  unsigned FrameSize = MFI.getStackSize();
597  // Round up the max call frame size to the max alignment on the stack.
598  unsigned MaxCFA = alignTo(MFI.getMaxCallFrameSize(), MaxAlign);
599  MFI.setMaxCallFrameSize(MaxCFA);
600 
601  FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
602  MFI.setStackSize(FrameSize);
603 
604  bool AlignStack = (MaxAlign > getStackAlignment());
605 
606  // Get the number of bytes to allocate from the FrameInfo.
607  unsigned NumBytes = MFI.getStackSize();
608  unsigned SP = HRI.getStackRegister();
609  unsigned MaxCF = MFI.getMaxCallFrameSize();
610  MachineBasicBlock::iterator InsertPt = MBB.begin();
611 
613  for (auto &MBB : MF)
614  for (auto &MI : MBB)
615  if (MI.getOpcode() == Hexagon::PS_alloca)
616  AdjustRegs.push_back(&MI);
617 
618  for (auto MI : AdjustRegs) {
619  assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
620  expandAlloca(MI, HII, SP, MaxCF);
621  MI->eraseFromParent();
622  }
623 
624  DebugLoc dl = MBB.findDebugLoc(InsertPt);
625 
626  if (hasFP(MF)) {
627  insertAllocframe(MBB, InsertPt, NumBytes);
628  if (AlignStack) {
629  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
630  .addReg(SP)
631  .addImm(-int64_t(MaxAlign));
632  }
633  // If the stack-checking is enabled, and we spilled the callee-saved
634  // registers inline (i.e. did not use a spill function), then call
635  // the stack checker directly.
636  if (EnableStackOVFSanitizer && !PrologueStubs)
637  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
638  .addExternalSymbol("__runtime_stack_check");
639  } else if (NumBytes > 0) {
640  assert(alignTo(NumBytes, 8) == NumBytes);
641  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
642  .addReg(SP)
643  .addImm(-int(NumBytes));
644  }
645 }
646 
647 void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
648  MachineFunction &MF = *MBB.getParent();
649  auto &HST = MF.getSubtarget<HexagonSubtarget>();
650  auto &HII = *HST.getInstrInfo();
651  auto &HRI = *HST.getRegisterInfo();
652  unsigned SP = HRI.getStackRegister();
653 
655  DebugLoc dl = MBB.findDebugLoc(InsertPt);
656 
657  if (!hasFP(MF)) {
658  MachineFrameInfo &MFI = MF.getFrameInfo();
659  if (unsigned NumBytes = MFI.getStackSize()) {
660  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
661  .addReg(SP)
662  .addImm(NumBytes);
663  }
664  return;
665  }
666 
667  MachineInstr *RetI = getReturn(MBB);
668  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;
669 
670  // Handle EH_RETURN.
671  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
672  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
673  .addDef(Hexagon::D15)
674  .addReg(Hexagon::R30);
675  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
676  .addReg(SP)
677  .addReg(Hexagon::R28);
678  return;
679  }
680 
681  // Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
682  // frame instruction if we encounter it.
683  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
684  RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
685  RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
686  RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
687  MachineBasicBlock::iterator It = RetI;
688  ++It;
689  // Delete all instructions after the RESTORE (except labels).
690  while (It != MBB.end()) {
691  if (!It->isLabel())
692  It = MBB.erase(It);
693  else
694  ++It;
695  }
696  return;
697  }
698 
699  // It is possible that the restoring code is a call to a library function.
700  // All of the restore* functions include "deallocframe", so we need to make
701  // sure that we don't add an extra one.
702  bool NeedsDeallocframe = true;
703  if (!MBB.empty() && InsertPt != MBB.begin()) {
704  MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
705  unsigned COpc = PrevIt->getOpcode();
706  if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
707  COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
708  COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
709  COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
710  COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
711  NeedsDeallocframe = false;
712  }
713 
714  if (!NeedsDeallocframe)
715  return;
716  // If the returning instruction is PS_jmpret, replace it with dealloc_return,
717  // otherwise just add deallocframe. The function could be returning via a
718  // tail call.
719  if (RetOpc != Hexagon::PS_jmpret || DisableDeallocRet) {
720  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
721  .addDef(Hexagon::D15)
722  .addReg(Hexagon::R30);
723  return;
724  }
725  unsigned NewOpc = Hexagon::L4_return;
726  MachineInstr *NewI = BuildMI(MBB, RetI, dl, HII.get(NewOpc))
727  .addDef(Hexagon::D15)
728  .addReg(Hexagon::R30);
729  // Transfer the function live-out registers.
730  NewI->copyImplicitOps(MF, *RetI);
731  MBB.erase(RetI);
732 }
733 
734 void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB,
735  MachineBasicBlock::iterator InsertPt, unsigned NumBytes) const {
736  MachineFunction &MF = *MBB.getParent();
737  auto &HST = MF.getSubtarget<HexagonSubtarget>();
738  auto &HII = *HST.getInstrInfo();
739  auto &HRI = *HST.getRegisterInfo();
740 
741  // Check for overflow.
742  // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
743  const unsigned int ALLOCFRAME_MAX = 16384;
744 
745  // Create a dummy memory operand to avoid allocframe from being treated as
746  // a volatile memory reference.
749 
750  DebugLoc dl = MBB.findDebugLoc(InsertPt);
751  unsigned SP = HRI.getStackRegister();
752 
753  if (NumBytes >= ALLOCFRAME_MAX) {
754  // Emit allocframe(#0).
755  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
756  .addDef(SP)
757  .addReg(SP)
758  .addImm(0)
759  .addMemOperand(MMO);
760 
761  // Subtract the size from the stack pointer.
762  unsigned SP = HRI.getStackRegister();
763  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
764  .addReg(SP)
765  .addImm(-int(NumBytes));
766  } else {
767  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
768  .addDef(SP)
769  .addReg(SP)
770  .addImm(NumBytes)
771  .addMemOperand(MMO);
772  }
773 }
774 
775 void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
776  MachineBasicBlock &SaveB) const {
777  SetVector<unsigned> Worklist;
778 
779  MachineBasicBlock &EntryB = MF.front();
780  Worklist.insert(EntryB.getNumber());
781 
782  unsigned SaveN = SaveB.getNumber();
783  auto &CSI = MF.getFrameInfo().getCalleeSavedInfo();
784 
785  for (unsigned i = 0; i < Worklist.size(); ++i) {
786  unsigned BN = Worklist[i];
787  MachineBasicBlock &MBB = *MF.getBlockNumbered(BN);
788  for (auto &R : CSI)
789  if (!MBB.isLiveIn(R.getReg()))
790  MBB.addLiveIn(R.getReg());
791  if (BN != SaveN)
792  for (auto &SB : MBB.successors())
793  Worklist.insert(SB->getNumber());
794  }
795 }
796 
797 bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
798  MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
799  BitVector &Path) const {
800  assert(MBB.getNumber() >= 0);
801  unsigned BN = MBB.getNumber();
802  if (Path[BN] || DoneF[BN])
803  return false;
804  if (DoneT[BN])
805  return true;
806 
807  auto &CSI = MBB.getParent()->getFrameInfo().getCalleeSavedInfo();
808 
809  Path[BN] = true;
810  bool ReachedExit = false;
811  for (auto &SB : MBB.successors())
812  ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);
813 
814  if (!MBB.empty() && MBB.back().isReturn()) {
815  // Add implicit uses of all callee-saved registers to the reached
816  // return instructions. This is to prevent the anti-dependency breaker
817  // from renaming these registers.
818  MachineInstr &RetI = MBB.back();
819  if (!isRestoreCall(RetI.getOpcode()))
820  for (auto &R : CSI)
821  RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
822  ReachedExit = true;
823  }
824 
825  // We don't want to add unnecessary live-ins to the restore block: since
826  // the callee-saved registers are being defined in it, the entry of the
827  // restore block cannot be on the path from the definitions to any exit.
828  if (ReachedExit && &MBB != &RestoreB) {
829  for (auto &R : CSI)
830  if (!MBB.isLiveIn(R.getReg()))
831  MBB.addLiveIn(R.getReg());
832  DoneT[BN] = true;
833  }
834  if (!ReachedExit)
835  DoneF[BN] = true;
836 
837  Path[BN] = false;
838  return ReachedExit;
839 }
840 
843  // The CFI instructions need to be inserted right after allocframe.
844  // An exception to this is a situation where allocframe is bundled
845  // with a call: then the CFI instructions need to be inserted before
846  // the packet with the allocframe+call (in case the call throws an
847  // exception).
848  auto End = B.instr_end();
849 
850  for (MachineInstr &I : B) {
851  MachineBasicBlock::iterator It = I.getIterator();
852  if (!I.isBundle()) {
853  if (I.getOpcode() == Hexagon::S2_allocframe)
854  return std::next(It);
855  continue;
856  }
857  // I is a bundle.
858  bool HasCall = false, HasAllocFrame = false;
859  auto T = It.getInstrIterator();
860  while (++T != End && T->isBundled()) {
861  if (T->getOpcode() == Hexagon::S2_allocframe)
862  HasAllocFrame = true;
863  else if (T->isCall())
864  HasCall = true;
865  }
866  if (HasAllocFrame)
867  return HasCall ? It : std::next(It);
868  }
869  return None;
870 }
871 
873  for (auto &B : MF) {
874  auto At = findCFILocation(B);
875  if (At.hasValue())
876  insertCFIInstructionsAt(B, At.getValue());
877  }
878 }
879 
880 void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
881  MachineBasicBlock::iterator At) const {
882  MachineFunction &MF = *MBB.getParent();
883  MachineFrameInfo &MFI = MF.getFrameInfo();
884  MachineModuleInfo &MMI = MF.getMMI();
885  auto &HST = MF.getSubtarget<HexagonSubtarget>();
886  auto &HII = *HST.getInstrInfo();
887  auto &HRI = *HST.getRegisterInfo();
888 
889  // If CFI instructions have debug information attached, something goes
890  // wrong with the final assembly generation: the prolog_end is placed
891  // in a wrong location.
892  DebugLoc DL;
893  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
894 
895  MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
896  bool HasFP = hasFP(MF);
897 
898  if (HasFP) {
899  unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
900  unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);
901 
902  // Define CFA via an offset from the value of FP.
903  //
904  // -8 -4 0 (SP)
905  // --+----+----+---------------------
906  // | FP | LR | increasing addresses -->
907  // --+----+----+---------------------
908  // | +-- Old SP (before allocframe)
909  // +-- New FP (after allocframe)
910  //
911  // MCCFIInstruction::createDefCfa subtracts the offset from the register.
912  // MCCFIInstruction::createOffset takes the offset without sign change.
913  auto DefCfa = MCCFIInstruction::createDefCfa(FrameLabel, DwFPReg, -8);
914  BuildMI(MBB, At, DL, CFID)
915  .addCFIIndex(MF.addFrameInst(DefCfa));
916  // R31 (return addr) = CFA - 4
917  auto OffR31 = MCCFIInstruction::createOffset(FrameLabel, DwRAReg, -4);
918  BuildMI(MBB, At, DL, CFID)
919  .addCFIIndex(MF.addFrameInst(OffR31));
920  // R30 (frame ptr) = CFA - 8
921  auto OffR30 = MCCFIInstruction::createOffset(FrameLabel, DwFPReg, -8);
922  BuildMI(MBB, At, DL, CFID)
923  .addCFIIndex(MF.addFrameInst(OffR30));
924  }
925 
926  static unsigned int RegsToMove[] = {
927  Hexagon::R1, Hexagon::R0, Hexagon::R3, Hexagon::R2,
928  Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
929  Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
930  Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
931  Hexagon::D0, Hexagon::D1, Hexagon::D8, Hexagon::D9,
932  Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
933  Hexagon::NoRegister
934  };
935 
936  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
937 
938  for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
939  unsigned Reg = RegsToMove[i];
940  auto IfR = [Reg] (const CalleeSavedInfo &C) -> bool {
941  return C.getReg() == Reg;
942  };
943  auto F = find_if(CSI, IfR);
944  if (F == CSI.end())
945  continue;
946 
947  int64_t Offset;
948  if (HasFP) {
949  // If the function has a frame pointer (i.e. has an allocframe),
950  // then the CFA has been defined in terms of FP. Any offsets in
951  // the following CFI instructions have to be defined relative
952  // to FP, which points to the bottom of the stack frame.
953  // The function getFrameIndexReference can still choose to use SP
954  // for the offset calculation, so we cannot simply call it here.
955  // Instead, get the offset (relative to the FP) directly.
956  Offset = MFI.getObjectOffset(F->getFrameIdx());
957  } else {
958  unsigned FrameReg;
959  Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
960  }
961  // Subtract 8 to make room for R30 and R31, which are added above.
962  Offset -= 8;
963 
964  if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
965  unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
966  auto OffReg = MCCFIInstruction::createOffset(FrameLabel, DwarfReg,
967  Offset);
968  BuildMI(MBB, At, DL, CFID)
969  .addCFIIndex(MF.addFrameInst(OffReg));
970  } else {
971  // Split the double regs into subregs, and generate appropriate
972  // cfi_offsets.
973  // The only reason, we are split double regs is, llvm-mc does not
974  // understand paired registers for cfi_offset.
975  // Eg .cfi_offset r1:0, -64
976 
977  unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
978  unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
979  unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
980  unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
981  auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg,
982  Offset+4);
983  BuildMI(MBB, At, DL, CFID)
984  .addCFIIndex(MF.addFrameInst(OffHi));
985  auto OffLo = MCCFIInstruction::createOffset(FrameLabel, LoDwarfReg,
986  Offset);
987  BuildMI(MBB, At, DL, CFID)
988  .addCFIIndex(MF.addFrameInst(OffLo));
989  }
990  }
991 }
992 
// hasFP: returns true when this function must set up a frame pointer, i.e.
// must emit an allocframe instruction in the prologue.
// NOTE(review): the signature line (orig. 993) was stripped from this dump.
// A naked function manages its own frame: never claim an FP for it.
994  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
995  return false;
996 
997  auto &MFI = MF.getFrameInfo();
998  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
999  bool HasExtraAlign = HRI.needsStackRealignment(MF);
1000  bool HasAlloca = MFI.hasVarSizedObjects();
1001 
1002  // Insert ALLOCFRAME if we need to or at -O0 for the debugger. Think
1003  // that this shouldn't be required, but doing so now because gcc does and
1004  // gdb can't break at the start of the function without it. Will remove if
1005  // this turns out to be a gdb bug.
1006  //
1007  if (MF.getTarget().getOptLevel() == CodeGenOpt::None)
1008  return true;
1009 
1010  // By default we want to use SP (since it's always there). FP requires
1011  // some setup (i.e. ALLOCFRAME).
1012  // Both, alloca and stack alignment modify the stack pointer by an
1013  // undetermined value, so we need to save it at the entry to the function
1014  // (i.e. use allocframe).
1015  if (HasAlloca || HasExtraAlign)
1016  return true;
1017 
1018  if (MFI.getStackSize() > 0) {
1019  // If FP-elimination is disabled, we have to use FP at this point.
1020  const TargetMachine &TM = MF.getTarget();
// NOTE(review): the 'if' conditions guarding the next two returns were on
// stripped lines (orig. 1021 and 1023) of this dump — confirm them against
// the full source before editing this region.
1022  return true;
1024  return true;
1025  }
1026 
// Functions that make calls (unless allocframe elimination is enabled) or
// that clobber LR also need the allocframe so LR can be restored.
1027  const auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1028  if ((MFI.hasCalls() && !enableAllocFrameElim(MF)) || HMFI.hasClobberLR())
1029  return true;
1030 
1031  return false;
1032 }
1033 
1038 };
1039 
1040 static const char *getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType,
1041  bool Stkchk = false) {
1042  const char * V4SpillToMemoryFunctions[] = {
1043  "__save_r16_through_r17",
1044  "__save_r16_through_r19",
1045  "__save_r16_through_r21",
1046  "__save_r16_through_r23",
1047  "__save_r16_through_r25",
1048  "__save_r16_through_r27" };
1049 
1050  const char * V4SpillToMemoryStkchkFunctions[] = {
1051  "__save_r16_through_r17_stkchk",
1052  "__save_r16_through_r19_stkchk",
1053  "__save_r16_through_r21_stkchk",
1054  "__save_r16_through_r23_stkchk",
1055  "__save_r16_through_r25_stkchk",
1056  "__save_r16_through_r27_stkchk" };
1057 
1058  const char * V4SpillFromMemoryFunctions[] = {
1059  "__restore_r16_through_r17_and_deallocframe",
1060  "__restore_r16_through_r19_and_deallocframe",
1061  "__restore_r16_through_r21_and_deallocframe",
1062  "__restore_r16_through_r23_and_deallocframe",
1063  "__restore_r16_through_r25_and_deallocframe",
1064  "__restore_r16_through_r27_and_deallocframe" };
1065 
1066  const char * V4SpillFromMemoryTailcallFunctions[] = {
1067  "__restore_r16_through_r17_and_deallocframe_before_tailcall",
1068  "__restore_r16_through_r19_and_deallocframe_before_tailcall",
1069  "__restore_r16_through_r21_and_deallocframe_before_tailcall",
1070  "__restore_r16_through_r23_and_deallocframe_before_tailcall",
1071  "__restore_r16_through_r25_and_deallocframe_before_tailcall",
1072  "__restore_r16_through_r27_and_deallocframe_before_tailcall"
1073  };
1074 
1075  const char **SpillFunc = nullptr;
1076 
1077  switch(SpillType) {
1078  case SK_ToMem:
1079  SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
1080  : V4SpillToMemoryFunctions;
1081  break;
1082  case SK_FromMem:
1083  SpillFunc = V4SpillFromMemoryFunctions;
1084  break;
1085  case SK_FromMemTailcall:
1086  SpillFunc = V4SpillFromMemoryTailcallFunctions;
1087  break;
1088  }
1089  assert(SpillFunc && "Unknown spill kind");
1090 
1091  // Spill all callee-saved registers up to the highest register used.
1092  switch (MaxReg) {
1093  case Hexagon::R17:
1094  return SpillFunc[0];
1095  case Hexagon::R19:
1096  return SpillFunc[1];
1097  case Hexagon::R21:
1098  return SpillFunc[2];
1099  case Hexagon::R23:
1100  return SpillFunc[3];
1101  case Hexagon::R25:
1102  return SpillFunc[4];
1103  case Hexagon::R27:
1104  return SpillFunc[5];
1105  default:
1106  llvm_unreachable("Unhandled maximum callee save register");
1107  }
1108  return nullptr;
1109 }
1110 
// getFrameIndexReference: computes the base register (SP, FP, or the aligned
// stack base AP) and the offset from it for frame index FI.
// NOTE(review): the first line of the signature (orig. 1111) was stripped
// from this dump; only the continuation line below is visible.
1112  int FI, unsigned &FrameReg) const {
1113  auto &MFI = MF.getFrameInfo();
1114  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1115 
1116  int Offset = MFI.getObjectOffset(FI);
1117  bool HasAlloca = MFI.hasVarSizedObjects();
1118  bool HasExtraAlign = HRI.needsStackRealignment(MF);
1119  bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOpt::None;
1120 
1121  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1122  unsigned FrameSize = MFI.getStackSize();
1123  unsigned SP = HRI.getStackRegister();
1124  unsigned FP = HRI.getFrameRegister();
1125  unsigned AP = HMFI.getStackAlignBasePhysReg();
1126  // It may happen that AP will be absent even HasAlloca && HasExtraAlign
1127  // is true. HasExtraAlign may be set because of vector spills, without
1128  // aligned locals or aligned outgoing function arguments. Since vector
1129  // spills will ultimately be "unaligned", it is safe to use FP as the
1130  // base register.
1131  // In fact, in such a scenario the stack is actually not required to be
1132  // aligned, although it may end up being aligned anyway, since this
1133  // particular case is not easily detectable. The alignment will be
1134  // unnecessary, but not incorrect.
1135  // Unfortunately there is no quick way to verify that the above is
1136  // indeed the case (and that it's not a result of an error), so just
1137  // assume that missing AP will be replaced by FP.
1138  // (A better fix would be to rematerialize AP from FP and always align
1139  // vector spills.)
1140  if (AP == 0)
1141  AP = FP;
1142 
1143  bool UseFP = false, UseAP = false; // Default: use SP (except at -O0).
1144  // Use FP at -O0, except when there are objects with extra alignment.
1145  // That additional alignment requirement may cause a pad to be inserted,
1146  // which will make it impossible to use FP to access objects located
1147  // past the pad.
1148  if (NoOpt && !HasExtraAlign)
1149  UseFP = true;
1150  if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) {
1151  // Fixed and preallocated objects will be located before any padding
1152  // so FP must be used to access them.
1153  UseFP |= (HasAlloca || HasExtraAlign);
1154  } else {
1155  if (HasAlloca) {
1156  if (HasExtraAlign)
1157  UseAP = true;
1158  else
1159  UseFP = true;
1160  }
1161  }
1162 
1163  // If FP was picked, then there had better be FP.
1164  bool HasFP = hasFP(MF);
1165  assert((HasFP || !UseFP) && "This function must have frame pointer");
1166 
1167  // Having FP implies allocframe. Allocframe will store extra 8 bytes:
1168  // FP/LR. If the base register is used to access an object across these
1169  // 8 bytes, then the offset will need to be adjusted by 8.
1170  //
1171  // After allocframe:
1172  // HexagonISelLowering adds 8 to ---+
1173  // the offsets of all stack-based |
1174  // arguments (*) |
1175  // |
1176  // getObjectOffset < 0 0 8 getObjectOffset >= 8
1177  // ------------------------+-----+------------------------> increasing
1178  // <local objects> |FP/LR| <input arguments> addresses
1179  // -----------------+------+-----+------------------------>
1180  // | |
1181  // SP/AP point --+ +-- FP points here (**)
1182  // somewhere on
1183  // this side of FP/LR
1184  //
1185  // (*) See LowerFormalArguments. The FP/LR is assumed to be present.
1186  // (**) *FP == old-FP. FP+0..7 are the bytes of FP/LR.
1187 
1188  // The lowering assumes that FP/LR is present, and so the offsets of
1189  // the formal arguments start at 8. If FP/LR is not there we need to
1190  // reduce the offset by 8.
1191  if (Offset > 0 && !HasFP)
1192  Offset -= 8;
1193 
1194  if (UseFP)
1195  FrameReg = FP;
1196  else if (UseAP)
1197  FrameReg = AP;
1198  else
1199  FrameReg = SP;
1200 
1201  // Calculate the actual offset in the instruction. If there is no FP
1202  // (in other words, no allocframe), then SP will not be adjusted (i.e.
1203  // there will be no SP -= FrameSize), so the frame size should not be
1204  // added to the calculated offset.
1205  int RealOffset = Offset;
1206  if (!UseFP && !UseAP)
1207  RealOffset = FrameSize+Offset;
1208  return RealOffset;
1209 }
1210 
// insertCSRSpillsInBlock: emits the callee-saved-register saves in MBB,
// either as a single call to a runtime save routine (when profitable) or as
// individual stores. Sets PrologueStubs when a save-routine call was used.
// Returns true (both paths below end in 'return true').
1211 bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
1212  const CSIVect &CSI, const HexagonRegisterInfo &HRI,
1213  bool &PrologueStubs) const {
1214  if (CSI.empty())
1215  return true;
1216 
// NOTE(review): the declaration of the insertion iterator 'MI' (orig. line
// 1217, stripped from this dump) belongs here; 'MI' is used below.
1218  PrologueStubs = false;
1219  MachineFunction &MF = *MBB.getParent();
1220  auto &HST = MF.getSubtarget<HexagonSubtarget>();
1221  auto &HII = *HST.getInstrInfo();
1222 
1223  if (useSpillFunction(MF, CSI)) {
1224  PrologueStubs = true;
1225  unsigned MaxReg = getMaxCalleeSavedReg(CSI, HRI);
1226  bool StkOvrFlowEnabled = EnableStackOVFSanitizer;
1227  const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem,
1228  StkOvrFlowEnabled);
1229  auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
1230  bool IsPIC = HTM.isPositionIndependent();
1231  bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;
1232 
1233  // Call spill function.
1234  DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
1235  unsigned SpillOpc;
// Pick the pseudo-opcode variant: stack-check x long-call x PIC.
1236  if (StkOvrFlowEnabled) {
1237  if (LongCalls)
1238  SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
1239  : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
1240  else
1241  SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
1242  : Hexagon::SAVE_REGISTERS_CALL_V4STK;
1243  } else {
1244  if (LongCalls)
1245  SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
1246  : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
1247  else
1248  SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
1249  : Hexagon::SAVE_REGISTERS_CALL_V4;
1250  }
1251 
1252  MachineInstr *SaveRegsCall =
1253  BuildMI(MBB, MI, DL, HII.get(SpillOpc))
1254  .addExternalSymbol(SpillFun);
1255 
1256  // Add callee-saved registers as use.
1257  addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
1258  // Add live in registers.
1259  for (unsigned I = 0; I < CSI.size(); ++I)
1260  MBB.addLiveIn(CSI[I].getReg());
1261  return true;
1262  }
1263 
// Fallback: emit an individual store for each callee-saved register.
1264  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1265  unsigned Reg = CSI[i].getReg();
1266  // Add live in registers. We treat eh_return callee saved register r0 - r3
1267  // specially. They are not really callee saved registers as they are not
1268  // supposed to be killed.
1269  bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
1270  int FI = CSI[i].getFrameIdx();
1271  const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
1272  HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI);
1273  if (IsKill)
1274  MBB.addLiveIn(Reg);
1275  }
1276  return true;
1277 }
1278 
// insertCSRRestoresInBlock: emits the callee-saved-register restores in MBB,
// either as a call to a runtime restore routine (which also deallocates the
// frame) or as individual loads. Returns false only when CSI is empty.
1279 bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
1280  const CSIVect &CSI, const HexagonRegisterInfo &HRI) const {
1281  if (CSI.empty())
1282  return false;
1283 
// NOTE(review): the declaration of the insertion iterator 'MI' (orig. line
// 1284, stripped from this dump) belongs here; 'MI' is used below.
1285  MachineFunction &MF = *MBB.getParent();
1286  auto &HST = MF.getSubtarget<HexagonSubtarget>();
1287  auto &HII = *HST.getInstrInfo();
1288 
1289  if (useRestoreFunction(MF, CSI)) {
// A block without a return is treated like a tail call for restore purposes.
1290  bool HasTC = hasTailCall(MBB) || !hasReturn(MBB);
1291  unsigned MaxR = getMaxCalleeSavedReg(CSI, HRI);
// NOTE(review): the computation of 'Kind' (orig. line 1292, stripped) —
// selecting SK_FromMem vs. SK_FromMemTailcall based on HasTC — belongs here.
1293  const char *RestoreFn = getSpillFunctionFor(MaxR, Kind);
1294  auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
1295  bool IsPIC = HTM.isPositionIndependent();
1296  bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;
1297 
1298  // Call spill function.
1299  DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc()
1300  : MBB.findDebugLoc(MBB.end());
1301  MachineInstr *DeallocCall = nullptr;
1302 
1303  if (HasTC) {
1304  unsigned RetOpc;
1305  if (LongCalls)
1306  RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
1307  : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
1308  else
1309  RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
1310  : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
1311  DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
1312  .addExternalSymbol(RestoreFn);
1313  } else {
1314  // The block has a return.
// NOTE(review): the declaration of 'It' (orig. line 1315, stripped) —
// the iterator positioned at the block's terminator — belongs here.
1316  assert(It->isReturn() && std::next(It) == MBB.end());
1317  unsigned RetOpc;
1318  if (LongCalls)
1319  RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
1320  : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
1321  else
1322  RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
1323  : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
1324  DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
1325  .addExternalSymbol(RestoreFn);
1326  // Transfer the function live-out registers.
1327  DeallocCall->copyImplicitOps(MF, *It);
1328  }
1329  addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
1330  return true;
1331  }
1332 
// Fallback: emit an individual load for each callee-saved register.
1333  for (unsigned i = 0; i < CSI.size(); ++i) {
1334  unsigned Reg = CSI[i].getReg();
1335  const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
1336  int FI = CSI[i].getFrameIdx();
1337  HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
1338  }
1339 
1340  return true;
1341 }
1342 
// eliminateCallFramePseudoInstr (body; the signature, orig. lines 1343-1345,
// was stripped from this dump): ADJCALLSTACKDOWN/ADJCALLSTACKUP need no
// stack adjustment on Hexagon, so the pseudo is simply erased.
1346  MachineInstr &MI = *I;
1347  unsigned Opc = MI.getOpcode();
1348  (void)Opc; // Silence compiler warning.
1349  assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
1350  "Cannot handle this call frame pseudo instruction");
// Return the iterator following the erased instruction.
1351  return MBB.erase(I);
1352 }
1353 
// processFunctionBeforeFrameFinalization: maps spill slots to fixed FP-based
// positions when the function has both alloca and over-aligned objects.
// NOTE(review): the first signature line (orig. 1354) was stripped from this
// dump; only the continuation line below is visible.
1355  MachineFunction &MF, RegScavenger *RS) const {
1356  // If this function has uses aligned stack and also has variable sized stack
1357  // objects, then we need to map all spill slots to fixed positions, so that
1358  // they can be accessed through FP. Otherwise they would have to be accessed
1359  // via AP, which may not be available at the particular place in the program.
1360  MachineFrameInfo &MFI = MF.getFrameInfo();
1361  bool HasAlloca = MFI.hasVarSizedObjects();
1362  bool NeedsAlign = (MFI.getMaxAlignment() > getStackAlignment());
1363 
1364  if (!HasAlloca || !NeedsAlign)
1365  return;
1366 
// Lay out every live spill slot in the local frame block at negative
// offsets, capping each slot's recorded alignment at 8.
1367  unsigned LFS = MFI.getLocalFrameSize();
1368  for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
1369  if (!MFI.isSpillSlotObjectIndex(i) || MFI.isDeadObjectIndex(i))
1370  continue;
1371  unsigned S = MFI.getObjectSize(i);
1372  // Reduce the alignment to at most 8. This will require unaligned vector
1373  // stores if they happen here.
1374  unsigned A = std::max(MFI.getObjectAlignment(i), 8U);
1375  MFI.setObjectAlignment(i, 8);
1376  LFS = alignTo(LFS+S, A);
1377  MFI.mapLocalFrameObject(i, -LFS);
1378  }
1379 
1380  MFI.setLocalFrameSize(LFS);
1381  unsigned A = MFI.getLocalFrameMaxAlign();
1382  assert(A <= 8 && "Unexpected local frame alignment");
1383  if (A == 0)
1384  MFI.setLocalFrameMaxAlign(8);
// NOTE(review): one statement (orig. line 1385) was stripped from this dump
// at this point — confirm against the full source.
1386 
1387  // Set the physical aligned-stack base address register.
1388  unsigned AP = 0;
1389  if (const MachineInstr *AI = getAlignaInstr(MF))
1390  AP = AI->getOperand(0).getReg();
1391  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1392  HMFI.setStackAlignBasePhysReg(AP);
1393 }
1394 
1395 /// Returns true if there are no caller-saved registers available in class RC.
// NOTE(review): the function-name signature line (orig. 1396) and the
// declaration of 'MRI' (orig. 1398, presumably MF.getRegInfo()) were
// stripped from this dump.
1397  const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) {
1399 
// A register counts as used if it or any register aliasing it is used.
1400  auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool {
1401  for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI)
1402  if (MRI.isPhysRegUsed(*AI))
1403  return true;
1404  return false;
1405  };
1406 
1407  // Check for an unused caller-saved register. Callee-saved registers
1408  // have become pristine by now.
1409  for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P)
1410  if (!IsUsed(*P))
1411  return false;
1412 
1413  // All caller-saved registers are used.
1414  return true;
1415 }
1416 
1417 #ifndef NDEBUG
1418 static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) {
1419  dbgs() << '{';
1420  for (int x = Regs.find_first(); x >= 0; x = Regs.find_next(x)) {
1421  unsigned R = x;
1422  dbgs() << ' ' << printReg(R, &TRI);
1423  }
1424  dbgs() << " }";
1425 }
1426 #endif
1427 
// assignCalleeSavedSpillSlots: normalizes the callee-saved register set and
// creates a fixed stack object for every register in it. Always returns true
// (the CSI list is rebuilt here, overriding the default assignment).
// NOTE(review): the first signature line (orig. 1428) was stripped from this
// dump; only the continuation line below is visible.
1429  const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
1430  LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
1431  MachineFrameInfo &MFI = MF.getFrameInfo();
1432  BitVector SRegs(Hexagon::NUM_TARGET_REGS);
1433 
1434  // Generate a set of unique, callee-saved registers (SRegs), where each
1435  // register in the set is maximal in terms of sub-/super-register relation,
1436  // i.e. for each R in SRegs, no proper super-register of R is also in SRegs.
1437 
1438  // (1) For each callee-saved register, add that register and all of its
1439  // sub-registers to SRegs.
1440  LLVM_DEBUG(dbgs() << "Initial CS registers: {");
1441  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1442  unsigned R = CSI[i].getReg();
1443  LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
1444  for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
1445  SRegs[*SR] = true;
1446  }
1447  LLVM_DEBUG(dbgs() << " }\n");
1448  LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI);
1449  dbgs() << "\n");
1450 
1451  // (2) For each reserved register, remove that register and all of its
1452  // sub- and super-registers from SRegs.
1453  BitVector Reserved = TRI->getReservedRegs(MF);
1454  for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
1455  unsigned R = x;
1456  for (MCSuperRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
1457  SRegs[*SR] = false;
1458  }
1459  LLVM_DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI);
1460  dbgs() << "\n");
1461  LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI);
1462  dbgs() << "\n");
1463 
1464  // (3) Collect all registers that have at least one sub-register in SRegs,
1465  // and also have no sub-registers that are reserved. These will be the can-
1466  // didates for saving as a whole instead of their individual sub-registers.
1467  // (Saving R17:16 instead of R16 is fine, but only if R17 was not reserved.)
1468  BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
1469  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1470  unsigned R = x;
1471  for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR)
1472  TmpSup[*SR] = true;
1473  }
1474  for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
1475  unsigned R = x;
1476  for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR) {
1477  if (!Reserved[*SR])
1478  continue;
1479  TmpSup[R] = false;
1480  break;
1481  }
1482  }
1483  LLVM_DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI);
1484  dbgs() << "\n");
1485 
1486  // (4) Include all super-registers found in (3) into SRegs.
1487  SRegs |= TmpSup;
1488  LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI);
1489  dbgs() << "\n");
1490 
1491  // (5) For each register R in SRegs, if any super-register of R is in SRegs,
1492  // remove R from SRegs.
1493  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1494  unsigned R = x;
1495  for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR) {
1496  if (!SRegs[*SR])
1497  continue;
1498  SRegs[R] = false;
1499  break;
1500  }
1501  }
1502  LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI);
1503  dbgs() << "\n");
1504 
1505  // Now, for each register that has a fixed stack slot, create the stack
1506  // object for it.
1507  CSI.clear();
1508 
// NOTE(review): a type alias for 'SpillSlot' (orig. line 1509) was stripped
// from this dump at this point — confirm against the full source.
1510 
1511  unsigned NumFixed;
1512  int MinOffset = 0; // CS offsets are negative.
1513  const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed);
1514  for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
1515  if (!SRegs[S->Reg])
1516  continue;
1517  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
1518  int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
1519  MinOffset = std::min(MinOffset, S->Offset);
1520  CSI.push_back(CalleeSavedInfo(S->Reg, FI));
1521  SRegs[S->Reg] = false;
1522  }
1523 
1524  // There can be some registers that don't have fixed slots. For example,
1525  // we need to store R0-R3 in functions with exception handling. For each
1526  // such register, create a non-fixed stack object.
1527  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1528  unsigned R = x;
1529  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
1530  unsigned Size = TRI->getSpillSize(*RC);
1531  int Off = MinOffset - Size;
1532  unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment());
1533  assert(isPowerOf2_32(Align));
// Round the (negative) offset down to the required alignment.
1534  Off &= -Align;
1535  int FI = MFI.CreateFixedSpillStackObject(Size, Off);
1536  MinOffset = std::min(MinOffset, Off);
1537  CSI.push_back(CalleeSavedInfo(R, FI));
1538  SRegs[R] = false;
1539  }
1540 
1541  LLVM_DEBUG({
1542  dbgs() << "CS information: {";
1543  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1544  int FI = CSI[i].getFrameIdx();
1545  int Off = MFI.getObjectOffset(FI);
1546  dbgs() << ' ' << printReg(CSI[i].getReg(), TRI) << ":fi#" << FI << ":sp";
1547  if (Off >= 0)
1548  dbgs() << '+';
1549  dbgs() << Off;
1550  }
1551  dbgs() << " }\n";
1552  });
1553 
1554 #ifndef NDEBUG
1555  // Verify that all registers were handled.
1556  bool MissedReg = false;
1557  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
1558  unsigned R = x;
1559  dbgs() << printReg(R, TRI) << ' ';
1560  MissedReg = true;
1561  }
1562  if (MissedReg)
1563  llvm_unreachable("...there are unhandled callee-saved registers!");
1564 #endif
1565 
1566  return true;
1567 }
1568 
// expandCopy: rewrites a COPY between two modifier (control) registers as a
// pair of copies through a fresh integer virtual register, since a direct
// modreg-to-modreg copy cannot be emitted. Returns false if either operand
// is not a modifier register (instruction left untouched).
// NOTE(review): one signature line (orig. 1570, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1569 bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B,
1571  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1572  MachineInstr *MI = &*It;
1573  DebugLoc DL = MI->getDebugLoc();
1574  unsigned DstR = MI->getOperand(0).getReg();
1575  unsigned SrcR = MI->getOperand(1).getReg();
1576  if (!Hexagon::ModRegsRegClass.contains(DstR) ||
1577  !Hexagon::ModRegsRegClass.contains(SrcR))
1578  return false;
1579 
// SrcR -> TmpR (int reg) -> DstR; TmpR is reported back via NewRegs.
1580  unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1581  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
1582  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
1583  .addReg(TmpR, RegState::Kill);
1584 
1585  NewRegs.push_back(TmpR);
1586  B.erase(It);
1587  return true;
1588 }
1589 
// expandStoreInt: expands a pseudo store of a predicate/modifier register to
// a stack slot into a transfer-to-intreg followed by a plain word store.
// Returns false when operand 0 is not a frame index.
// NOTE(review): one signature line (orig. 1591, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1590 bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
1592  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1593  MachineInstr *MI = &*It;
1594  if (!MI->getOperand(0).isFI())
1595  return false;
1596 
1597  DebugLoc DL = MI->getDebugLoc();
1598  unsigned Opc = MI->getOpcode();
1599  unsigned SrcR = MI->getOperand(2).getReg();
1600  bool IsKill = MI->getOperand(2).isKill();
1601  int FI = MI->getOperand(0).getIndex();
1602 
1603  // TmpR = C2_tfrpr SrcR if SrcR is a predicate register
1604  // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register
1605  unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1606  unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1607  : Hexagon::A2_tfrcrr;
1608  BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
1609  .addReg(SrcR, getKillRegState(IsKill));
1610 
1611  // S2_storeri_io FI, 0, TmpR
1612  BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1613  .addFrameIndex(FI)
1614  .addImm(0)
1615  .addReg(TmpR, RegState::Kill)
1616  .cloneMemRefs(*MI);
1617 
1618  NewRegs.push_back(TmpR)
;
1619  B.erase(It);
1620  return true;
1621 }
1622 
// expandLoadInt: expands a pseudo load of a predicate/modifier register from
// a stack slot into a plain word load followed by a transfer-from-intreg.
// Returns false when operand 1 is not a frame index.
// NOTE(review): one signature line (orig. 1624, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1623 bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
1625  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1626  MachineInstr *MI = &*It;
1627  if (!MI->getOperand(1).isFI())
1628  return false;
1629 
1630  DebugLoc DL = MI->getDebugLoc();
1631  unsigned Opc = MI->getOpcode();
1632  unsigned DstR = MI->getOperand(0).getReg();
1633  int FI = MI->getOperand(1).getIndex();
1634 
1635  // TmpR = L2_loadri_io FI, 0
1636  unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1637  BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1638  .addFrameIndex(FI)
1639  .addImm(0)
1640  .cloneMemRefs(*MI);
1641 
1642  // DstR = C2_tfrrp TmpR if DstR is a predicate register
1643  // DstR = A2_tfrrcr TmpR if DstR is a modifier register
1644  unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1645  : Hexagon::A2_tfrrcr;
1646  BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
1647  .addReg(TmpR, RegState::Kill);
1648 
1649  NewRegs.push_back(TmpR);
1650  B.erase(It);
1651  return true;
1652 }
1653 
// expandStoreVecPred: expands a pseudo store of an HVX predicate (Q) register
// by materializing it into a general vector register (via V6_vandqrt with a
// 0x01010101 mask) and storing that. Returns false when operand 0 is not a
// frame index.
// NOTE(review): one signature line (orig. 1655, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1654 bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
1656  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1657  MachineInstr *MI = &*It;
1658  if (!MI->getOperand(0).isFI())
1659  return false;
1660 
1661  DebugLoc DL = MI->getDebugLoc();
1662  unsigned SrcR = MI->getOperand(2).getReg();
1663  bool IsKill = MI->getOperand(2).isKill();
1664  int FI = MI->getOperand(0).getIndex();
1665  auto *RC = &Hexagon::HvxVRRegClass;
1666 
1667  // Insert transfer to general vector register.
1668  // TmpR0 = A2_tfrsi 0x01010101
1669  // TmpR1 = V6_vandqrt Qx, TmpR0
1670  // store FI, 0, TmpR1
1671  unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1672  unsigned TmpR1 = MRI.createVirtualRegister(RC);
1673 
1674  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1675  .addImm(0x01010101);
1676 
1677  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
1678  .addReg(SrcR, getKillRegState(IsKill))
1679  .addReg(TmpR0, RegState::Kill);
1680 
// The just-inserted vector spill is itself a pseudo; expand it immediately.
1681  auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
1682  HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI);
1683  expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
1684 
1685  NewRegs.push_back(TmpR0);
1686  NewRegs.push_back(TmpR1);
1687  B.erase(It);
1688  return true;
1689 }
1690 
// expandLoadVecPred: expands a pseudo load of an HVX predicate (Q) register
// by loading into a general vector register and converting back with
// V6_vandvrt (using the 0x01010101 mask). Returns false when operand 1 is
// not a frame index.
// NOTE(review): one signature line (orig. 1692, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1691 bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
1693  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1694  MachineInstr *MI = &*It;
1695  if (!MI->getOperand(1).isFI())
1696  return false;
1697 
1698  DebugLoc DL = MI->getDebugLoc();
1699  unsigned DstR = MI->getOperand(0).getReg();
1700  int FI = MI->getOperand(1).getIndex();
1701  auto *RC = &Hexagon::HvxVRRegClass;
1702 
1703  // TmpR0 = A2_tfrsi 0x01010101
1704  // TmpR1 = load FI, 0
1705  // DstR = V6_vandvrt TmpR1, TmpR0
1706  unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1707  unsigned TmpR1 = MRI.createVirtualRegister(RC);
1708 
1709  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1710  .addImm(0x01010101);
// The just-inserted vector reload is itself a pseudo; expand it immediately.
1711  MachineFunction &MF = *B.getParent();
1712  auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1713  HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
1714  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
1715 
1716  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
1717  .addReg(TmpR1, RegState::Kill)
1718  .addReg(TmpR0, RegState::Kill);
1719 
1720  NewRegs.push_back(TmpR0);
1721  NewRegs.push_back(TmpR1);
1722  B.erase(It);
1723  return true;
1724 }
1725 
// expandStoreVec2: expands a pseudo store of an HVX vector pair into one or
// two single-vector stores, skipping halves that are not live (i.e. are
// undefined) at the store. Returns false when operand 0 is not a frame index.
// NOTE(review): one signature line (orig. 1727, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1726 bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
1728  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1729  MachineFunction &MF = *B.getParent();
1730  auto &MFI = MF.getFrameInfo();
1731  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1732  MachineInstr *MI = &*It;
1733  if (!MI->getOperand(0).isFI())
1734  return false;
1735 
1736  // It is possible that the double vector being stored is only partially
1737  // defined. From the point of view of the liveness tracking, it is ok to
1738  // store it as a whole, but if we break it up we may end up storing a
1739  // register that is entirely undefined.
1740  LivePhysRegs LPR(HRI);
1741  LPR.addLiveIns(B);
// NOTE(review): the declaration of 'Clobbers' (orig. line 1742, stripped
// from this dump) belongs here; it is the scratch list reused below.
1743  for (auto R = B.begin(); R != It; ++R) {
1744  Clobbers.clear();
1745  LPR.stepForward(*R, Clobbers);
1746  }
1747 
1748  DebugLoc DL = MI->getDebugLoc();
1749  unsigned SrcR = MI->getOperand(2).getReg();
1750  unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
1751  unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
1752  bool IsKill = MI->getOperand(2).isKill();
1753  int FI = MI->getOperand(0).getIndex();
1754 
// Choose aligned vs. unaligned store opcodes per half, based on the slot's
// actual alignment (the high half sits at offset Size).
1755  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1756  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1757  unsigned HasAlign = MFI.getObjectAlignment(FI);
1758  unsigned StoreOpc;
1759 
1760  // Store low part.
1761  if (LPR.contains(SrcLo)) {
1762  StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1763  : Hexagon::V6_vS32Ub_ai;
1764  BuildMI(B, It, DL, HII.get(StoreOpc))
1765  .addFrameIndex(FI)
1766  .addImm(0)
1767  .addReg(SrcLo, getKillRegState(IsKill))
1768  .cloneMemRefs(*MI);
1769  }
1770 
1771  // Store high part.
1772  if (LPR.contains(SrcHi)) {
1773  StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
1774  : Hexagon::V6_vS32Ub_ai;
1775  BuildMI(B, It, DL, HII.get(StoreOpc))
1776  .addFrameIndex(FI)
1777  .addImm(Size)
1778  .addReg(SrcHi, getKillRegState(IsKill))
1779  .cloneMemRefs(*MI);
1780  }
1781 
1782  B.erase(It);
1783  return true;
1784 }
1785 
// expandLoadVec2: expands a pseudo load of an HVX vector pair into two
// single-vector loads (low half at offset 0, high half at offset Size).
// Returns false when operand 1 is not a frame index.
// NOTE(review): one signature line (orig. 1787, with the 'It'/'MRI'
// parameters) was stripped from this dump.
1786 bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
1788  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1789  MachineFunction &MF = *B.getParent();
1790  auto &MFI = MF.getFrameInfo();
1791  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1792  MachineInstr *MI = &*It;
1793  if (!MI->getOperand(1).isFI())
1794  return false;
1795 
1796  DebugLoc DL = MI->getDebugLoc();
1797  unsigned DstR = MI->getOperand(0).getReg();
1798  unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1799  unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1800  int FI = MI->getOperand(1).getIndex();
1801 
// Choose aligned vs. unaligned load opcodes per half, based on the slot's
// actual alignment (the high half sits at offset Size).
1802  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1803  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1804  unsigned HasAlign = MFI.getObjectAlignment(FI);
1805  unsigned LoadOpc;
1806 
1807  // Load low part.
1808  LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1809  : Hexagon::V6_vL32Ub_ai;
1810  BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1811  .addFrameIndex(FI)
1812  .addImm(0)
1813  .cloneMemRefs(*MI);
1814 
1815  // Load high part.
1816  LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
1817  : Hexagon::V6_vL32Ub_ai;
1818  BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1819  .addFrameIndex(FI)
1820  .addImm(Size)
1821  .cloneMemRefs(*MI);
1822 
1823  B.erase(It);
1824  return true;
1825 }
1826 
1827 bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
1829  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1830  MachineFunction &MF = *B.getParent();
1831  auto &MFI = MF.getFrameInfo();
1832  MachineInstr *MI = &*It;
1833  if (!MI->getOperand(0).isFI())
1834  return false;
1835 
1836  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1837  DebugLoc DL = MI->getDebugLoc();
1838  unsigned SrcR = MI->getOperand(2).getReg();
1839  bool IsKill = MI->getOperand(2).isKill();
1840  int FI = MI->getOperand(0).getIndex();
1841 
1842  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1843  unsigned HasAlign = MFI.getObjectAlignment(FI);
1844  unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1845  : Hexagon::V6_vS32Ub_ai;
1846  BuildMI(B, It, DL, HII.get(StoreOpc))
1847  .addFrameIndex(FI)
1848  .addImm(0)
1849  .addReg(SrcR, getKillRegState(IsKill))
1850  .cloneMemRefs(*MI);
1851 
1852  B.erase(It);
1853  return true;
1854 }
1855 
1856 bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
1858  const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1859  MachineFunction &MF = *B.getParent();
1860  auto &MFI = MF.getFrameInfo();
1861  MachineInstr *MI = &*It;
1862  if (!MI->getOperand(1).isFI())
1863  return false;
1864 
1865  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1866  DebugLoc DL = MI->getDebugLoc();
1867  unsigned DstR = MI->getOperand(0).getReg();
1868  int FI = MI->getOperand(1).getIndex();
1869 
1870  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1871  unsigned HasAlign = MFI.getObjectAlignment(FI);
1872  unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1873  : Hexagon::V6_vL32Ub_ai;
1874  BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
1875  .addFrameIndex(FI)
1876  .addImm(0)
1877  .cloneMemRefs(*MI);
1878 
1879  B.erase(It);
1880  return true;
1881 }
1882 
1883 bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
1884  SmallVectorImpl<unsigned> &NewRegs) const {
1885  auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
1886  MachineRegisterInfo &MRI = MF.getRegInfo();
1887  bool Changed = false;
1888 
1889  for (auto &B : MF) {
1890  // Traverse the basic block.
1892  for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
1893  MachineInstr *MI = &*I;
1894  NextI = std::next(I);
1895  unsigned Opc = MI->getOpcode();
1896 
1897  switch (Opc) {
1898  case TargetOpcode::COPY:
1899  Changed |= expandCopy(B, I, MRI, HII, NewRegs);
1900  break;
1901  case Hexagon::STriw_pred:
1902  case Hexagon::STriw_ctr:
1903  Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
1904  break;
1905  case Hexagon::LDriw_pred:
1906  case Hexagon::LDriw_ctr:
1907  Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
1908  break;
1909  case Hexagon::PS_vstorerq_ai:
1910  Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
1911  break;
1912  case Hexagon::PS_vloadrq_ai:
1913  Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
1914  break;
1915  case Hexagon::PS_vloadrw_ai:
1916  case Hexagon::PS_vloadrwu_ai:
1917  Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
1918  break;
1919  case Hexagon::PS_vstorerw_ai:
1920  case Hexagon::PS_vstorerwu_ai:
1921  Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
1922  break;
1923  }
1924  }
1925  }
1926 
1927  return Changed;
1928 }
1929 
1931  BitVector &SavedRegs,
1932  RegScavenger *RS) const {
1933  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1934 
1935  SavedRegs.resize(HRI.getNumRegs());
1936 
1937  // If we have a function containing __builtin_eh_return we want to spill and
1938  // restore all callee saved registers. Pretend that they are used.
1940  for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
1941  SavedRegs.set(*R);
1942 
1943  // Replace predicate register pseudo spill code.
1944  SmallVector<unsigned,8> NewRegs;
1945  expandSpillMacros(MF, NewRegs);
1946  if (OptimizeSpillSlots && !isOptNone(MF))
1947  optimizeSpillSlots(MF, NewRegs);
1948 
1949  // We need to reserve a spill slot if scavenging could potentially require
1950  // spilling a scavenged register.
1951  if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
1952  MachineFrameInfo &MFI = MF.getFrameInfo();
1953  MachineRegisterInfo &MRI = MF.getRegInfo();
1955  // Reserve an int register in any case, because it could be used to hold
1956  // the stack offset in case it does not fit into a spill instruction.
1957  SpillRCs.insert(&Hexagon::IntRegsRegClass);
1958 
1959  for (unsigned VR : NewRegs)
1960  SpillRCs.insert(MRI.getRegClass(VR));
1961 
1962  for (auto *RC : SpillRCs) {
1963  if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
1964  continue;
1965  unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
1966  unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
1967  for (unsigned i = 0; i < Num; i++) {
1968  int NewFI = MFI.CreateSpillStackObject(S, A);
1969  RS->addScavengingFrameIndex(NewFI);
1970  }
1971  }
1972  }
1973 
1974  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1975 }
1976 
1977 unsigned HexagonFrameLowering::findPhysReg(MachineFunction &MF,
1981  const TargetRegisterClass *RC) const {
1982  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1983  auto &MRI = MF.getRegInfo();
1984 
1985  auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool {
1986  auto F = DeadMap.find({Reg,0});
1987  if (F == DeadMap.end())
1988  return false;
1989  for (auto &DR : F->second)
1990  if (DR.contains(FIR))
1991  return true;
1992  return false;
1993  };
1994 
1995  for (unsigned Reg : RC->getRawAllocationOrder(MF)) {
1996  bool Dead = true;
1997  for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) {
1998  if (isDead(R.Reg))
1999  continue;
2000  Dead = false;
2001  break;
2002  }
2003  if (Dead)
2004  return Reg;
2005  }
2006  return 0;
2007 }
2008 
2009 void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
2011  auto &HST = MF.getSubtarget<HexagonSubtarget>();
2012  auto &HII = *HST.getInstrInfo();
2013  auto &HRI = *HST.getRegisterInfo();
2014  auto &MRI = MF.getRegInfo();
2015  HexagonBlockRanges HBR(MF);
2016 
2017  using BlockIndexMap =
2018  std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
2019  using BlockRangeMap =
2020  std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
2021  using IndexType = HexagonBlockRanges::IndexType;
2022 
2023  struct SlotInfo {
2024  BlockRangeMap Map;
2025  unsigned Size = 0;
2026  const TargetRegisterClass *RC = nullptr;
2027 
2028  SlotInfo() = default;
2029  };
2030 
2031  BlockIndexMap BlockIndexes;
2032  SmallSet<int,4> BadFIs;
2033  std::map<int,SlotInfo> FIRangeMap;
2034 
2035  // Accumulate register classes: get a common class for a pre-existing
2036  // class HaveRC and a new class NewRC. Return nullptr if a common class
2037  // cannot be found, otherwise return the resulting class. If HaveRC is
2038  // nullptr, assume that it is still unset.
2039  auto getCommonRC =
2040  [](const TargetRegisterClass *HaveRC,
2041  const TargetRegisterClass *NewRC) -> const TargetRegisterClass * {
2042  if (HaveRC == nullptr || HaveRC == NewRC)
2043  return NewRC;
2044  // Different classes, both non-null. Pick the more general one.
2045  if (HaveRC->hasSubClassEq(NewRC))
2046  return HaveRC;
2047  if (NewRC->hasSubClassEq(HaveRC))
2048  return NewRC;
2049  return nullptr;
2050  };
2051 
2052  // Scan all blocks in the function. Check all occurrences of frame indexes,
2053  // and collect relevant information.
2054  for (auto &B : MF) {
2055  std::map<int,IndexType> LastStore, LastLoad;
2056  // Emplace appears not to be supported in gcc 4.7.2-4.
2057  //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B));
2058  auto P = BlockIndexes.insert(
2059  std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
2060  auto &IndexMap = P.first->second;
2061  LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
2062  << IndexMap << '\n');
2063 
2064  for (auto &In : B) {
2065  int LFI, SFI;
2066  bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2067  bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2068  if (Load && Store) {
2069  // If it's both a load and a store, then we won't handle it.
2070  BadFIs.insert(LFI);
2071  BadFIs.insert(SFI);
2072  continue;
2073  }
2074  // Check for register classes of the register used as the source for
2075  // the store, and the register used as the destination for the load.
2076  // Also, only accept base+imm_offset addressing modes. Other addressing
2077  // modes can have side-effects (post-increments, etc.). For stack
2078  // slots they are very unlikely, so there is not much loss due to
2079  // this restriction.
2080  if (Load || Store) {
2081  int TFI = Load ? LFI : SFI;
2082  unsigned AM = HII.getAddrMode(In);
2083  SlotInfo &SI = FIRangeMap[TFI];
2084  bool Bad = (AM != HexagonII::BaseImmOffset);
2085  if (!Bad) {
2086  // If the addressing mode is ok, check the register class.
2087  unsigned OpNum = Load ? 0 : 2;
2088  auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
2089  RC = getCommonRC(SI.RC, RC);
2090  if (RC == nullptr)
2091  Bad = true;
2092  else
2093  SI.RC = RC;
2094  }
2095  if (!Bad) {
2096  // Check sizes.
2097  unsigned S = HII.getMemAccessSize(In);
2098  if (SI.Size != 0 && SI.Size != S)
2099  Bad = true;
2100  else
2101  SI.Size = S;
2102  }
2103  if (!Bad) {
2104  for (auto *Mo : In.memoperands()) {
2105  if (!Mo->isVolatile())
2106  continue;
2107  Bad = true;
2108  break;
2109  }
2110  }
2111  if (Bad)
2112  BadFIs.insert(TFI);
2113  }
2114 
2115  // Locate uses of frame indices.
2116  for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
2117  const MachineOperand &Op = In.getOperand(i);
2118  if (!Op.isFI())
2119  continue;
2120  int FI = Op.getIndex();
2121  // Make sure that the following operand is an immediate and that
2122  // it is 0. This is the offset in the stack object.
2123  if (i+1 >= n || !In.getOperand(i+1).isImm() ||
2124  In.getOperand(i+1).getImm() != 0)
2125  BadFIs.insert(FI);
2126  if (BadFIs.count(FI))
2127  continue;
2128 
2129  IndexType Index = IndexMap.getIndex(&In);
2130  if (Load) {
2131  if (LastStore[FI] == IndexType::None)
2132  LastStore[FI] = IndexType::Entry;
2133  LastLoad[FI] = Index;
2134  } else if (Store) {
2135  HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2136  if (LastStore[FI] != IndexType::None)
2137  RL.add(LastStore[FI], LastLoad[FI], false, false);
2138  else if (LastLoad[FI] != IndexType::None)
2139  RL.add(IndexType::Entry, LastLoad[FI], false, false);
2140  LastLoad[FI] = IndexType::None;
2141  LastStore[FI] = Index;
2142  } else {
2143  BadFIs.insert(FI);
2144  }
2145  }
2146  }
2147 
2148  for (auto &I : LastLoad) {
2149  IndexType LL = I.second;
2150  if (LL == IndexType::None)
2151  continue;
2152  auto &RL = FIRangeMap[I.first].Map[&B];
2153  IndexType &LS = LastStore[I.first];
2154  if (LS != IndexType::None)
2155  RL.add(LS, LL, false, false);
2156  else
2157  RL.add(IndexType::Entry, LL, false, false);
2158  LS = IndexType::None;
2159  }
2160  for (auto &I : LastStore) {
2161  IndexType LS = I.second;
2162  if (LS == IndexType::None)
2163  continue;
2164  auto &RL = FIRangeMap[I.first].Map[&B];
2165  RL.add(LS, IndexType::None, false, false);
2166  }
2167  }
2168 
2169  LLVM_DEBUG({
2170  for (auto &P : FIRangeMap) {
2171  dbgs() << "fi#" << P.first;
2172  if (BadFIs.count(P.first))
2173  dbgs() << " (bad)";
2174  dbgs() << " RC: ";
2175  if (P.second.RC != nullptr)
2176  dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
2177  else
2178  dbgs() << "<null>\n";
2179  for (auto &R : P.second.Map)
2180  dbgs() << " " << printMBBReference(*R.first) << " { " << R.second
2181  << "}\n";
2182  }
2183  });
2184 
2185  // When a slot is loaded from in a block without being stored to in the
2186  // same block, it is live-on-entry to this block. To avoid CFG analysis,
2187  // consider this slot to be live-on-exit from all blocks.
2188  SmallSet<int,4> LoxFIs;
2189 
2190  std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2191 
2192  for (auto &P : FIRangeMap) {
2193  // P = pair(FI, map: BB->RangeList)
2194  if (BadFIs.count(P.first))
2195  continue;
2196  for (auto &B : MF) {
2197  auto F = P.second.Map.find(&B);
2198  // F = pair(BB, RangeList)
2199  if (F == P.second.Map.end() || F->second.empty())
2200  continue;
2201  HexagonBlockRanges::IndexRange &IR = F->second.front();
2202  if (IR.start() == IndexType::Entry)
2203  LoxFIs.insert(P.first);
2204  BlockFIMap[&B].push_back(P.first);
2205  }
2206  }
2207 
2208  LLVM_DEBUG({
2209  dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
2210  for (auto &P : BlockFIMap) {
2211  auto &FIs = P.second;
2212  if (FIs.empty())
2213  continue;
2214  dbgs() << " " << printMBBReference(*P.first) << ": {";
2215  for (auto I : FIs) {
2216  dbgs() << " fi#" << I;
2217  if (LoxFIs.count(I))
2218  dbgs() << '*';
2219  }
2220  dbgs() << " }\n";
2221  }
2222  });
2223 
2224 #ifndef NDEBUG
2225  bool HasOptLimit = SpillOptMax.getPosition();
2226 #endif
2227 
2228  // eliminate loads, when all loads eliminated, eliminate all stores.
2229  for (auto &B : MF) {
2230  auto F = BlockIndexes.find(&B);
2231  assert(F != BlockIndexes.end());
2232  HexagonBlockRanges::InstrIndexMap &IM = F->second;
2235  LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
2236  << HexagonBlockRanges::PrintRangeMap(DM, HRI));
2237 
2238  for (auto FI : BlockFIMap[&B]) {
2239  if (BadFIs.count(FI))
2240  continue;
2241  LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');
2242  HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2243  for (auto &Range : RL) {
2244  LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');
2245  if (!IndexType::isInstr(Range.start()) ||
2246  !IndexType::isInstr(Range.end()))
2247  continue;
2248  MachineInstr &SI = *IM.getInstr(Range.start());
2249  MachineInstr &EI = *IM.getInstr(Range.end());
2250  assert(SI.mayStore() && "Unexpected start instruction");
2251  assert(EI.mayLoad() && "Unexpected end instruction");
2252  MachineOperand &SrcOp = SI.getOperand(2);
2253 
2254  HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
2255  SrcOp.getSubReg() };
2256  auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
2257  // The this-> is needed to unconfuse MSVC.
2258  unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
2259  LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
2260  << '\n');
2261  if (FoundR == 0)
2262  continue;
2263 #ifndef NDEBUG
2264  if (HasOptLimit) {
2265  if (SpillOptCount >= SpillOptMax)
2266  return;
2267  SpillOptCount++;
2268  }
2269 #endif
2270 
2271  // Generate the copy-in: "FoundR = COPY SrcR" at the store location.
2272  MachineBasicBlock::iterator StartIt = SI.getIterator(), NextIt;
2273  MachineInstr *CopyIn = nullptr;
2274  if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
2275  const DebugLoc &DL = SI.getDebugLoc();
2276  CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
2277  .add(SrcOp);
2278  }
2279 
2280  ++StartIt;
2281  // Check if this is a last store and the FI is live-on-exit.
2282  if (LoxFIs.count(FI) && (&Range == &RL.back())) {
2283  // Update store's source register.
2284  if (unsigned SR = SrcOp.getSubReg())
2285  SrcOp.setReg(HRI.getSubReg(FoundR, SR));
2286  else
2287  SrcOp.setReg(FoundR);
2288  SrcOp.setSubReg(0);
2289  // We are keeping this register live.
2290  SrcOp.setIsKill(false);
2291  } else {
2292  B.erase(&SI);
2293  IM.replaceInstr(&SI, CopyIn);
2294  }
2295 
2296  auto EndIt = std::next(EI.getIterator());
2297  for (auto It = StartIt; It != EndIt; It = NextIt) {
2298  MachineInstr &MI = *It;
2299  NextIt = std::next(It);
2300  int TFI;
2301  if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
2302  continue;
2303  unsigned DstR = MI.getOperand(0).getReg();
2304  assert(MI.getOperand(0).getSubReg() == 0);
2305  MachineInstr *CopyOut = nullptr;
2306  if (DstR != FoundR) {
2307  DebugLoc DL = MI.getDebugLoc();
2308  unsigned MemSize = HII.getMemAccessSize(MI);
2309  assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
2310  unsigned CopyOpc = TargetOpcode::COPY;
2311  if (HII.isSignExtendingLoad(MI))
2312  CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2313  else if (HII.isZeroExtendingLoad(MI))
2314  CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2315  CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
2316  .addReg(FoundR, getKillRegState(&MI == &EI));
2317  }
2318  IM.replaceInstr(&MI, CopyOut);
2319  B.erase(It);
2320  }
2321 
2322  // Update the dead map.
2323  HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
2324  for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI))
2325  DM[RR].subtract(Range);
2326  } // for Range in range list
2327  }
2328  }
2329 }
2330 
2331 void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2332  const HexagonInstrInfo &HII, unsigned SP, unsigned CF) const {
2333  MachineBasicBlock &MB = *AI->getParent();
2334  DebugLoc DL = AI->getDebugLoc();
2335  unsigned A = AI->getOperand(2).getImm();
2336 
2337  // Have
2338  // Rd = alloca Rs, #A
2339  //
2340  // If Rs and Rd are different registers, use this sequence:
2341  // Rd = sub(r29, Rs)
2342  // r29 = sub(r29, Rs)
2343  // Rd = and(Rd, #-A) ; if necessary
2344  // r29 = and(r29, #-A) ; if necessary
2345  // Rd = add(Rd, #CF) ; CF size aligned to at most A
2346  // otherwise, do
2347  // Rd = sub(r29, Rs)
2348  // Rd = and(Rd, #-A) ; if necessary
2349  // r29 = Rd
2350  // Rd = add(Rd, #CF) ; CF size aligned to at most A
2351 
2352  MachineOperand &RdOp = AI->getOperand(0);
2353  MachineOperand &RsOp = AI->getOperand(1);
2354  unsigned Rd = RdOp.getReg(), Rs = RsOp.getReg();
2355 
2356  // Rd = sub(r29, Rs)
2357  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
2358  .addReg(SP)
2359  .addReg(Rs);
2360  if (Rs != Rd) {
2361  // r29 = sub(r29, Rs)
2362  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
2363  .addReg(SP)
2364  .addReg(Rs);
2365  }
2366  if (A > 8) {
2367  // Rd = and(Rd, #-A)
2368  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
2369  .addReg(Rd)
2370  .addImm(-int64_t(A));
2371  if (Rs != Rd)
2372  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
2373  .addReg(SP)
2374  .addImm(-int64_t(A));
2375  }
2376  if (Rs == Rd) {
2377  // r29 = Rd
2378  BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
2379  .addReg(Rd);
2380  }
2381  if (CF > 0) {
2382  // Rd = add(Rd, #CF)
2383  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
2384  .addReg(Rd)
2385  .addImm(CF);
2386  }
2387 }
2388 
2390  const MachineFrameInfo &MFI = MF.getFrameInfo();
2391  if (!MFI.hasVarSizedObjects())
2392  return false;
2393  unsigned MaxA = MFI.getMaxAlignment();
2394  if (MaxA <= getStackAlignment())
2395  return false;
2396  return true;
2397 }
2398 
2400  const MachineFunction &MF) const {
2401  for (auto &B : MF)
2402  for (auto &I : B)
2403  if (I.getOpcode() == Hexagon::PS_aligna)
2404  return &I;
2405  return nullptr;
2406 }
2407 
2408 /// Adds all callee-saved registers as implicit uses or defs to the
2409 /// instruction.
2410 void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
2411  const CSIVect &CSI, bool IsDef, bool IsKill) const {
2412  // Add the callee-saved registers as implicit uses.
2413  for (auto &R : CSI)
2414  MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill));
2415 }
2416 
2417 /// Determine whether the callee-saved register saves and restores should
2418 /// be generated via inline code. If this function returns "true", inline
2419 /// code will be generated. If this function returns "false", additional
2420 /// checks are performed, which may still lead to the inline code.
2421 bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
2422  const CSIVect &CSI) const {
2424  return true;
2425  if (!hasFP(MF))
2426  return true;
2427  if (!isOptSize(MF) && !isMinSize(MF))
2429  return true;
2430 
2431  // Check if CSI only has double registers, and if the registers form
2432  // a contiguous block starting from D8.
2433  BitVector Regs(Hexagon::NUM_TARGET_REGS);
2434  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
2435  unsigned R = CSI[i].getReg();
2436  if (!Hexagon::DoubleRegsRegClass.contains(R))
2437  return true;
2438  Regs[R] = true;
2439  }
2440  int F = Regs.find_first();
2441  if (F != Hexagon::D8)
2442  return true;
2443  while (F >= 0) {
2444  int N = Regs.find_next(F);
2445  if (N >= 0 && N != F+1)
2446  return true;
2447  F = N;
2448  }
2449 
2450  return false;
2451 }
2452 
2453 bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,
2454  const CSIVect &CSI) const {
2455  if (shouldInlineCSR(MF, CSI))
2456  return false;
2457  unsigned NumCSI = CSI.size();
2458  if (NumCSI <= 1)
2459  return false;
2460 
2461  unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs
2463  return Threshold < NumCSI;
2464 }
2465 
2466 bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
2467  const CSIVect &CSI) const {
2468  if (shouldInlineCSR(MF, CSI))
2469  return false;
2470  // The restore functions do a bit more than just restoring registers.
2471  // The non-returning versions will go back directly to the caller's
2472  // caller, others will clean up the stack frame in preparation for
2473  // a tail call. Using them can still save code size even if only one
2474  // register is getting restores. Make the decision based on -Oz:
2475  // using -Os will use inline restore for a single register.
2476  if (isMinSize(MF))
2477  return true;
2478  unsigned NumCSI = CSI.size();
2479  if (NumCSI <= 1)
2480  return false;
2481 
2482  unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs-1
2484  return Threshold < NumCSI;
2485 }
2486 
2487 bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
2488  unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF);
2489  auto &HST = MF.getSubtarget<HexagonSubtarget>();
2490  // A fairly simplistic guess as to whether a potential load/store to a
2491  // stack location could require an extra register.
2492  if (HST.useHVXOps() && StackSize > 256)
2493  return true;
2494 
2495  // Check if the function has store-immediate instructions that access
2496  // the stack. Since the offset field is not extendable, if the stack
2497  // size exceeds the offset limit (6 bits, shifted), the stores will
2498  // require a new base register.
2499  bool HasImmStack = false;
2500  unsigned MinLS = ~0u; // Log_2 of the memory access size.
2501 
2502  for (const MachineBasicBlock &B : MF) {
2503  for (const MachineInstr &MI : B) {
2504  unsigned LS = 0;
2505  switch (MI.getOpcode()) {
2506  case Hexagon::S4_storeirit_io:
2507  case Hexagon::S4_storeirif_io:
2508  case Hexagon::S4_storeiri_io:
2509  ++LS;
2511  case Hexagon::S4_storeirht_io:
2512  case Hexagon::S4_storeirhf_io:
2513  case Hexagon::S4_storeirh_io:
2514  ++LS;
2516  case Hexagon::S4_storeirbt_io:
2517  case Hexagon::S4_storeirbf_io:
2518  case Hexagon::S4_storeirb_io:
2519  if (MI.getOperand(0).isFI())
2520  HasImmStack = true;
2521  MinLS = std::min(MinLS, LS);
2522  break;
2523  }
2524  }
2525  }
2526 
2527  if (HasImmStack)
2528  return !isUInt<6>(StackSize >> MinLS);
2529 
2530  return false;
2531 }
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:372
uint64_t CallInst * C
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
BitVector & set()
Definition: BitVector.h:398
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:633
instr_iterator instr_end()
bool hasDebugInfo() const
Returns true if valid debug info is present.
void mapLocalFrameObject(int ObjectIndex, int64_t Offset)
Map a frame index into the local object block.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static cl::opt< int > SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden, cl::desc("Specify Os spill func threshold"), cl::init(1), cl::ZeroOrMore)
This class represents lattice values for constants.
Definition: AllocatorList.h:24
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:78
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
#define LLVM_FALLTHROUGH
Definition: Compiler.h:86
INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi", "Hexagon call frame information", false, false) FunctionPass *llvm
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID&#39;s allocated.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
ArrayRef< MCPhysReg > getRawAllocationOrder(const MachineFunction &MF) const
Returns the preferred order for allocating registers from this register class in MF.
MachineBasicBlock * findNearestCommonDominator(MachineBasicBlock *A, MachineBasicBlock *B)
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
static Optional< MachineBasicBlock::iterator > findCFILocation(MachineBasicBlock &B)
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:164
unsigned getReg() const
getReg - Returns the register number.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset)
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
Definition: MCDwarf.h:487
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI)
unsigned getSubReg() const
static RegisterSet expandToSubRegs(RegisterRef R, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI)
static const MCPhysReg VRegs[32]
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:321
Hexagon target-specific information for each MachineFunction.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
F(f)
MachineModuleInfo & getMMI() const
static const char * getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType, bool Stkchk=false)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:685
bool contains(MCPhysReg Reg) const
Returns true if register Reg is contained in the set.
Definition: LivePhysRegs.h:107
#define R2(n)
static unsigned SpillOptCount
static cl::opt< int > SpillFuncThreshold("spill-func-threshold", cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6), cl::ZeroOrMore)
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:459
static cl::opt< unsigned > SpillOptMax("spill-opt-max", cl::Hidden, cl::init(std::numeric_limits< unsigned >::max()))
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
return AArch64::GPR64RegClass contains(Reg)
iterator_range< succ_iterator > successors()
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
std::map< RegisterRef, RangeList > RegToRangeMap
bool isEHReturnCalleeSaveReg(unsigned Reg) const
unsigned getSpillAlignment(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class...
void add(IndexType Start, IndexType End, bool Fixed, bool TiedEnd)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
void setLocalFrameSize(int64_t sz)
Set the size of the local object blob.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void setUseLocalStackAllocationBlock(bool v)
setUseLocalStackAllocationBlock - Set whether the local allocation blob should be allocated together ...
MCSuperRegIterator enumerates all super-registers of Reg.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
int find_first() const
find_first - Returns the index of the first set bit, -1 if none of the bits are set.
Definition: BitVector.h:332
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
static cl::opt< bool > EnableStackOVFSanitizer("enable-stackovf-sanitizer", cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), cl::init(false), cl::ZeroOrMore)
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int find_next(unsigned Prev) const
find_next - Returns the index of the next set bit following the "Prev" bit.
Definition: BitVector.h:340
static cl::opt< bool > EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("Enable stack frame shrink wrapping"))
This file contains the simple types necessary to represent the attributes associated with functions a...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
static unsigned getMax32BitSubRegister(unsigned Reg, const TargetRegisterInfo &TRI, bool hireg=true)
Map a register pair Reg to the subregister that has the greater "number", i.e.
LLVM_NODISCARD unsigned addFrameInst(const MCCFIInstruction &Inst)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:406
const MachineInstr * getAlignaInstr(const MachineFunction &MF) const
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
static bool isMinSize(const MachineFunction &MF)
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
const MCContext & getContext() const
static bool hasTailCall(const MachineBasicBlock &MBB)
Returns true if MBB has a machine instructions that indicates a tail call in the block.
void setLocalFrameMaxAlign(unsigned Align)
Required alignment of the local object blob, which is the strictest alignment of any object in it...
int getObjectIndexEnd() const
Return one past the maximum frame object index.
iterator getLastNonDebugInstr()
Returns an iterator to the last non-debug instruction in the basic block, or end().
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
MachineBasicBlock * findNearestCommonDominator(MachineBasicBlock *A, MachineBasicBlock *B)
findNearestCommonDominator - Find nearest common dominator basic block for basic block A and B...
const MCPhysReg * getCallerSavedRegs(const MachineFunction *MF, const TargetRegisterClass *RC) const
static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, const HexagonRegisterInfo &HRI)
Checks if the basic block contains any instruction that needs a stack frame to be already in place...
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
Perform most of the PEI work here:
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getKillRegState(bool B)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static cl::opt< bool > EliminateFramePointer("hexagon-fp-elim", cl::init(true), cl::Hidden, cl::desc("Refrain from using FP whenever possible"))
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:623
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
Definition: MathExtras.h:610
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:820
#define P(N)
static bool needToReserveScavengingSpillSlots(MachineFunction &MF, const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC)
Returns true if there are no caller-saved registers available in class RC.
static bool isOptSize(const MachineFunction &MF)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:419
static cl::opt< unsigned > NumberScavengerSlots("number-scavenger-slots", cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2), cl::ZeroOrMore)
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
This file declares the machine register scavenger class.
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
MCSymbol * createTempSymbol(bool CanBeUnnamed=true)
Create and return a new assembler temporary symbol with a unique but unspecified name.
Definition: MCContext.cpp:217
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned const MachineRegisterInfo * MRI
static MCCFIInstruction createDefCfa(MCSymbol *L, unsigned Register, int Offset)
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it...
Definition: MCDwarf.h:460
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:429
static cl::opt< bool > DisableDeallocRet("disable-hexagon-dealloc-ret", cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"))
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
void setStackSize(uint64_t Size)
Set the size of the stack.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE and DBG_LABEL instructions...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool dominates(const MachineDomTreeNode *A, const MachineDomTreeNode *B) const
MCRegAliasIterator enumerates all registers aliasing Reg.
unsigned getMaxAlignment() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
bool optForSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:598
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS=nullptr) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
self_iterator getIterator()
Definition: ilist_node.h:82
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1208
int CreateSpillStackObject(uint64_t Size, unsigned Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const MachineBasicBlock & front() const
MCSubRegIterator enumerates all sub-registers of Reg.
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
size_t size() const
Definition: SmallVector.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
IndexType getIndex(MachineInstr *MI) const
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
static MachineInstr * getReturn(MachineBasicBlock &MBB)
Returns the "return" instruction from this block, or nullptr if there isn&#39;t any.
static bool hasReturn(const MachineBasicBlock &MBB)
Returns true if MBB contains an instruction that returns.
The memory access writes data.
static cl::opt< unsigned > ShrinkLimit("shrink-frame-limit", cl::init(std::numeric_limits< unsigned >::max()), cl::Hidden, cl::ZeroOrMore, cl::desc("Max count of stack frame shrink-wraps"))
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call...
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
bool dominates(const MachineDomTreeNode *A, const MachineDomTreeNode *B) const
unsigned estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
int getFrameIndexReference(const MachineFunction &MF, int FI, unsigned &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
int64_t getImm() const
const Function & getFunction() const
Return the LLVM function that this machine code represents.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
bool needsUnwindTableEntry() const
True if this function needs an unwind table.
Definition: Function.h:573
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isPhysRegUsed(unsigned PhysReg) const
Return true if the specified register is modified or read in this function.
static cl::opt< bool > EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), cl::init(false), cl::ZeroOrMore)
virtual BitVector getReservedRegs(const MachineFunction &MF) const =0
Returns a bitset indexed by physical register number indicating if a register is a special register t...
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
FunctionPass * createHexagonCallFrameInformation()
static cl::opt< unsigned > Threshold("loop-unswitch-threshold", cl::desc("Max loop size to unswitch"), cl::init(100), cl::Hidden)
bool runOnMachineFunction(MachineFunction &F) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
MachineFunctionProperties & set(Property P)
Representation of each machine instruction.
Definition: MachineInstr.h:64
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
RegToRangeMap computeLiveMap(InstrIndexMap &IndexMap)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
static bool isRestoreCall(unsigned Opc)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:49
static bool enableAllocFrameElim(const MachineFunction &MF)
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
bool isPositionIndependent() const
static cl::opt< bool > OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, cl::init(true), cl::desc("Optimize spill slots"))
TargetOptions Options
Definition: TargetMachine.h:97
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
void setMaxCallFrameSize(unsigned S)
void insertCFIInstructions(MachineFunction &MF) const
unsigned getLocalFrameMaxAlign() const
Return the required alignment of the local object blob.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:595
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
RegToRangeMap computeDeadMap(InstrIndexMap &IndexMap, RegToRangeMap &LiveMap)
uint32_t Size
Definition: Profile.cpp:47
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const TargetRegisterClass * getMinimalPhysRegClass(unsigned Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register cl...
const unsigned Kind
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:807
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool enableCalleeSaveSkip(const MachineFunction &MF) const override
Returns true if the target can safely skip saving callee-saved registers for noreturn nounwind functi...
const HexagonInstrInfo * getInstrInfo() const override
rpo Deduce function attributes in RPO
A vector that has set insertion semantics.
Definition: SetVector.h:41
static bool isOptNone(const MachineFunction &MF)
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:59
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register...
IRTranslator LLVM IR MI
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
Definition: PassRegistry.h:39
void stepForward(const MachineInstr &MI, SmallVectorImpl< std::pair< MCPhysReg, const MachineOperand *>> &Clobbers)
Simulates liveness when stepping forward over an instruction(bundle).
void initializeHexagonCallFrameInformationPass(PassRegistry &)
#define LLVM_DEBUG(X)
Definition: Debug.h:123
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
void setObjectAlignment(int ObjectIdx, unsigned Align)
setObjectAlignment - Change the alignment of the specified stack object.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects...
bool needsAligna(const MachineFunction &MF) const
static unsigned getMaxCalleeSavedReg(const std::vector< CalleeSavedInfo > &CSI, const TargetRegisterInfo &TRI)
Returns the callee saved register with the largest id in the vector.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Statically lint checks LLVM IR
Definition: Lint.cpp:193
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
Properties which a MachineFunction may have at a given point in time.
This class contains meta information specific to a module.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:165